if_ixlv.c revision 270631
1/******************************************************************************
2
3  Copyright (c) 2013-2014, Intel Corporation
4  All rights reserved.
5
6  Redistribution and use in source and binary forms, with or without
7  modification, are permitted provided that the following conditions are met:
8
9   1. Redistributions of source code must retain the above copyright notice,
10      this list of conditions and the following disclaimer.
11
12   2. Redistributions in binary form must reproduce the above copyright
13      notice, this list of conditions and the following disclaimer in the
14      documentation and/or other materials provided with the distribution.
15
16   3. Neither the name of the Intel Corporation nor the names of its
17      contributors may be used to endorse or promote products derived from
18      this software without specific prior written permission.
19
20  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD: stable/10/sys/dev/ixl/if_ixlv.c 270631 2014-08-25 22:04:29Z jfv $*/
34
35#include "opt_inet.h"
36#include "opt_inet6.h"
37#include "ixl.h"
38#include "ixlv.h"
39
40/*********************************************************************
41 *  Driver version
42 *********************************************************************/
43char ixlv_driver_version[] = "1.1.4";
44
45/*********************************************************************
46 *  PCI Device ID Table
47 *
48 *  Used by probe to select devices to load on
49 *  Last field stores an index into ixlv_strings
50 *  Last entry must be all 0s
51 *
52 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
53 *********************************************************************/
54
55static ixl_vendor_info_t ixlv_vendor_info_array[] =
56{
57	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
58	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, 0, 0, 0},
59	/* required last entry */
60	{0, 0, 0, 0, 0}
61};
62
63/*********************************************************************
64 *  Table of branding strings
65 *********************************************************************/
66
67static char    *ixlv_strings[] = {
68	"Intel(R) Ethernet Connection XL710 VF Driver"
69};
70
71
72/*********************************************************************
73 *  Function prototypes
74 *********************************************************************/
75static int      ixlv_probe(device_t);
76static int      ixlv_attach(device_t);
77static int      ixlv_detach(device_t);
78static int      ixlv_shutdown(device_t);
79static void	ixlv_init_locked(struct ixlv_sc *);
80static int	ixlv_allocate_pci_resources(struct ixlv_sc *);
81static void	ixlv_free_pci_resources(struct ixlv_sc *);
82static int	ixlv_assign_msix(struct ixlv_sc *);
83static int	ixlv_init_msix(struct ixlv_sc *);
84static int	ixlv_init_taskqueue(struct ixlv_sc *);
85static int	ixlv_setup_queues(struct ixlv_sc *);
86static void	ixlv_config_rss(struct ixlv_sc *);
87static void	ixlv_stop(struct ixlv_sc *);
88static void	ixlv_add_multi(struct ixl_vsi *);
89static void	ixlv_del_multi(struct ixl_vsi *);
90static void	ixlv_update_link_status(struct ixlv_sc *);
91static void	ixlv_free_queues(struct ixl_vsi *);
92static int	ixlv_setup_interface(device_t, struct ixlv_sc *);
93
94static int	ixlv_media_change(struct ifnet *);
95static void	ixlv_media_status(struct ifnet *, struct ifmediareq *);
96
97static void	ixlv_local_timer(void *);
98
99static int	ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
100static void	ixlv_init_filters(struct ixlv_sc *);
101static void	ixlv_free_filters(struct ixlv_sc *);
102
103static void	ixlv_msix_que(void *);
104static void	ixlv_msix_adminq(void *);
105static void	ixlv_do_adminq(void *, int);
106static void	ixlv_sched_aq(void *);
107static void	ixlv_handle_que(void *, int);
108static int	ixlv_reset(struct ixlv_sc *);
109static int	ixlv_reset_complete(struct i40e_hw *);
110static void	ixlv_set_queue_rx_itr(struct ixl_queue *);
111static void	ixlv_set_queue_tx_itr(struct ixl_queue *);
112
113static void	ixlv_enable_adminq_irq(struct i40e_hw *);
114static void	ixlv_disable_adminq_irq(struct i40e_hw *);
115static void	ixlv_enable_queue_irq(struct i40e_hw *, int);
116static void	ixlv_disable_queue_irq(struct i40e_hw *, int);
117
118static void	ixlv_setup_vlan_filters(struct ixlv_sc *);
119static void	ixlv_register_vlan(void *, struct ifnet *, u16);
120static void	ixlv_unregister_vlan(void *, struct ifnet *, u16);
121
122static void	ixlv_cap_txcsum_tso(struct ixl_vsi *,
123		    struct ifnet *, int);
124
125static void	ixlv_add_stats_sysctls(struct ixlv_sc *);
126
127/*********************************************************************
128 *  FreeBSD Device Interface Entry Points
129 *********************************************************************/
130
131static device_method_t ixlv_methods[] = {
132	/* Device interface */
133	DEVMETHOD(device_probe, ixlv_probe),
134	DEVMETHOD(device_attach, ixlv_attach),
135	DEVMETHOD(device_detach, ixlv_detach),
136	DEVMETHOD(device_shutdown, ixlv_shutdown),
137	{0, 0}
138};
139
140static driver_t ixlv_driver = {
141	"ixlv", ixlv_methods, sizeof(struct ixlv_sc),
142};
143
144devclass_t ixlv_devclass;
145DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);
146
147MODULE_DEPEND(ixlv, pci, 1, 1, 1);
148MODULE_DEPEND(ixlv, ether, 1, 1, 1);
149
150/*
151** TUNEABLE PARAMETERS:
152*/
153
154static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
155                   "IXLV driver parameters");
156
157/*
158** Number of descriptors per ring:
159**   - TX and RX are the same size
160*/
161static int ixlv_ringsz = DEFAULT_RING;
162TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
163SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
164    &ixlv_ringsz, 0, "Descriptor Ring Size");
165
166/* Set to zero to auto calculate  */
167int ixlv_max_queues = 0;
168TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
169SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
170    &ixlv_max_queues, 0, "Number of Queues");
171
172/*
173** Number of entries in Tx queue buf_ring.
174** Increasing this will reduce the number of
175** errors when transmitting fragmented UDP
176** packets.
177*/
178static int ixlv_txbrsz = DEFAULT_TXBRSZ;
179TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
180SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
181    &ixlv_txbrsz, 0, "TX Buf Ring Size");
182
183/*
184** Controls for Interrupt Throttling
185**      - true/false for dynamic adjustment
186**      - default values for static ITR
187*/
188int ixlv_dynamic_rx_itr = 0;
189TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
190SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
191    &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
192
193int ixlv_dynamic_tx_itr = 0;
194TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
195SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
196    &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
197
198int ixlv_rx_itr = IXL_ITR_8K;
199TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
200SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
201    &ixlv_rx_itr, 0, "RX Interrupt Rate");
202
203int ixlv_tx_itr = IXL_ITR_4K;
204TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
205SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
206    &ixlv_tx_itr, 0, "TX Interrupt Rate");
207
208
209/*********************************************************************
210 *  Device identification routine
211 *
212 *  ixlv_probe determines if the driver should be loaded on
213 *  the hardware based on PCI vendor/device id of the device.
214 *
215 *  return BUS_PROBE_DEFAULT on success, positive on failure
216 *********************************************************************/
217
218static int
219ixlv_probe(device_t dev)
220{
221	ixl_vendor_info_t *ent;
222
223	u16	pci_vendor_id, pci_device_id;
224	u16	pci_subvendor_id, pci_subdevice_id;
225	char	device_name[256];
226
227	INIT_DEBUGOUT("ixlv_probe: begin");
228
229	pci_vendor_id = pci_get_vendor(dev);
230	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
231		return (ENXIO);
232
233	pci_device_id = pci_get_device(dev);
234	pci_subvendor_id = pci_get_subvendor(dev);
235	pci_subdevice_id = pci_get_subdevice(dev);
236
237	ent = ixlv_vendor_info_array;
238	while (ent->vendor_id != 0) {
239		if ((pci_vendor_id == ent->vendor_id) &&
240		    (pci_device_id == ent->device_id) &&
241
242		    ((pci_subvendor_id == ent->subvendor_id) ||
243		     (ent->subvendor_id == 0)) &&
244
245		    ((pci_subdevice_id == ent->subdevice_id) ||
246		     (ent->subdevice_id == 0))) {
247			sprintf(device_name, "%s, Version - %s",
248				ixlv_strings[ent->index],
249				ixlv_driver_version);
250			device_set_desc_copy(dev, device_name);
251			return (BUS_PROBE_DEFAULT);
252		}
253		ent++;
254	}
255	return (ENXIO);
256}
257
258/*********************************************************************
259 *  Device initialization routine
260 *
261 *  The attach entry point is called when the driver is being loaded.
262 *  This routine identifies the type of hardware, allocates all resources
263 *  and initializes the hardware.
264 *
265 *  return 0 on success, positive on failure
266 *********************************************************************/
267
268static int
269ixlv_attach(device_t dev)
270{
271	struct ixlv_sc	*sc;
272	struct i40e_hw	*hw;
273	struct ixl_vsi 	*vsi;
274	int            	bufsz, error = 0, retries = 0;
275
276	INIT_DBG_DEV(dev, "begin");
277
278	/* Allocate, clear, and link in our primary soft structure */
279	sc = device_get_softc(dev);
280	sc->dev = sc->osdep.dev = dev;
281	hw = &sc->hw;
282	vsi = &sc->vsi;
283	vsi->dev = dev;
284
285	/* Allocate filter lists */
286	ixlv_init_filters(sc);
287
288	/* Core Lock Init*/
289	mtx_init(&sc->mtx, device_get_nameunit(dev),
290	    "IXL SC Lock", MTX_DEF);
291	mtx_init(&sc->aq_task_mtx, device_get_nameunit(dev),
292	    "IXL AQ Task Lock", MTX_DEF);
293
294	/* Set up the timer & aq watchdog callouts */
295	callout_init_mtx(&sc->timer, &sc->mtx, 0);
296	callout_init_mtx(&sc->aq_task, &sc->aq_task_mtx, 0);
297
298	/* Save off the information about this board */
299	hw->vendor_id = pci_get_vendor(dev);
300	hw->device_id = pci_get_device(dev);
301	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
302	hw->subsystem_vendor_id =
303	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
304	hw->subsystem_device_id =
305	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
306
307	hw->bus.device = pci_get_slot(dev);
308	hw->bus.func = pci_get_function(dev);
309
310	/* Do PCI setup - map BAR0, etc */
311	if (ixlv_allocate_pci_resources(sc)) {
312		device_printf(dev, "%s: Allocation of PCI resources failed\n",
313		    __func__);
314		error = ENXIO;
315		goto err_early;
316	}
317
318	INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");
319
320	error = i40e_set_mac_type(hw);
321	if (error) {
322		device_printf(dev, "%s: set_mac_type failed: %d\n",
323		    __func__, error);
324		goto err_pci_res;
325	}
326
327	error = ixlv_reset_complete(hw);
328	if (error) {
329		device_printf(dev, "%s: Device is still being reset\n",
330		    __func__);
331		goto err_pci_res;
332	}
333
334	INIT_DBG_DEV(dev, "VF Device is ready for configuration");
335
336	hw->aq.num_arq_entries = IXL_AQ_LEN;
337	hw->aq.num_asq_entries = IXL_AQ_LEN;
338	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
339	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
340
341	error = i40e_init_adminq(hw);
342	if (error) {
343		device_printf(dev, "%s: init_adminq failed: %d\n",
344		    __func__, error);
345		goto err_pci_res;
346	}
347
348	INIT_DBG_DEV(dev, "Initialized Admin Queue");
349
350	error = ixlv_send_api_ver(sc);
351	if (error) {
352		device_printf(dev, "%s: unable to send to PF (%d)\n",
353		     __func__, error);
354		goto err_aq;
355	}
356
357	while (!i40e_asq_done(hw)) {
358		if (++retries > IXLV_AQ_MAX_ERR) {
359			device_printf(dev, "%s: Admin Queue timeout "
360			    "(waiting for send_api_ver)\n", __func__);
361			error = ENXIO;
362			goto err_aq;
363		}
364		i40e_msec_delay(10);
365	}
366
367	INIT_DBG_DEV(dev, "Sent API version message to PF");
368
369	/* Wait for API version msg to arrive */
370	error = ixlv_verify_api_ver(sc);
371	if (error) {
372		device_printf(dev,
373		    "%s: Unable to verify API version, error %d\n",
374			    __func__, error);
375		goto err_aq;
376	}
377
378	INIT_DBG_DEV(dev, "PF API version verified");
379
380	/* Need API version before sending reset message */
381	error = ixlv_reset(sc);
382	if (error) {
383		device_printf(dev, "VF reset failed; reload the driver\n");
384		goto err_aq;
385	}
386
387	INIT_DBG_DEV(dev, "VF reset complete");
388
389	/* Ask for VF config from PF */
390	error = ixlv_send_vf_config_msg(sc);
391	if (error) {
392		device_printf(dev,
393		    "%s: Unable to send VF config request, error %d\n",
394		    __func__, error);
395		goto err_aq;
396	}
397
398	retries = 0;
399	while (!i40e_asq_done(hw)) {
400		if (++retries > IXLV_AQ_MAX_ERR) {
401			device_printf(dev, "%s: Admin Queue timeout "
402			    "(waiting for send_vf_config_msg)\n", __func__);
403			error = ENXIO;
404			goto err_aq;
405		}
406		i40e_msec_delay(10);
407	}
408
409	INIT_DBG_DEV(dev, "Sent VF config message to PF");
410
411	bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
412	    (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
413	sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
414	if (!sc->vf_res) {
415		device_printf(dev,
416		    "%s: Unable to allocate memory for VF configuration"
417		    " message from PF\n", __func__);
418		error = ENOMEM;
419		goto err_aq;
420	}
421
422	/* Check for VF config response */
423	error = ixlv_get_vf_config(sc);
424	if (error) {
425		device_printf(dev,
426		    "%s: Unable to get VF configuration from PF\n",
427		    __func__);
428		error = EBUSY;
429		goto err_res_buf;
430	}
431
432	INIT_DBG_DEV(dev, "Received valid VF config from PF");
433	INIT_DBG_DEV(dev, "VSIs %d, Queues %d, Max Vectors %d, Max MTU %d",
434	    sc->vf_res->num_vsis,
435	    sc->vf_res->num_queue_pairs,
436	    sc->vf_res->max_vectors,
437	    sc->vf_res->max_mtu);
438	INIT_DBG_DEV(dev, "Offload flags: %#010x",
439	    sc->vf_res->vf_offload_flags);
440
441	/* got VF config message back from PF, now we can parse it */
442	for (int i = 0; i < sc->vf_res->num_vsis; i++) {
443		if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
444			sc->vsi_res = &sc->vf_res->vsi_res[i];
445	}
446	if (!sc->vsi_res) {
447		device_printf(dev, "%s: no LAN VSI found\n", __func__);
448		goto err_res_buf;
449	}
450
451	INIT_DBG_DEV(dev, "Resource Acquisition complete");
452
453	/* If no mac address was assigned just make a random one */
454	if (!ixlv_check_ether_addr(hw->mac.addr)) {
455		u8 addr[ETHER_ADDR_LEN];
456		arc4rand(&addr, sizeof(addr), 0);
457		addr[0] &= 0xFE;
458		addr[0] |= 0x02;
459		bcopy(addr, hw->mac.addr, sizeof(addr));
460	}
461
462	vsi->id = sc->vsi_res->vsi_id;
463	vsi->back = (void *)sc;
464
465	/* Link in this virtual environment is always 'up' */
466	vsi->link_up = TRUE;
467
468	/* This allocates the memory and early settings */
469	if (ixlv_setup_queues(sc) != 0) {
470		device_printf(dev, "%s: setup queues failed!\n",
471		    __func__);
472		goto out;
473	}
474
475	/* Setup the stack interface */
476	if (ixlv_setup_interface(dev, sc) != 0) {
477		device_printf(dev, "%s: setup interface failed!\n",
478		    __func__);
479		goto out;
480	}
481
482	INIT_DBG_DEV(dev, "Queue memory and interface setup");
483
484	/* Do queue interrupt setup */
485	ixlv_assign_msix(sc);
486
487	/* Start AdminQ taskqueue */
488	ixlv_init_taskqueue(sc);
489
490	/* Start the admin queue scheduler timer */
491	callout_reset(&sc->aq_task, 2 * hz, ixlv_sched_aq, sc);
492
493	/* Initialize stats */
494	bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
495	ixlv_add_stats_sysctls(sc);
496
497	/* Register for VLAN events */
498	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
499	    ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
500	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
501	    ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
502
503	/* We want AQ enabled early */
504	ixlv_enable_adminq_irq(hw);
505
506	/* Set things up to run init */
507	sc->aq_pending = 0;
508	sc->aq_required = 0;
509	sc->init_state = IXLV_INIT_READY;
510
511	INIT_DBG_DEV(dev, "end");
512	return (error);
513
514out:
515	ixlv_free_queues(vsi);
516err_res_buf:
517	free(sc->vf_res, M_DEVBUF);
518err_aq:
519	i40e_shutdown_adminq(hw);
520err_pci_res:
521	ixlv_free_pci_resources(sc);
522err_early:
523	mtx_destroy(&sc->mtx);
524	mtx_destroy(&sc->aq_task_mtx);
525	ixlv_free_filters(sc);
526	INIT_DBG_DEV(dev, "end: error %d", error);
527	return (error);
528}
529
530/*********************************************************************
531 *  Device removal routine
532 *
533 *  The detach entry point is called when the driver is being removed.
534 *  This routine stops the adapter and deallocates all the resources
535 *  that were allocated for driver operation.
536 *
537 *  return 0 on success, positive on failure
538 *********************************************************************/
539
540static int
541ixlv_detach(device_t dev)
542{
543	struct ixlv_sc	*sc = device_get_softc(dev);
544	struct ixl_vsi 	*vsi = &sc->vsi;
545	int 			 retries = 0;
546
547	INIT_DBG_DEV(dev, "begin");
548
549	/* Make sure VLANS are not using driver */
550	if (vsi->ifp->if_vlantrunk != NULL) {
551		device_printf(dev, "Vlan in use, detach first\n");
552		INIT_DBG_DEV(dev, "end");
553		return (EBUSY);
554	}
555
556	/* Stop driver */
557	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
558		mtx_lock(&sc->mtx);
559		ixlv_stop(sc);
560		mtx_unlock(&sc->mtx);
561
562		/*
563		** Ensure queues are disabled before examining
564		** admin queue state later in detach.
565		*/
566		while (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING
567		    && ++retries < IXLV_AQ_MAX_ERR) {
568			i40e_msec_delay(10);
569		}
570#ifdef IXL_DEBUG
571		if (retries >= IXLV_AQ_MAX_ERR)
572			device_printf(dev, "Issue disabling queues for detach\n");
573#endif
574	}
575
576	/* Unregister VLAN events */
577	if (vsi->vlan_attach != NULL)
578		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
579	if (vsi->vlan_detach != NULL)
580		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
581
582	/* Stop AQ callout */
583	callout_drain(&sc->aq_task);
584	callout_stop(&sc->aq_task);
585
586#ifdef IXL_DEBUG
587	/* Report on possible AQ failures */
588	if (sc->aq_required || sc->aq_pending) {
589		device_printf(dev, "AQ status on detach:\n");
590		device_printf(dev, "required  : 0x%4b\n", sc->aq_required,
591		    IXLV_FLAGS);
592		device_printf(dev, "pending   : 0x%4b\n", sc->aq_pending,
593		    IXLV_FLAGS);
594		device_printf(dev, "current_op: %d\n", sc->current_op);
595	}
596#endif
597
598	i40e_shutdown_adminq(&sc->hw);
599	while (taskqueue_cancel(sc->tq, &sc->aq_irq, NULL) != 0)
600		taskqueue_drain(sc->tq, &sc->aq_irq);
601	taskqueue_free(sc->tq);
602
603	/* force the state down */
604	vsi->ifp->if_flags &= ~IFF_UP;
605	ether_ifdetach(vsi->ifp);
606	if_free(vsi->ifp);
607
608	free(sc->vf_res, M_DEVBUF);
609	ixlv_free_pci_resources(sc);
610	ixlv_free_queues(vsi);
611	mtx_destroy(&sc->mtx);
612	mtx_destroy(&sc->aq_task_mtx);
613	ixlv_free_filters(sc);
614
615	bus_generic_detach(dev);
616	INIT_DBG_DEV(dev, "end");
617	return (0);
618}
619
620/*********************************************************************
621 *
622 *  Shutdown entry point
623 *
624 **********************************************************************/
625
626static int
627ixlv_shutdown(device_t dev)
628{
629	struct ixlv_sc	*sc = device_get_softc(dev);
630
631	INIT_DBG_DEV(dev, "begin");
632
633	mtx_lock(&sc->mtx);
634	ixlv_stop(sc);
635	mtx_unlock(&sc->mtx);
636
637	INIT_DBG_DEV(dev, "end");
638	return (0);
639}
640
641/*
642 * Configure TXCSUM(IPV6) and TSO(4/6)
643 *	- the hardware handles these together so we
644 *	  need to tweak them
645 */
646static void
647ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
648{
649	/* Enable/disable TXCSUM/TSO4 */
650	if (!(ifp->if_capenable & IFCAP_TXCSUM)
651	    && !(ifp->if_capenable & IFCAP_TSO4)) {
652		if (mask & IFCAP_TXCSUM) {
653			ifp->if_capenable |= IFCAP_TXCSUM;
654			/* enable TXCSUM, restore TSO if previously enabled */
655			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
656				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
657				ifp->if_capenable |= IFCAP_TSO4;
658			}
659		}
660		else if (mask & IFCAP_TSO4) {
661			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
662			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
663			if_printf(ifp,
664			    "TSO4 requires txcsum, enabling both...\n");
665		}
666	} else if((ifp->if_capenable & IFCAP_TXCSUM)
667	    && !(ifp->if_capenable & IFCAP_TSO4)) {
668		if (mask & IFCAP_TXCSUM)
669			ifp->if_capenable &= ~IFCAP_TXCSUM;
670		else if (mask & IFCAP_TSO4)
671			ifp->if_capenable |= IFCAP_TSO4;
672	} else if((ifp->if_capenable & IFCAP_TXCSUM)
673	    && (ifp->if_capenable & IFCAP_TSO4)) {
674		if (mask & IFCAP_TXCSUM) {
675			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
676			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
677			if_printf(ifp,
678			    "TSO4 requires txcsum, disabling both...\n");
679		} else if (mask & IFCAP_TSO4)
680			ifp->if_capenable &= ~IFCAP_TSO4;
681	}
682
683	/* Enable/disable TXCSUM_IPV6/TSO6 */
684	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
685	    && !(ifp->if_capenable & IFCAP_TSO6)) {
686		if (mask & IFCAP_TXCSUM_IPV6) {
687			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
688			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
689				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
690				ifp->if_capenable |= IFCAP_TSO6;
691			}
692		} else if (mask & IFCAP_TSO6) {
693			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
694			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
695			if_printf(ifp,
696			    "TSO6 requires txcsum6, enabling both...\n");
697		}
698	} else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
699	    && !(ifp->if_capenable & IFCAP_TSO6)) {
700		if (mask & IFCAP_TXCSUM_IPV6)
701			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
702		else if (mask & IFCAP_TSO6)
703			ifp->if_capenable |= IFCAP_TSO6;
704	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
705	    && (ifp->if_capenable & IFCAP_TSO6)) {
706		if (mask & IFCAP_TXCSUM_IPV6) {
707			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
708			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
709			if_printf(ifp,
710			    "TSO6 requires txcsum6, disabling both...\n");
711		} else if (mask & IFCAP_TSO6)
712			ifp->if_capenable &= ~IFCAP_TSO6;
713	}
714}
715
716/*********************************************************************
717 *  Ioctl entry point
718 *
719 *  ixlv_ioctl is called when the user wants to configure the
720 *  interface.
721 *
722 *  return 0 on success, positive on failure
723 **********************************************************************/
724
725static int
726ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
727{
728	struct ixl_vsi		*vsi = ifp->if_softc;
729	struct ixlv_sc	*sc = vsi->back;
730	struct ifreq		*ifr = (struct ifreq *)data;
731#if defined(INET) || defined(INET6)
732	struct ifaddr 		*ifa = (struct ifaddr *)data;
733	bool			avoid_reset = FALSE;
734#endif
735	int             	error = 0;
736
737
738	switch (command) {
739
740        case SIOCSIFADDR:
741#ifdef INET
742		if (ifa->ifa_addr->sa_family == AF_INET)
743			avoid_reset = TRUE;
744#endif
745#ifdef INET6
746		if (ifa->ifa_addr->sa_family == AF_INET6)
747			avoid_reset = TRUE;
748#endif
749#if defined(INET) || defined(INET6)
750		/*
751		** Calling init results in link renegotiation,
752		** so we avoid doing it when possible.
753		*/
754		if (avoid_reset) {
755			ifp->if_flags |= IFF_UP;
756			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
757				ixlv_init(sc);
758			if (!(ifp->if_flags & IFF_NOARP))
759				arp_ifinit(ifp, ifa);
760		} else
761			error = ether_ioctl(ifp, command, data);
762		break;
763#endif
764	case SIOCSIFMTU:
765		IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
766		mtx_lock(&sc->mtx);
767		if (ifr->ifr_mtu > IXL_MAX_FRAME -
768		    ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
769			error = EINVAL;
770			IOCTL_DBG_IF(ifp, "mtu too large");
771		} else {
772			IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", ifp->if_mtu, ifr->ifr_mtu);
773			// ERJ: Interestingly enough, these types don't match
774			ifp->if_mtu = ifr->ifr_mtu;
775			vsi->max_frame_size =
776			    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
777			    + ETHER_VLAN_ENCAP_LEN;
778
779			ixlv_init_locked(sc);
780		}
781		mtx_unlock(&sc->mtx);
782		break;
783	case SIOCSIFFLAGS:
784		IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
785		mtx_lock(&sc->mtx);
786		if (ifp->if_flags & IFF_UP) {
787			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
788				ixlv_init_locked(sc);
789		} else
790			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
791				ixlv_stop(sc);
792		sc->if_flags = ifp->if_flags;
793		mtx_unlock(&sc->mtx);
794		break;
795	case SIOCADDMULTI:
796		IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
797		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
798			mtx_lock(&sc->mtx);
799			ixlv_disable_intr(vsi);
800			ixlv_add_multi(vsi);
801			ixlv_enable_intr(vsi);
802			mtx_unlock(&sc->mtx);
803		}
804		break;
805	case SIOCDELMULTI:
806		IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
807		if (sc->init_state == IXLV_RUNNING) {
808			mtx_lock(&sc->mtx);
809			ixlv_disable_intr(vsi);
810			ixlv_del_multi(vsi);
811			ixlv_enable_intr(vsi);
812			mtx_unlock(&sc->mtx);
813		}
814		break;
815	case SIOCSIFMEDIA:
816	case SIOCGIFMEDIA:
817		IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
818		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
819		break;
820	case SIOCSIFCAP:
821	{
822		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
823		IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");
824
825		ixlv_cap_txcsum_tso(vsi, ifp, mask);
826
827		if (mask & IFCAP_RXCSUM)
828			ifp->if_capenable ^= IFCAP_RXCSUM;
829		if (mask & IFCAP_RXCSUM_IPV6)
830			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
831		if (mask & IFCAP_LRO)
832			ifp->if_capenable ^= IFCAP_LRO;
833		if (mask & IFCAP_VLAN_HWTAGGING)
834			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
835		if (mask & IFCAP_VLAN_HWFILTER)
836			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
837		if (mask & IFCAP_VLAN_HWTSO)
838			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
839		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
840			ixlv_init(sc);
841		}
842		VLAN_CAPABILITIES(ifp);
843
844		break;
845	}
846
847	default:
848		IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
849		error = ether_ioctl(ifp, command, data);
850		break;
851	}
852
853	return (error);
854}
855
856/*
857** To do a reinit on the VF is unfortunately more complicated
858** than a physical device, we must have the PF more or less
859** completely recreate our memory, so many things that were
860** done only once at attach in traditional drivers now must be
861** redone at each reinitialization. This function does that
862** 'prelude' so we can then call the normal locked init code.
863*/
864int
865ixlv_reinit_locked(struct ixlv_sc *sc)
866{
867	struct i40e_hw		*hw = &sc->hw;
868	struct ixl_vsi		*vsi = &sc->vsi;
869	struct ifnet		*ifp = vsi->ifp;
870	struct ixlv_vlan_filter	*vf;
871	int			error = 0;
872
873	INIT_DBG_IF(ifp, "begin");
874
875	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
876		ixlv_stop(sc);
877
878	if ((sc->init_state == IXLV_RESET_REQUIRED) ||
879	    (sc->init_state == IXLV_RESET_PENDING))
880		error = ixlv_reset(sc);
881
882	/* set the state in case we went thru RESET */
883	sc->init_state = IXLV_RUNNING;
884
885	if (vsi->num_vlans != 0)
886		SLIST_FOREACH(vf, sc->vlan_filters, next)
887			vf->flags = IXL_FILTER_ADD;
888	else { /* clean any stale filters */
889		while (!SLIST_EMPTY(sc->vlan_filters)) {
890			vf = SLIST_FIRST(sc->vlan_filters);
891			SLIST_REMOVE_HEAD(sc->vlan_filters, next);
892			free(vf, M_DEVBUF);
893		}
894	}
895
896	ixlv_enable_adminq_irq(hw);
897	sc->aq_pending = 0;
898	sc->aq_required = 0;
899
900	INIT_DBG_IF(ifp, "end");
901	return (error);
902}
903
904
905static void
906ixlv_init_locked(struct ixlv_sc *sc)
907{
908	struct i40e_hw		*hw = &sc->hw;
909	struct ixl_vsi		*vsi = &sc->vsi;
910	struct ixl_queue	*que = vsi->queues;
911	struct ifnet		*ifp = vsi->ifp;
912	int			 error = 0;
913
914	INIT_DBG_IF(ifp, "begin");
915
916	/* Verify we have the core lock */
917	if (!mtx_owned(&sc->mtx)) {
918		if_printf(ifp, "%s: sc mutex not owned; acquire"
919		    "before calling this function!\n", __func__);
920		goto init_done;
921	}
922
923	/* Do a reinit first if an init has already been done */
924	if ((sc->init_state == IXLV_RUNNING) ||
925	    (sc->init_state == IXLV_RESET_REQUIRED) ||
926	    (sc->init_state == IXLV_RESET_PENDING))
927		error = ixlv_reinit_locked(sc);
928	/* Don't bother with init if we failed reinit */
929	if (error)
930		goto init_done;
931
932	/* Check for an LAA mac address... */
933	bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);
934
935	ifp->if_hwassist = 0;
936	if (ifp->if_capenable & IFCAP_TSO)
937		ifp->if_hwassist |= CSUM_TSO;
938	if (ifp->if_capenable & IFCAP_TXCSUM)
939		ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
940	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
941		ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;
942
943	/* Add mac filter for this VF to PF */
944	error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
945
946	// send message, then enqueue another task
947	if (!error || error == EEXIST) {
948		sc->aq_required |= IXLV_FLAG_AQ_ADD_MAC_FILTER;
949		callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO,
950		    ixlv_sched_aq, sc);
951	}
952
953	/* Setup vlan's if needed */
954	ixlv_setup_vlan_filters(sc);
955
956	/*
957	** Prepare the queues for operation
958	*/
959	for (int i = 0; i < vsi->num_queues; i++, que++) {
960		struct  rx_ring	*rxr = &que->rxr;
961
962		ixl_init_tx_ring(que);
963
964		/* Need to set mbuf size now */
965		if (vsi->max_frame_size <= 2048)
966			rxr->mbuf_sz = MCLBYTES;
967		else
968			rxr->mbuf_sz = MJUMPAGESIZE;
969		ixl_init_rx_ring(que);
970	}
971
972	/* Configure queues */
973	sc->aq_required |= IXLV_FLAG_AQ_CONFIGURE_QUEUES;
974	callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO,
975	    ixlv_sched_aq, sc);
976
977	/* Set up RSS */
978	ixlv_config_rss(sc);
979
980	/* Map vectors */
981	sc->aq_required |= IXLV_FLAG_AQ_MAP_VECTORS;
982	callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO,
983	    ixlv_sched_aq, sc);
984
985	/* Enable queues */
986	sc->aq_required |= IXLV_FLAG_AQ_ENABLE_QUEUES;
987	callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO,
988	    ixlv_sched_aq, sc);
989
990	/* Start the local timer */
991	callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
992
993	sc->init_state = IXLV_RUNNING;
994
995init_done:
996	INIT_DBG_IF(ifp, "end");
997	return;
998}
999
1000/*
1001**  Init entry point for the stack
1002*/
1003void
1004ixlv_init(void *arg)
1005{
1006	struct ixlv_sc *sc = arg;
1007
1008	mtx_lock(&sc->mtx);
1009	ixlv_init_locked(sc);
1010	mtx_unlock(&sc->mtx);
1011	return;
1012}
1013
1014/*
1015 * Allocate MSI/X vectors, setup the AQ vector early
1016 */
1017static int
1018ixlv_init_msix(struct ixlv_sc *sc)
1019{
1020	device_t dev = sc->dev;
1021	int rid, want, vectors, queues, available;
1022
1023	rid = PCIR_BAR(IXL_BAR);
1024	sc->msix_mem = bus_alloc_resource_any(dev,
1025	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
1026       	if (!sc->msix_mem) {
1027		/* May not be enabled */
1028		device_printf(sc->dev,
1029		    "Unable to map MSIX table \n");
1030		goto fail;
1031	}
1032
1033	available = pci_msix_count(dev);
1034	if (available == 0) { /* system has msix disabled */
1035		bus_release_resource(dev, SYS_RES_MEMORY,
1036		    rid, sc->msix_mem);
1037		sc->msix_mem = NULL;
1038		goto fail;
1039	}
1040
1041	/* Figure out a reasonable auto config value */
1042	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
1043
1044	/* Override with hardcoded value if sane */
1045	if ((ixlv_max_queues != 0) && (ixlv_max_queues <= queues))
1046		queues = ixlv_max_queues;
1047
1048	/* Enforce the VF max value */
1049	if (queues > IXLV_MAX_QUEUES)
1050		queues = IXLV_MAX_QUEUES;
1051
1052	/*
1053	** Want one vector (RX/TX pair) per queue
1054	** plus an additional for the admin queue.
1055	*/
1056	want = queues + 1;
1057	if (want <= available)	/* Have enough */
1058		vectors = want;
1059	else {
1060		device_printf(sc->dev,
1061		    "MSIX Configuration Problem, "
1062		    "%d vectors available but %d wanted!\n",
1063		    available, want);
1064		goto fail;
1065	}
1066
1067	if (pci_alloc_msix(dev, &vectors) == 0) {
1068		device_printf(sc->dev,
1069		    "Using MSIX interrupts with %d vectors\n", vectors);
1070		sc->msix = vectors;
1071		sc->vsi.num_queues = queues;
1072	}
1073
1074	/*
1075	** Explicitly set the guest PCI BUSMASTER capability
1076	** and we must rewrite the ENABLE in the MSIX control
1077	** register again at this point to cause the host to
1078	** successfully initialize us.
1079	*/
1080	{
1081		u16 pci_cmd_word;
1082		int msix_ctrl;
1083		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1084		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
1085		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
1086		pci_find_cap(dev, PCIY_MSIX, &rid);
1087		rid += PCIR_MSIX_CTRL;
1088		msix_ctrl = pci_read_config(dev, rid, 2);
1089		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1090		pci_write_config(dev, rid, msix_ctrl, 2);
1091	}
1092
1093	/* Next we need to setup the vector for the Admin Queue */
1094	rid = 1;	// zero vector + 1
1095	sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1096	    &rid, RF_SHAREABLE | RF_ACTIVE);
1097	if (sc->res == NULL) {
1098		device_printf(dev,"Unable to allocate"
1099		    " bus resource: AQ interrupt \n");
1100		goto fail;
1101	}
1102	if (bus_setup_intr(dev, sc->res,
1103	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1104	    ixlv_msix_adminq, sc, &sc->tag)) {
1105		sc->res = NULL;
1106		device_printf(dev, "Failed to register AQ handler");
1107		goto fail;
1108	}
1109	bus_describe_intr(dev, sc->res, sc->tag, "adminq");
1110
1111	return (vectors);
1112
1113fail:
1114	/* The VF driver MUST use MSIX */
1115	return (0);
1116}
1117
1118static int
1119ixlv_allocate_pci_resources(struct ixlv_sc *sc)
1120{
1121	int             rid;
1122	device_t        dev = sc->dev;
1123
1124	rid = PCIR_BAR(0);
1125	sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1126	    &rid, RF_ACTIVE);
1127
1128	if (!(sc->pci_mem)) {
1129		device_printf(dev,"Unable to allocate bus resource: memory\n");
1130		return (ENXIO);
1131	}
1132
1133	sc->osdep.mem_bus_space_tag =
1134		rman_get_bustag(sc->pci_mem);
1135	sc->osdep.mem_bus_space_handle =
1136		rman_get_bushandle(sc->pci_mem);
1137	sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
1138	sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
1139
1140	sc->hw.back = &sc->osdep;
1141
1142	/* May need to pre-emptively disable adminq interrupts */
1143	ixlv_disable_adminq_irq(&sc->hw);
1144
1145	/*
1146	** Now setup MSI/X, it will return
1147	** us the number of supported vectors
1148	*/
1149	sc->msix = ixlv_init_msix(sc);
1150
1151	/* We fail without MSIX support */
1152	if (sc->msix == 0)
1153		return (ENXIO);
1154
1155	return (0);
1156}
1157
1158static void
1159ixlv_free_pci_resources(struct ixlv_sc *sc)
1160{
1161	struct ixl_vsi         *vsi = &sc->vsi;
1162	struct ixl_queue       *que = vsi->queues;
1163	device_t                dev = sc->dev;
1164
1165	/* We may get here before stations are setup */
1166	if (que == NULL)
1167		goto early;
1168
1169	/*
1170	**  Release all msix queue resources:
1171	*/
1172	for (int i = 0; i < vsi->num_queues; i++, que++) {
1173		int rid = que->msix + 1;
1174		if (que->tag != NULL) {
1175			bus_teardown_intr(dev, que->res, que->tag);
1176			que->tag = NULL;
1177		}
1178		if (que->res != NULL)
1179			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1180	}
1181
1182early:
1183	/* Clean the AdminQ interrupt */
1184	if (sc->tag != NULL) {
1185		bus_teardown_intr(dev, sc->res, sc->tag);
1186		sc->tag = NULL;
1187	}
1188	if (sc->res != NULL)
1189		bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
1190
1191	pci_release_msi(dev);
1192
1193	if (sc->msix_mem != NULL)
1194		bus_release_resource(dev, SYS_RES_MEMORY,
1195		    PCIR_BAR(IXL_BAR), sc->msix_mem);
1196
1197	if (sc->pci_mem != NULL)
1198		bus_release_resource(dev, SYS_RES_MEMORY,
1199		    PCIR_BAR(0), sc->pci_mem);
1200
1201	return;
1202}
1203
1204static int
1205ixlv_init_taskqueue(struct ixlv_sc *sc)
1206{
1207	int error = 0;
1208
1209	/* Tasklet for AQ Interrupts */
1210	TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);
1211
1212	sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
1213	    taskqueue_thread_enqueue, &sc->tq);
1214	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
1215	    device_get_nameunit(sc->dev));
1216
1217	return (error);
1218}
1219
1220/*********************************************************************
1221 *
1222 *  Setup MSIX Interrupt resources and handlers for the VSI queues
1223 *
1224 **********************************************************************/
1225static int
1226ixlv_assign_msix(struct ixlv_sc *sc)
1227{
1228	device_t	dev = sc->dev;
1229	struct 		ixl_vsi *vsi = &sc->vsi;
1230	struct 		ixl_queue *que = vsi->queues;
1231	struct		tx_ring	 *txr;
1232	int 		error, rid, vector = 1;
1233
1234	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
1235		rid = vector + 1;
1236		txr = &que->txr;
1237		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1238		    RF_SHAREABLE | RF_ACTIVE);
1239		if (que->res == NULL) {
1240			device_printf(dev,"Unable to allocate"
1241		    	    " bus resource: que interrupt [%d]\n", vector);
1242			return (ENXIO);
1243		}
1244		/* Set the handler function */
1245		error = bus_setup_intr(dev, que->res,
1246		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1247		    ixlv_msix_que, que, &que->tag);
1248		if (error) {
1249			que->res = NULL;
1250			device_printf(dev, "Failed to register que handler");
1251			return (error);
1252		}
1253		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
1254		/* Bind the vector to a CPU */
1255		bus_bind_intr(dev, que->res, i);
1256		que->msix = vector;
1257        	vsi->que_mask |= (u64)(1 << que->msix);
1258		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
1259		TASK_INIT(&que->task, 0, ixlv_handle_que, que);
1260		que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
1261		    taskqueue_thread_enqueue, &que->tq);
1262		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1263		    device_get_nameunit(sc->dev));
1264	}
1265
1266	return (0);
1267}
1268
1269/*
1270** XXX: Assumes the vf's admin queue has been initialized.
1271*/
1272static int
1273ixlv_reset(struct ixlv_sc *sc)
1274{
1275	struct i40e_hw	*hw = &sc->hw;
1276	device_t	dev = sc->dev;
1277	int		error = 0;
1278
1279	/* Ask the PF to reset us if we are initiating */
1280	if (sc->init_state != IXLV_RESET_PENDING)
1281		ixlv_request_reset(sc);
1282
1283	i40e_msec_delay(100);
1284	error = ixlv_reset_complete(hw);
1285	if (error) {
1286		device_printf(dev, "%s: VF reset failed\n",
1287		    __func__);
1288		return (error);
1289	}
1290
1291	error = i40e_shutdown_adminq(hw);
1292	if (error) {
1293		device_printf(dev, "%s: shutdown_adminq failed: %d\n",
1294		    __func__, error);
1295		return (error);
1296	}
1297
1298	error = i40e_init_adminq(hw);
1299	if (error) {
1300		device_printf(dev, "%s: init_adminq failed: %d\n",
1301		    __func__, error);
1302		return(error);
1303	}
1304
1305	return (0);
1306}
1307
1308static int
1309ixlv_reset_complete(struct i40e_hw *hw)
1310{
1311	u32 reg;
1312
1313	for (int i = 0; i < 100; i++) {
1314		reg = rd32(hw, I40E_VFGEN_RSTAT) &
1315		    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1316
1317                if ((reg == I40E_VFR_VFACTIVE) ||
1318		    (reg == I40E_VFR_COMPLETED))
1319			return (0);
1320		i40e_usec_delay(20);
1321	}
1322
1323	return (EBUSY);
1324}
1325
1326
1327/*********************************************************************
1328 *
1329 *  Setup networking device structure and register an interface.
1330 *
1331 **********************************************************************/
1332static int
1333ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
1334{
1335	struct ifnet		*ifp;
1336	struct ixl_vsi		*vsi = &sc->vsi;
1337	struct ixl_queue	*que = vsi->queues;
1338
1339	INIT_DBG_DEV(dev, "begin");
1340
1341	ifp = vsi->ifp = if_alloc(IFT_ETHER);
1342	if (ifp == NULL) {
1343		device_printf(dev, "can not allocate ifnet structure\n");
1344		return (-1);
1345	}
1346
1347	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1348
1349	ifp->if_mtu = ETHERMTU;
1350	ifp->if_baudrate = 4000000000;  // ??
1351	ifp->if_init = ixlv_init;
1352	ifp->if_softc = vsi;
1353	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1354	ifp->if_ioctl = ixlv_ioctl;
1355
1356	ifp->if_transmit = ixl_mq_start;
1357
1358	ifp->if_qflush = ixl_qflush;
1359	ifp->if_snd.ifq_maxlen = que->num_desc - 2;
1360
1361	ether_ifattach(ifp, sc->hw.mac.addr);
1362
1363	vsi->max_frame_size =
1364	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1365	    + ETHER_VLAN_ENCAP_LEN;
1366
1367	/*
1368	 * Tell the upper layer(s) we support long frames.
1369	 */
1370	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1371
1372	ifp->if_capabilities |= IFCAP_HWCSUM;
1373	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
1374	ifp->if_capabilities |= IFCAP_TSO;
1375	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1376
1377	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1378			     |  IFCAP_VLAN_HWTSO
1379			     |  IFCAP_VLAN_MTU
1380			     |  IFCAP_VLAN_HWCSUM
1381			     |  IFCAP_LRO;
1382	ifp->if_capenable = ifp->if_capabilities;
1383
1384	/*
1385	** Don't turn this on by default, if vlans are
1386	** created on another pseudo device (eg. lagg)
1387	** then vlan events are not passed thru, breaking
1388	** operation, but with HW FILTER off it works. If
1389	** using vlans directly on the ixl driver you can
1390	** enable this and get full hardware tag filtering.
1391	*/
1392	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1393
1394	/*
1395	 * Specify the media types supported by this adapter and register
1396	 * callbacks to update media and link information
1397	 */
1398	ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
1399		     ixlv_media_status);
1400
1401	// JFV Add media types later?
1402
1403	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1404	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1405
1406	INIT_DBG_DEV(dev, "end");
1407	return (0);
1408}
1409
1410/*
1411** Allocate and setup the interface queues
1412*/
1413static int
1414ixlv_setup_queues(struct ixlv_sc *sc)
1415{
1416	device_t		dev = sc->dev;
1417	struct ixl_vsi		*vsi;
1418	struct ixl_queue	*que;
1419	struct tx_ring		*txr;
1420	struct rx_ring		*rxr;
1421	int 			rsize, tsize;
1422	int			error = I40E_SUCCESS;
1423
1424	vsi = &sc->vsi;
1425	vsi->back = (void *)sc;
1426	vsi->hw = &sc->hw;
1427	vsi->num_vlans = 0;
1428
1429	/* Get memory for the station queues */
1430	if (!(vsi->queues =
1431		(struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
1432		vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1433			device_printf(dev, "Unable to allocate queue memory\n");
1434			error = ENOMEM;
1435			goto early;
1436	}
1437
1438	for (int i = 0; i < vsi->num_queues; i++) {
1439		que = &vsi->queues[i];
1440		que->num_desc = ixlv_ringsz;
1441		que->me = i;
1442		que->vsi = vsi;
1443		/* mark the queue as active */
1444		vsi->active_queues |= (u64)1 << que->me;
1445
1446		txr = &que->txr;
1447		txr->que = que;
1448		txr->tail = I40E_QTX_TAIL1(que->me);
1449		/* Initialize the TX lock */
1450		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
1451		    device_get_nameunit(dev), que->me);
1452		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
1453		/*
1454		** Create the TX descriptor ring, the extra int is
1455		** added as the location for HEAD WB.
1456		*/
1457		tsize = roundup2((que->num_desc *
1458		    sizeof(struct i40e_tx_desc)) +
1459		    sizeof(u32), DBA_ALIGN);
1460		if (i40e_allocate_dma(&sc->hw,
1461		    &txr->dma, tsize, DBA_ALIGN)) {
1462			device_printf(dev,
1463			    "Unable to allocate TX Descriptor memory\n");
1464			error = ENOMEM;
1465			goto fail;
1466		}
1467		txr->base = (struct i40e_tx_desc *)txr->dma.va;
1468		bzero((void *)txr->base, tsize);
1469		/* Now allocate transmit soft structs for the ring */
1470		if (ixl_allocate_tx_data(que)) {
1471			device_printf(dev,
1472			    "Critical Failure setting up TX structures\n");
1473			error = ENOMEM;
1474			goto fail;
1475		}
1476		/* Allocate a buf ring */
1477		txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
1478		    M_WAITOK, &txr->mtx);
1479		if (txr->br == NULL) {
1480			device_printf(dev,
1481			    "Critical Failure setting up TX buf ring\n");
1482			error = ENOMEM;
1483			goto fail;
1484		}
1485
1486		/*
1487		 * Next the RX queues...
1488		 */
1489		rsize = roundup2(que->num_desc *
1490		    sizeof(union i40e_rx_desc), DBA_ALIGN);
1491		rxr = &que->rxr;
1492		rxr->que = que;
1493		rxr->tail = I40E_QRX_TAIL1(que->me);
1494
1495		/* Initialize the RX side lock */
1496		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
1497		    device_get_nameunit(dev), que->me);
1498		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
1499
1500		if (i40e_allocate_dma(&sc->hw,
1501		    &rxr->dma, rsize, 4096)) { //JFV - should this be DBA?
1502			device_printf(dev,
1503			    "Unable to allocate RX Descriptor memory\n");
1504			error = ENOMEM;
1505			goto fail;
1506		}
1507		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
1508		bzero((void *)rxr->base, rsize);
1509
1510		/* Allocate receive soft structs for the ring*/
1511		if (ixl_allocate_rx_data(que)) {
1512			device_printf(dev,
1513			    "Critical Failure setting up receive structs\n");
1514			error = ENOMEM;
1515			goto fail;
1516		}
1517	}
1518
1519	return (0);
1520
1521fail:
1522	free(vsi->queues, M_DEVBUF);
1523	for (int i = 0; i < vsi->num_queues; i++) {
1524		que = &vsi->queues[i];
1525		rxr = &que->rxr;
1526		txr = &que->txr;
1527		if (rxr->base)
1528			i40e_free_dma(&sc->hw, &rxr->dma);
1529		if (txr->base)
1530			i40e_free_dma(&sc->hw, &txr->dma);
1531	}
1532
1533early:
1534	return (error);
1535}
1536
1537/*
1538** This routine is run via an vlan config EVENT,
1539** it enables us to use the HW Filter table since
1540** we can get the vlan id. This just creates the
1541** entry in the soft version of the VFTA, init will
1542** repopulate the real table.
1543*/
1544static void
1545ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1546{
1547	struct ixl_vsi			*vsi = ifp->if_softc;
1548	struct ixlv_sc		*sc = vsi->back;
1549	struct ixlv_vlan_filter	*v;
1550
1551
1552	if (ifp->if_softc !=  arg)   /* Not our event */
1553		return;
1554
1555	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1556		return;
1557
1558	/* Sanity check - make sure it doesn't already exist */
1559	SLIST_FOREACH(v, sc->vlan_filters, next) {
1560		if (v->vlan == vtag)
1561			return;
1562	}
1563
1564	mtx_lock(&sc->mtx);
1565	++vsi->num_vlans;
1566	v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
1567	SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
1568	v->vlan = vtag;
1569	v->flags = IXL_FILTER_ADD;
1570	sc->aq_required |= IXLV_FLAG_AQ_ADD_VLAN_FILTER;
1571	mtx_unlock(&sc->mtx);
1572	return;
1573}
1574
1575/*
1576** This routine is run via an vlan
1577** unconfig EVENT, remove our entry
1578** in the soft vfta.
1579*/
1580static void
1581ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1582{
1583	struct ixl_vsi			*vsi = ifp->if_softc;
1584	struct ixlv_sc		*sc = vsi->back;
1585	struct ixlv_vlan_filter	*v;
1586	int				i = 0;
1587
1588	if (ifp->if_softc !=  arg)
1589		return;
1590
1591	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1592		return;
1593
1594	mtx_lock(&sc->mtx);
1595	SLIST_FOREACH(v, sc->vlan_filters, next) {
1596		if (v->vlan == vtag) {
1597			v->flags = IXL_FILTER_DEL;
1598			++i;
1599			--vsi->num_vlans;
1600		}
1601	}
1602	if (i)
1603		sc->aq_required |= IXLV_FLAG_AQ_DEL_VLAN_FILTER;
1604	mtx_unlock(&sc->mtx);
1605	return;
1606}
1607
1608/*
1609** Get a new filter and add it to the mac filter list.
1610*/
1611static struct ixlv_mac_filter *
1612ixlv_get_mac_filter(struct ixlv_sc *sc)
1613{
1614	struct ixlv_mac_filter	*f;
1615
1616	f = malloc(sizeof(struct ixlv_mac_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
1617	SLIST_INSERT_HEAD(sc->mac_filters, f, next);
1618
1619	return (f);
1620}
1621
1622/*
1623** Find the filter with matching MAC address
1624*/
1625static struct ixlv_mac_filter *
1626ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
1627{
1628	struct ixlv_mac_filter	*f;
1629	bool				match = FALSE;
1630
1631	SLIST_FOREACH(f, sc->mac_filters, next) {
1632		if (cmp_etheraddr(f->macaddr, macaddr)) {
1633			match = TRUE;
1634			break;
1635		}
1636	}
1637
1638	if (!match)
1639		f = NULL;
1640	return (f);
1641}
1642
1643/*
1644** Admin Queue interrupt handler
1645*/
1646static void
1647ixlv_msix_adminq(void *arg)
1648{
1649	struct ixlv_sc	*sc = arg;
1650	struct i40e_hw		*hw = &sc->hw;
1651	u32			reg, mask;
1652
1653        reg = rd32(hw, I40E_VFINT_ICR01);
1654        mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
1655
1656        reg = rd32(hw, I40E_VFINT_DYN_CTL01);
1657        reg |= I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
1658        wr32(hw, I40E_VFINT_DYN_CTL01, reg);
1659
1660        /* re-enable interrupt causes */
1661        wr32(hw, I40E_VFINT_ICR0_ENA1, mask);
1662        wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK);
1663
1664	/* schedule task */
1665	taskqueue_enqueue(sc->tq, &sc->aq_irq);
1666	return;
1667}
1668
1669void
1670ixlv_enable_intr(struct ixl_vsi *vsi)
1671{
1672	struct i40e_hw		*hw = vsi->hw;
1673	struct ixl_queue	*que = vsi->queues;
1674
1675	ixlv_enable_adminq_irq(hw);
1676	for (int i = 0; i < vsi->num_queues; i++, que++)
1677		ixlv_enable_queue_irq(hw, que->me);
1678}
1679
1680void
1681ixlv_disable_intr(struct ixl_vsi *vsi)
1682{
1683        struct i40e_hw          *hw = vsi->hw;
1684        struct ixl_queue       *que = vsi->queues;
1685
1686	ixlv_disable_adminq_irq(hw);
1687	for (int i = 0; i < vsi->num_queues; i++, que++)
1688		ixlv_disable_queue_irq(hw, que->me);
1689}
1690
1691
1692static void
1693ixlv_disable_adminq_irq(struct i40e_hw *hw)
1694{
1695	wr32(hw, I40E_VFINT_DYN_CTL01, 0);
1696	wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
1697	/* flush */
1698	rd32(hw, I40E_VFGEN_RSTAT);
1699	return;
1700}
1701
1702static void
1703ixlv_enable_adminq_irq(struct i40e_hw *hw)
1704{
1705	wr32(hw, I40E_VFINT_DYN_CTL01,
1706	    I40E_VFINT_DYN_CTL01_INTENA_MASK |
1707	    I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
1708	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK);
1709	/* flush */
1710	rd32(hw, I40E_VFGEN_RSTAT);
1711	return;
1712}
1713
1714static void
1715ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
1716{
1717	u32		reg;
1718
1719	reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1720	    I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
1721	wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
1722}
1723
1724static void
1725ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
1726{
1727	wr32(hw, I40E_VFINT_DYN_CTLN1(id), 0);
1728	rd32(hw, I40E_VFGEN_RSTAT);
1729	return;
1730}
1731
1732
1733/*
1734** Provide a update to the queue RX
1735** interrupt moderation value.
1736*/
1737static void
1738ixlv_set_queue_rx_itr(struct ixl_queue *que)
1739{
1740	struct ixl_vsi	*vsi = que->vsi;
1741	struct i40e_hw	*hw = vsi->hw;
1742	struct rx_ring	*rxr = &que->rxr;
1743	u16		rx_itr;
1744	u16		rx_latency = 0;
1745	int		rx_bytes;
1746
1747
1748	/* Idle, do nothing */
1749	if (rxr->bytes == 0)
1750		return;
1751
1752	if (ixlv_dynamic_rx_itr) {
1753		rx_bytes = rxr->bytes/rxr->itr;
1754		rx_itr = rxr->itr;
1755
1756		/* Adjust latency range */
1757		switch (rxr->latency) {
1758		case IXL_LOW_LATENCY:
1759			if (rx_bytes > 10) {
1760				rx_latency = IXL_AVE_LATENCY;
1761				rx_itr = IXL_ITR_20K;
1762			}
1763			break;
1764		case IXL_AVE_LATENCY:
1765			if (rx_bytes > 20) {
1766				rx_latency = IXL_BULK_LATENCY;
1767				rx_itr = IXL_ITR_8K;
1768			} else if (rx_bytes <= 10) {
1769				rx_latency = IXL_LOW_LATENCY;
1770				rx_itr = IXL_ITR_100K;
1771			}
1772			break;
1773		case IXL_BULK_LATENCY:
1774			if (rx_bytes <= 20) {
1775				rx_latency = IXL_AVE_LATENCY;
1776				rx_itr = IXL_ITR_20K;
1777			}
1778			break;
1779       		 }
1780
1781		rxr->latency = rx_latency;
1782
1783		if (rx_itr != rxr->itr) {
1784			/* do an exponential smoothing */
1785			rx_itr = (10 * rx_itr * rxr->itr) /
1786			    ((9 * rx_itr) + rxr->itr);
1787			rxr->itr = rx_itr & IXL_MAX_ITR;
1788			wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
1789			    que->me), rxr->itr);
1790		}
1791	} else { /* We may have have toggled to non-dynamic */
1792		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
1793			vsi->rx_itr_setting = ixlv_rx_itr;
1794		/* Update the hardware if needed */
1795		if (rxr->itr != vsi->rx_itr_setting) {
1796			rxr->itr = vsi->rx_itr_setting;
1797			wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
1798			    que->me), rxr->itr);
1799		}
1800	}
1801	rxr->bytes = 0;
1802	rxr->packets = 0;
1803	return;
1804}
1805
1806
1807/*
1808** Provide a update to the queue TX
1809** interrupt moderation value.
1810*/
1811static void
1812ixlv_set_queue_tx_itr(struct ixl_queue *que)
1813{
1814	struct ixl_vsi	*vsi = que->vsi;
1815	struct i40e_hw	*hw = vsi->hw;
1816	struct tx_ring	*txr = &que->txr;
1817	u16		tx_itr;
1818	u16		tx_latency = 0;
1819	int		tx_bytes;
1820
1821
1822	/* Idle, do nothing */
1823	if (txr->bytes == 0)
1824		return;
1825
1826	if (ixlv_dynamic_tx_itr) {
1827		tx_bytes = txr->bytes/txr->itr;
1828		tx_itr = txr->itr;
1829
1830		switch (txr->latency) {
1831		case IXL_LOW_LATENCY:
1832			if (tx_bytes > 10) {
1833				tx_latency = IXL_AVE_LATENCY;
1834				tx_itr = IXL_ITR_20K;
1835			}
1836			break;
1837		case IXL_AVE_LATENCY:
1838			if (tx_bytes > 20) {
1839				tx_latency = IXL_BULK_LATENCY;
1840				tx_itr = IXL_ITR_8K;
1841			} else if (tx_bytes <= 10) {
1842				tx_latency = IXL_LOW_LATENCY;
1843				tx_itr = IXL_ITR_100K;
1844			}
1845			break;
1846		case IXL_BULK_LATENCY:
1847			if (tx_bytes <= 20) {
1848				tx_latency = IXL_AVE_LATENCY;
1849				tx_itr = IXL_ITR_20K;
1850			}
1851			break;
1852		}
1853
1854		txr->latency = tx_latency;
1855
1856		if (tx_itr != txr->itr) {
1857       	         /* do an exponential smoothing */
1858			tx_itr = (10 * tx_itr * txr->itr) /
1859			    ((9 * tx_itr) + txr->itr);
1860			txr->itr = tx_itr & IXL_MAX_ITR;
1861			wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
1862			    que->me), txr->itr);
1863		}
1864
1865	} else { /* We may have have toggled to non-dynamic */
1866		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
1867			vsi->tx_itr_setting = ixlv_tx_itr;
1868		/* Update the hardware if needed */
1869		if (txr->itr != vsi->tx_itr_setting) {
1870			txr->itr = vsi->tx_itr_setting;
1871			wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
1872			    que->me), txr->itr);
1873		}
1874	}
1875	txr->bytes = 0;
1876	txr->packets = 0;
1877	return;
1878}
1879
1880
1881/*
1882**
1883** MSIX Interrupt Handlers and Tasklets
1884**
1885*/
1886static void
1887ixlv_handle_que(void *context, int pending)
1888{
1889	struct ixl_queue *que = context;
1890	struct ixl_vsi *vsi = que->vsi;
1891	struct i40e_hw  *hw = vsi->hw;
1892	struct tx_ring  *txr = &que->txr;
1893	struct ifnet    *ifp = vsi->ifp;
1894	bool		more;
1895
1896	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1897		more = ixl_rxeof(que, IXL_RX_LIMIT);
1898		mtx_lock(&txr->mtx);
1899		ixl_txeof(que);
1900		if (!drbr_empty(ifp, txr->br))
1901			ixl_mq_start_locked(ifp, txr);
1902		mtx_unlock(&txr->mtx);
1903		if (more) {
1904			taskqueue_enqueue(que->tq, &que->task);
1905			return;
1906		}
1907	}
1908
1909	/* Reenable this interrupt - hmmm */
1910	ixlv_enable_queue_irq(hw, que->me);
1911	return;
1912}
1913
1914
1915/*********************************************************************
1916 *
1917 *  MSIX Queue Interrupt Service routine
1918 *
1919 **********************************************************************/
1920static void
1921ixlv_msix_que(void *arg)
1922{
1923	struct ixl_queue	*que = arg;
1924	struct ixl_vsi	*vsi = que->vsi;
1925	struct i40e_hw	*hw = vsi->hw;
1926	struct tx_ring	*txr = &que->txr;
1927	bool		more_tx, more_rx;
1928
1929	/* Spurious interrupts are ignored */
1930	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
1931		return;
1932
1933	++que->irqs;
1934
1935	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
1936
1937	mtx_lock(&txr->mtx);
1938	more_tx = ixl_txeof(que);
1939	/*
1940	** Make certain that if the stack
1941	** has anything queued the task gets
1942	** scheduled to handle it.
1943	*/
1944	if (!drbr_empty(vsi->ifp, txr->br))
1945		more_tx = 1;
1946	mtx_unlock(&txr->mtx);
1947
1948	ixlv_set_queue_rx_itr(que);
1949	ixlv_set_queue_tx_itr(que);
1950
1951	if (more_tx || more_rx)
1952		taskqueue_enqueue(que->tq, &que->task);
1953	else
1954		ixlv_enable_queue_irq(hw, que->me);
1955
1956	return;
1957}
1958
1959
1960/*********************************************************************
1961 *
1962 *  Media Ioctl callback
1963 *
1964 *  This routine is called whenever the user queries the status of
1965 *  the interface using ifconfig.
1966 *
1967 **********************************************************************/
1968static void
1969ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1970{
1971	struct ixl_vsi		*vsi = ifp->if_softc;
1972	struct ixlv_sc	*sc = vsi->back;
1973
1974	INIT_DBG_IF(ifp, "begin");
1975
1976	mtx_lock(&sc->mtx);
1977
1978	ixlv_update_link_status(sc);
1979
1980	ifmr->ifm_status = IFM_AVALID;
1981	ifmr->ifm_active = IFM_ETHER;
1982
1983	if (!vsi->link_up) {
1984		mtx_unlock(&sc->mtx);
1985		INIT_DBG_IF(ifp, "end: link not up");
1986		return;
1987	}
1988
1989	ifmr->ifm_status |= IFM_ACTIVE;
1990	/* Hardware is always full-duplex */
1991	ifmr->ifm_active |= IFM_FDX;
1992	mtx_unlock(&sc->mtx);
1993	INIT_DBG_IF(ifp, "end");
1994	return;
1995}
1996
1997/*********************************************************************
1998 *
1999 *  Media Ioctl callback
2000 *
2001 *  This routine is called when the user changes speed/duplex using
2002 *  media/mediopt option with ifconfig.
2003 *
2004 **********************************************************************/
2005static int
2006ixlv_media_change(struct ifnet * ifp)
2007{
2008	struct ixl_vsi *vsi = ifp->if_softc;
2009	struct ifmedia *ifm = &vsi->media;
2010
2011	INIT_DBG_IF(ifp, "begin");
2012
2013	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2014		return (EINVAL);
2015
2016	INIT_DBG_IF(ifp, "end");
2017	return (0);
2018}
2019
2020
2021/*********************************************************************
2022 *  Multicast Initialization
2023 *
2024 *  This routine is called by init to reset a fresh state.
2025 *
2026 **********************************************************************/
2027
2028static void
2029ixlv_init_multi(struct ixl_vsi *vsi)
2030{
2031	struct ixlv_mac_filter *f;
2032	struct ixlv_sc	*sc = vsi->back;
2033	int			mcnt = 0;
2034
2035	IOCTL_DBG_IF(vsi->ifp, "begin");
2036
2037	/* First clear any multicast filters */
2038	SLIST_FOREACH(f, sc->mac_filters, next) {
2039		if ((f->flags & IXL_FILTER_USED)
2040		    && (f->flags & IXL_FILTER_MC)) {
2041			f->flags |= IXL_FILTER_DEL;
2042			mcnt++;
2043		}
2044	}
2045	if (mcnt > 0)
2046		sc->aq_required |= IXLV_FLAG_AQ_DEL_MAC_FILTER;
2047
2048	IOCTL_DBG_IF(vsi->ifp, "end");
2049}
2050
2051static void
2052ixlv_add_multi(struct ixl_vsi *vsi)
2053{
2054	struct ifmultiaddr	*ifma;
2055	struct ifnet		*ifp = vsi->ifp;
2056	struct ixlv_sc	*sc = vsi->back;
2057	int			mcnt = 0;
2058
2059	IOCTL_DBG_IF(ifp, "begin");
2060
2061	if_maddr_rlock(ifp);
2062	/*
2063	** Get a count, to decide if we
2064	** simply use multicast promiscuous.
2065	*/
2066	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2067		if (ifma->ifma_addr->sa_family != AF_LINK)
2068			continue;
2069		mcnt++;
2070	}
2071	if_maddr_runlock(ifp);
2072
2073	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
2074		/* delete all multicast filters */
2075		ixlv_init_multi(vsi);
2076		sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
2077		sc->aq_required |= IXLV_FLAG_AQ_CONFIGURE_PROMISC;
2078		IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
2079		return;
2080	}
2081
2082	mcnt = 0;
2083	if_maddr_rlock(ifp);
2084	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2085		if (ifma->ifma_addr->sa_family != AF_LINK)
2086			continue;
2087		if (!ixlv_add_mac_filter(sc,
2088		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
2089		    IXL_FILTER_MC))
2090			mcnt++;
2091	}
2092	if_maddr_runlock(ifp);
2093	/*
2094	** Notify AQ task that sw filters need to be
2095	** added to hw list
2096	*/
2097	if (mcnt > 0)
2098		sc->aq_required |= IXLV_FLAG_AQ_ADD_MAC_FILTER;
2099
2100	IOCTL_DBG_IF(ifp, "end");
2101}
2102
2103static void
2104ixlv_del_multi(struct ixl_vsi *vsi)
2105{
2106	struct ixlv_mac_filter *f;
2107	struct ifmultiaddr	*ifma;
2108	struct ifnet		*ifp = vsi->ifp;
2109	struct ixlv_sc	*sc = vsi->back;
2110	int			mcnt = 0;
2111	bool		match = FALSE;
2112
2113	IOCTL_DBG_IF(ifp, "begin");
2114
2115	/* Search for removed multicast addresses */
2116	if_maddr_rlock(ifp);
2117	SLIST_FOREACH(f, sc->mac_filters, next) {
2118		if ((f->flags & IXL_FILTER_USED)
2119		    && (f->flags & IXL_FILTER_MC)) {
2120			/* check if mac address in filter is in sc's list */
2121			match = FALSE;
2122			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2123				if (ifma->ifma_addr->sa_family != AF_LINK)
2124					continue;
2125				u8 *mc_addr =
2126				    (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2127				if (cmp_etheraddr(f->macaddr, mc_addr)) {
2128					match = TRUE;
2129					break;
2130				}
2131			}
2132			/* if this filter is not in the sc's list, remove it */
2133			if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
2134				f->flags |= IXL_FILTER_DEL;
2135				mcnt++;
2136				IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
2137				    MAC_FORMAT_ARGS(f->macaddr));
2138			}
2139			else if (match == FALSE)
2140				IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
2141				    MAC_FORMAT_ARGS(f->macaddr));
2142		}
2143	}
2144	if_maddr_runlock(ifp);
2145
2146	if (mcnt > 0)
2147		sc->aq_required |= IXLV_FLAG_AQ_DEL_MAC_FILTER;
2148
2149	IOCTL_DBG_IF(ifp, "end");
2150}
2151
2152/*********************************************************************
2153 *  Timer routine
2154 *
2155 *  This routine checks for link status,updates statistics,
2156 *  and runs the watchdog check.
2157 *
2158 **********************************************************************/
2159
2160static void
2161ixlv_local_timer(void *arg)
2162{
2163	struct ixlv_sc	*sc = arg;
2164	struct i40e_hw		*hw = &sc->hw;
2165	struct ixl_vsi		*vsi = &sc->vsi;
2166	struct ixl_queue	*que = vsi->queues;
2167	device_t		dev = sc->dev;
2168	int			hung = 0;
2169	u32			mask, val, oldval;
2170
2171	mtx_assert(&sc->mtx, MA_OWNED);
2172
2173	/* If Reset is in progress just bail */
2174	if (sc->init_state == IXLV_RESET_PENDING)
2175		return;
2176
2177	/* Check for when PF triggers a VF reset */
2178	val = rd32(hw, I40E_VFGEN_RSTAT) &
2179	    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
2180
2181	if (val != I40E_VFR_VFACTIVE
2182	    && val != I40E_VFR_COMPLETED) {
2183#ifdef IXL_DEBUG
2184		device_printf(dev, "%s: reset in progress! (%d)\n",
2185		    __func__, val);
2186#endif
2187		return;
2188	}
2189
2190	/* check for Admin queue errors */
2191	val = rd32(hw, hw->aq.arq.len);
2192	oldval = val;
2193	if (val & I40E_VF_ARQLEN_ARQVFE_MASK) {
2194		device_printf(dev, "ARQ VF Error detected\n");
2195		val &= ~I40E_VF_ARQLEN_ARQVFE_MASK;
2196	}
2197	if (val & I40E_VF_ARQLEN_ARQOVFL_MASK) {
2198		device_printf(dev, "ARQ Overflow Error detected\n");
2199		val &= ~I40E_VF_ARQLEN_ARQOVFL_MASK;
2200	}
2201	if (val & I40E_VF_ARQLEN_ARQCRIT_MASK) {
2202		device_printf(dev, "ARQ Critical Error detected\n");
2203		val &= ~I40E_VF_ARQLEN_ARQCRIT_MASK;
2204	}
2205	if (oldval != val)
2206		wr32(hw, hw->aq.arq.len, val);
2207
2208	val = rd32(hw, hw->aq.asq.len);
2209	oldval = val;
2210	if (val & I40E_VF_ATQLEN_ATQVFE_MASK) {
2211		device_printf(dev, "ASQ VF Error detected\n");
2212		val &= ~I40E_VF_ATQLEN_ATQVFE_MASK;
2213	}
2214	if (val & I40E_VF_ATQLEN_ATQOVFL_MASK) {
2215		device_printf(dev, "ASQ Overflow Error detected\n");
2216		val &= ~I40E_VF_ATQLEN_ATQOVFL_MASK;
2217	}
2218	if (val & I40E_VF_ATQLEN_ATQCRIT_MASK) {
2219		device_printf(dev, "ASQ Critical Error detected\n");
2220		val &= ~I40E_VF_ATQLEN_ATQCRIT_MASK;
2221	}
2222	if (oldval != val)
2223		wr32(hw, hw->aq.asq.len, val);
2224
2225	/* clean and process any events */
2226	taskqueue_enqueue(sc->tq, &sc->aq_irq);
2227
2228	/*
2229	** Check status on the queues for a hang
2230	*/
2231	mask = (I40E_VFINT_DYN_CTLN_INTENA_MASK |
2232	    I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK);
2233
2234	for (int i = 0; i < vsi->num_queues; i++,que++) {
2235		/* Any queues with outstanding work get a sw irq */
2236		if (que->busy)
2237			wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
2238		/*
2239		** Each time txeof runs without cleaning, but there
2240		** are uncleaned descriptors it increments busy. If
2241		** we get to 5 we declare it hung.
2242		*/
2243		if (que->busy == IXL_QUEUE_HUNG) {
2244			++hung;
2245			/* Mark the queue as inactive */
2246			vsi->active_queues &= ~((u64)1 << que->me);
2247			continue;
2248		} else {
2249			/* Check if we've come back from hung */
2250			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
2251     				vsi->active_queues |= ((u64)1 << que->me);
2252		}
2253		if (que->busy >= IXL_MAX_TX_BUSY) {
2254			device_printf(dev,"Warning queue %d "
2255			    "appears to be hung!\n", i);
2256			que->busy = IXL_QUEUE_HUNG;
2257			++hung;
2258		}
2259	}
2260	/* Only reset when all queues show hung */
2261	if (hung == vsi->num_queues)
2262		goto hung;
2263	callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
2264	return;
2265
2266hung:
2267	device_printf(dev, "Local Timer: TX HANG DETECTED - Resetting!!\n");
2268	sc->init_state = IXLV_RESET_REQUIRED;
2269	ixlv_init_locked(sc);
2270}
2271
2272/*
2273** Note: this routine updates the OS on the link state
2274**	the real check of the hardware only happens with
2275**	a link interrupt.
2276*/
2277static void
2278ixlv_update_link_status(struct ixlv_sc *sc)
2279{
2280	struct ixl_vsi		*vsi = &sc->vsi;
2281	struct ifnet		*ifp = vsi->ifp;
2282	device_t		 dev = sc->dev;
2283
2284	if (vsi->link_up){
2285		if (vsi->link_active == FALSE) {
2286			if (bootverbose)
2287				device_printf(dev,"Link is Up, %d Gbps\n",
2288				    (vsi->link_speed == I40E_LINK_SPEED_40GB) ? 40:10);
2289			vsi->link_active = TRUE;
2290			if_link_state_change(ifp, LINK_STATE_UP);
2291		}
2292	} else { /* Link down */
2293		if (vsi->link_active == TRUE) {
2294			if (bootverbose)
2295				device_printf(dev,"Link is Down\n");
2296			if_link_state_change(ifp, LINK_STATE_DOWN);
2297			vsi->link_active = FALSE;
2298		}
2299	}
2300
2301	return;
2302}
2303
2304/*********************************************************************
2305 *
2306 *  This routine disables all traffic on the adapter by issuing a
2307 *  global reset on the MAC and deallocates TX/RX buffers.
2308 *
2309 **********************************************************************/
2310
2311static void
2312ixlv_stop(struct ixlv_sc *sc)
2313{
2314	mtx_assert(&sc->sc_mtx, MA_OWNED);
2315
2316	INIT_DBG_IF(&sc->vsi->ifp, "begin");
2317
2318	sc->aq_required |= IXLV_FLAG_AQ_DISABLE_QUEUES;
2319	callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO,
2320	    ixlv_sched_aq, sc);
2321
2322	/* Stop the local timer */
2323	callout_stop(&sc->timer);
2324
2325	INIT_DBG_IF(&sc->vsi->ifp, "end");
2326}
2327
2328
2329/*********************************************************************
2330 *
2331 *  Free all station queue structs.
2332 *
2333 **********************************************************************/
2334static void
2335ixlv_free_queues(struct ixl_vsi *vsi)
2336{
2337	struct ixlv_sc	*sc = (struct ixlv_sc *)vsi->back;
2338	struct ixl_queue	*que = vsi->queues;
2339
2340	for (int i = 0; i < vsi->num_queues; i++, que++) {
2341		struct tx_ring *txr = &que->txr;
2342		struct rx_ring *rxr = &que->rxr;
2343
2344		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
2345			continue;
2346		IXL_TX_LOCK(txr);
2347		ixl_free_que_tx(que);
2348		if (txr->base)
2349			i40e_free_dma(&sc->hw, &txr->dma);
2350		IXL_TX_UNLOCK(txr);
2351		IXL_TX_LOCK_DESTROY(txr);
2352
2353		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
2354			continue;
2355		IXL_RX_LOCK(rxr);
2356		ixl_free_que_rx(que);
2357		if (rxr->base)
2358			i40e_free_dma(&sc->hw, &rxr->dma);
2359		IXL_RX_UNLOCK(rxr);
2360		IXL_RX_LOCK_DESTROY(rxr);
2361
2362	}
2363	free(vsi->queues, M_DEVBUF);
2364}
2365
2366
2367/*
2368** ixlv_config_rss - setup RSS
2369*/
2370static void
2371ixlv_config_rss(struct ixlv_sc *sc)
2372{
2373	struct i40e_hw	*hw = &sc->hw;
2374	struct ixl_vsi	*vsi = &sc->vsi;
2375	u32		lut = 0;
2376	u64		set_hena, hena;
2377	int		i, j;
2378
2379	/* set up random bits */
2380	static const u32 seed[I40E_VFQF_HKEY_MAX_INDEX + 1] = {
2381	    0x794221b4, 0xbca0c5ab, 0x6cd5ebd9, 0x1ada6127,
2382	    0x983b3aa1, 0x1c4e71eb, 0x7f6328b2, 0xfcdc0da0,
2383	    0xc135cafa, 0x7a6f7e2d, 0xe7102d28, 0x163cd12e,
2384	    0x4954b126 };
2385
2386	/* Fill out hash function seed */
2387	for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
2388                wr32(hw, I40E_VFQF_HKEY(i), seed[i]);
2389
2390	/* Enable PCTYPES for RSS: */
2391	set_hena =
2392		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
2393		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
2394		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
2395		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
2396		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
2397		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
2398		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
2399		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
2400		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
2401		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
2402		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
2403
2404	hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
2405	    ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
2406	hena |= set_hena;
2407	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
2408	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
2409
2410	/* Populate the LUT with max no. of queues in round robin fashion */
2411	for (i = j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
2412                if (j == vsi->num_queues)
2413                        j = 0;
2414                /* lut = 4-byte sliding window of 4 lut entries */
2415                lut = (lut << 8) | (j &
2416                         ((0x1 << hw->func_caps.rss_table_entry_width) - 1));
2417                /* On i = 3, we have 4 entries in lut; write to the register */
2418                if ((i & 3) == 3)
2419                        wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
2420        }
2421	ixl_flush(hw);
2422}
2423
2424
2425/*
2426** This routine refreshes vlan filters, called by init
2427** it scans the filter table and then updates the AQ
2428*/
2429static void
2430ixlv_setup_vlan_filters(struct ixlv_sc *sc)
2431{
2432	struct ixl_vsi			*vsi = &sc->vsi;
2433	struct ixlv_vlan_filter	*f;
2434	int				cnt = 0;
2435
2436	if (vsi->num_vlans == 0)
2437		return;
2438	/*
2439	** Scan the filter table for vlan entries,
2440	** and if found call for the AQ update.
2441	*/
2442	SLIST_FOREACH(f, sc->vlan_filters, next)
2443                if (f->flags & IXL_FILTER_ADD)
2444			cnt++;
2445	if (cnt == 0)
2446		return;
2447
2448	sc->aq_required |= IXLV_FLAG_AQ_ADD_VLAN_FILTER;
2449	return;
2450}
2451
2452
2453/*
2454** This routine adds new MAC filters to the sc's list;
2455** these are later added in hardware by the periodic
2456** aq task.
2457*/
2458static int
2459ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
2460{
2461	struct ixlv_mac_filter	*f;
2462	device_t			dev = sc->dev;
2463
2464	/* Does one already exist? */
2465	f = ixlv_find_mac_filter(sc, macaddr);
2466	if (f != NULL) {
2467		IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
2468		    MAC_FORMAT_ARGS(macaddr));
2469		return (EEXIST);
2470	}
2471
2472	/* If not, get a new empty filter */
2473	f = ixlv_get_mac_filter(sc);
2474	if (f == NULL) {
2475		device_printf(dev, "%s: no filters available!!\n",
2476		    __func__);
2477		return (ENOMEM);
2478	}
2479
2480	IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
2481	    MAC_FORMAT_ARGS(macaddr));
2482
2483	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
2484	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
2485	f->flags |= flags;
2486	return (0);
2487}
2488
2489/*
2490** Tasklet handler for MSIX Adminq interrupts
2491**  - done outside interrupt context since it might sleep
2492*/
2493static void
2494ixlv_do_adminq(void *context, int pending)
2495{
2496	struct ixlv_sc		*sc = context;
2497	struct i40e_hw			*hw = &sc->hw;
2498	struct i40e_arq_event_info	event;
2499	struct i40e_virtchnl_msg	*v_msg;
2500	i40e_status			ret;
2501	u16				result = 0;
2502
2503
2504	event.buf_len = IXL_AQ_BUF_SZ;
2505        event.msg_buf = malloc(event.buf_len,
2506	    M_DEVBUF, M_NOWAIT | M_ZERO);
2507	if (!event.msg_buf) {
2508		printf("Unable to allocate adminq memory\n");
2509		return;
2510	}
2511	v_msg = (struct i40e_virtchnl_msg *)&event.desc;
2512
2513	mtx_lock(&sc->mtx);
2514	/* clean and process any events */
2515	do {
2516		ret = i40e_clean_arq_element(hw, &event, &result);
2517		if (ret)
2518			break;
2519		ixlv_vc_completion(sc, v_msg->v_opcode,
2520		    v_msg->v_retval, event.msg_buf, event.msg_len);
2521		if (result != 0)
2522			bzero(event.msg_buf, IXL_AQ_BUF_SZ);
2523	} while (result);
2524
2525	ixlv_enable_adminq_irq(hw);
2526	free(event.msg_buf, M_DEVBUF);
2527	mtx_unlock(&sc->mtx);
2528	return;
2529}
2530
2531/*
2532** ixlv_sched_aq - Periodic scheduling tasklet
2533**
2534*/
2535static void
2536ixlv_sched_aq(void *context)
2537{
2538	struct ixlv_sc	*sc = context;
2539	struct ixl_vsi		*vsi = &sc->vsi;
2540
2541	/* This is driven by a callout, don't spin */
2542	if (!mtx_trylock(&sc->mtx))
2543		goto done_nolock;
2544
2545	if (sc->init_state == IXLV_RESET_PENDING)
2546		goto done;
2547
2548	/* Process requested admin queue tasks */
2549	if (sc->aq_pending)
2550		goto done;
2551
2552	if (sc->aq_required & IXLV_FLAG_AQ_MAP_VECTORS) {
2553		ixlv_map_queues(sc);
2554		goto done;
2555	}
2556
2557	if (sc->aq_required & IXLV_FLAG_AQ_ADD_MAC_FILTER) {
2558		ixlv_add_ether_filters(sc);
2559		goto done;
2560	}
2561
2562	if (sc->aq_required & IXLV_FLAG_AQ_ADD_VLAN_FILTER) {
2563		ixlv_add_vlans(sc);
2564		goto done;
2565	}
2566
2567	if (sc->aq_required & IXLV_FLAG_AQ_DEL_MAC_FILTER) {
2568		ixlv_del_ether_filters(sc);
2569		goto done;
2570	}
2571
2572	if (sc->aq_required & IXLV_FLAG_AQ_DEL_VLAN_FILTER) {
2573		ixlv_del_vlans(sc);
2574		goto done;
2575	}
2576
2577	if (sc->aq_required & IXLV_FLAG_AQ_CONFIGURE_QUEUES) {
2578		ixlv_configure_queues(sc);
2579		goto done;
2580	}
2581
2582	if (sc->aq_required & IXLV_FLAG_AQ_DISABLE_QUEUES) {
2583		ixlv_disable_queues(sc);
2584		goto done;
2585	}
2586
2587	if (sc->aq_required & IXLV_FLAG_AQ_ENABLE_QUEUES) {
2588		ixlv_enable_queues(sc);
2589		goto done;
2590	}
2591
2592	/* Do stats request only if no other AQ operations requested */
2593	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
2594		ixlv_request_stats(sc);
2595
2596done:
2597	mtx_unlock(&sc->mtx);
2598done_nolock:
2599	if (sc->aq_required) /* Reschedule */
2600		callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO,
2601		    ixlv_sched_aq, sc);
2602	else
2603		callout_reset(&sc->aq_task, 2 * hz, ixlv_sched_aq, sc);
2604}
2605
2606static void
2607ixlv_add_stats_sysctls(struct ixlv_sc *sc)
2608{
2609	device_t dev = sc->dev;
2610	struct ixl_vsi *vsi = &sc->vsi;
2611	struct i40e_eth_stats *es = &vsi->eth_stats;
2612
2613	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2614	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2615	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2616
2617	struct sysctl_oid *vsi_node, *queue_node;
2618	struct sysctl_oid_list *vsi_list, *queue_list;
2619
2620#define QUEUE_NAME_LEN 32
2621	char queue_namebuf[QUEUE_NAME_LEN];
2622
2623	struct ixl_queue *queues = vsi->queues;
2624	struct tx_ring *txr;
2625	struct rx_ring *rxr;
2626
2627	/* Driver statistics */
2628	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2629			CTLFLAG_RD, &sc->watchdog_events,
2630			"Watchdog timeouts");
2631	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
2632			CTLFLAG_RD, &sc->admin_irq,
2633			"Admin Queue IRQ Handled");
2634
2635	/* VSI statistics */
2636	vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
2637				   CTLFLAG_RD, NULL, "VSI-specific statistics");
2638	vsi_list = SYSCTL_CHILDREN(vsi_node);
2639
2640	struct ixl_sysctl_info ctls[] =
2641	{
2642		{&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
2643		{&es->rx_unicast, "ucast_pkts_rcvd",
2644			"Unicast Packets Received"},
2645		{&es->rx_multicast, "mcast_pkts_rcvd",
2646			"Multicast Packets Received"},
2647		{&es->rx_broadcast, "bcast_pkts_rcvd",
2648			"Broadcast Packets Received"},
2649		{&es->rx_discards, "rx_discards", "Discarded RX packets"},
2650		{&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
2651		{&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
2652		{&es->tx_multicast, "mcast_pkts_txd",
2653			"Multicast Packets Transmitted"},
2654		{&es->tx_broadcast, "bcast_pkts_txd",
2655			"Broadcast Packets Transmitted"},
2656		{&es->tx_discards, "tx_discards", "Discarded TX packets"},
2657		// end
2658		{0,0,0}
2659	};
2660	struct ixl_sysctl_info *entry = ctls;
2661	while (entry->stat != 0)
2662	{
2663		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
2664				CTLFLAG_RD, entry->stat,
2665				entry->description);
2666		entry++;
2667	}
2668
2669	/* Queue statistics */
2670	for (int q = 0; q < vsi->num_queues; q++) {
2671		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
2672		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
2673					     CTLFLAG_RD, NULL, "Queue Name");
2674		queue_list = SYSCTL_CHILDREN(queue_node);
2675
2676		txr = &(queues[q].txr);
2677		rxr = &(queues[q].rxr);
2678
2679		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
2680				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
2681				"m_defrag() failed");
2682		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
2683				CTLFLAG_RD, &(queues[q].dropped_pkts),
2684				"Driver dropped packets");
2685		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2686				CTLFLAG_RD, &(queues[q].irqs),
2687				"irqs on this queue");
2688		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
2689				CTLFLAG_RD, &(queues[q].tso),
2690				"TSO");
2691		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
2692				CTLFLAG_RD, &(queues[q].tx_dma_setup),
2693				"Driver tx dma failure in xmit");
2694		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
2695				CTLFLAG_RD, &(txr->no_desc),
2696				"Queue No Descriptor Available");
2697		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2698				CTLFLAG_RD, &(txr->total_packets),
2699				"Queue Packets Transmitted");
2700		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
2701				CTLFLAG_RD, &(txr->tx_bytes),
2702				"Queue Bytes Transmitted");
2703		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2704				CTLFLAG_RD, &(rxr->rx_packets),
2705				"Queue Packets Received");
2706		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2707				CTLFLAG_RD, &(rxr->rx_bytes),
2708				"Queue Bytes Received");
2709	}
2710}
2711
2712static void
2713ixlv_init_filters(struct ixlv_sc *sc)
2714{
2715	sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter),
2716	    M_DEVBUF, M_NOWAIT | M_ZERO);
2717	SLIST_INIT(sc->mac_filters);
2718	sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter),
2719	    M_DEVBUF, M_NOWAIT | M_ZERO);
2720	SLIST_INIT(sc->vlan_filters);
2721	return;
2722}
2723
2724static void
2725ixlv_free_filters(struct ixlv_sc *sc)
2726{
2727	struct ixlv_mac_filter *f;
2728	struct ixlv_vlan_filter *v;
2729
2730	while (!SLIST_EMPTY(sc->mac_filters)) {
2731		f = SLIST_FIRST(sc->mac_filters);
2732		SLIST_REMOVE_HEAD(sc->mac_filters, next);
2733		free(f, M_DEVBUF);
2734	}
2735	while (!SLIST_EMPTY(sc->vlan_filters)) {
2736		v = SLIST_FIRST(sc->vlan_filters);
2737		SLIST_REMOVE_HEAD(sc->vlan_filters, next);
2738		free(v, M_DEVBUF);
2739	}
2740	return;
2741}
2742
2743