/******************************************************************************

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: releng/11.0/sys/dev/ixgbe/if_ixv.c 302384 2016-07-07 03:39:18Z sbruno $*/


#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#endif

#include "ixgbe.h"

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixv_driver_version[] = "1.4.6-k";

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
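
/*
 * Note: a SubVendor or SubDevice ID of 0 in this table acts as a
 * wildcard in ixv_probe() below, so the entries above match any
 * subsystem IDs for the given device ID.
 */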

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixv_probe(device_t);
static int      ixv_attach(device_t);
static int      ixv_detach(device_t);
static int      ixv_shutdown(device_t);
static int      ixv_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixv_init(void *);
static void	ixv_init_locked(struct adapter *);
static void     ixv_stop(void *);
static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
static int      ixv_media_change(struct ifnet *);
static void     ixv_identify_hardware(struct adapter *);
static int      ixv_allocate_pci_resources(struct adapter *);
static int      ixv_allocate_msix(struct adapter *);
static int	ixv_setup_msix(struct adapter *);
static void	ixv_free_pci_resources(struct adapter *);
static void     ixv_local_timer(void *);
static void     ixv_setup_interface(device_t, struct adapter *);
static void     ixv_config_link(struct adapter *);

static void     ixv_initialize_transmit_units(struct adapter *);
static void     ixv_initialize_receive_units(struct adapter *);

static void     ixv_enable_intr(struct adapter *);
static void     ixv_disable_intr(struct adapter *);
static void     ixv_set_multi(struct adapter *);
static void     ixv_update_link_status(struct adapter *);
static int	ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
static void	ixv_configure_ivars(struct adapter *);
static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void	ixv_setup_vlan_support(struct adapter *);
static void	ixv_register_vlan(void *, struct ifnet *, u16);
static void	ixv_unregister_vlan(void *, struct ifnet *, u16);

static void	ixv_save_stats(struct adapter *);
static void	ixv_init_stats(struct adapter *);
static void	ixv_update_stats(struct adapter *);
static void	ixv_add_stats_sysctls(struct adapter *);
static void	ixv_set_sysctl_value(struct adapter *, const char *,
		    const char *, int *, int);

/* The MSI/X Interrupt handlers */
static void	ixv_msix_que(void *);
static void	ixv_msix_mbx(void *);

/* Deferred interrupt tasklets */
static void	ixv_handle_que(void *, int);
static void	ixv_handle_mbx(void *, int);

#ifdef DEV_NETMAP
/*
 * This is defined in <dev/netmap/ixgbe_netmap.h>, which is included by
 * if_ix.c.
 */
extern void ixgbe_netmap_attach(struct adapter *adapter);

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <dev/netmap/netmap_kern.h>
#endif /* DEV_NETMAP */

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixv_probe),
	DEVMETHOD(device_attach, ixv_attach),
	DEVMETHOD(device_detach, ixv_detach),
	DEVMETHOD(device_shutdown, ixv_shutdown),
	DEVMETHOD_END
};

static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ixv, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
/* XXX depend on 'ix' ? */

/*
** TUNEABLE PARAMETERS:
*/

/* Number of Queues - do not exceed MSIX vectors - 1 */
static int ixv_num_queues = 1;
TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);

/*
** AIM: Adaptive Interrupt Moderation.
** When enabled, the interrupt rate is
** varied over time based on the traffic
** seen on each interrupt vector.
*/
static int ixv_enable_aim = FALSE;
TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
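
/*
 * When enabled, each queue vector recomputes its EITR setting in
 * ixv_msix_que() from the average frame size observed over the
 * last interval; see the worked example there.
 */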

/* How many packets rxeof tries to clean at a time */
static int ixv_rx_process_limit = 256;
TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);

/* How many packets txeof tries to clean at a time */
static int ixv_tx_process_limit = 256;
TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload.
 * It can be a performance win in some workloads, but
 * in others it actually hurts; it's off by default.
 */
static int ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);

/*
** Number of TX descriptors per ring,
** set higher than RX as this seems
** the better-performing choice.
*/
static int ixv_txd = DEFAULT_TXD;
TUNABLE_INT("hw.ixv.txd", &ixv_txd);

/* Number of RX descriptors per ring */
static int ixv_rxd = DEFAULT_RXD;
TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);

/*
** Shadow VFTA table: this is needed because
** the real filter table gets cleared during
** a soft reset and we need to repopulate it.
*/
static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
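
/*
 * Each VLAN id maps to one bit in this table, mirroring the math
 * in ixv_register_vlan(): word index = (vtag >> 5) & 0x7F, bit =
 * vtag & 0x1F. For example, VLAN 100 sets bit 4 of
 * ixv_shadow_vfta[3].
 */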

/*********************************************************************
 *  Device identification routine
 *
 *  ixv_probe determines if the driver should be loaded on
 *  the adapter based on the PCI vendor/device id of the adapter.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixv_probe(device_t dev)
{
	ixgbe_vendor_info_t *ent;

	u16	pci_vendor_id = 0;
	u16	pci_device_id = 0;
	u16	pci_subvendor_id = 0;
	u16	pci_subdevice_id = 0;
	char	adapter_name[256];

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixv_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&
		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&
		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(adapter_name, "%s, Version - %s",
				ixv_strings[ent->index],
				ixv_driver_version);
			device_set_desc_copy(dev, adapter_name);
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_attach(device_t dev)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int             error = 0;

	INIT_DEBUGOUT("ixv_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = dev;
	hw = &adapter->hw;

#ifdef DEV_NETMAP
	adapter->init_locked = ixv_init_locked;
	adapter->stop_locked = ixv_stop;
#endif

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
			OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
			adapter, 0, ixv_sysctl_debug, "I", "Debug Info");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
			OID_AUTO, "enable_aim", CTLFLAG_RW,
			&ixv_enable_aim, 1, "Interrupt Moderation");

	/* Set up the timer callout */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Determine hardware revision */
	ixv_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter)) {
		device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* Sysctls for limiting the amount of work done in the taskqueues */
	ixv_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, ixv_rx_process_limit);

	ixv_set_sysctl_value(adapter, "tx_processing_limit",
	    "max number of tx packets to process",
	    &adapter->tx_process_limit, ixv_tx_process_limit);

	/* Do descriptor calc and sanity checks */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		device_printf(dev, "ixgbe_allocate_queues() failed!\n");
		error = ENOMEM;
		goto err_out;
	}

	/*
	** Initialize the shared code; at
	** this point the mac type is set.
	*/
	error = ixgbe_init_shared_code(hw);
	if (error) {
		device_printf(dev, "ixgbe_init_shared_code() failed!\n");
		error = EIO;
		goto err_late;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Reset mbox api to 1.0 */
	error = ixgbe_reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		device_printf(dev, "ixgbe_reset_hw() failure: Reset Failed!\n");
	else if (error)
		device_printf(dev, "ixgbe_reset_hw() failed with error %d\n", error);
	if (error) {
		error = EIO;
		goto err_late;
	}

	/* Negotiate mailbox API version */
	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
	if (error) {
		device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
		error = EIO;
		goto err_late;
	}

	error = ixgbe_init_hw(hw);
	if (error) {
		device_printf(dev, "ixgbe_init_hw() failed!\n");
		error = EIO;
		goto err_late;
	}

	error = ixv_allocate_msix(adapter);
	if (error) {
		device_printf(dev, "ixv_allocate_msix() failed!\n");
		goto err_late;
	}

	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		arc4rand(&addr, sizeof(addr), 0);
		addr[0] &= 0xFE;	/* clear the multicast (I/G) bit */
		addr[0] |= 0x02;	/* set the locally administered bit */
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	/* Setup OS specific network interface */
	ixv_setup_interface(dev, adapter);

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);
	ixv_add_stats_sysctls(adapter);

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

#ifdef DEV_NETMAP
	ixgbe_netmap_attach(adapter);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("ixv_attach: end");
	return (0);

err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
err_out:
	ixv_free_pci_resources(adapter);
	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;

	INIT_DEBUGOUT("ixv_detach: begin");

	/* Make sure VLANs are not using the driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->tq) {
			struct tx_ring  *txr = que->txr;
			taskqueue_drain(que->tq, &txr->txq_task);
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Mailbox (link) queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->link_task);
		taskqueue_free(adapter->tq);
	}

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
#ifdef DEV_NETMAP
	netmap_detach(adapter->ifp);
#endif /* DEV_NETMAP */
	ixv_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);

	IXGBE_CORE_LOCK_DESTROY(adapter);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/
static int
ixv_shutdown(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
	return (0);
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *) data;
	bool		avoid_reset = FALSE;
#endif
	int             error = 0;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixv_init(adapter);
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
			error = EINVAL;
		} else {
			IXGBE_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size =
				ifp->if_mtu + IXGBE_MTU_HDR;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXGBE_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ixv_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixv_stop(adapter);
		adapter->if_flags = ifp->if_flags;
		IXGBE_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  an init entry point in the network interface structure. It is
 *  also used by the driver as a hw/sw initialization routine to
 *  get the hardware and software to a consistent state.
 *
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	int error = 0;

	INIT_DEBUGOUT("ixv_init_locked: begin");
	mtx_assert(&adapter->core_mtx, MA_OWNED);
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case the user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address; the user can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
	     IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev, "Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Reset VF and renegotiate mailbox API version */
	ixgbe_reset_hw(hw);
	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
	if (error)
		device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);

	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo/headersplit
	*/
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	/* Set up MSI/X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	ixv_config_link(adapter);

	/* Start watchdog */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}

static void
ixv_init(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
	return;
}


/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/

static inline void
ixv_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32	queue = 1 << vector;
	u32	mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}

static inline void
ixv_disable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = (u64)(1 << vector);
	u32	mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
}

static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
}
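
/*
 * Note: the three helpers above differ only in the register written:
 * VTEIMS sets bits in the interrupt mask (enabling the vector),
 * VTEIMC clears them (disabling it), and VTEICS sets interrupt
 * cause bits, forcing the corresponding vector to fire so a queue
 * with pending work gets serviced.
 */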

static void
ixv_handle_que(void *context, int pending)
{
	struct ix_queue *que = context;
	struct adapter  *adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct ifnet    *ifp = adapter->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#if __FreeBSD_version >= 800000
		if (!drbr_empty(ifp, txr->br))
			ixgbe_mq_start_locked(ifp, txr);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixgbe_start_locked(txr, ifp);
#endif
		IXGBE_TX_UNLOCK(txr);
		if (more) {
			taskqueue_enqueue(que->tq, &que->que_task);
			return;
		}
	}

	/* Reenable this interrupt */
	ixv_enable_queue(adapter, que->msix);
	return;
}

/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixv_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter  *adapter = que->adapter;
	struct ifnet    *ifp = adapter->ifp;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;

	ixv_disable_queue(adapter, que->msix);
	++que->irqs;

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(adapter->ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */
	if (ixv_enable_aim == FALSE)
		goto no_calc;
	/*
	** Do Adaptive Interrupt Moderation:
	**  - Write out last calculated setting
	**  - Calculate based on average size over
	**    the last interval.
	*/
	if (que->eitr_setting)
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix),
		    que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	if ((txr->bytes) && (txr->packets))
		newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr,
		    (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);
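
	/*
	 * Worked example (hypothetical interval): 60 packets
	 * totalling 90000 bytes gives newitr = 90000/60 + 24 = 1524;
	 * that is under the 3000 cap and above 1200, so it is halved
	 * to 762 before being written back on the next interrupt.
	 */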

	newitr |= newitr << 16;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else /* Reenable this interrupt */
		ixv_enable_queue(adapter, que->msix);
	return;
}

static void
ixv_msix_mbx(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		reg;

	++adapter->link_irq;

	/* First get the cause */
	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

	/* Link status change */
	if (reg & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixv_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("ixv_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixv_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_active) {
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_FDX;
			break;
	}

	IXGBE_CORE_UNLOCK(adapter);

	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixv_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;

	INIT_DEBUGOUT("ixv_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	default:
		device_printf(adapter->dev, "Only auto media type\n");
		return (EINVAL);
	}

	return (0);
}


/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever the multicast address list is updated.
 *
 **********************************************************************/
#define IXGBE_RAR_ENTRIES 16

static void
ixv_set_multi(struct adapter *adapter)
{
	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
	u8	*update_ptr;
	struct	ifmultiaddr *ifma;
	int	mcnt = 0;
	struct ifnet   *ifp = adapter->ifp;

	IOCTL_DEBUGOUT("ixv_set_multi: begin");

#if __FreeBSD_version < 800000
	IF_ADDR_LOCK(ifp);
#else
	if_maddr_rlock(ifp);
#endif
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}
#if __FreeBSD_version < 800000
	IF_ADDR_UNLOCK(ifp);
#else
	if_maddr_runlock(ifp);
#endif

	update_ptr = mta;

	ixgbe_update_mc_addr_list(&adapter->hw,
	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);

	return;
}

/*
 * This is an iterator function needed by the multicast shared code.
 * It feeds the shared code routine the addresses in the mta array
 * of ixv_set_multi() one by one, advancing the caller's pointer by
 * one 6-byte (IXGBE_ETH_LENGTH_OF_ADDRESS) address per call.
 */
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	u8 *addr = *update_ptr;
	u8 *newptr;
	*vmdq = 0;

	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*update_ptr = newptr;
	return addr;
}

/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/

static void
ixv_local_timer(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue	*que = adapter->queues;
	u64		queues = 0;
	int		hung = 0;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	ixv_update_link_status(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	** Check the TX queues status
	**      - mark hung queues so we don't schedule on them
	**      - watchdog only if all queues show hung
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		** Each time txeof runs without cleaning, while there
		** are uncleaned descriptors, it increments busy. If
		** we reach the MAX we declare it hung.
		*/
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
				adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev, "Warning queue %d "
			    "appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}
	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}

	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
	return;

watchdog:
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	ixv_init_locked(adapter);
}

/*
** Note: this routine updates the OS on the link state;
**	the real check of the hardware only happens with
**	a link interrupt.
*/
static void
ixv_update_link_status(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t dev = adapter->dev;

	if (adapter->link_up) {
		if (adapter->link_active == FALSE) {
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s \n",
				    ((adapter->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			adapter->link_active = TRUE;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			adapter->link_active = FALSE;
		}
	}

	return;
}


/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

static void
ixv_stop(void *arg)
{
	struct ifnet   *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	ifp = adapter->ifp;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	ixgbe_reset_hw(hw);
	adapter->hw.adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case the user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
}


/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
static void
ixv_identify_hardware(struct adapter *adapter)
{
	device_t        dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	/*
	** Make sure BUSMASTER is set; on a VM under
	** KVM it may not be, and that will break things.
	*/
	pci_enable_busmaster(dev);

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	/* We need this to determine device-specific things */
	ixgbe_set_mac_type(hw);

	/* Set the right number of segments */
	adapter->num_segs = IXGBE_82599_SCATTER;

	return;
}

/*********************************************************************
 *
 *  Setup MSIX Interrupt resources and handlers
 *
 **********************************************************************/
static int
ixv_allocate_msix(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct 		ix_queue *que = adapter->queues;
	struct		tx_ring *txr = adapter->tx_rings;
	int 		error, rid, vector = 0;

	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
		rid = vector + 1;
		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (que->res == NULL) {
			device_printf(dev, "Unable to allocate"
			    " bus resource: que interrupt [%d]\n", vector);
			return (ENXIO);
		}
		/* Set the handler function */
		error = bus_setup_intr(dev, que->res,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    ixv_msix_que, que, &que->tag);
		if (error) {
			que->res = NULL;
			device_printf(dev, "Failed to register QUE handler");
			return (error);
		}
#if __FreeBSD_version >= 800504
		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
#endif
		que->msix = vector;
		adapter->active_queues |= (u64)(1 << que->msix);
		/*
		** Bind the msix vector, and thus the
		** ring to the corresponding cpu.
		*/
		if (adapter->num_queues > 1)
			bus_bind_intr(dev, que->res, i);
		TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
		TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
		que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
		    taskqueue_thread_enqueue, &que->tq);
		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
		    device_get_nameunit(adapter->dev));
	}

	/* and Mailbox */
	rid = vector + 1;
	adapter->res = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (!adapter->res) {
		device_printf(dev, "Unable to allocate"
		    " bus resource: MBX interrupt [%d]\n", rid);
		return (ENXIO);
	}
	/* Set the mbx handler function */
	error = bus_setup_intr(dev, adapter->res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
	    ixv_msix_mbx, adapter, &adapter->tag);
	if (error) {
		adapter->res = NULL;
		device_printf(dev, "Failed to register LINK handler");
		return (error);
	}
#if __FreeBSD_version >= 800504
	bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
#endif
	adapter->vector = vector;
	/* Tasklets for Mailbox */
	TASK_INIT(&adapter->link_task, 0, ixv_handle_mbx, adapter);
	adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
	    taskqueue_thread_enqueue, &adapter->tq);
	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
	    device_get_nameunit(adapter->dev));
	/*
	** Due to a broken design, QEMU will fail to properly
	** enable the guest for MSIX unless the vectors in
	** the table are all set up, so we must rewrite the
	** ENABLE bit in the MSIX control register again at this
	** point to cause it to successfully initialize us.
	*/
	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
		int msix_ctrl;
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	return (0);
}

/*
 * Setup MSIX resources; note that the VF
 * device MUST use MSIX, there is no fallback.
 */
static int
ixv_setup_msix(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int rid, want, msgs;

	/* Must have at least 2 MSIX vectors */
	msgs = pci_msix_count(dev);
	if (msgs < 2)
		goto out;
	rid = PCIR_BAR(3);
	adapter->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (adapter->msix_mem == NULL) {
		device_printf(adapter->dev,
		    "Unable to map MSIX table \n");
		goto out;
	}

	/*
	** Want vectors for the queues,
	** plus an additional one for the mailbox.
	*/
	want = adapter->num_queues + 1;
	if (want > msgs) {
		want = msgs;
		adapter->num_queues = msgs - 1;
	} else
		msgs = want;
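
	/*
	 * e.g. with the default hw.ixv.num_queues of 1 we request 2
	 * vectors (one queue plus the mailbox); if the device exposes
	 * fewer vectors than wanted, num_queues is trimmed to msgs - 1
	 * so the mailbox always keeps its own vector.
	 */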
	if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
		device_printf(adapter->dev,
		    "Using MSIX interrupts with %d vectors\n", want);
		return (want);
	}
	/* Release in case alloc was insufficient */
	pci_release_msi(dev);
out:
	if (adapter->msix_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, adapter->msix_mem);
		adapter->msix_mem = NULL;
	}
	device_printf(adapter->dev, "MSIX config error\n");
	return (ENXIO);
}


static int
ixv_allocate_pci_resources(struct adapter *adapter)
{
	int             rid;
	device_t        dev = adapter->dev;

	rid = PCIR_BAR(0);
	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(adapter->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	adapter->osdep.mem_bus_space_tag =
		rman_get_bustag(adapter->pci_mem);
	adapter->osdep.mem_bus_space_handle =
		rman_get_bushandle(adapter->pci_mem);
	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;
	adapter->hw.back = adapter;

	/*
	** Now set up MSI/X, which should
	** return the number of configured
	** vectors.
	*/
	adapter->msix = ixv_setup_msix(adapter);
	if (adapter->msix == ENXIO)
		return (ENXIO);
	else
		return (0);
}

static void
ixv_free_pci_resources(struct adapter *adapter)
{
	struct 		ix_queue *que = adapter->queues;
	device_t	dev = adapter->dev;
	int		rid, memrid;

	memrid = PCIR_BAR(MSIX_82598_BAR);

	/*
	** There is a slight possibility of a failure mode
	** in attach that will result in entering this function
	** before interrupt resources have been initialized, and
	** in that case we do not want to execute the loops below.
	** We can detect this reliably by the state of the adapter
	** res pointer.
	*/
	if (adapter->res == NULL)
		goto mem;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}

	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSIX */
		rid = adapter->vector + 1;
	else
		(adapter->msix != 0) ? (rid = 1) : (rid = 0);

	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
	if (adapter->msix)
		pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);

	return;
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet   *ifp;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = 1000000000;
	ifp->if_init = ixv_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixv_ioctl;
#if __FreeBSD_version >= 800000
	ifp->if_transmit = ixgbe_mq_start;
	ifp->if_qflush = ixgbe_qflush;
#else
	ifp->if_start = ixgbe_start;
#endif
	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;

	ether_ifattach(ifp, adapter->hw.mac.addr);

	adapter->max_frame_size =
	    ifp->if_mtu + IXGBE_MTU_HDR_VLAN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_LRO;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
		     ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}

static void
ixv_config_link(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32	autoneg;

	if (hw->mac.ops.check_link)
		hw->mac.ops.check_link(hw, &autoneg,
		    &adapter->link_up, FALSE);
}


/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64	tdba = txr->txdma.dma_paddr;
		u32	txctrl, txdctl;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(i);

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc *
		    sizeof(struct ixgbe_legacy_tx_desc));
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
	}

	return;
}


/*********************************************************************
 *
 *  Setup receive registers and features.
 *
 **********************************************************************/
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct	rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	u32		bufsz, rxcsum, psrtype;

	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
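
	/*
	 * (SRRCTL's BSIZEPKT field is expressed in 1 KB units, with
	 * IXGBE_SRRCTL_BSIZEPKT_SHIFT == 10, so the values above
	 * encode 4 KB and 2 KB receive buffers respectively.)
	 */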

	psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
	    IXGBE_PSRTYPE_L2HDR;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size);

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		for (int j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
		    (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
}

static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		ctrl, vid, vfta, retry;
	struct rx_ring	*rxr;

	/*
	** We get here through init_locked, meaning
	** a soft reset; that has already cleared
	** the VFTA and other state, so if no
	** vlans have been registered, do nothing.
	*/
	if (adapter->num_vlans == 0)
		return;

	/* Enable the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
		/*
		 * Let the Rx path know that it needs to store the
		 * VLAN tag as part of extra mbuf info.
		 */
		rxr = &adapter->rx_rings[i];
		rxr->vtag_strip = TRUE;
	}

	/*
	** A soft reset zeroes out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (ixv_shadow_vfta[i] == 0)
			continue;
		vfta = ixv_shadow_vfta[i];
		/*
		** Reconstruct the vlan ids
		** based on the bits set in each
		** of the array words.
		*/
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
				if (++retry > 5)
					break;
			}
		}
	}
}

/*
** This routine is run via a vlan config EVENT; it
** enables us to use the HW Filter table since
** we can get the vlan id. This just creates the
** entry in the soft version of the VFTA; init will
** repopulate the real table.
*/
static void
ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] |= (1 << bit);
	++adapter->num_vlans;
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
1840
/*
** This routine is run via a VLAN
** unconfig EVENT; remove our entry
** from the soft (shadow) VFTA.
*/
static void
ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] &= ~(1 << bit);
	--adapter->num_vlans;
	/* Re-init to load the changes */
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}

static void
ixv_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

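	/*
	** Only the queue interrupts are made auto-clearing; the
	** mailbox/link bits (OTHER, LSC) are left out of VTEIAC
	** and so must be cleared explicitly.
	*/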
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixv_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);
}

static void
ixv_disable_intr(struct adapter *adapter)
{
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_FLUSH(&adapter->hw);
}

/*
** Set up the correct IVAR register for a particular MSIX interrupt
**  - entry is the register array entry
**  - vector is the MSIX vector for this queue
**  - type is 0 for RX, 1 for TX, -1 for MISC (mailbox)
*/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {	/* RX/TX IVARS */
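		/*
		** Each VTIVAR register carries four 8-bit entries,
		** RX and TX for two queues; index selects the byte
		** lane, e.g. entry 3, type 1 (TX) maps to VTIVAR(1)
		** bits 31:24 (index = 16*1 + 8*1 = 24).
		*/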
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
	}
}

static void
ixv_configure_ivars(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* First the RX queue entry */
		ixv_set_ivar(adapter, i, que->msix, 0);
		/* ... and the TX */
		ixv_set_ivar(adapter, i, que->msix, 1);
		/* Set an initial value in EITR */
		IXGBE_WRITE_REG(&adapter->hw,
		    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
	}

	/* For the mailbox interrupt */
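	/* (ixv_set_ivar() ignores the entry argument when type is -1) */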
	ixv_set_ivar(adapter, 1, adapter->vector, -1);
}

/*
** Tasklet handler for MSIX mailbox interrupts
**  - deferred to a taskqueue since the mailbox
**    operations may sleep
*/
static void
ixv_handle_mbx(void *context, int pending)
{
	struct adapter *adapter = context;

	ixgbe_check_link(&adapter->hw,
	    &adapter->link_speed, &adapter->link_up, 0);
	ixv_update_link_status(adapter);
}

/*
** The VF stats registers never have a truly virgin
** starting point, so this routine tries to make an
** artificial one, marking ground zero on attach, as
** it were.
*/
static void
ixv_save_stats(struct adapter *adapter)
{
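	/*
	** Fold the deltas since the last base into the saved_reset
	** counters; ixv_init_stats() then records fresh base values.
	*/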
	if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
		adapter->stats.vf.saved_reset_vfgprc +=
		    adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
		adapter->stats.vf.saved_reset_vfgptc +=
		    adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
		adapter->stats.vf.saved_reset_vfgorc +=
		    adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
		adapter->stats.vf.saved_reset_vfgotc +=
		    adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
		adapter->stats.vf.saved_reset_vfmprc +=
		    adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
	}
}

static void
ixv_init_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.vf.last_vfgorc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);

	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.vf.last_vfgotc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);

	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
}

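/*
** The VF statistic registers are 32 bits wide and simply wrap, so a
** 64-bit running count is kept in software: a register value below
** the previous read means the counter wrapped, which adds 2^32 to
** the count (e.g. last = 0xFFFFFFF0, current = 0x10), and the low
** 32 bits of the count always mirror the register.
*/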
#define UPDATE_STAT_32(reg, last, count)		\
{							\
	u32 current = IXGBE_READ_REG(hw, reg);		\
	if (current < last)				\
		count += 0x100000000LL;			\
	last = current;					\
	count &= 0xFFFFFFFF00000000LL;			\
	count |= current;				\
}

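/*
** The octet counters are 36 bits wide, split across a pair of
** LSB/MSB registers, so a wrap adds 2^36 (0x1000000000) and only
** the upper 28 bits of the running count survive the masking below.
*/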
#define UPDATE_STAT_36(lsb, msb, last, count)		\
{							\
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
	u64 current = ((cur_msb << 32) | cur_lsb);	\
	if (current < last)				\
		count += 0x1000000000LL;		\
	last = current;					\
	count &= 0xFFFFFFF000000000LL;			\
	count |= current;				\
}

/*
** ixv_update_stats - Update the board statistics counters.
*/
void
ixv_update_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
	    adapter->stats.vf.vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
	    adapter->stats.vf.vfgptc);
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
	    adapter->stats.vf.vfmprc);
}

/*
 * Add statistic sysctls for the VF.
 */
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct ix_queue *que = &adapter->queues[0];
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	struct sysctl_oid *stat_node, *queue_node;
	struct sysctl_oid_list *stat_list, *queue_list;

	/* Driver Statistics */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
			CTLFLAG_RD, &adapter->dropped_pkts,
			"Driver dropped packets");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
			CTLFLAG_RD, &adapter->mbuf_defrag_failed,
			"m_defrag() failed");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
			CTLFLAG_RD, &adapter->watchdog_events,
			"Watchdog timeouts");

	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
				    CTLFLAG_RD, NULL,
				    "VF Statistics (read from HW registers)");
	stat_list = SYSCTL_CHILDREN(stat_node);

	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
			CTLFLAG_RD, &stats->vfgprc,
			"Good Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
			CTLFLAG_RD, &stats->vfgorc,
			"Good Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
			CTLFLAG_RD, &stats->vfmprc,
			"Multicast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
			CTLFLAG_RD, &stats->vfgptc,
			"Good Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
			CTLFLAG_RD, &stats->vfgotc,
			"Good Octets Transmitted");

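	/* NB: only the first queue's ring counters are exported here */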
	queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "que",
				    CTLFLAG_RD, NULL,
				    "Queue Statistics (collected by SW)");
	queue_list = SYSCTL_CHILDREN(queue_node);

	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
			CTLFLAG_RD, &(que->irqs),
			"IRQs on queue");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_irqs",
			CTLFLAG_RD, &(rxr->rx_irq),
			"RX irqs on queue");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
			CTLFLAG_RD, &(rxr->rx_packets),
			"RX packets");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
			CTLFLAG_RD, &(rxr->rx_bytes),
			"RX bytes");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
			CTLFLAG_RD, &(rxr->rx_discarded),
			"Discarded RX packets");

	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
			CTLFLAG_RD, &(txr->total_packets),
			"TX Packets");

	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
			CTLFLAG_RD, &(txr->no_desc_avail),
			"# of times not enough descriptors were available during TX");
}

static void
ixv_set_sysctl_value(struct adapter *adapter, const char *name,
	const char *description, int *limit, int value)
{
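	/*
	** NB: with a non-NULL pointer argument, sysctl_handle_int()
	** services this OID through *limit; the value argument to
	** SYSCTL_ADD_INT is then unused.
	*/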
	*limit = value;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
	    OID_AUTO, name, CTLFLAG_RW, limit, value, description);
}

/**********************************************************************
 *
 *  This routine is called only from the debug sysctl handler
 *  (ixv_sysctl_debug, below).  It provides a way to take a look at
 *  important statistics maintained by the driver and hardware.
 *
 **********************************************************************/
static void
ixv_print_debug_info(struct adapter *adapter)
{
	device_t		dev = adapter->dev;
	struct ixgbe_hw		*hw = &adapter->hw;
	struct ix_queue		*que = adapter->queues;
	struct rx_ring		*rxr;
	struct tx_ring		*txr;
	struct lro_ctrl		*lro;

	device_printf(dev, "Error Byte Count = %u\n",
	    IXGBE_READ_REG(hw, IXGBE_ERRBC));

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		txr = que->txr;
		rxr = que->rxr;
		lro = &rxr->lro;
		device_printf(dev, "QUE(%d) IRQs Handled: %lu\n",
		    que->msix, (unsigned long)que->irqs);
		device_printf(dev, "RX(%d) Packets Received: %llu\n",
		    rxr->me, (unsigned long long)rxr->rx_packets);
		device_printf(dev, "RX(%d) Bytes Received: %lu\n",
		    rxr->me, (unsigned long)rxr->rx_bytes);
		device_printf(dev, "RX(%d) LRO Queued = %llu\n",
		    rxr->me, (unsigned long long)lro->lro_queued);
		device_printf(dev, "RX(%d) LRO Flushed = %llu\n",
		    rxr->me, (unsigned long long)lro->lro_flushed);
		device_printf(dev, "TX(%d) Packets Sent: %lu\n",
		    txr->me, (unsigned long)txr->total_packets);
		device_printf(dev, "TX(%d) NO Desc Avail: %lu\n",
		    txr->me, (unsigned long)txr->no_desc_avail);
	}

	device_printf(dev, "MBX IRQ Handled: %lu\n",
	    (unsigned long)adapter->link_irq);
}

static int
ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
	int error, result;
	struct adapter *adapter;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		adapter = (struct adapter *) arg1;
		ixv_print_debug_info(adapter);
	}
	return (error);
}
