if_ixv.c revision 283620
1/******************************************************************************
2
3  Copyright (c) 2001-2015, Intel Corporation
4  All rights reserved.
5
6  Redistribution and use in source and binary forms, with or without
7  modification, are permitted provided that the following conditions are met:
8
9   1. Redistributions of source code must retain the above copyright notice,
10      this list of conditions and the following disclaimer.
11
12   2. Redistributions in binary form must reproduce the above copyright
13      notice, this list of conditions and the following disclaimer in the
14      documentation and/or other materials provided with the distribution.
15
16   3. Neither the name of the Intel Corporation nor the names of its
17      contributors may be used to endorse or promote products derived from
18      this software without specific prior written permission.
19
20  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD: stable/10/sys/dev/ixgbe/if_ixv.c 283620 2015-05-27 17:44:11Z erj $*/
34
35
36#ifndef IXGBE_STANDALONE_BUILD
37#include "opt_inet.h"
38#include "opt_inet6.h"
39#endif
40
41#include "ixgbe.h"
42
/*********************************************************************
 *  Driver version
 *
 *  Appended to the adapter description string built in ixv_probe().
 *********************************************************************/
char ixv_driver_version[] = "1.2.5";
47
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixv_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixgbe_vendor_info_t ixv_vendor_info_array[] =
{
	/* Virtual Function IDs of the supported physical controllers;
	 * subvendor/subdevice of 0 matches any subsystem. */
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
67
/*********************************************************************
 *  Table of branding strings
 *
 *  Indexed by the last field of ixv_vendor_info_array entries.
 *********************************************************************/

static char    *ixv_strings[] = {
	"Intel(R) PRO/10GbE Virtual Function Network Driver"
};
75
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/* FreeBSD device interface entry points (newbus) */
static int      ixv_probe(device_t);
static int      ixv_attach(device_t);
static int      ixv_detach(device_t);
static int      ixv_shutdown(device_t);
/* ifnet entry points and attach-time helpers */
static int      ixv_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixv_init(void *);
static void	ixv_init_locked(struct adapter *);
static void     ixv_stop(void *);
static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
static int      ixv_media_change(struct ifnet *);
static void     ixv_identify_hardware(struct adapter *);
static int      ixv_allocate_pci_resources(struct adapter *);
static int      ixv_allocate_msix(struct adapter *);
static int	ixv_setup_msix(struct adapter *);
static void	ixv_free_pci_resources(struct adapter *);
static void     ixv_local_timer(void *);
static void     ixv_setup_interface(device_t, struct adapter *);
static void     ixv_config_link(struct adapter *);

/* Per-ring hardware initialization */
static void     ixv_initialize_transmit_units(struct adapter *);
static void     ixv_initialize_receive_units(struct adapter *);

/* Interrupt, multicast and link maintenance */
static void     ixv_enable_intr(struct adapter *);
static void     ixv_disable_intr(struct adapter *);
static void     ixv_set_multi(struct adapter *);
static void     ixv_update_link_status(struct adapter *);
static int	ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static void	ixv_set_ivar(struct adapter *, u8, u8, s8);
static void	ixv_configure_ivars(struct adapter *);
static u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

/* VLAN hardware filter maintenance */
static void	ixv_setup_vlan_support(struct adapter *);
static void	ixv_register_vlan(void *, struct ifnet *, u16);
static void	ixv_unregister_vlan(void *, struct ifnet *, u16);

/* Statistics handling */
static void	ixv_save_stats(struct adapter *);
static void	ixv_init_stats(struct adapter *);
static void	ixv_update_stats(struct adapter *);
static void	ixv_add_stats_sysctls(struct adapter *);

/* The MSI/X Interrupt handlers */
static void	ixv_msix_que(void *);
static void	ixv_msix_mbx(void *);

/* Deferred interrupt tasklets */
static void	ixv_handle_que(void *, int);
static void	ixv_handle_mbx(void *, int);
126
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixv_probe),
	DEVMETHOD(device_attach, ixv_attach),
	DEVMETHOD(device_detach, ixv_detach),
	DEVMETHOD(device_shutdown, ixv_shutdown),
	DEVMETHOD_END
};

/* The softc for each instance is a full struct adapter */
static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
/* XXX depend on 'ix' ? */
148/* XXX depend on 'ix' ? */
149
150/*
151** TUNEABLE PARAMETERS:
152*/
153
154/*
155** AIM: Adaptive Interrupt Moderation
156** which means that the interrupt rate
157** is varied over time based on the
158** traffic for that interrupt vector
159*/
160static int ixv_enable_aim = FALSE;
161TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
162
163/* How many packets rxeof tries to clean at a time */
164static int ixv_rx_process_limit = 256;
165TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
166
167/* How many packets txeof tries to clean at a time */
168static int ixv_tx_process_limit = 256;
169TUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
170
171/* Flow control setting, default to full */
172static int ixv_flow_control = ixgbe_fc_full;
173TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
174
175/*
176 * Header split: this causes the hardware to DMA
177 * the header into a seperate mbuf from the payload,
178 * it can be a performance win in some workloads, but
179 * in others it actually hurts, its off by default.
180 */
181static int ixv_header_split = FALSE;
182TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
183
184/*
185** Number of TX descriptors per ring,
186** setting higher than RX as this seems
187** the better performing choice.
188*/
189static int ixv_txd = DEFAULT_TXD;
190TUNABLE_INT("hw.ixv.txd", &ixv_txd);
191
192/* Number of RX descriptors per ring */
193static int ixv_rxd = DEFAULT_RXD;
194TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
195
196/*
197** Shadow VFTA table, this is needed because
198** the real filter table gets cleared during
199** a soft reset and we need to repopulate it.
200*/
201static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
202
203/*********************************************************************
204 *  Device identification routine
205 *
206 *  ixv_probe determines if the driver should be loaded on
207 *  adapter based on PCI vendor/device id of the adapter.
208 *
209 *  return BUS_PROBE_DEFAULT on success, positive on failure
210 *********************************************************************/
211
212static int
213ixv_probe(device_t dev)
214{
215	ixgbe_vendor_info_t *ent;
216
217	u16	pci_vendor_id = 0;
218	u16	pci_device_id = 0;
219	u16	pci_subvendor_id = 0;
220	u16	pci_subdevice_id = 0;
221	char	adapter_name[256];
222
223
224	pci_vendor_id = pci_get_vendor(dev);
225	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
226		return (ENXIO);
227
228	pci_device_id = pci_get_device(dev);
229	pci_subvendor_id = pci_get_subvendor(dev);
230	pci_subdevice_id = pci_get_subdevice(dev);
231
232	ent = ixv_vendor_info_array;
233	while (ent->vendor_id != 0) {
234		if ((pci_vendor_id == ent->vendor_id) &&
235		    (pci_device_id == ent->device_id) &&
236
237		    ((pci_subvendor_id == ent->subvendor_id) ||
238		     (ent->subvendor_id == 0)) &&
239
240		    ((pci_subdevice_id == ent->subdevice_id) ||
241		     (ent->subdevice_id == 0))) {
242			sprintf(adapter_name, "%s, Version - %s",
243				ixv_strings[ent->index],
244				ixv_driver_version);
245			device_set_desc_copy(dev, adapter_name);
246			return (BUS_PROBE_DEFAULT);
247		}
248		ent++;
249	}
250	return (ENXIO);
251}
252
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_attach(device_t dev)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int             error = 0;

	INIT_DEBUGOUT("ixv_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;
	hw = &adapter->hw;

	/* Core Lock Init*/
	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
			OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
			adapter, 0, ixv_sysctl_debug, "I", "Debug Info");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
			OID_AUTO, "enable_aim", CTLFLAG_RW,
			&ixv_enable_aim, 1, "Interrupt Moderation");

	/* Set up the timer callout (runs under the core mutex) */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Determine hardware revision */
	ixv_identify_hardware(adapter);

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Do descriptor calc and sanity checks: ring byte size must be
	 * DBA_ALIGN-aligned and counts within [MIN,MAX]; else defaults. */
	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixv_txd;

	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixv_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/*
	** Initialize the shared code: its
	** at this point the mac type is set.
	*/
	error = ixgbe_init_shared_code(hw);
	if (error) {
		device_printf(dev,"Shared Code Initialization Failure\n");
		error = EIO;
		goto err_late;
	}

	/* Setup the mailbox (PF <-> VF communication channel) */
	ixgbe_init_mbx_params_vf(hw);

	/* NOTE(review): return value of ixgbe_reset_hw() is ignored here;
	 * the subsequent init_hw failure path is relied on instead. */
	ixgbe_reset_hw(hw);

	error = ixgbe_init_hw(hw);
	if (error) {
		device_printf(dev,"Hardware Initialization Failure\n");
		error = EIO;
		goto err_late;
	}

	error = ixv_allocate_msix(adapter);
	if (error)
		goto err_late;

	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		arc4rand(&addr, sizeof(addr), 0);
		addr[0] &= 0xFE;	/* clear multicast bit */
		addr[0] |= 0x02;	/* set locally-administered bit */
		bcopy(addr, hw->mac.addr, sizeof(addr));
	}

	/* Setup OS specific network interface */
	ixv_setup_interface(dev, adapter);

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);
	ixv_add_stats_sysctls(adapter);

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	INIT_DEBUGOUT("ixv_attach: end");
	return (0);

err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
err_out:
	ixv_free_pci_resources(adapter);
	return (error);

}
386
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixv_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;

	INIT_DEBUGOUT("ixv_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

	IXGBE_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Drain and free the per-queue taskqueues (TX and RX tasks) */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->tq) {
			struct tx_ring  *txr = que->txr;
			taskqueue_drain(que->tq, &txr->txq_task);
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Mailbox(link) queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->link_task);
		taskqueue_free(adapter->tq);
	}

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	/* Detach from the stack before tearing down resources */
	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
	ixv_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);

	IXGBE_CORE_LOCK_DESTROY(adapter);
	return (0);
}
448
449/*********************************************************************
450 *
451 *  Shutdown entry point
452 *
453 **********************************************************************/
454static int
455ixv_shutdown(device_t dev)
456{
457	struct adapter *adapter = device_get_softc(dev);
458	IXGBE_CORE_LOCK(adapter);
459	ixv_stop(adapter);
460	IXGBE_CORE_UNLOCK(adapter);
461	return (0);
462}
463
464
/*********************************************************************
 *  Ioctl entry point
 *
 *  ixv_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *) data;
	bool		avoid_reset = FALSE;
#endif
	int             error = 0;

	switch (command) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				ixv_init(adapter);
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	/* NOTE(review): with neither INET nor INET6 defined, the
	 * SIOCSIFADDR case above has no break and falls through to
	 * SIOCSIFMTU — presumably intentional, ether_ioctl handles it. */
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
			error = EINVAL;
		} else {
			IXGBE_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXGBE_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				ixv_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				ixv_stop(adapter);
		adapter->if_flags = ifp->if_flags;
		IXGBE_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		/* Reload the multicast filter with interrupts masked */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixv_disable_intr(adapter);
			ixv_set_multi(adapter);
			ixv_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		/* Re-init so the hardware picks up the new offload set */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixv_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
581
/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  init entry point in network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  Caller must hold the core mutex (asserted below).
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixv_init_locked(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	device_t 	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		mhadd, gpie;

	INIT_DEBUGOUT("ixv_init: begin");
	mtx_assert(&adapter->core_mtx, MA_OWNED);
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
        callout_stop(&adapter->timer);

        /* reprogram the RAR[0] in case user changed it. */
        ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
	     IXGBE_ETH_LENGTH_OF_ADDRESS);
	/* NOTE(review): second set_rar passes literal 1 for the enable
	 * argument where the first used IXGBE_RAH_AV — confirm equivalent. */
        ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev,"Could not setup transmit structures\n");
		ixv_stop(adapter);
		return;
	}

	ixgbe_reset_hw(hw);
	ixv_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixv_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo/headersplit
	*/
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev,"Could not setup receive structures\n");
		ixv_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixv_initialize_receive_units(adapter);

	/* Enable Enhanced MSIX mode */
	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
	gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
        IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		ifp->if_hwassist |= CSUM_SCTP;
#endif
	}

	/* Set MTU size (max frame size only reprogrammed for jumbo) */
	if (ifp->if_mtu > ETHERMTU) {
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(adapter);

	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);

	/* Set up MSI/X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

        /* Set moderation on the Link interrupt */
        IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	ixv_config_link(adapter);

	/* And now turn on interrupts */
	ixv_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}
703
/*
** Stack-facing init entry point: simply wraps the locked
** initializer with the core mutex.
*/
static void
ixv_init(void *arg)
{
	struct adapter *sc = arg;

	IXGBE_CORE_LOCK(sc);
	ixv_init_locked(sc);
	IXGBE_CORE_UNLOCK(sc);
}
714
715
716/*
717**
718** MSIX Interrupt Handlers and Tasklets
719**
720*/
721
722static inline void
723ixv_enable_queue(struct adapter *adapter, u32 vector)
724{
725	struct ixgbe_hw *hw = &adapter->hw;
726	u32	queue = 1 << vector;
727	u32	mask;
728
729	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
730	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
731}
732
733static inline void
734ixv_disable_queue(struct adapter *adapter, u32 vector)
735{
736	struct ixgbe_hw *hw = &adapter->hw;
737	u64	queue = (u64)(1 << vector);
738	u32	mask;
739
740	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
741	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
742}
743
/*
** Force an interrupt on the queues whose bits are set in 'queues'
** by writing the VF interrupt-cause-set register (VTEICS); used by
** the watchdog to kick queues that still have work pending.
*/
static inline void
ixv_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
}
750
751
/*
** Deferred (taskqueue) handler for a queue interrupt: cleans RX,
** cleans/restarts TX, and either reschedules itself while RX work
** remains or re-enables the queue interrupt.
*/
static void
ixv_handle_que(void *context, int pending)
{
	struct ix_queue *que = context;
	struct adapter  *adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct ifnet    *ifp = adapter->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#if __FreeBSD_version >= 800000
		if (!drbr_empty(ifp, txr->br))
			ixgbe_mq_start_locked(ifp, txr);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixgbe_start_locked(txr, ifp);
#endif
		IXGBE_TX_UNLOCK(txr);
		/* More RX work: requeue ourselves, leave intr masked */
		if (more) {
			taskqueue_enqueue(que->tq, &que->que_task);
			return;
		}
	}

	/* Reenable this interrupt */
	ixv_enable_queue(adapter, que->msix);
	return;
}
783
784/*********************************************************************
785 *
786 *  MSI Queue Interrupt Service routine
787 *
788 **********************************************************************/
789void
790ixv_msix_que(void *arg)
791{
792	struct ix_queue	*que = arg;
793	struct adapter  *adapter = que->adapter;
794	struct ifnet    *ifp = adapter->ifp;
795	struct tx_ring	*txr = que->txr;
796	struct rx_ring	*rxr = que->rxr;
797	bool		more;
798	u32		newitr = 0;
799
800	ixv_disable_queue(adapter, que->msix);
801	++que->irqs;
802
803	more = ixgbe_rxeof(que);
804
805	IXGBE_TX_LOCK(txr);
806	ixgbe_txeof(txr);
807	/*
808	** Make certain that if the stack
809	** has anything queued the task gets
810	** scheduled to handle it.
811	*/
812#ifdef IXGBE_LEGACY_TX
813	if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
814		ixgbe_start_locked(txr, ifp);
815#else
816	if (!drbr_empty(adapter->ifp, txr->br))
817		ixgbe_mq_start_locked(ifp, txr);
818#endif
819	IXGBE_TX_UNLOCK(txr);
820
821	/* Do AIM now? */
822
823	if (ixv_enable_aim == FALSE)
824		goto no_calc;
825	/*
826	** Do Adaptive Interrupt Moderation:
827        **  - Write out last calculated setting
828	**  - Calculate based on average size over
829	**    the last interval.
830	*/
831        if (que->eitr_setting)
832                IXGBE_WRITE_REG(&adapter->hw,
833                    IXGBE_VTEITR(que->msix),
834		    que->eitr_setting);
835
836        que->eitr_setting = 0;
837
838        /* Idle, do nothing */
839        if ((txr->bytes == 0) && (rxr->bytes == 0))
840                goto no_calc;
841
842	if ((txr->bytes) && (txr->packets))
843               	newitr = txr->bytes/txr->packets;
844	if ((rxr->bytes) && (rxr->packets))
845		newitr = max(newitr,
846		    (rxr->bytes / rxr->packets));
847	newitr += 24; /* account for hardware frame, crc */
848
849	/* set an upper boundary */
850	newitr = min(newitr, 3000);
851
852	/* Be nice to the mid range */
853	if ((newitr > 300) && (newitr < 1200))
854		newitr = (newitr / 3);
855	else
856		newitr = (newitr / 2);
857
858	newitr |= newitr << 16;
859
860        /* save for next interrupt */
861        que->eitr_setting = newitr;
862
863        /* Reset state */
864        txr->bytes = 0;
865        txr->packets = 0;
866        rxr->bytes = 0;
867        rxr->packets = 0;
868
869no_calc:
870	if (more)
871		taskqueue_enqueue(que->tq, &que->que_task);
872	else /* Reenable this interrupt */
873		ixv_enable_queue(adapter, que->msix);
874	return;
875}
876
/*
** MSIX Mailbox/Link Interrupt Service routine: reads and clears the
** interrupt cause, defers link-status work to the link task, then
** re-enables the 'other' (non-queue) interrupt.
*/
static void
ixv_msix_mbx(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		reg;

	++adapter->link_irq;

	/* First get the cause */
	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

	/* Link status change */
	if (reg & IXGBE_EICR_LSC)
		taskqueue_enqueue(adapter->tq, &adapter->link_task);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
	return;
}
898
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("ixv_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixv_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report only AVALID, no ACTIVE bit */
	if (!adapter->link_active) {
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		/* NOTE(review): the 10Gb case sets only IFM_FDX with no
		 * media subtype — confirm this is intentional. */
		case IXGBE_LINK_SPEED_10GB_FULL:
			ifmr->ifm_active |= IFM_FDX;
			break;
	}

	IXGBE_CORE_UNLOCK(adapter);

	return;
}
939
940/*********************************************************************
941 *
942 *  Media Ioctl callback
943 *
944 *  This routine is called when the user changes speed/duplex using
945 *  media/mediopt option with ifconfig.
946 *
947 **********************************************************************/
948static int
949ixv_media_change(struct ifnet * ifp)
950{
951	struct adapter *adapter = ifp->if_softc;
952	struct ifmedia *ifm = &adapter->media;
953
954	INIT_DEBUGOUT("ixv_media_change: begin");
955
956	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
957		return (EINVAL);
958
959        switch (IFM_SUBTYPE(ifm->ifm_media)) {
960        case IFM_AUTO:
961                break;
962        default:
963                device_printf(adapter->dev, "Only auto media type\n");
964		return (EINVAL);
965        }
966
967	return (0);
968}
969
970
971/*********************************************************************
972 *  Multicast Update
973 *
974 *  This routine is called whenever multicast address list is updated.
975 *
976 **********************************************************************/
977#define IXGBE_RAR_ENTRIES 16
978
979static void
980ixv_set_multi(struct adapter *adapter)
981{
982	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
983	u8	*update_ptr;
984	struct	ifmultiaddr *ifma;
985	int	mcnt = 0;
986	struct ifnet   *ifp = adapter->ifp;
987
988	IOCTL_DEBUGOUT("ixv_set_multi: begin");
989
990#if __FreeBSD_version < 800000
991	IF_ADDR_LOCK(ifp);
992#else
993	if_maddr_rlock(ifp);
994#endif
995	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
996		if (ifma->ifma_addr->sa_family != AF_LINK)
997			continue;
998		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
999		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1000		    IXGBE_ETH_LENGTH_OF_ADDRESS);
1001		mcnt++;
1002	}
1003#if __FreeBSD_version < 800000
1004	IF_ADDR_UNLOCK(ifp);
1005#else
1006	if_maddr_runlock(ifp);
1007#endif
1008
1009	update_ptr = mta;
1010
1011	ixgbe_update_mc_addr_list(&adapter->hw,
1012	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
1013
1014	return;
1015}
1016
1017/*
1018 * This is an iterator function now needed by the multicast
1019 * shared code. It simply feeds the shared code routine the
1020 * addresses in the array of ixv_set_multi() one by one.
1021 */
1022static u8 *
1023ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1024{
1025	u8 *addr = *update_ptr;
1026	u8 *newptr;
1027	*vmdq = 0;
1028
1029	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1030	*update_ptr = newptr;
1031	return addr;
1032}
1033
/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status,updates statistics,
 *  and runs the watchdog check. Runs once per second under the
 *  core mutex (callout was initialized with it).
 *
 **********************************************************************/

static void
ixv_local_timer(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue	*que = adapter->queues;
	u64		queues = 0;
	int		hung = 0;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	ixv_update_link_status(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	** Check the TX queues status
	**      - mark hung queues so we don't schedule on them
	**      - watchdog only if all queues show hung
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to the MAX we declare it hung.
		*/
		/* NOTE(review): this tests que->busy while the block
		 * below sets que->txr->busy — confirm both fields are
		 * maintained consistently. */
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
                                adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,"Warning queue %d "
			    "appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}

	}

	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}

	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
	return;

watchdog:
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	ixv_init_locked(adapter);
}
1107
1108/*
1109** Note: this routine updates the OS on the link state
1110**	the real check of the hardware only happens with
1111**	a link interrupt.
1112*/
1113static void
1114ixv_update_link_status(struct adapter *adapter)
1115{
1116	struct ifnet	*ifp = adapter->ifp;
1117	device_t dev = adapter->dev;
1118
1119	if (adapter->link_up){
1120		if (adapter->link_active == FALSE) {
1121			if (bootverbose)
1122				device_printf(dev,"Link is up %d Gbps %s \n",
1123				    ((adapter->link_speed == 128)? 10:1),
1124				    "Full Duplex");
1125			adapter->link_active = TRUE;
1126			if_link_state_change(ifp, LINK_STATE_UP);
1127		}
1128	} else { /* Link down */
1129		if (adapter->link_active == TRUE) {
1130			if (bootverbose)
1131				device_printf(dev,"Link is Down\n");
1132			if_link_state_change(ifp, LINK_STATE_DOWN);
1133			adapter->link_active = FALSE;
1134		}
1135	}
1136
1137	return;
1138}
1139
1140
1141/*********************************************************************
1142 *
1143 *  This routine disables all traffic on the adapter by issuing a
1144 *  global reset on the MAC and deallocates TX/RX buffers.
1145 *
1146 **********************************************************************/
1147
static void
ixv_stop(void *arg)
{
	struct ifnet   *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	ifp = adapter->ifp;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixv_stop: begin\n");
	/* Mask everything off before touching the hardware */
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	ixgbe_reset_hw(hw);
	/* Clear the flag so the shared-code stop below runs in full
	** (presumably it short-circuits when already marked stopped —
	** NOTE(review): confirm against shared code) */
	adapter->hw.adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	/* Stop the local timer / watchdog */
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
}
1174
1175
1176/*********************************************************************
1177 *
1178 *  Determine hardware revision.
1179 *
1180 **********************************************************************/
static void
ixv_identify_hardware(struct adapter *adapter)
{
	device_t        dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	/*
	** Make sure BUSMASTER is set, on a VM under
	** KVM it may not be and will break things.
	*/
	pci_enable_busmaster(dev);

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	/* We need this to determine device-specific things */
	ixgbe_set_mac_type(hw);

	/* Set the right number of segments */
	adapter->num_segs = IXGBE_82599_SCATTER;

	return;
}
1210
1211/*********************************************************************
1212 *
1213 *  Setup MSIX Interrupt resources and handlers
1214 *
1215 **********************************************************************/
1216static int
1217ixv_allocate_msix(struct adapter *adapter)
1218{
1219	device_t	dev = adapter->dev;
1220	struct 		ix_queue *que = adapter->queues;
1221	struct		tx_ring *txr = adapter->tx_rings;
1222	int 		error, rid, vector = 0;
1223
1224	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
1225		rid = vector + 1;
1226		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1227		    RF_SHAREABLE | RF_ACTIVE);
1228		if (que->res == NULL) {
1229			device_printf(dev,"Unable to allocate"
1230		    	    " bus resource: que interrupt [%d]\n", vector);
1231			return (ENXIO);
1232		}
1233		/* Set the handler function */
1234		error = bus_setup_intr(dev, que->res,
1235		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1236		    ixv_msix_que, que, &que->tag);
1237		if (error) {
1238			que->res = NULL;
1239			device_printf(dev, "Failed to register QUE handler");
1240			return (error);
1241		}
1242#if __FreeBSD_version >= 800504
1243		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
1244#endif
1245		que->msix = vector;
1246        	adapter->active_queues |= (u64)(1 << que->msix);
1247		/*
1248		** Bind the msix vector, and thus the
1249		** ring to the corresponding cpu.
1250		*/
1251		if (adapter->num_queues > 1)
1252			bus_bind_intr(dev, que->res, i);
1253		TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
1254		TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
1255		que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
1256		    taskqueue_thread_enqueue, &que->tq);
1257		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1258		    device_get_nameunit(adapter->dev));
1259	}
1260
1261	/* and Mailbox */
1262	rid = vector + 1;
1263	adapter->res = bus_alloc_resource_any(dev,
1264    	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1265	if (!adapter->res) {
1266		device_printf(dev,"Unable to allocate"
1267    	    " bus resource: MBX interrupt [%d]\n", rid);
1268		return (ENXIO);
1269	}
1270	/* Set the mbx handler function */
1271	error = bus_setup_intr(dev, adapter->res,
1272	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1273	    ixv_msix_mbx, adapter, &adapter->tag);
1274	if (error) {
1275		adapter->res = NULL;
1276		device_printf(dev, "Failed to register LINK handler");
1277		return (error);
1278	}
1279#if __FreeBSD_version >= 800504
1280	bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
1281#endif
1282	adapter->vector = vector;
1283	/* Tasklets for Mailbox */
1284	TASK_INIT(&adapter->link_task, 0, ixv_handle_mbx, adapter);
1285	adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
1286	    taskqueue_thread_enqueue, &adapter->tq);
1287	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
1288	    device_get_nameunit(adapter->dev));
1289	/*
1290	** Due to a broken design QEMU will fail to properly
1291	** enable the guest for MSIX unless the vectors in
1292	** the table are all set up, so we must rewrite the
1293	** ENABLE in the MSIX control register again at this
1294	** point to cause it to successfully initialize us.
1295	*/
1296	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1297		int msix_ctrl;
1298		pci_find_cap(dev, PCIY_MSIX, &rid);
1299		rid += PCIR_MSIX_CTRL;
1300		msix_ctrl = pci_read_config(dev, rid, 2);
1301		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1302		pci_write_config(dev, rid, msix_ctrl, 2);
1303	}
1304
1305	return (0);
1306}
1307
1308/*
1309 * Setup MSIX resources, note that the VF
1310 * device MUST use MSIX, there is no fallback.
1311 */
static int
ixv_setup_msix(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int rid, want;


	/* First try MSI/X */
	rid = PCIR_BAR(3);
	adapter->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
       	if (adapter->msix_mem == NULL) {
		device_printf(adapter->dev,
		    "Unable to map MSIX table \n");
		goto out;
	}

	/*
	** Want two vectors: one for a queue,
	** plus an additional for mailbox.
	*/
	want = 2;
	if ((pci_alloc_msix(dev, &want) == 0) && (want == 2)) {
               	device_printf(adapter->dev,
		    "Using MSIX interrupts with %d vectors\n", want);
		/* Success: return the vector count to the caller */
		return (want);
	}
	/* Release in case alloc was insufficient */
	pci_release_msi(dev);
out:
	/* Failure path: give back the table BAR before reporting the error */
       	if (adapter->msix_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, adapter->msix_mem);
		adapter->msix_mem = NULL;
	}
	device_printf(adapter->dev,"MSIX config error\n");
	/* Returns errno ENXIO as a sentinel; caller compares against it */
	return (ENXIO);
}
1350
1351
static int
ixv_allocate_pci_resources(struct adapter *adapter)
{
	int             rid;
	device_t        dev = adapter->dev;

	/* Map the device register BAR */
	rid = PCIR_BAR(0);
	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(adapter->pci_mem)) {
		device_printf(dev,"Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	/* Stash tag/handle so register macros can reach the mapping */
	adapter->osdep.mem_bus_space_tag =
		rman_get_bustag(adapter->pci_mem);
	adapter->osdep.mem_bus_space_handle =
		rman_get_bushandle(adapter->pci_mem);
	adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;

	/* VF currently runs a single queue pair */
	adapter->num_queues = 1;
	adapter->hw.back = &adapter->osdep;

	/*
	** Now setup MSI/X, should
	** return us the number of
	** configured vectors.
	*/
	/* NOTE(review): ixv_setup_msix() returns a vector count on success or
	** ENXIO (6) on failure; a legitimate count of 6 would be misread as
	** failure here — harmless while the driver requests only 2 vectors. */
	adapter->msix = ixv_setup_msix(adapter);
	if (adapter->msix == ENXIO)
		return (ENXIO);
	else
		return (0);
}
1387
static void
ixv_free_pci_resources(struct adapter * adapter)
{
	struct 		ix_queue *que = adapter->queues;
	device_t	dev = adapter->dev;
	int		rid, memrid;

	memrid = PCIR_BAR(MSIX_82598_BAR);

	/*
	** There is a slight possibility of a failure mode
	** in attach that will result in entering this function
	** before interrupt resources have been initialized, and
	** in that case we do not want to execute the loops below
	** We can detect this reliably by the state of the adapter
	** res pointer.
	*/
	if (adapter->res == NULL)
		goto mem;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		rid = que->msix + 1;
		/* Teardown must precede releasing the IRQ resource */
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}


	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSIX */
		rid = adapter->vector + 1;
	else
		(adapter->msix != 0) ? (rid = 1):(rid = 0);

	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
	/* Return MSIX vectors, then the table BAR, then the register BAR */
	if (adapter->msix)
		pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);

	return;
}
1449
1450/*********************************************************************
1451 *
1452 *  Setup networking device structure and register an interface.
1453 *
1454 **********************************************************************/
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet   *ifp;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = 1000000000;
	ifp->if_init = ixv_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixv_ioctl;
	/* Multiqueue transmit on 8.x+, legacy if_start otherwise */
#if __FreeBSD_version >= 800000
	ifp->if_transmit = ixgbe_mq_start;
	ifp->if_qflush = ixgbe_qflush;
#else
	ifp->if_start = ixgbe_start;
#endif
	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;

	ether_ifattach(ifp, adapter->hw.mac.addr);

	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Offload capabilities; all enabled by default */
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_LRO;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
		     ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}
1509
1510static void
1511ixv_config_link(struct adapter *adapter)
1512{
1513	struct ixgbe_hw *hw = &adapter->hw;
1514	u32	autoneg, err = 0;
1515
1516	if (hw->mac.ops.check_link)
1517		err = hw->mac.ops.check_link(hw, &autoneg,
1518		    &adapter->link_up, FALSE);
1519	if (err)
1520		goto out;
1521
1522	if (hw->mac.ops.setup_link)
1523               	err = hw->mac.ops.setup_link(hw,
1524		    autoneg, adapter->link_up);
1525out:
1526	return;
1527}
1528
1529
1530/*********************************************************************
1531 *
1532 *  Enable transmit unit.
1533 *
1534 **********************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;


	/* Program each TX ring's VF registers, then enable it */
	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64	tdba = txr->txdma.dma_paddr;
		u32	txctrl, txdctl;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
	    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
	    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(i);

		/* Set the processing limit */
		txr->process_limit = ixv_tx_process_limit;

		/* Set Ring parameters */
		/* Descriptor ring base (split low/high) and byte length */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		       (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc *
		    sizeof(struct ixgbe_legacy_tx_desc));
		/* Disable relaxed-ordering of descriptor writeback */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
	}

	return;
}
1580
1581
1582/*********************************************************************
1583 *
1584 *  Setup receive registers and features.
1585 *
1586 **********************************************************************/
1587#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1588
static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct	rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet   *ifp = adapter->ifp;
	u32		bufsz, fctrl, rxcsum, hlreg;


	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF;
	fctrl |= IXGBE_FCTRL_PMCF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	/* Choose 4K or 2K RX buffers based on current MTU */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU) {
		hlreg |= IXGBE_HLREG0_JUMBOEN;
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	} else {
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
		    (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		/* One-buffer advanced descriptors with the chosen bufsz */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
		    adapter->num_rx_desc - 1);
		/* Set the processing limit */
		rxr->process_limit = ixv_rx_process_limit;

		/* Set Rx Tail register */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll up to ~10ms for the enable bit to latch */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();
	}

	/* RX checksum offload configuration */
	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
}
1672
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		ctrl, vid, vfta, retry;


	/*
	** We get here thru init_locked, meaning
	** a soft reset, this has already cleared
	** the VFTA and other state, so if there
	** have been no vlan's registered do nothing.
	*/
	if (adapter->num_vlans == 0)
		return;

	/* Enable the queues */
	/* Turn on VLAN stripping (VME) for every RX queue */
	for (int i = 0; i < adapter->num_queues; i++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
	}

	/*
	** A soft reset zero's out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (ixv_shadow_vfta[i] == 0)
			continue;
		vfta = ixv_shadow_vfta[i];
		/*
		** Reconstruct the vlan id's
		** based on the bits set in each
		** of the array ints.
		*/
		for ( int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
			/* Mailbox may be busy; retry up to 5 times */
			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
				if (++retry > 5)
					break;
			}
		}
	}
}
1722
1723/*
1724** This routine is run via an vlan config EVENT,
1725** it enables us to use the HW Filter table since
1726** we can get the vlan id. This just creates the
1727** entry in the soft version of the VFTA, init will
1728** repopulate the real table.
1729*/
1730static void
1731ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1732{
1733	struct adapter	*adapter = ifp->if_softc;
1734	u16		index, bit;
1735
1736	if (ifp->if_softc !=  arg)   /* Not our event */
1737		return;
1738
1739	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1740		return;
1741
1742	IXGBE_CORE_LOCK(adapter);
1743	index = (vtag >> 5) & 0x7F;
1744	bit = vtag & 0x1F;
1745	ixv_shadow_vfta[index] |= (1 << bit);
1746	++adapter->num_vlans;
1747	/* Re-init to load the changes */
1748	ixv_init_locked(adapter);
1749	IXGBE_CORE_UNLOCK(adapter);
1750}
1751
1752/*
1753** This routine is run via an vlan
1754** unconfig EVENT, remove our entry
1755** in the soft vfta.
1756*/
1757static void
1758ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1759{
1760	struct adapter	*adapter = ifp->if_softc;
1761	u16		index, bit;
1762
1763	if (ifp->if_softc !=  arg)
1764		return;
1765
1766	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1767		return;
1768
1769	IXGBE_CORE_LOCK(adapter);
1770	index = (vtag >> 5) & 0x7F;
1771	bit = vtag & 0x1F;
1772	ixv_shadow_vfta[index] &= ~(1 << bit);
1773	--adapter->num_vlans;
1774	/* Re-init to load the changes */
1775	ixv_init_locked(adapter);
1776	IXGBE_CORE_UNLOCK(adapter);
1777}
1778
static void
ixv_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_queue *que = adapter->queues;
	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);


	/* Unmask all causes except the RX/TX queue bits */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	/* Autoclear everything but the OTHER/LSC causes */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

        for (int i = 0; i < adapter->num_queues; i++, que++)
		ixv_enable_queue(adapter, que->msix);

	/* Flush posted register writes */
	IXGBE_WRITE_FLUSH(hw);

	return;
}
1800
1801static void
1802ixv_disable_intr(struct adapter *adapter)
1803{
1804	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1805	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1806	IXGBE_WRITE_FLUSH(&adapter->hw);
1807	return;
1808}
1809
1810/*
1811** Setup the correct IVAR register for a particular MSIX interrupt
1812**  - entry is the register array entry
1813**  - vector is the MSIX vector for this queue
1814**  - type is RX/TX/MISC
1815*/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* Mark the vector allocated in the IVAR entry */
	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {	/* RX/TX IVARS */
		/* Each 32-bit IVAR holds two entries of two 8-bit fields:
		** bit offset = 16*(entry&1) + 8*type (type 0=RX, 1=TX) */
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
	}
}
1837
1838static void
1839ixv_configure_ivars(struct adapter *adapter)
1840{
1841	struct  ix_queue *que = adapter->queues;
1842
1843        for (int i = 0; i < adapter->num_queues; i++, que++) {
1844		/* First the RX queue entry */
1845                ixv_set_ivar(adapter, i, que->msix, 0);
1846		/* ... and the TX */
1847		ixv_set_ivar(adapter, i, que->msix, 1);
1848		/* Set an initial value in EITR */
1849                IXGBE_WRITE_REG(&adapter->hw,
1850                    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
1851	}
1852
1853	/* For the mailbox interrupt */
1854        ixv_set_ivar(adapter, 1, adapter->vector, -1);
1855}
1856
1857
1858/*
1859** Tasklet handler for MSIX MBX interrupts
1860**  - do outside interrupt since it might sleep
1861*/
static void
ixv_handle_mbx(void *context, int pending)
{
	struct adapter  *adapter = context;

	/* Query link via the mailbox (runs in taskqueue context because
	** the mailbox exchange may sleep), then notify the stack */
	ixgbe_check_link(&adapter->hw,
	    &adapter->link_speed, &adapter->link_up, 0);
	ixv_update_link_status(adapter);
}
1871
1872/*
1873** The VF stats registers never have a truely virgin
1874** starting point, so this routine tries to make an
1875** artificial one, marking ground zero on attach as
1876** it were.
1877*/
static void
ixv_save_stats(struct adapter *adapter)
{
	/* Fold current deltas (vs. base) into the saved_reset_* totals so
	** counters survive across a reset; skipped when nothing counted */
	if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
		adapter->stats.vf.saved_reset_vfgprc +=
		    adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
		adapter->stats.vf.saved_reset_vfgptc +=
		    adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
		adapter->stats.vf.saved_reset_vfgorc +=
		    adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
		adapter->stats.vf.saved_reset_vfgotc +=
		    adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
		adapter->stats.vf.saved_reset_vfmprc +=
		    adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
	}
}
1894
static void
ixv_init_stats(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* Snapshot the current HW counter values as the last_* baselines;
	** the 36-bit octet counters are read as LSB + (MSB << 32) */
	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.vf.last_vfgorc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);

	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.vf.last_vfgotc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);

	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	/* The base values mark "ground zero" for delta computation */
	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
}
1918
/*
** Fold a 32-bit HW counter register into a 64-bit software accumulator,
** adding 2^32 when the register has wrapped since the last read.
*/
#define UPDATE_STAT_32(reg, last, count)		\
{							\
	u32 current = IXGBE_READ_REG(hw, reg);		\
	if (current < last)				\
		count += 0x100000000LL;			\
	last = current;					\
	count &= 0xFFFFFFFF00000000LL;			\
	count |= current;				\
}

/*
** Same idea for a 36-bit counter split across LSB/MSB registers;
** the wrap adjustment is 2^36 and the low 36 bits are replaced.
*/
#define UPDATE_STAT_36(lsb, msb, last, count) 		\
{							\
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
	u64 current = ((cur_msb << 32) | cur_lsb);	\
	if (current < last)				\
		count += 0x1000000000LL;		\
	last = current;					\
	count &= 0xFFFFFFF000000000LL;			\
	count |= current;				\
}
1940
1941/*
1942** ixv_update_stats - Update the board statistics counters.
1943*/
/*
** Refresh the five VF counters from hardware, handling register
** wraparound via the UPDATE_STAT_* macros above.
*/
void
ixv_update_stats(struct adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
	    adapter->stats.vf.vfgprc);
        UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
	    adapter->stats.vf.vfgptc);
        UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
        UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
        UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
	    adapter->stats.vf.vfmprc);
}
1960
1961/*
1962 * Add statistic sysctls for the VF.
1963 */
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	/* NOTE(review): only queue 0 is exported below even though the
	** adapter may have more queues — confirm whether per-queue
	** nodes should be added if num_queues grows */
	struct ix_queue *que = &adapter->queues[0];
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	struct sysctl_oid *stat_node, *queue_node;
	struct sysctl_oid_list *stat_list, *queue_list;

	/* Driver Statistics */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
			CTLFLAG_RD, &adapter->dropped_pkts,
			"Driver dropped packets");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
			CTLFLAG_RD, &adapter->mbuf_defrag_failed,
			"m_defrag() failed");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
			CTLFLAG_RD, &adapter->watchdog_events,
			"Watchdog timeouts");

	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
				    CTLFLAG_RD, NULL,
				    "VF Statistics (read from HW registers)");
	stat_list = SYSCTL_CHILDREN(stat_node);

	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
			CTLFLAG_RD, &stats->vfgprc,
			"Good Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
			CTLFLAG_RD, &stats->vfgorc,
			"Good Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
			CTLFLAG_RD, &stats->vfmprc,
			"Multicast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
			CTLFLAG_RD, &stats->vfgptc,
			"Good Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
			CTLFLAG_RD, &stats->vfgotc,
			"Good Octets Transmitted");

	queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "que",
				    CTLFLAG_RD, NULL,
				    "Queue Statistics (collected by SW)");
	queue_list = SYSCTL_CHILDREN(queue_node);

	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
			CTLFLAG_RD, &(que->irqs),
			"IRQs on queue");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_irqs",
			CTLFLAG_RD, &(rxr->rx_irq),
			"RX irqs on queue");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
			CTLFLAG_RD, &(rxr->rx_packets),
			"RX packets");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
			CTLFLAG_RD, &(rxr->rx_bytes),
			"RX bytes");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
			CTLFLAG_RD, &(rxr->rx_discarded),
			"Discarded RX packets");

	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
			CTLFLAG_RD, &(txr->total_packets),
			"TX Packets");
	/* NOTE(review): tx_bytes is exported as UINT while the other
	** counters use UQUAD — verify the width of txr->bytes */
	SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_bytes",
			CTLFLAG_RD, &(txr->bytes), 0,
			"TX Bytes");
	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
			CTLFLAG_RD, &(txr->no_desc_avail),
			"# of times not enough descriptors were available during TX");
}
2043
2044/**********************************************************************
2045 *
2046 *  This routine is called only when em_display_debug_stats is enabled.
2047 *  This routine provides a way to take a look at important statistics
2048 *  maintained by the driver and hardware.
2049 *
2050 **********************************************************************/
2051static void
2052ixv_print_debug_info(struct adapter *adapter)
2053{
2054        device_t dev = adapter->dev;
2055        struct ixgbe_hw         *hw = &adapter->hw;
2056        struct ix_queue         *que = adapter->queues;
2057        struct rx_ring          *rxr;
2058        struct tx_ring          *txr;
2059        struct lro_ctrl         *lro;
2060
2061        device_printf(dev,"Error Byte Count = %u \n",
2062            IXGBE_READ_REG(hw, IXGBE_ERRBC));
2063
2064        for (int i = 0; i < adapter->num_queues; i++, que++) {
2065                txr = que->txr;
2066                rxr = que->rxr;
2067                lro = &rxr->lro;
2068                device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
2069                    que->msix, (long)que->irqs);
2070                device_printf(dev,"RX(%d) Packets Received: %lld\n",
2071                    rxr->me, (long long)rxr->rx_packets);
2072                device_printf(dev,"RX(%d) Bytes Received: %lu\n",
2073                    rxr->me, (long)rxr->rx_bytes);
2074                device_printf(dev,"RX(%d) LRO Queued= %d\n",
2075                    rxr->me, lro->lro_queued);
2076                device_printf(dev,"RX(%d) LRO Flushed= %d\n",
2077                    rxr->me, lro->lro_flushed);
2078                device_printf(dev,"TX(%d) Packets Sent: %lu\n",
2079                    txr->me, (long)txr->total_packets);
2080                device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
2081                    txr->me, (long)txr->no_desc_avail);
2082        }
2083
2084        device_printf(dev,"MBX IRQ Handled: %lu\n",
2085            (long)adapter->link_irq);
2086        return;
2087}
2088
2089static int
2090ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
2091{
2092	int error, result;
2093	struct adapter *adapter;
2094
2095	result = -1;
2096	error = sysctl_handle_int(oidp, &result, 0, req);
2097
2098	if (error || !req->newptr)
2099		return (error);
2100
2101	if (result == 1) {
2102		adapter = (struct adapter *) arg1;
2103		ixv_print_debug_info(adapter);
2104	}
2105	return error;
2106}
2107
2108