/******************************************************************************

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 292674 2015-12-23 22:45:17Z sbruno $*/
34185029Spjd
35185029Spjd
36185029Spjd#ifndef IXGBE_STANDALONE_BUILD
37185029Spjd#include "opt_inet.h"
38185029Spjd#include "opt_inet6.h"
39185029Spjd#endif
40185029Spjd
41185029Spjd#include "ixgbe.h"
42185029Spjd
43185029Spjd/*********************************************************************
44185029Spjd *  Driver version
45185029Spjd *********************************************************************/
46185029Spjdchar ixv_driver_version[] = "1.4.6-k";
47185029Spjd
48209962Smm/*********************************************************************
49209962Smm *  PCI Device ID Table
50209962Smm *
51209962Smm *  Used by probe to select devices to load on
52185029Spjd *  Last field stores an index into ixv_strings
53185029Spjd *  Last entry must be all 0s
54185029Spjd *
55185029Spjd *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
56185029Spjd *********************************************************************/
57185029Spjd
58185029Spjdstatic ixgbe_vendor_info_t ixv_vendor_info_array[] =
59185029Spjd{
60185029Spjd	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
61185029Spjd	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
62185029Spjd	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, 0, 0, 0},
63185029Spjd	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, 0, 0, 0},
64185029Spjd	/* required last entry */
65185029Spjd	{0, 0, 0, 0, 0}
66185029Spjd};
67185029Spjd
68185029Spjd/*********************************************************************
69185029Spjd *  Table of branding strings
70185029Spjd *********************************************************************/
71185029Spjd
72185029Spjdstatic char    *ixv_strings[] = {
73185029Spjd	"Intel(R) PRO/10GbE Virtual Function Network Driver"
74185029Spjd};
75185029Spjd
76185029Spjd/*********************************************************************
77185029Spjd *  Function prototypes
78185029Spjd *********************************************************************/
79185029Spjdstatic int      ixv_probe(device_t);
80185029Spjdstatic int      ixv_attach(device_t);
81185029Spjdstatic int      ixv_detach(device_t);
82185029Spjdstatic int      ixv_shutdown(device_t);
83185029Spjdstatic int      ixv_ioctl(struct ifnet *, u_long, caddr_t);
84185029Spjdstatic void	ixv_init(void *);
85185029Spjdstatic void	ixv_init_locked(struct adapter *);
86185029Spjdstatic void     ixv_stop(void *);
87185029Spjdstatic void     ixv_media_status(struct ifnet *, struct ifmediareq *);
88185029Spjdstatic int      ixv_media_change(struct ifnet *);
89185029Spjdstatic void     ixv_identify_hardware(struct adapter *);
90185029Spjdstatic int      ixv_allocate_pci_resources(struct adapter *);
91185029Spjdstatic int      ixv_allocate_msix(struct adapter *);
92185029Spjdstatic int	ixv_setup_msix(struct adapter *);
93185029Spjdstatic void	ixv_free_pci_resources(struct adapter *);
94185029Spjdstatic void     ixv_local_timer(void *);
95185029Spjdstatic void     ixv_setup_interface(device_t, struct adapter *);
96185029Spjdstatic void     ixv_config_link(struct adapter *);
97185029Spjd
98185029Spjdstatic void     ixv_initialize_transmit_units(struct adapter *);
99185029Spjdstatic void     ixv_initialize_receive_units(struct adapter *);
100209962Smm
101209962Smmstatic void     ixv_enable_intr(struct adapter *);
102209962Smmstatic void     ixv_disable_intr(struct adapter *);
103209962Smmstatic void     ixv_set_multi(struct adapter *);
104209962Smmstatic void     ixv_update_link_status(struct adapter *);
105209962Smmstatic int	ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
106209962Smmstatic void	ixv_set_ivar(struct adapter *, u8, u8, s8);
107209962Smmstatic void	ixv_configure_ivars(struct adapter *);
108209962Smmstatic u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
109185029Spjd
110185029Spjdstatic void	ixv_setup_vlan_support(struct adapter *);
111185029Spjdstatic void	ixv_register_vlan(void *, struct ifnet *, u16);
112185029Spjdstatic void	ixv_unregister_vlan(void *, struct ifnet *, u16);
113185029Spjd
114185029Spjdstatic void	ixv_save_stats(struct adapter *);
115185029Spjdstatic void	ixv_init_stats(struct adapter *);
116185029Spjdstatic void	ixv_update_stats(struct adapter *);
117185029Spjdstatic void	ixv_add_stats_sysctls(struct adapter *);
118185029Spjdstatic void	ixv_set_sysctl_value(struct adapter *, const char *,
119185029Spjd		    const char *, int *, int);
120209962Smm
121209962Smm/* The MSI/X Interrupt handlers */
122209962Smmstatic void	ixv_msix_que(void *);
123185029Spjdstatic void	ixv_msix_mbx(void *);
124185029Spjd
125185029Spjd/* Deferred interrupt tasklets */
126185029Spjdstatic void	ixv_handle_que(void *, int);
127185029Spjdstatic void	ixv_handle_mbx(void *, int);
128185029Spjd
129185029Spjd#ifdef DEV_NETMAP
130185029Spjd/*
131185029Spjd * This is defined in <dev/netmap/ixgbe_netmap.h>, which is included by
132185029Spjd * if_ix.c.
133185029Spjd */
134209962Smmextern void ixgbe_netmap_attach(struct adapter *adapter);
135209962Smm
136185029Spjd#include <net/netmap.h>
137185029Spjd#include <sys/selinfo.h>
138185029Spjd#include <dev/netmap/netmap_kern.h>
139185029Spjd#endif /* DEV_NETMAP */
140185029Spjd
141185029Spjd/*********************************************************************
142185029Spjd *  FreeBSD Device Interface Entry Points
143185029Spjd *********************************************************************/
144185029Spjd
145185029Spjdstatic device_method_t ixv_methods[] = {
146185029Spjd	/* Device interface */
147185029Spjd	DEVMETHOD(device_probe, ixv_probe),
148185029Spjd	DEVMETHOD(device_attach, ixv_attach),
149185029Spjd	DEVMETHOD(device_detach, ixv_detach),
150185029Spjd	DEVMETHOD(device_shutdown, ixv_shutdown),
151185029Spjd	DEVMETHOD_END
152185029Spjd};
153185029Spjd
154185029Spjdstatic driver_t ixv_driver = {
155185029Spjd	"ixv", ixv_methods, sizeof(struct adapter),
156185029Spjd};
157185029Spjd
158185029Spjddevclass_t ixv_devclass;
159185029SpjdDRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
160185029SpjdMODULE_DEPEND(ixv, pci, 1, 1, 1);
161185029SpjdMODULE_DEPEND(ixv, ether, 1, 1, 1);
162185029Spjd#ifdef DEV_NETMAP
163185029SpjdMODULE_DEPEND(ix, netmap, 1, 1, 1);
164185029Spjd#endif /* DEV_NETMAP */
165185029Spjd/* XXX depend on 'ix' ? */
166185029Spjd
167185029Spjd/*
168185029Spjd** TUNEABLE PARAMETERS:
169185029Spjd*/
170185029Spjd
171185029Spjd/* Number of Queues - do not exceed MSIX vectors - 1 */
172185029Spjdstatic int ixv_num_queues = 1;
173185029SpjdTUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
174185029Spjd
175185029Spjd/*
176185029Spjd** AIM: Adaptive Interrupt Moderation
177185029Spjd** which means that the interrupt rate
178185029Spjd** is varied over time based on the
179185029Spjd** traffic for that interrupt vector
180185029Spjd*/
181185029Spjdstatic int ixv_enable_aim = FALSE;
182185029SpjdTUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
183185029Spjd
184185029Spjd/* How many packets rxeof tries to clean at a time */
185185029Spjdstatic int ixv_rx_process_limit = 256;
186185029SpjdTUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
187185029Spjd
188185029Spjd/* How many packets txeof tries to clean at a time */
189185029Spjdstatic int ixv_tx_process_limit = 256;
190185029SpjdTUNABLE_INT("hw.ixv.tx_process_limit", &ixv_tx_process_limit);
191185029Spjd
192185029Spjd/* Flow control setting, default to full */
193185029Spjdstatic int ixv_flow_control = ixgbe_fc_full;
194185029SpjdTUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
195185029Spjd
196185029Spjd/*
197185029Spjd * Header split: this causes the hardware to DMA
198185029Spjd * the header into a seperate mbuf from the payload,
199209962Smm * it can be a performance win in some workloads, but
200185029Spjd * in others it actually hurts, its off by default.
201185029Spjd */
202185029Spjdstatic int ixv_header_split = FALSE;
203185029SpjdTUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
204185029Spjd
205185029Spjd/*
206185029Spjd** Number of TX descriptors per ring,
207185029Spjd** setting higher than RX as this seems
208209962Smm** the better performing choice.
209185029Spjd*/
210209962Smmstatic int ixv_txd = DEFAULT_TXD;
211209962SmmTUNABLE_INT("hw.ixv.txd", &ixv_txd);
212185029Spjd
213185029Spjd/* Number of RX descriptors per ring */
214185029Spjdstatic int ixv_rxd = DEFAULT_RXD;
215185029SpjdTUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
216185029Spjd
217185029Spjd/*
218209962Smm** Shadow VFTA table, this is needed because
219185029Spjd** the real filter table gets cleared during
220185029Spjd** a soft reset and we need to repopulate it.
221185029Spjd*/
222185029Spjdstatic u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
223209962Smm
224209962Smm/*********************************************************************
225209962Smm *  Device identification routine
226209962Smm *
227209962Smm *  ixv_probe determines if the driver should be loaded on
228209962Smm *  adapter based on PCI vendor/device id of the adapter.
229209962Smm *
230209962Smm *  return BUS_PROBE_DEFAULT on success, positive on failure
231209962Smm *********************************************************************/
232209962Smm
233209962Smmstatic int
234209962Smmixv_probe(device_t dev)
235209962Smm{
236209962Smm	ixgbe_vendor_info_t *ent;
237209962Smm
238209962Smm	u16	pci_vendor_id = 0;
239209962Smm	u16	pci_device_id = 0;
240209962Smm	u16	pci_subvendor_id = 0;
241209962Smm	u16	pci_subdevice_id = 0;
242209962Smm	char	adapter_name[256];
243209962Smm
244209962Smm
245209962Smm	pci_vendor_id = pci_get_vendor(dev);
246209962Smm	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
247209962Smm		return (ENXIO);
248209962Smm
249209962Smm	pci_device_id = pci_get_device(dev);
250209962Smm	pci_subvendor_id = pci_get_subvendor(dev);
251209962Smm	pci_subdevice_id = pci_get_subdevice(dev);
252209962Smm
253209962Smm	ent = ixv_vendor_info_array;
254209962Smm	while (ent->vendor_id != 0) {
255209962Smm		if ((pci_vendor_id == ent->vendor_id) &&
256209962Smm		    (pci_device_id == ent->device_id) &&
257209962Smm
258209962Smm		    ((pci_subvendor_id == ent->subvendor_id) ||
259209962Smm		     (ent->subvendor_id == 0)) &&
260209962Smm
261209962Smm		    ((pci_subdevice_id == ent->subdevice_id) ||
262209962Smm		     (ent->subdevice_id == 0))) {
263209962Smm			sprintf(adapter_name, "%s, Version - %s",
264209962Smm				ixv_strings[ent->index],
265209962Smm				ixv_driver_version);
266209962Smm			device_set_desc_copy(dev, adapter_name);
267209962Smm			return (BUS_PROBE_DEFAULT);
268209962Smm		}
269209962Smm		ent++;
270209962Smm	}
271209962Smm	return (ENXIO);
272209962Smm}
273209962Smm
274209962Smm/*********************************************************************
275209962Smm *  Device initialization routine
276209962Smm *
277209962Smm *  The attach entry point is called when the driver is being loaded.
278209962Smm *  This routine identifies the type of hardware, allocates all resources
279209962Smm *  and initializes the hardware.
280209962Smm *
281209962Smm *  return 0 on success, positive on failure
282209962Smm *********************************************************************/
283209962Smm
284209962Smmstatic int
285209962Smmixv_attach(device_t dev)
286209962Smm{
287209962Smm	struct adapter *adapter;
288209962Smm	struct ixgbe_hw *hw;
289209962Smm	int             error = 0;
290209962Smm
291209962Smm	INIT_DEBUGOUT("ixv_attach: begin");
292209962Smm
293185029Spjd	/* Allocate, clear, and link in our adapter structure */
294185029Spjd	adapter = device_get_softc(dev);
295209962Smm	adapter->dev = dev;
296209962Smm	hw = &adapter->hw;
297209962Smm
298209962Smm#ifdef DEV_NETMAP
299185029Spjd	adapter->init_locked = ixv_init_locked;
300185029Spjd	adapter->stop_locked = ixv_stop;
301209962Smm#endif
302209962Smm
303185029Spjd	/* Core Lock Init*/
304185029Spjd	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
305185029Spjd
306185029Spjd	/* SYSCTL APIs */
307185029Spjd	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
308185029Spjd			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
309185029Spjd			OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
310185029Spjd			adapter, 0, ixv_sysctl_debug, "I", "Debug Info");
311185029Spjd
312185029Spjd	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
313185029Spjd			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
314209962Smm			OID_AUTO, "enable_aim", CTLFLAG_RW,
315209962Smm			&ixv_enable_aim, 1, "Interrupt Moderation");
316185029Spjd
317185029Spjd	/* Set up the timer callout */
318185029Spjd	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
319185029Spjd
320209962Smm	/* Determine hardware revision */
321185029Spjd	ixv_identify_hardware(adapter);
322185029Spjd
323209962Smm	/* Do base PCI setup - map BAR0 */
324185029Spjd	if (ixv_allocate_pci_resources(adapter)) {
325185029Spjd		device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
326185029Spjd		error = ENXIO;
327185029Spjd		goto err_out;
328185029Spjd	}
329185029Spjd
330185029Spjd	/* Sysctls for limiting the amount of work done in the taskqueues */
331185029Spjd	ixv_set_sysctl_value(adapter, "rx_processing_limit",
332185029Spjd	    "max number of rx packets to process",
333209962Smm	    &adapter->rx_process_limit, ixv_rx_process_limit);
334185029Spjd
335185029Spjd	ixv_set_sysctl_value(adapter, "tx_processing_limit",
336185029Spjd	    "max number of tx packets to process",
337185029Spjd	    &adapter->tx_process_limit, ixv_tx_process_limit);
338185029Spjd
339185029Spjd	/* Do descriptor calc and sanity checks */
340185029Spjd	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
341185029Spjd	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
342185029Spjd		device_printf(dev, "TXD config issue, using default!\n");
343185029Spjd		adapter->num_tx_desc = DEFAULT_TXD;
344185029Spjd	} else
345185029Spjd		adapter->num_tx_desc = ixv_txd;
346185029Spjd
347185029Spjd	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
348185029Spjd	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
349185029Spjd		device_printf(dev, "RXD config issue, using default!\n");
350209962Smm		adapter->num_rx_desc = DEFAULT_RXD;
351185029Spjd	} else
352185029Spjd		adapter->num_rx_desc = ixv_rxd;
353209962Smm
354209962Smm	/* Allocate our TX/RX Queues */
355209962Smm	if (ixgbe_allocate_queues(adapter)) {
356185029Spjd		device_printf(dev, "ixgbe_allocate_queues() failed!\n");
357185029Spjd		error = ENOMEM;
358185029Spjd		goto err_out;
359185029Spjd	}
360185029Spjd
361185029Spjd	/*
362185029Spjd	** Initialize the shared code: its
363185029Spjd	** at this point the mac type is set.
364185029Spjd	*/
365209962Smm	error = ixgbe_init_shared_code(hw);
366185029Spjd	if (error) {
367185029Spjd		device_printf(dev, "ixgbe_init_shared_code() failed!\n");
368185029Spjd		error = EIO;
369185029Spjd		goto err_late;
370185029Spjd	}
371185029Spjd
372185029Spjd	/* Setup the mailbox */
373185029Spjd	ixgbe_init_mbx_params_vf(hw);
374209962Smm
375185029Spjd	/* Reset mbox api to 1.0 */
376185029Spjd	error = ixgbe_reset_hw(hw);
377185029Spjd	if (error == IXGBE_ERR_RESET_FAILED)
378219089Spjd		device_printf(dev, "ixgbe_reset_hw() failure: Reset Failed!\n");
379185029Spjd	else if (error)
380185029Spjd		device_printf(dev, "ixgbe_reset_hw() failed with error %d\n", error);
381185029Spjd	if (error) {
382185029Spjd		error = EIO;
383185029Spjd		goto err_late;
384185029Spjd	}
385185029Spjd
386185029Spjd	/* Negotiate mailbox API version */
387185029Spjd	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
388185029Spjd	if (error) {
389185029Spjd		device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
390185029Spjd		error = EIO;
391219089Spjd		goto err_late;
392219089Spjd	}
393185029Spjd
394185029Spjd	error = ixgbe_init_hw(hw);
395185029Spjd	if (error) {
396185029Spjd		device_printf(dev, "ixgbe_init_hw() failed!\n");
397185029Spjd		error = EIO;
398185029Spjd		goto err_late;
399185029Spjd	}
400209962Smm
401185029Spjd	error = ixv_allocate_msix(adapter);
402185029Spjd	if (error) {
403185029Spjd		device_printf(dev, "ixv_allocate_msix() failed!\n");
404185029Spjd		goto err_late;
405185029Spjd	}
406185029Spjd
407185029Spjd	/* If no mac address was assigned, make a random one */
408185029Spjd	if (!ixv_check_ether_addr(hw->mac.addr)) {
409277300Ssmh		u8 addr[ETHER_ADDR_LEN];
410185029Spjd		arc4rand(&addr, sizeof(addr), 0);
411185029Spjd		addr[0] &= 0xFE;
412185029Spjd		addr[0] |= 0x02;
413185029Spjd		bcopy(addr, hw->mac.addr, sizeof(addr));
414185029Spjd	}
415185029Spjd
416185029Spjd	/* Setup OS specific network interface */
417277300Ssmh	ixv_setup_interface(dev, adapter);
418210398Smm
419277300Ssmh	/* Do the stats setup */
420185029Spjd	ixv_save_stats(adapter);
421185029Spjd	ixv_init_stats(adapter);
422185029Spjd	ixv_add_stats_sysctls(adapter);
423185029Spjd
424185029Spjd	/* Register for VLAN events */
425185029Spjd	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
426185029Spjd	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
427185029Spjd	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
428185029Spjd	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
429185029Spjd
430219089Spjd#ifdef DEV_NETMAP
431185029Spjd	ixgbe_netmap_attach(adapter);
432185029Spjd#endif /* DEV_NETMAP */
433185029Spjd	INIT_DEBUGOUT("ixv_attach: end");
434185029Spjd	return (0);
435185029Spjd
436185029Spjderr_late:
437185029Spjd	ixgbe_free_transmit_structures(adapter);
438185029Spjd	ixgbe_free_receive_structures(adapter);
439185029Spjderr_out:
440185029Spjd	ixv_free_pci_resources(adapter);
441185029Spjd	return (error);
442185029Spjd
443185029Spjd}
444185029Spjd
445185029Spjd/*********************************************************************
446185029Spjd *  Device removal routine
447185029Spjd *
448185029Spjd *  The detach entry point is called when the driver is being removed.
449185029Spjd *  This routine stops the adapter and deallocates all the resources
450185029Spjd *  that were allocated for driver operation.
451185029Spjd *
452185029Spjd *  return 0 on success, positive on failure
453185029Spjd *********************************************************************/
454185029Spjd
455185029Spjdstatic int
456185029Spjdixv_detach(device_t dev)
457185029Spjd{
458185029Spjd	struct adapter *adapter = device_get_softc(dev);
459185029Spjd	struct ix_queue *que = adapter->queues;
460185029Spjd
461185029Spjd	INIT_DEBUGOUT("ixv_detach: begin");
462185029Spjd
463185029Spjd	/* Make sure VLANS are not using driver */
464185029Spjd	if (adapter->ifp->if_vlantrunk != NULL) {
465185029Spjd		device_printf(dev, "Vlan in use, detach first\n");
466185029Spjd		return (EBUSY);
467185029Spjd	}
468185029Spjd
469209962Smm	IXGBE_CORE_LOCK(adapter);
470185029Spjd	ixv_stop(adapter);
471185029Spjd	IXGBE_CORE_UNLOCK(adapter);
472185029Spjd
473185029Spjd	for (int i = 0; i < adapter->num_queues; i++, que++) {
474185029Spjd		if (que->tq) {
475185029Spjd			struct tx_ring  *txr = que->txr;
476185029Spjd			taskqueue_drain(que->tq, &txr->txq_task);
477185029Spjd			taskqueue_drain(que->tq, &que->que_task);
478185029Spjd			taskqueue_free(que->tq);
479185029Spjd		}
480185029Spjd	}
481185029Spjd
482185029Spjd	/* Drain the Mailbox(link) queue */
483185029Spjd	if (adapter->tq) {
484185029Spjd		taskqueue_drain(adapter->tq, &adapter->link_task);
485185029Spjd		taskqueue_free(adapter->tq);
486185029Spjd	}
487185029Spjd
488185029Spjd	/* Unregister VLAN events */
489185029Spjd	if (adapter->vlan_attach != NULL)
490185029Spjd		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
491219089Spjd	if (adapter->vlan_detach != NULL)
492219089Spjd		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
493219089Spjd
494219089Spjd	ether_ifdetach(adapter->ifp);
495219089Spjd	callout_drain(&adapter->timer);
496185029Spjd#ifdef DEV_NETMAP
497185029Spjd	netmap_detach(adapter->ifp);
498185029Spjd#endif /* DEV_NETMAP */
499209962Smm	ixv_free_pci_resources(adapter);
500185029Spjd	bus_generic_detach(dev);
501185029Spjd	if_free(adapter->ifp);
502185029Spjd
503185029Spjd	ixgbe_free_transmit_structures(adapter);
504185029Spjd	ixgbe_free_receive_structures(adapter);
505185029Spjd
506185029Spjd	IXGBE_CORE_LOCK_DESTROY(adapter);
507185029Spjd	return (0);
508185029Spjd}
509185029Spjd
510219089Spjd/*********************************************************************
511185029Spjd *
512219089Spjd *  Shutdown entry point
513219089Spjd *
514219089Spjd **********************************************************************/
515219089Spjdstatic int
516219089Spjdixv_shutdown(device_t dev)
517219089Spjd{
518185029Spjd	struct adapter *adapter = device_get_softc(dev);
519219089Spjd	IXGBE_CORE_LOCK(adapter);
520185029Spjd	ixv_stop(adapter);
521219089Spjd	IXGBE_CORE_UNLOCK(adapter);
522219089Spjd	return (0);
523219089Spjd}
524219089Spjd
525185029Spjd
526219089Spjd/*********************************************************************
527219089Spjd *  Ioctl entry point
528219089Spjd *
529219089Spjd *  ixv_ioctl is called when the user wants to configure the
530219089Spjd *  interface.
531219089Spjd *
532185029Spjd *  return 0 on success, positive on failure
533185029Spjd **********************************************************************/
534219089Spjd
535209962Smmstatic int
536185029Spjdixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
537185029Spjd{
538185029Spjd	struct adapter	*adapter = ifp->if_softc;
539185029Spjd	struct ifreq	*ifr = (struct ifreq *) data;
540185029Spjd#if defined(INET) || defined(INET6)
541185029Spjd	struct ifaddr	*ifa = (struct ifaddr *) data;
542185029Spjd	bool		avoid_reset = FALSE;
543185029Spjd#endif
544185029Spjd	int             error = 0;
545185029Spjd
546185029Spjd	switch (command) {
547185029Spjd
548185029Spjd	case SIOCSIFADDR:
549185029Spjd#ifdef INET
550185029Spjd		if (ifa->ifa_addr->sa_family == AF_INET)
551185029Spjd			avoid_reset = TRUE;
552185029Spjd#endif
553185029Spjd#ifdef INET6
554185029Spjd		if (ifa->ifa_addr->sa_family == AF_INET6)
555185029Spjd			avoid_reset = TRUE;
556209962Smm#endif
557185029Spjd#if defined(INET) || defined(INET6)
558185029Spjd		/*
559185029Spjd		** Calling init results in link renegotiation,
560185029Spjd		** so we avoid doing it when possible.
561185029Spjd		*/
562185029Spjd		if (avoid_reset) {
563247187Smm			ifp->if_flags |= IFF_UP;
564185029Spjd			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
565247187Smm				ixv_init(adapter);
566185029Spjd			if (!(ifp->if_flags & IFF_NOARP))
567185029Spjd				arp_ifinit(ifp, ifa);
568185029Spjd		} else
569185029Spjd			error = ether_ioctl(ifp, command, data);
570185029Spjd		break;
571185029Spjd#endif
572185029Spjd	case SIOCSIFMTU:
573185029Spjd		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
574185029Spjd		if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
575185029Spjd			error = EINVAL;
576185029Spjd		} else {
577185029Spjd			IXGBE_CORE_LOCK(adapter);
578209962Smm			ifp->if_mtu = ifr->ifr_mtu;
579185029Spjd			adapter->max_frame_size =
580185029Spjd				ifp->if_mtu + IXGBE_MTU_HDR;
581185029Spjd			ixv_init_locked(adapter);
582185029Spjd			IXGBE_CORE_UNLOCK(adapter);
583185029Spjd		}
584185029Spjd		break;
585185029Spjd	case SIOCSIFFLAGS:
586185029Spjd		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
587185029Spjd		IXGBE_CORE_LOCK(adapter);
588185029Spjd		if (ifp->if_flags & IFF_UP) {
589185029Spjd			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
590247187Smm				ixv_init_locked(adapter);
591247187Smm		} else
592247187Smm			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
593185029Spjd				ixv_stop(adapter);
594185029Spjd		adapter->if_flags = ifp->if_flags;
595185029Spjd		IXGBE_CORE_UNLOCK(adapter);
596185029Spjd		break;
597185029Spjd	case SIOCADDMULTI:
598185029Spjd	case SIOCDELMULTI:
599185029Spjd		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
600185029Spjd		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
601185029Spjd			IXGBE_CORE_LOCK(adapter);
602185029Spjd			ixv_disable_intr(adapter);
603185029Spjd			ixv_set_multi(adapter);
604185029Spjd			ixv_enable_intr(adapter);
605185029Spjd			IXGBE_CORE_UNLOCK(adapter);
606185029Spjd		}
607185029Spjd		break;
608185029Spjd	case SIOCSIFMEDIA:
609247187Smm	case SIOCGIFMEDIA:
610185029Spjd		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
611185029Spjd		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
612185029Spjd		break;
613185029Spjd	case SIOCSIFCAP:
614185029Spjd	{
615185029Spjd		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
616185029Spjd		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
617185029Spjd		if (mask & IFCAP_HWCSUM)
618185029Spjd			ifp->if_capenable ^= IFCAP_HWCSUM;
619185029Spjd		if (mask & IFCAP_TSO4)
620185029Spjd			ifp->if_capenable ^= IFCAP_TSO4;
621185029Spjd		if (mask & IFCAP_LRO)
622185029Spjd			ifp->if_capenable ^= IFCAP_LRO;
623185029Spjd		if (mask & IFCAP_VLAN_HWTAGGING)
624185029Spjd			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
625185029Spjd		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
626185029Spjd			IXGBE_CORE_LOCK(adapter);
627185029Spjd			ixv_init_locked(adapter);
628185029Spjd			IXGBE_CORE_UNLOCK(adapter);
629209962Smm		}
630185029Spjd		VLAN_CAPABILITIES(ifp);
631209962Smm		break;
632209962Smm	}
633209962Smm
634185029Spjd	default:
635185029Spjd		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
636185029Spjd		error = ether_ioctl(ifp, command, data);
637185029Spjd		break;
638185029Spjd	}
639185029Spjd
640185029Spjd	return (error);
641185029Spjd}
642185029Spjd
643185029Spjd/*********************************************************************
644185029Spjd *  Init entry point
645185029Spjd *
646185029Spjd *  This routine is used in two ways. It is used by the stack as
647185029Spjd *  init entry point in network interface structure. It is also used
648185029Spjd *  by the driver as a hw/sw initialization routine to get to a
649185029Spjd *  consistent state.
650185029Spjd *
651185029Spjd *  return 0 on success, positive on failure
652185029Spjd **********************************************************************/
653185029Spjd#define IXGBE_MHADD_MFS_SHIFT 16
654185029Spjd
655185029Spjdstatic void
656185029Spjdixv_init_locked(struct adapter *adapter)
657185029Spjd{
658185029Spjd	struct ifnet	*ifp = adapter->ifp;
659185029Spjd	device_t 	dev = adapter->dev;
660185029Spjd	struct ixgbe_hw *hw = &adapter->hw;
661185029Spjd	int error = 0;
662185029Spjd
663185029Spjd	INIT_DEBUGOUT("ixv_init_locked: begin");
664185029Spjd	mtx_assert(&adapter->core_mtx, MA_OWNED);
665185029Spjd	hw->adapter_stopped = FALSE;
666185029Spjd	ixgbe_stop_adapter(hw);
667185029Spjd        callout_stop(&adapter->timer);
668185029Spjd
669185029Spjd        /* reprogram the RAR[0] in case user changed it. */
670185029Spjd        ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
671185029Spjd
672185029Spjd	/* Get the latest mac address, User can use a LAA */
673185029Spjd	bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
674185029Spjd	     IXGBE_ETH_LENGTH_OF_ADDRESS);
675185029Spjd        ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
676185029Spjd	hw->addr_ctrl.rar_used_count = 1;
677185029Spjd
678185029Spjd	/* Prepare transmit descriptors and buffers */
679185029Spjd	if (ixgbe_setup_transmit_structures(adapter)) {
680185029Spjd		device_printf(dev, "Could not setup transmit structures\n");
681185029Spjd		ixv_stop(adapter);
682185029Spjd		return;
683185029Spjd	}
684185029Spjd
685185029Spjd	/* Reset VF and renegotiate mailbox API version */
686185029Spjd	ixgbe_reset_hw(hw);
687185029Spjd	error = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
688185029Spjd	if (error)
689185029Spjd		device_printf(dev, "MBX API 1.1 negotiation failed! Error %d\n", error);
690185029Spjd
691185029Spjd	ixv_initialize_transmit_units(adapter);
692185029Spjd
693185029Spjd	/* Setup Multicast table */
694185029Spjd	ixv_set_multi(adapter);
695185029Spjd
696185029Spjd	/*
697185029Spjd	** Determine the correct mbuf pool
698185029Spjd	** for doing jumbo/headersplit
699185029Spjd	*/
700185029Spjd	if (ifp->if_mtu > ETHERMTU)
701185029Spjd		adapter->rx_mbuf_sz = MJUMPAGESIZE;
702185029Spjd	else
703185029Spjd		adapter->rx_mbuf_sz = MCLBYTES;
704185029Spjd
705185029Spjd	/* Prepare receive descriptors and buffers */
706277300Ssmh	if (ixgbe_setup_receive_structures(adapter)) {
707185029Spjd		device_printf(dev, "Could not setup receive structures\n");
708209962Smm		ixv_stop(adapter);
709277300Ssmh		return;
710185029Spjd	}
711185029Spjd
712277300Ssmh	/* Configure RX settings */
713209962Smm	ixv_initialize_receive_units(adapter);
714185029Spjd
715185029Spjd	/* Set the various hardware offload abilities */
716185029Spjd	ifp->if_hwassist = 0;
717185029Spjd	if (ifp->if_capenable & IFCAP_TSO4)
718185029Spjd		ifp->if_hwassist |= CSUM_TSO;
719185029Spjd	if (ifp->if_capenable & IFCAP_TXCSUM) {
720185029Spjd		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
721185029Spjd#if __FreeBSD_version >= 800000
722185029Spjd		ifp->if_hwassist |= CSUM_SCTP;
723185029Spjd#endif
724185029Spjd	}
725185029Spjd
726185029Spjd	/* Set up VLAN offload and filter */
727185029Spjd	ixv_setup_vlan_support(adapter);
728209962Smm
729185029Spjd	/* Set up MSI/X routing */
730185029Spjd	ixv_configure_ivars(adapter);
731185029Spjd
732185029Spjd	/* Set up auto-mask */
733185029Spjd	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
734185029Spjd
735185029Spjd        /* Set moderation on the Link interrupt */
736185029Spjd        IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
737185029Spjd
738185029Spjd	/* Stats init */
739185029Spjd	ixv_init_stats(adapter);
740185029Spjd
741185029Spjd	/* Config/Enable Link */
742185029Spjd	ixv_config_link(adapter);
743185029Spjd
744277300Ssmh	/* Start watchdog */
745185029Spjd	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
746185029Spjd
747185029Spjd	/* And now turn on interrupts */
748185029Spjd	ixv_enable_intr(adapter);
749185029Spjd
750185029Spjd	/* Now inform the stack we're ready */
751185029Spjd	ifp->if_drv_flags |= IFF_DRV_RUNNING;
752209962Smm	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
753209962Smm
754209962Smm	return;
755209962Smm}
756209962Smm
/*
** if_init entry point: serialize on the core lock and run
** the locked initialization routine.
*/
static void
ixv_init(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixv_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
767185029Spjd
768
769/*
770**
771** MSIX Interrupt Handlers and Tasklets
772**
773*/
774
775static inline void
776ixv_enable_queue(struct adapter *adapter, u32 vector)
777{
778	struct ixgbe_hw *hw = &adapter->hw;
779	u32	queue = 1 << vector;
780	u32	mask;
781
782	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
783	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
784}
785
786static inline void
787ixv_disable_queue(struct adapter *adapter, u32 vector)
788{
789	struct ixgbe_hw *hw = &adapter->hw;
790	u64	queue = (u64)(1 << vector);
791	u32	mask;
792
793	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
794	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
795}
796
797static inline void
798ixv_rearm_queues(struct adapter *adapter, u64 queues)
799{
800	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
801	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
802}
803
804
/*
** Deferred (taskqueue) service routine for one queue pair:
** drains RX completions, cleans the TX ring, and restarts
** transmission if the stack has frames queued. If RX work
** remains, reschedules itself and leaves the vector masked;
** otherwise it re-enables the queue interrupt.
*/
static void
ixv_handle_que(void *context, int pending)
{
	struct ix_queue *que = context;
	struct adapter  *adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct ifnet    *ifp = adapter->ifp;
	bool		more;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		more = ixgbe_rxeof(que);
		IXGBE_TX_LOCK(txr);
		ixgbe_txeof(txr);
#if __FreeBSD_version >= 800000
		if (!drbr_empty(ifp, txr->br))
			ixgbe_mq_start_locked(ifp, txr);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ixgbe_start_locked(txr, ifp);
#endif
		IXGBE_TX_UNLOCK(txr);
		if (more) {
			/* More RX pending: requeue, keep IRQ masked */
			taskqueue_enqueue(que->tq, &que->que_task);
			return;
		}
	}

	/* Reenable this interrupt */
	ixv_enable_queue(adapter, que->msix);
	return;
}
836
/*********************************************************************
 *
 *  MSI Queue Interrupt Service routine
 *
 *  Per-queue MSIX handler: masks this vector, cleans RX and TX,
 *  restarts transmission if the stack queued frames, then (if
 *  enabled) computes an Adaptive Interrupt Moderation setting
 *  from the average packet size seen this interval.
 *
 **********************************************************************/
void
ixv_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter  *adapter = que->adapter;
	struct ifnet    *ifp = adapter->ifp;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	bool		more;
	u32		newitr = 0;

	/* Mask this vector while we process it */
	ixv_disable_queue(adapter, que->msix);
	++que->irqs;

	more = ixgbe_rxeof(que);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	/*
	** Make certain that if the stack
	** has anything queued the task gets
	** scheduled to handle it.
	*/
#ifdef IXGBE_LEGACY_TX
	if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
		ixgbe_start_locked(txr, ifp);
#else
	if (!drbr_empty(adapter->ifp, txr->br))
		ixgbe_mq_start_locked(ifp, txr);
#endif
	IXGBE_TX_UNLOCK(txr);

	/* Do AIM now? */

	if (ixv_enable_aim == FALSE)
		goto no_calc;
	/*
	** Do Adaptive Interrupt Moderation:
        **  - Write out last calculated setting
	**  - Calculate based on average size over
	**    the last interval.
	*/
        if (que->eitr_setting)
                IXGBE_WRITE_REG(&adapter->hw,
                    IXGBE_VTEITR(que->msix),
		    que->eitr_setting);

        que->eitr_setting = 0;

        /* Idle, do nothing */
        if ((txr->bytes == 0) && (rxr->bytes == 0))
                goto no_calc;

	/* Average bytes/packet per direction; take the larger */
	if ((txr->bytes) && (txr->packets))
               	newitr = txr->bytes/txr->packets;
	if ((rxr->bytes) && (rxr->packets))
		newitr = max(newitr,
		    (rxr->bytes / rxr->packets));
	newitr += 24; /* account for hardware frame, crc */

	/* set an upper boundary */
	newitr = min(newitr, 3000);

	/* Be nice to the mid range */
	if ((newitr > 300) && (newitr < 1200))
		newitr = (newitr / 3);
	else
		newitr = (newitr / 2);

	/* Mirror the interval into the upper half of the register */
	newitr |= newitr << 16;

        /* save for next interrupt */
        que->eitr_setting = newitr;

        /* Reset state */
        txr->bytes = 0;
        txr->packets = 0;
        rxr->bytes = 0;
        rxr->packets = 0;

no_calc:
	if (more)
		taskqueue_enqueue(que->tq, &que->que_task);
	else /* Reenable this interrupt */
		ixv_enable_queue(adapter, que->msix);
	return;
}
929
930static void
931ixv_msix_mbx(void *arg)
932{
933	struct adapter	*adapter = arg;
934	struct ixgbe_hw *hw = &adapter->hw;
935	u32		reg;
936
937	++adapter->link_irq;
938
939	/* First get the cause */
940	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
941	/* Clear interrupt with write */
942	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
943
944	/* Link status change */
945	if (reg & IXGBE_EICR_LSC)
946		taskqueue_enqueue(adapter->tq, &adapter->link_task);
947
948	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
949	return;
950}
951
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig. Refreshes link state under the
 *  core lock and reports it through 'ifmr'.
 *
 **********************************************************************/
static void
ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	INIT_DEBUGOUT("ixv_media_status: begin");
	IXGBE_CORE_LOCK(adapter);
	ixv_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report valid-but-inactive and bail */
	if (!adapter->link_active) {
		IXGBE_CORE_UNLOCK(adapter);
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_1GB_FULL:
			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
			break;
		case IXGBE_LINK_SPEED_10GB_FULL:
			/*
			** NOTE(review): only duplex is reported here,
			** no 10G media subtype -- confirm whether a
			** subtype flag was intended.
			*/
			ifmr->ifm_active |= IFM_FDX;
			break;
	}

	IXGBE_CORE_UNLOCK(adapter);

	return;
}
992
993/*********************************************************************
994 *
995 *  Media Ioctl callback
996 *
997 *  This routine is called when the user changes speed/duplex using
998 *  media/mediopt option with ifconfig.
999 *
1000 **********************************************************************/
1001static int
1002ixv_media_change(struct ifnet * ifp)
1003{
1004	struct adapter *adapter = ifp->if_softc;
1005	struct ifmedia *ifm = &adapter->media;
1006
1007	INIT_DEBUGOUT("ixv_media_change: begin");
1008
1009	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1010		return (EINVAL);
1011
1012        switch (IFM_SUBTYPE(ifm->ifm_media)) {
1013        case IFM_AUTO:
1014                break;
1015        default:
1016                device_printf(adapter->dev, "Only auto media type\n");
1017		return (EINVAL);
1018        }
1019
1020	return (0);
1021}
1022
1023
1024/*********************************************************************
1025 *  Multicast Update
1026 *
1027 *  This routine is called whenever multicast address list is updated.
1028 *
1029 **********************************************************************/
1030#define IXGBE_RAR_ENTRIES 16
1031
1032static void
1033ixv_set_multi(struct adapter *adapter)
1034{
1035	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1036	u8	*update_ptr;
1037	struct	ifmultiaddr *ifma;
1038	int	mcnt = 0;
1039	struct ifnet   *ifp = adapter->ifp;
1040
1041	IOCTL_DEBUGOUT("ixv_set_multi: begin");
1042
1043#if __FreeBSD_version < 800000
1044	IF_ADDR_LOCK(ifp);
1045#else
1046	if_maddr_rlock(ifp);
1047#endif
1048	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1049		if (ifma->ifma_addr->sa_family != AF_LINK)
1050			continue;
1051		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1052		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1053		    IXGBE_ETH_LENGTH_OF_ADDRESS);
1054		mcnt++;
1055	}
1056#if __FreeBSD_version < 800000
1057	IF_ADDR_UNLOCK(ifp);
1058#else
1059	if_maddr_runlock(ifp);
1060#endif
1061
1062	update_ptr = mta;
1063
1064	ixgbe_update_mc_addr_list(&adapter->hw,
1065	    update_ptr, mcnt, ixv_mc_array_itr, TRUE);
1066
1067	return;
1068}
1069
1070/*
1071 * This is an iterator function now needed by the multicast
1072 * shared code. It simply feeds the shared code routine the
1073 * addresses in the array of ixv_set_multi() one by one.
1074 */
1075static u8 *
1076ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1077{
1078	u8 *addr = *update_ptr;
1079	u8 *newptr;
1080	*vmdq = 0;
1081
1082	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1083	*update_ptr = newptr;
1084	return addr;
1085}
1086
/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check. Runs once per second off
 *  adapter->timer while the interface is up; caller (callout)
 *  holds the core lock.
 *
 **********************************************************************/

static void
ixv_local_timer(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ix_queue	*que = adapter->queues;
	u64		queues = 0;
	int		hung = 0;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	ixv_update_link_status(adapter);

	/* Stats Update */
	ixv_update_stats(adapter);

	/*
	** Check the TX queues status
	**      - mark hung queues so we don't schedule on them
	**      - watchdog only if all queues show hung
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* Keep track of queues with work for soft irq */
		if (que->txr->busy)
			queues |= ((u64)1 << que->me);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to the MAX we declare it hung.
		**
		** NOTE(review): this compares que->busy while the
		** tracking above and the marking below use
		** que->txr->busy -- verify which field is the
		** intended hung counter.
		*/
		if (que->busy == IXGBE_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			adapter->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((adapter->active_queues & ((u64)1 << que->me)) == 0)
                                adapter->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXGBE_MAX_TX_BUSY) {
			device_printf(dev,"Warning queue %d "
			    "appears to be hung!\n", i);
			que->txr->busy = IXGBE_QUEUE_HUNG;
			++hung;
		}

	}

	/* Only truely watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	else if (queues != 0) { /* Force an IRQ on queues with work */
		ixv_rearm_queues(adapter, queues);
	}

	/* Re-arm for the next tick */
	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
	return;

watchdog:
	/* All queues hung: log, count the event, and reinitialize */
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->watchdog_events++;
	ixv_init_locked(adapter);
}
1160
1161/*
1162** Note: this routine updates the OS on the link state
1163**	the real check of the hardware only happens with
1164**	a link interrupt.
1165*/
1166static void
1167ixv_update_link_status(struct adapter *adapter)
1168{
1169	struct ifnet	*ifp = adapter->ifp;
1170	device_t dev = adapter->dev;
1171
1172	if (adapter->link_up){
1173		if (adapter->link_active == FALSE) {
1174			if (bootverbose)
1175				device_printf(dev,"Link is up %d Gbps %s \n",
1176				    ((adapter->link_speed == 128)? 10:1),
1177				    "Full Duplex");
1178			adapter->link_active = TRUE;
1179			if_link_state_change(ifp, LINK_STATE_UP);
1180		}
1181	} else { /* Link down */
1182		if (adapter->link_active == TRUE) {
1183			if (bootverbose)
1184				device_printf(dev,"Link is Down\n");
1185			if_link_state_change(ifp, LINK_STATE_DOWN);
1186			adapter->link_active = FALSE;
1187		}
1188	}
1189
1190	return;
1191}
1192
1193
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *  Caller holds the core lock.
 *
 **********************************************************************/

static void
ixv_stop(void *arg)
{
	struct ifnet   *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	ifp = adapter->ifp;

	mtx_assert(&adapter->core_mtx, MA_OWNED);

	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_disable_intr(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/*
	** Reset, then clear the stopped flag so the subsequent
	** stop_adapter call runs its full path.
	** NOTE(review): this reset-then-stop ordering looks
	** deliberate but is unusual -- confirm against the shared
	** code contract before changing.
	*/
	ixgbe_reset_hw(hw);
	adapter->hw.adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return;
}
1227
1228
1229/*********************************************************************
1230 *
1231 *  Determine hardware revision.
1232 *
1233 **********************************************************************/
1234static void
1235ixv_identify_hardware(struct adapter *adapter)
1236{
1237	device_t        dev = adapter->dev;
1238	struct ixgbe_hw *hw = &adapter->hw;
1239
1240	/*
1241	** Make sure BUSMASTER is set, on a VM under
1242	** KVM it may not be and will break things.
1243	*/
1244	pci_enable_busmaster(dev);
1245
1246	/* Save off the information about this board */
1247	hw->vendor_id = pci_get_vendor(dev);
1248	hw->device_id = pci_get_device(dev);
1249	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
1250	hw->subsystem_vendor_id =
1251	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
1252	hw->subsystem_device_id =
1253	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
1254
1255	/* We need this to determine device-specific things */
1256	ixgbe_set_mac_type(hw);
1257
1258	/* Set the right number of segments */
1259	adapter->num_segs = IXGBE_82599_SCATTER;
1260
1261	return;
1262}
1263
1264/*********************************************************************
1265 *
1266 *  Setup MSIX Interrupt resources and handlers
1267 *
1268 **********************************************************************/
1269static int
1270ixv_allocate_msix(struct adapter *adapter)
1271{
1272	device_t	dev = adapter->dev;
1273	struct 		ix_queue *que = adapter->queues;
1274	struct		tx_ring *txr = adapter->tx_rings;
1275	int 		error, rid, vector = 0;
1276
1277	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
1278		rid = vector + 1;
1279		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1280		    RF_SHAREABLE | RF_ACTIVE);
1281		if (que->res == NULL) {
1282			device_printf(dev,"Unable to allocate"
1283		    	    " bus resource: que interrupt [%d]\n", vector);
1284			return (ENXIO);
1285		}
1286		/* Set the handler function */
1287		error = bus_setup_intr(dev, que->res,
1288		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1289		    ixv_msix_que, que, &que->tag);
1290		if (error) {
1291			que->res = NULL;
1292			device_printf(dev, "Failed to register QUE handler");
1293			return (error);
1294		}
1295#if __FreeBSD_version >= 800504
1296		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
1297#endif
1298		que->msix = vector;
1299        	adapter->active_queues |= (u64)(1 << que->msix);
1300		/*
1301		** Bind the msix vector, and thus the
1302		** ring to the corresponding cpu.
1303		*/
1304		if (adapter->num_queues > 1)
1305			bus_bind_intr(dev, que->res, i);
1306		TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
1307		TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
1308		que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
1309		    taskqueue_thread_enqueue, &que->tq);
1310		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1311		    device_get_nameunit(adapter->dev));
1312	}
1313
1314	/* and Mailbox */
1315	rid = vector + 1;
1316	adapter->res = bus_alloc_resource_any(dev,
1317    	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1318	if (!adapter->res) {
1319		device_printf(dev,"Unable to allocate"
1320    	    " bus resource: MBX interrupt [%d]\n", rid);
1321		return (ENXIO);
1322	}
1323	/* Set the mbx handler function */
1324	error = bus_setup_intr(dev, adapter->res,
1325	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1326	    ixv_msix_mbx, adapter, &adapter->tag);
1327	if (error) {
1328		adapter->res = NULL;
1329		device_printf(dev, "Failed to register LINK handler");
1330		return (error);
1331	}
1332#if __FreeBSD_version >= 800504
1333	bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
1334#endif
1335	adapter->vector = vector;
1336	/* Tasklets for Mailbox */
1337	TASK_INIT(&adapter->link_task, 0, ixv_handle_mbx, adapter);
1338	adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
1339	    taskqueue_thread_enqueue, &adapter->tq);
1340	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
1341	    device_get_nameunit(adapter->dev));
1342	/*
1343	** Due to a broken design QEMU will fail to properly
1344	** enable the guest for MSIX unless the vectors in
1345	** the table are all set up, so we must rewrite the
1346	** ENABLE in the MSIX control register again at this
1347	** point to cause it to successfully initialize us.
1348	*/
1349	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1350		int msix_ctrl;
1351		pci_find_cap(dev, PCIY_MSIX, &rid);
1352		rid += PCIR_MSIX_CTRL;
1353		msix_ctrl = pci_read_config(dev, rid, 2);
1354		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1355		pci_write_config(dev, rid, msix_ctrl, 2);
1356	}
1357
1358	return (0);
1359}
1360
1361/*
1362 * Setup MSIX resources, note that the VF
1363 * device MUST use MSIX, there is no fallback.
1364 */
1365static int
1366ixv_setup_msix(struct adapter *adapter)
1367{
1368	device_t dev = adapter->dev;
1369	int rid, want, msgs;
1370
1371
1372	/* Must have at least 2 MSIX vectors */
1373	msgs = pci_msix_count(dev);
1374	if (msgs < 2)
1375		goto out;
1376	rid = PCIR_BAR(3);
1377	adapter->msix_mem = bus_alloc_resource_any(dev,
1378	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
1379       	if (adapter->msix_mem == NULL) {
1380		device_printf(adapter->dev,
1381		    "Unable to map MSIX table \n");
1382		goto out;
1383	}
1384
1385	/*
1386	** Want vectors for the queues,
1387	** plus an additional for mailbox.
1388	*/
1389	want = adapter->num_queues + 1;
1390	if (want > msgs) {
1391		want = msgs;
1392		adapter->num_queues = msgs - 1;
1393	} else
1394		msgs = want;
1395	if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
1396               	device_printf(adapter->dev,
1397		    "Using MSIX interrupts with %d vectors\n", want);
1398		return (want);
1399	}
1400	/* Release in case alloc was insufficient */
1401	pci_release_msi(dev);
1402out:
1403       	if (adapter->msix_mem != NULL) {
1404		bus_release_resource(dev, SYS_RES_MEMORY,
1405		    rid, adapter->msix_mem);
1406		adapter->msix_mem = NULL;
1407	}
1408	device_printf(adapter->dev,"MSIX config error\n");
1409	return (ENXIO);
1410}
1411
1412
/*
** Map PCI BAR(0) register space, publish the bus tag/handle for
** the shared code, and configure MSIX vectors. Returns 0 on
** success or ENXIO.
*/
static int
ixv_allocate_pci_resources(struct adapter *adapter)
{
	int             rid;
	device_t        dev = adapter->dev;

	rid = PCIR_BAR(0);
	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(adapter->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	/* Register access goes through this tag/handle pair */
	adapter->osdep.mem_bus_space_tag =
		rman_get_bustag(adapter->pci_mem);
	adapter->osdep.mem_bus_space_handle =
		rman_get_bushandle(adapter->pci_mem);
	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;

	/* Pick up the tuneable queues */
	adapter->num_queues = ixv_num_queues;
	adapter->hw.back = adapter;

	/*
	** Now setup MSI/X, should
	** return us the number of
	** configured vectors.
	**
	** NOTE(review): ixv_setup_msix() returns either a vector
	** count or ENXIO; a legitimate count numerically equal to
	** ENXIO would be misread as failure here -- confirm the
	** possible return values.
	*/
	adapter->msix = ixv_setup_msix(adapter);
	if (adapter->msix == ENXIO)
		return (ENXIO);
	else
		return (0);
}
1449
/*
** Release IRQ handlers and bus resources acquired during attach:
** per-queue vectors first, then the mailbox/link vector, then
** MSI state and the memory BARs. Safe to call from a failed
** attach (partial allocation is handled).
*/
static void
ixv_free_pci_resources(struct adapter * adapter)
{
	struct 		ix_queue *que = adapter->queues;
	device_t	dev = adapter->dev;
	int		rid, memrid;

	memrid = PCIR_BAR(MSIX_82598_BAR);

	/*
	** There is a slight possibility of a failure mode
	** in attach that will result in entering this function
	** before interrupt resources have been initialized, and
	** in that case we do not want to execute the loops below
	** We can detect this reliably by the state of the adapter
	** res pointer.
	*/
	if (adapter->res == NULL)
		goto mem;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}


	/* Clean the Legacy or Link interrupt last */
	if (adapter->vector) /* we are doing MSIX */
		rid = adapter->vector + 1;
	else
		(adapter->msix != 0) ? (rid = 1):(rid = 0);

	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
	/* Finally the MSI state and memory BARs */
	if (adapter->msix)
		pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);

	return;
}
1511
1512/*********************************************************************
1513 *
1514 *  Setup networking device structure and register an interface.
1515 *
1516 **********************************************************************/
1517static void
1518ixv_setup_interface(device_t dev, struct adapter *adapter)
1519{
1520	struct ifnet   *ifp;
1521
1522	INIT_DEBUGOUT("ixv_setup_interface: begin");
1523
1524	ifp = adapter->ifp = if_alloc(IFT_ETHER);
1525	if (ifp == NULL)
1526		panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
1527	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1528	ifp->if_baudrate = 1000000000;
1529	ifp->if_init = ixv_init;
1530	ifp->if_softc = adapter;
1531	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1532	ifp->if_ioctl = ixv_ioctl;
1533#if __FreeBSD_version >= 800000
1534	ifp->if_transmit = ixgbe_mq_start;
1535	ifp->if_qflush = ixgbe_qflush;
1536#else
1537	ifp->if_start = ixgbe_start;
1538#endif
1539	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
1540
1541	ether_ifattach(ifp, adapter->hw.mac.addr);
1542
1543	adapter->max_frame_size =
1544	    ifp->if_mtu + IXGBE_MTU_HDR_VLAN;
1545
1546	/*
1547	 * Tell the upper layer(s) we support long frames.
1548	 */
1549	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1550
1551	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
1552	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1553	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1554			     |  IFCAP_VLAN_HWTSO
1555			     |  IFCAP_VLAN_MTU;
1556	ifp->if_capabilities |= IFCAP_LRO;
1557	ifp->if_capenable = ifp->if_capabilities;
1558
1559	/*
1560	 * Specify the media types supported by this adapter and register
1561	 * callbacks to update media and link information
1562	 */
1563	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
1564		     ixv_media_status);
1565	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1566	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1567
1568	return;
1569}
1570
1571static void
1572ixv_config_link(struct adapter *adapter)
1573{
1574	struct ixgbe_hw *hw = &adapter->hw;
1575	u32	autoneg;
1576
1577	if (hw->mac.ops.check_link)
1578		hw->mac.ops.check_link(hw, &autoneg,
1579		    &adapter->link_up, FALSE);
1580}
1581
1582
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 *  Programs each TX ring's descriptor base/length, head/tail
 *  indices, write-back behavior, and finally sets the enable
 *  bit in TXDCTL.
 *
 **********************************************************************/
static void
ixv_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;


	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64	tdba = txr->txdma.dma_paddr;
		u32	txctrl, txdctl;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);

		/* Set the HW Tx Head and Tail indices */
	    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
	    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(i);

		/* Set Ring parameters: base address split low/high */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
		       (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
		    adapter->num_tx_desc *
		    sizeof(struct ixgbe_legacy_tx_desc));
		/* Disable relaxed-ordering of descriptor writes */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
	}

	return;
}
1630
1631
/*********************************************************************
 *
 *  Setup receive registers and features.
 *
 *  Sizes RX buffers from the MTU, tells the PF our max frame
 *  size, then per ring: disables the queue, programs descriptor
 *  base/length, SRRCTL, re-enables it (polling for the enable
 *  bit), and finally sets the tail pointer and checksum config.
 *
 **********************************************************************/
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct	rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	u32		bufsz, rxcsum, psrtype;

	/* Buffer size follows MTU: 4K clusters for jumbo, else 2K */
	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
	    IXGBE_PSRTYPE_L2HDR;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size);

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Disable the queue, polling up to 10ms for it to stop */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl &= ~(IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME);
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		for (int j = 0; j < 10; j++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
		    (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register: buffer size, one-buffer mode */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last; poll until it sticks */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
}
1755
/*
** Re-establish VLAN state after a soft reset: enable tag
** stripping on each RX queue and replay every VLAN id recorded
** in the shadow VFTA through the PF mailbox.
*/
static void
ixv_setup_vlan_support(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32		ctrl, vid, vfta, retry;
	struct rx_ring	*rxr;

	/*
	** We get here thru init_locked, meaning
	** a soft reset, this has already cleared
	** the VFTA and other state, so if there
	** have been no vlan's registered do nothing.
	*/
	if (adapter->num_vlans == 0)
		return;

	/* Enable the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		ctrl |= IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
		/*
		 * Let Rx path know that it needs to store VLAN tag
		 * as part of extra mbuf info.
		 */
		rxr = &adapter->rx_rings[i];
		rxr->vtag_strip = TRUE;
	}

	/*
	** A soft reset zero's out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (ixv_shadow_vfta[i] == 0)
			continue;
		vfta = ixv_shadow_vfta[i];
		/*
		** Reconstruct the vlan id's
		** based on the bits set in each
		** of the array ints.
		*/
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine;
			** retry a few times as the mailbox can be busy. */
			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
				if (++retry > 5)
					break;
			}
		}
	}
}
1811
1812/*
1813** This routine is run via an vlan config EVENT,
1814** it enables us to use the HW Filter table since
1815** we can get the vlan id. This just creates the
1816** entry in the soft version of the VFTA, init will
1817** repopulate the real table.
1818*/
1819static void
1820ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1821{
1822	struct adapter	*adapter = ifp->if_softc;
1823	u16		index, bit;
1824
1825	if (ifp->if_softc != arg) /* Not our event */
1826		return;
1827
1828	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1829		return;
1830
1831	IXGBE_CORE_LOCK(adapter);
1832	index = (vtag >> 5) & 0x7F;
1833	bit = vtag & 0x1F;
1834	ixv_shadow_vfta[index] |= (1 << bit);
1835	++adapter->num_vlans;
1836	/* Re-init to load the changes */
1837	ixv_init_locked(adapter);
1838	IXGBE_CORE_UNLOCK(adapter);
1839}
1840
/*
** This routine is run via a vlan unconfig EVENT;
** it removes our entry from the soft vfta.
*/
1846static void
1847ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
1848{
1849	struct adapter	*adapter = ifp->if_softc;
1850	u16		index, bit;
1851
1852	if (ifp->if_softc !=  arg)
1853		return;
1854
1855	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1856		return;
1857
1858	IXGBE_CORE_LOCK(adapter);
1859	index = (vtag >> 5) & 0x7F;
1860	bit = vtag & 0x1F;
1861	ixv_shadow_vfta[index] &= ~(1 << bit);
1862	--adapter->num_vlans;
1863	/* Re-init to load the changes */
1864	ixv_init_locked(adapter);
1865	IXGBE_CORE_UNLOCK(adapter);
1866}
1867
1868static void
1869ixv_enable_intr(struct adapter *adapter)
1870{
1871	struct ixgbe_hw *hw = &adapter->hw;
1872	struct ix_queue *que = adapter->queues;
1873	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1874
1875
1876	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1877
1878	mask = IXGBE_EIMS_ENABLE_MASK;
1879	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1880	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1881
1882        for (int i = 0; i < adapter->num_queues; i++, que++)
1883		ixv_enable_queue(adapter, que->msix);
1884
1885	IXGBE_WRITE_FLUSH(hw);
1886
1887	return;
1888}
1889
1890static void
1891ixv_disable_intr(struct adapter *adapter)
1892{
1893	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1894	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1895	IXGBE_WRITE_FLUSH(&adapter->hw);
1896	return;
1897}
1898
1899/*
1900** Setup the correct IVAR register for a particular MSIX interrupt
1901**  - entry is the register array entry
1902**  - vector is the MSIX vector for this queue
1903**  - type is RX/TX/MISC
1904*/
1905static void
1906ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
1907{
1908	struct ixgbe_hw *hw = &adapter->hw;
1909	u32 ivar, index;
1910
1911	vector |= IXGBE_IVAR_ALLOC_VAL;
1912
1913	if (type == -1) { /* MISC IVAR */
1914		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1915		ivar &= ~0xFF;
1916		ivar |= vector;
1917		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1918	} else {	/* RX/TX IVARS */
1919		index = (16 * (entry & 1)) + (8 * type);
1920		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1921		ivar &= ~(0xFF << index);
1922		ivar |= (vector << index);
1923		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1924	}
1925}
1926
1927static void
1928ixv_configure_ivars(struct adapter *adapter)
1929{
1930	struct  ix_queue *que = adapter->queues;
1931
1932        for (int i = 0; i < adapter->num_queues; i++, que++) {
1933		/* First the RX queue entry */
1934                ixv_set_ivar(adapter, i, que->msix, 0);
1935		/* ... and the TX */
1936		ixv_set_ivar(adapter, i, que->msix, 1);
1937		/* Set an initial value in EITR */
1938                IXGBE_WRITE_REG(&adapter->hw,
1939                    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
1940	}
1941
1942	/* For the mailbox interrupt */
1943        ixv_set_ivar(adapter, 1, adapter->vector, -1);
1944}
1945
1946
1947/*
1948** Tasklet handler for MSIX MBX interrupts
1949**  - do outside interrupt since it might sleep
1950*/
1951static void
1952ixv_handle_mbx(void *context, int pending)
1953{
1954	struct adapter  *adapter = context;
1955
1956	ixgbe_check_link(&adapter->hw,
1957	    &adapter->link_speed, &adapter->link_up, 0);
1958	ixv_update_link_status(adapter);
1959}
1960
/*
** The VF stats registers never have a truly virgin
** starting point, so this routine tries to make an
** artificial one, marking ground zero on attach as
** it were.
*/
1967static void
1968ixv_save_stats(struct adapter *adapter)
1969{
1970	if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
1971		adapter->stats.vf.saved_reset_vfgprc +=
1972		    adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
1973		adapter->stats.vf.saved_reset_vfgptc +=
1974		    adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
1975		adapter->stats.vf.saved_reset_vfgorc +=
1976		    adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
1977		adapter->stats.vf.saved_reset_vfgotc +=
1978		    adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
1979		adapter->stats.vf.saved_reset_vfmprc +=
1980		    adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
1981	}
1982}
1983
1984static void
1985ixv_init_stats(struct adapter *adapter)
1986{
1987	struct ixgbe_hw *hw = &adapter->hw;
1988
1989	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1990	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1991	adapter->stats.vf.last_vfgorc |=
1992	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1993
1994	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1995	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1996	adapter->stats.vf.last_vfgotc |=
1997	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1998
1999	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2000
2001	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
2002	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
2003	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
2004	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
2005	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
2006}
2007
/*
** Fold a 32-bit wrapping HW counter into a 64-bit running total:
** a read lower than the previous one means the register rolled
** over, so bump the total by 2^32, then replace its low 32 bits
** with the current register value.  Requires a local 'hw'.
*/
#define UPDATE_STAT_32(reg, last, count)		\
{							\
	u32 current = IXGBE_READ_REG(hw, reg);		\
	if (current < last)				\
		count += 0x100000000LL;			\
	last = current;					\
	count &= 0xFFFFFFFF00000000LL;			\
	count |= current;				\
}
2017
/*
** Same wrap handling for the 36-bit octet counters, which are
** split across an LSB and an MSB register: on rollover add 2^36,
** then replace the low 36 bits of the running total with the
** freshly combined reading.  Requires a local 'hw'.
*/
#define UPDATE_STAT_36(lsb, msb, last, count) 		\
{							\
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
	u64 current = ((cur_msb << 32) | cur_lsb);	\
	if (current < last)				\
		count += 0x1000000000LL;		\
	last = current;					\
	count &= 0xFFFFFFF000000000LL;			\
	count |= current;				\
}
2029
2030/*
2031** ixv_update_stats - Update the board statistics counters.
2032*/
2033void
2034ixv_update_stats(struct adapter *adapter)
2035{
2036        struct ixgbe_hw *hw = &adapter->hw;
2037
2038        UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
2039	    adapter->stats.vf.vfgprc);
2040        UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
2041	    adapter->stats.vf.vfgptc);
2042        UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2043	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
2044        UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2045	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
2046        UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
2047	    adapter->stats.vf.vfmprc);
2048}
2049
2050/*
2051 * Add statistic sysctls for the VF.
2052 */
2053static void
2054ixv_add_stats_sysctls(struct adapter *adapter)
2055{
2056	device_t dev = adapter->dev;
2057	struct ix_queue *que = &adapter->queues[0];
2058	struct tx_ring *txr = que->txr;
2059	struct rx_ring *rxr = que->rxr;
2060
2061	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2062	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
2063	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
2064	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
2065
2066	struct sysctl_oid *stat_node, *queue_node;
2067	struct sysctl_oid_list *stat_list, *queue_list;
2068
2069	/* Driver Statistics */
2070	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
2071			CTLFLAG_RD, &adapter->dropped_pkts,
2072			"Driver dropped packets");
2073	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
2074			CTLFLAG_RD, &adapter->mbuf_defrag_failed,
2075			"m_defrag() failed");
2076	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
2077			CTLFLAG_RD, &adapter->watchdog_events,
2078			"Watchdog timeouts");
2079
2080	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
2081				    CTLFLAG_RD, NULL,
2082				    "VF Statistics (read from HW registers)");
2083	stat_list = SYSCTL_CHILDREN(stat_node);
2084
2085	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
2086			CTLFLAG_RD, &stats->vfgprc,
2087			"Good Packets Received");
2088	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
2089			CTLFLAG_RD, &stats->vfgorc,
2090			"Good Octets Received");
2091	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
2092			CTLFLAG_RD, &stats->vfmprc,
2093			"Multicast Packets Received");
2094	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
2095			CTLFLAG_RD, &stats->vfgptc,
2096			"Good Packets Transmitted");
2097	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
2098			CTLFLAG_RD, &stats->vfgotc,
2099			"Good Octets Transmitted");
2100
2101	queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "que",
2102				    CTLFLAG_RD, NULL,
2103				    "Queue Statistics (collected by SW)");
2104	queue_list = SYSCTL_CHILDREN(queue_node);
2105
2106	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
2107			CTLFLAG_RD, &(que->irqs),
2108			"IRQs on queue");
2109	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_irqs",
2110			CTLFLAG_RD, &(rxr->rx_irq),
2111			"RX irqs on queue");
2112	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
2113			CTLFLAG_RD, &(rxr->rx_packets),
2114			"RX packets");
2115	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
2116			CTLFLAG_RD, &(rxr->rx_bytes),
2117			"RX bytes");
2118	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
2119			CTLFLAG_RD, &(rxr->rx_discarded),
2120			"Discarded RX packets");
2121
2122	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
2123			CTLFLAG_RD, &(txr->total_packets),
2124			"TX Packets");
2125
2126	SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
2127			CTLFLAG_RD, &(txr->no_desc_avail),
2128			"# of times not enough descriptors were available during TX");
2129}
2130
2131static void
2132ixv_set_sysctl_value(struct adapter *adapter, const char *name,
2133	const char *description, int *limit, int value)
2134{
2135	*limit = value;
2136	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
2137	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
2138	    OID_AUTO, name, CTLFLAG_RW, limit, value, description);
2139}
2140
/**********************************************************************
 *
 *  This routine is invoked from the "debug" sysctl handler when
 *  debug output is requested.  It provides a way to take a look at
 *  important statistics maintained by the driver and hardware.
 *
 **********************************************************************/
2148static void
2149ixv_print_debug_info(struct adapter *adapter)
2150{
2151        device_t dev = adapter->dev;
2152        struct ixgbe_hw         *hw = &adapter->hw;
2153        struct ix_queue         *que = adapter->queues;
2154        struct rx_ring          *rxr;
2155        struct tx_ring          *txr;
2156        struct lro_ctrl         *lro;
2157
2158        device_printf(dev,"Error Byte Count = %u \n",
2159            IXGBE_READ_REG(hw, IXGBE_ERRBC));
2160
2161        for (int i = 0; i < adapter->num_queues; i++, que++) {
2162                txr = que->txr;
2163                rxr = que->rxr;
2164                lro = &rxr->lro;
2165                device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
2166                    que->msix, (long)que->irqs);
2167                device_printf(dev,"RX(%d) Packets Received: %lld\n",
2168                    rxr->me, (long long)rxr->rx_packets);
2169                device_printf(dev,"RX(%d) Bytes Received: %lu\n",
2170                    rxr->me, (long)rxr->rx_bytes);
2171                device_printf(dev,"RX(%d) LRO Queued= %d\n",
2172                    rxr->me, lro->lro_queued);
2173                device_printf(dev,"RX(%d) LRO Flushed= %d\n",
2174                    rxr->me, lro->lro_flushed);
2175                device_printf(dev,"TX(%d) Packets Sent: %lu\n",
2176                    txr->me, (long)txr->total_packets);
2177                device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
2178                    txr->me, (long)txr->no_desc_avail);
2179        }
2180
2181        device_printf(dev,"MBX IRQ Handled: %lu\n",
2182            (long)adapter->link_irq);
2183        return;
2184}
2185
2186static int
2187ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
2188{
2189	int error, result;
2190	struct adapter *adapter;
2191
2192	result = -1;
2193	error = sysctl_handle_int(oidp, &result, 0, req);
2194
2195	if (error || !req->newptr)
2196		return (error);
2197
2198	if (result == 1) {
2199		adapter = (struct adapter *) arg1;
2200		ixv_print_debug_info(adapter);
2201	}
2202	return error;
2203}
2204
2205