ixv.c revision 222592
1169689Skan/******************************************************************************
2169689Skan
3169689Skan  Copyright (c) 2001-2011, Intel Corporation
4169689Skan  All rights reserved.
5169689Skan
6169689Skan  Redistribution and use in source and binary forms, with or without
7169689Skan  modification, are permitted provided that the following conditions are met:
8169689Skan
9169689Skan   1. Redistributions of source code must retain the above copyright notice,
10169689Skan      this list of conditions and the following disclaimer.
11169689Skan
12169689Skan   2. Redistributions in binary form must reproduce the above copyright
13169689Skan      notice, this list of conditions and the following disclaimer in the
14169689Skan      documentation and/or other materials provided with the distribution.
15169689Skan
16169689Skan   3. Neither the name of the Intel Corporation nor the names of its
17169689Skan      contributors may be used to endorse or promote products derived from
18169689Skan      this software without specific prior written permission.
19169689Skan
20169689Skan  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21169689Skan  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22169689Skan  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23169689Skan  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24169689Skan  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25169689Skan  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26169689Skan  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27169689Skan  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28169689Skan  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29169689Skan  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30169689Skan  POSSIBILITY OF SUCH DAMAGE.
31169689Skan
32169689Skan******************************************************************************/
33169689Skan/*$FreeBSD: head/sys/dev/ixgbe/ixv.c 222592 2011-06-02 05:31:54Z jfv $*/
34169689Skan
35169689Skan#ifdef HAVE_KERNEL_OPTION_HEADERS
36169689Skan#include "opt_inet.h"
37169689Skan#include "opt_inet6.h"
38169689Skan#endif
39169689Skan
40169689Skan#include "ixv.h"
41169689Skan
42169689Skan/*********************************************************************
43169689Skan *  Driver version
44169689Skan *********************************************************************/
45169689Skanchar ixv_driver_version[] = "1.0.1";
46169689Skan
47169689Skan/*********************************************************************
48169689Skan *  PCI Device ID Table
49169689Skan *
50169689Skan *  Used by probe to select devices to load on
51169689Skan *  Last field stores an index into ixv_strings
52169689Skan *  Last entry must be all 0s
53169689Skan *
54169689Skan *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
55169689Skan *********************************************************************/
56169689Skan
57169689Skanstatic ixv_vendor_info_t ixv_vendor_info_array[] =
58169689Skan{
59169689Skan	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
60169689Skan	/* required last entry */
61169689Skan	{0, 0, 0, 0, 0}
62169689Skan};
63169689Skan
64169689Skan/*********************************************************************
65169689Skan *  Table of branding strings
66169689Skan *********************************************************************/
67169689Skan
68169689Skanstatic char    *ixv_strings[] = {
69169689Skan	"Intel(R) PRO/10GbE Virtual Function Network Driver"
70169689Skan};
71169689Skan
72169689Skan/*********************************************************************
73169689Skan *  Function prototypes
74169689Skan *********************************************************************/
75169689Skanstatic int      ixv_probe(device_t);
76169689Skanstatic int      ixv_attach(device_t);
77169689Skanstatic int      ixv_detach(device_t);
78169689Skanstatic int      ixv_shutdown(device_t);
79169689Skan#if __FreeBSD_version < 800000
80169689Skanstatic void     ixv_start(struct ifnet *);
81169689Skanstatic void     ixv_start_locked(struct tx_ring *, struct ifnet *);
82169689Skan#else
83169689Skanstatic int	ixv_mq_start(struct ifnet *, struct mbuf *);
84169689Skanstatic int	ixv_mq_start_locked(struct ifnet *,
85169689Skan		    struct tx_ring *, struct mbuf *);
86169689Skanstatic void	ixv_qflush(struct ifnet *);
87169689Skan#endif
88169689Skanstatic int      ixv_ioctl(struct ifnet *, u_long, caddr_t);
89169689Skanstatic void	ixv_init(void *);
90169689Skanstatic void	ixv_init_locked(struct adapter *);
91169689Skanstatic void     ixv_stop(void *);
92169689Skanstatic void     ixv_media_status(struct ifnet *, struct ifmediareq *);
93169689Skanstatic int      ixv_media_change(struct ifnet *);
94169689Skanstatic void     ixv_identify_hardware(struct adapter *);
95169689Skanstatic int      ixv_allocate_pci_resources(struct adapter *);
96169689Skanstatic int      ixv_allocate_msix(struct adapter *);
97169689Skanstatic int	ixv_allocate_queues(struct adapter *);
98169689Skanstatic int	ixv_setup_msix(struct adapter *);
99169689Skanstatic void	ixv_free_pci_resources(struct adapter *);
100169689Skanstatic void     ixv_local_timer(void *);
101169689Skanstatic void     ixv_setup_interface(device_t, struct adapter *);
102169689Skanstatic void     ixv_config_link(struct adapter *);
103169689Skan
104169689Skanstatic int      ixv_allocate_transmit_buffers(struct tx_ring *);
105169689Skanstatic int	ixv_setup_transmit_structures(struct adapter *);
106169689Skanstatic void	ixv_setup_transmit_ring(struct tx_ring *);
107169689Skanstatic void     ixv_initialize_transmit_units(struct adapter *);
108169689Skanstatic void     ixv_free_transmit_structures(struct adapter *);
109169689Skanstatic void     ixv_free_transmit_buffers(struct tx_ring *);
110169689Skan
111169689Skanstatic int      ixv_allocate_receive_buffers(struct rx_ring *);
112169689Skanstatic int      ixv_setup_receive_structures(struct adapter *);
113169689Skanstatic int	ixv_setup_receive_ring(struct rx_ring *);
114169689Skanstatic void     ixv_initialize_receive_units(struct adapter *);
115169689Skanstatic void     ixv_free_receive_structures(struct adapter *);
116169689Skanstatic void     ixv_free_receive_buffers(struct rx_ring *);
117169689Skan
118169689Skanstatic void     ixv_enable_intr(struct adapter *);
119169689Skanstatic void     ixv_disable_intr(struct adapter *);
120169689Skanstatic bool	ixv_txeof(struct tx_ring *);
121169689Skanstatic bool	ixv_rxeof(struct ix_queue *, int);
122169689Skanstatic void	ixv_rx_checksum(u32, struct mbuf *, u32);
123169689Skanstatic void     ixv_set_multi(struct adapter *);
124169689Skanstatic void     ixv_update_link_status(struct adapter *);
125169689Skanstatic void	ixv_refresh_mbufs(struct rx_ring *, int);
126169689Skanstatic int      ixv_xmit(struct tx_ring *, struct mbuf **);
127169689Skanstatic int	ixv_sysctl_stats(SYSCTL_HANDLER_ARGS);
128169689Skanstatic int	ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
129169689Skanstatic int	ixv_set_flowcntl(SYSCTL_HANDLER_ARGS);
130169689Skanstatic int	ixv_dma_malloc(struct adapter *, bus_size_t,
131169689Skan		    struct ixv_dma_alloc *, int);
132169689Skanstatic void     ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
133169689Skanstatic void	ixv_add_rx_process_limit(struct adapter *, const char *,
134169689Skan		    const char *, int *, int);
135169689Skanstatic bool	ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
136169689Skanstatic bool	ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
137169689Skanstatic void	ixv_set_ivar(struct adapter *, u8, u8, s8);
138169689Skanstatic void	ixv_configure_ivars(struct adapter *);
139169689Skanstatic u8 *	ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
140169689Skan
141169689Skanstatic void	ixv_setup_vlan_support(struct adapter *);
142169689Skanstatic void	ixv_register_vlan(void *, struct ifnet *, u16);
143169689Skanstatic void	ixv_unregister_vlan(void *, struct ifnet *, u16);
144169689Skan
145169689Skanstatic void	ixv_save_stats(struct adapter *);
146169689Skanstatic void	ixv_init_stats(struct adapter *);
147169689Skanstatic void	ixv_update_stats(struct adapter *);
148169689Skan
149169689Skanstatic __inline void ixv_rx_discard(struct rx_ring *, int);
150169689Skanstatic __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
151169689Skan		    struct mbuf *, u32);
152169689Skan
153169689Skan/* The MSI/X Interrupt handlers */
154169689Skanstatic void	ixv_msix_que(void *);
155169689Skanstatic void	ixv_msix_mbx(void *);
156169689Skan
157169689Skan/* Deferred interrupt tasklets */
158169689Skanstatic void	ixv_handle_que(void *, int);
159169689Skanstatic void	ixv_handle_mbx(void *, int);
160169689Skan
161169689Skan/*********************************************************************
162169689Skan *  FreeBSD Device Interface Entry Points
163169689Skan *********************************************************************/
164169689Skan
165169689Skanstatic device_method_t ixv_methods[] = {
166169689Skan	/* Device interface */
167169689Skan	DEVMETHOD(device_probe, ixv_probe),
168169689Skan	DEVMETHOD(device_attach, ixv_attach),
169169689Skan	DEVMETHOD(device_detach, ixv_detach),
170169689Skan	DEVMETHOD(device_shutdown, ixv_shutdown),
171169689Skan	{0, 0}
172169689Skan};
173169689Skan
174169689Skanstatic driver_t ixv_driver = {
175169689Skan	"ix", ixv_methods, sizeof(struct adapter),
176169689Skan};
177169689Skan
178169689Skanextern devclass_t ixgbe_devclass;
179169689SkanDRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
180169689SkanMODULE_DEPEND(ixv, pci, 1, 1, 1);
181169689SkanMODULE_DEPEND(ixv, ether, 1, 1, 1);
182169689Skan
183169689Skan/*
184169689Skan** TUNEABLE PARAMETERS:
185169689Skan*/
186169689Skan
187169689Skan/*
188169689Skan** AIM: Adaptive Interrupt Moderation
189169689Skan** which means that the interrupt rate
190169689Skan** is varied over time based on the
191169689Skan** traffic for that interrupt vector
192169689Skan*/
193169689Skanstatic int ixv_enable_aim = FALSE;
194169689SkanTUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
195169689Skan
196169689Skan/* How many packets rxeof tries to clean at a time */
197169689Skanstatic int ixv_rx_process_limit = 128;
198169689SkanTUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
199169689Skan
200169689Skan/* Flow control setting, default to full */
201169689Skanstatic int ixv_flow_control = ixgbe_fc_full;
202169689SkanTUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
203169689Skan
/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload;
 * it can be a performance win in some workloads, but
 * in others it actually hurts, so it's off by default.
 */
210169689Skanstatic bool ixv_header_split = FALSE;
211169689SkanTUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
212169689Skan
213169689Skan/*
214169689Skan** Number of TX descriptors per ring,
215169689Skan** setting higher than RX as this seems
216169689Skan** the better performing choice.
217169689Skan*/
218169689Skanstatic int ixv_txd = DEFAULT_TXD;
219169689SkanTUNABLE_INT("hw.ixv.txd", &ixv_txd);
220169689Skan
221169689Skan/* Number of RX descriptors per ring */
222169689Skanstatic int ixv_rxd = DEFAULT_RXD;
223169689SkanTUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
224169689Skan
225169689Skan/*
226169689Skan** Shadow VFTA table, this is needed because
227169689Skan** the real filter table gets cleared during
228169689Skan** a soft reset and we need to repopulate it.
229169689Skan*/
230169689Skanstatic u32 ixv_shadow_vfta[VFTA_SIZE];
231169689Skan
232169689Skan/*********************************************************************
233169689Skan *  Device identification routine
234169689Skan *
235169689Skan *  ixv_probe determines if the driver should be loaded on
236169689Skan *  adapter based on PCI vendor/device id of the adapter.
237169689Skan *
238169689Skan *  return BUS_PROBE_DEFAULT on success, positive on failure
239169689Skan *********************************************************************/
240169689Skan
241169689Skanstatic int
242169689Skanixv_probe(device_t dev)
243169689Skan{
244169689Skan	ixv_vendor_info_t *ent;
245169689Skan
246169689Skan	u16	pci_vendor_id = 0;
247169689Skan	u16	pci_device_id = 0;
248169689Skan	u16	pci_subvendor_id = 0;
249169689Skan	u16	pci_subdevice_id = 0;
250169689Skan	char	adapter_name[256];
251169689Skan
252169689Skan
253169689Skan	pci_vendor_id = pci_get_vendor(dev);
254169689Skan	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
255169689Skan		return (ENXIO);
256169689Skan
257169689Skan	pci_device_id = pci_get_device(dev);
258169689Skan	pci_subvendor_id = pci_get_subvendor(dev);
259169689Skan	pci_subdevice_id = pci_get_subdevice(dev);
260169689Skan
261169689Skan	ent = ixv_vendor_info_array;
262169689Skan	while (ent->vendor_id != 0) {
263169689Skan		if ((pci_vendor_id == ent->vendor_id) &&
264169689Skan		    (pci_device_id == ent->device_id) &&
265169689Skan
266169689Skan		    ((pci_subvendor_id == ent->subvendor_id) ||
267169689Skan		     (ent->subvendor_id == 0)) &&
268169689Skan
269169689Skan		    ((pci_subdevice_id == ent->subdevice_id) ||
270169689Skan		     (ent->subdevice_id == 0))) {
271169689Skan			sprintf(adapter_name, "%s, Version - %s",
272169689Skan				ixv_strings[ent->index],
273169689Skan				ixv_driver_version);
274169689Skan			device_set_desc_copy(dev, adapter_name);
275169689Skan			return (BUS_PROBE_DEFAULT);
276169689Skan		}
277169689Skan		ent++;
278169689Skan	}
279169689Skan	return (ENXIO);
280169689Skan}
281169689Skan
282169689Skan/*********************************************************************
283169689Skan *  Device initialization routine
284169689Skan *
285169689Skan *  The attach entry point is called when the driver is being loaded.
286169689Skan *  This routine identifies the type of hardware, allocates all resources
287169689Skan *  and initializes the hardware.
288169689Skan *
289169689Skan *  return 0 on success, positive on failure
290169689Skan *********************************************************************/
291169689Skan
292169689Skanstatic int
293169689Skanixv_attach(device_t dev)
294169689Skan{
295169689Skan	struct adapter *adapter;
296169689Skan	struct ixgbe_hw *hw;
297169689Skan	int             error = 0;
298169689Skan
299169689Skan	INIT_DEBUGOUT("ixv_attach: begin");
300169689Skan
301169689Skan	if (resource_disabled("ixgbe", device_get_unit(dev))) {
302169689Skan		device_printf(dev, "Disabled by device hint\n");
303169689Skan		return (ENXIO);
304169689Skan	}
305169689Skan
306169689Skan	/* Allocate, clear, and link in our adapter structure */
307169689Skan	adapter = device_get_softc(dev);
308169689Skan	adapter->dev = adapter->osdep.dev = dev;
309169689Skan	hw = &adapter->hw;
310169689Skan
311169689Skan	/* Core Lock Init*/
312169689Skan	IXV_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
313169689Skan
314169689Skan	/* SYSCTL APIs */
315169689Skan	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
316169689Skan			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
317169689Skan			OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
318169689Skan			adapter, 0, ixv_sysctl_stats, "I", "Statistics");
319169689Skan
320169689Skan	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
321169689Skan			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
322169689Skan			OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
323169689Skan			adapter, 0, ixv_sysctl_debug, "I", "Debug Info");
324169689Skan
325169689Skan	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
326169689Skan			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
327169689Skan			OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
328169689Skan			adapter, 0, ixv_set_flowcntl, "I", "Flow Control");
329169689Skan
330169689Skan	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
331169689Skan			SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
332169689Skan			OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
333169689Skan			&ixv_enable_aim, 1, "Interrupt Moderation");
334169689Skan
335169689Skan	/* Set up the timer callout */
336169689Skan	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
337169689Skan
338169689Skan	/* Determine hardware revision */
339169689Skan	ixv_identify_hardware(adapter);
340169689Skan
341169689Skan	/* Do base PCI setup - map BAR0 */
342169689Skan	if (ixv_allocate_pci_resources(adapter)) {
343169689Skan		device_printf(dev, "Allocation of PCI resources failed\n");
344169689Skan		error = ENXIO;
345169689Skan		goto err_out;
346169689Skan	}
347169689Skan
348169689Skan	/* Do descriptor calc and sanity checks */
349169689Skan	if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
350169689Skan	    ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
351169689Skan		device_printf(dev, "TXD config issue, using default!\n");
352169689Skan		adapter->num_tx_desc = DEFAULT_TXD;
353169689Skan	} else
354169689Skan		adapter->num_tx_desc = ixv_txd;
355169689Skan
356169689Skan	if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
357169689Skan	    ixv_rxd < MIN_TXD || ixv_rxd > MAX_TXD) {
358169689Skan		device_printf(dev, "RXD config issue, using default!\n");
359169689Skan		adapter->num_rx_desc = DEFAULT_RXD;
360169689Skan	} else
361169689Skan		adapter->num_rx_desc = ixv_rxd;
362169689Skan
363169689Skan	/* Allocate our TX/RX Queues */
364169689Skan	if (ixv_allocate_queues(adapter)) {
365169689Skan		error = ENOMEM;
366169689Skan		goto err_out;
367169689Skan	}
368169689Skan
369169689Skan	/*
370169689Skan	** Initialize the shared code: its
371169689Skan	** at this point the mac type is set.
372169689Skan	*/
373169689Skan	error = ixgbe_init_shared_code(hw);
374169689Skan	if (error) {
375169689Skan		device_printf(dev,"Shared Code Initialization Failure\n");
376169689Skan		error = EIO;
377169689Skan		goto err_late;
378169689Skan	}
379169689Skan
380169689Skan	/* Setup the mailbox */
381169689Skan	ixgbe_init_mbx_params_vf(hw);
382169689Skan
383169689Skan	ixgbe_reset_hw(hw);
384169689Skan
385169689Skan	/* Get Hardware Flow Control setting */
386169689Skan	hw->fc.requested_mode = ixgbe_fc_full;
387169689Skan	hw->fc.pause_time = IXV_FC_PAUSE;
388169689Skan	hw->fc.low_water = IXV_FC_LO;
389169689Skan	hw->fc.high_water = IXV_FC_HI;
390169689Skan	hw->fc.send_xon = TRUE;
391169689Skan
392169689Skan	error = ixgbe_init_hw(hw);
393169689Skan	if (error) {
394169689Skan		device_printf(dev,"Hardware Initialization Failure\n");
395169689Skan		error = EIO;
396169689Skan		goto err_late;
397169689Skan	}
398169689Skan
399169689Skan	error = ixv_allocate_msix(adapter);
400169689Skan	if (error)
401169689Skan		goto err_late;
402169689Skan
403169689Skan	/* Setup OS specific network interface */
404169689Skan	ixv_setup_interface(dev, adapter);
405169689Skan
406169689Skan	/* Sysctl for limiting the amount of work done in the taskqueue */
407169689Skan	ixv_add_rx_process_limit(adapter, "rx_processing_limit",
408169689Skan	    "max number of rx packets to process", &adapter->rx_process_limit,
409169689Skan	    ixv_rx_process_limit);
410169689Skan
411169689Skan	/* Do the stats setup */
412169689Skan	ixv_save_stats(adapter);
413169689Skan	ixv_init_stats(adapter);
414169689Skan
415169689Skan	/* Register for VLAN events */
416169689Skan	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
417169689Skan	    ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
418169689Skan	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
419169689Skan	    ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
420169689Skan
421169689Skan	INIT_DEBUGOUT("ixv_attach: end");
422169689Skan	return (0);
423169689Skan
424169689Skanerr_late:
425169689Skan	ixv_free_transmit_structures(adapter);
426169689Skan	ixv_free_receive_structures(adapter);
427169689Skanerr_out:
428169689Skan	ixv_free_pci_resources(adapter);
429169689Skan	return (error);
430169689Skan
431169689Skan}
432169689Skan
433169689Skan/*********************************************************************
434169689Skan *  Device removal routine
435169689Skan *
436169689Skan *  The detach entry point is called when the driver is being removed.
437169689Skan *  This routine stops the adapter and deallocates all the resources
438169689Skan *  that were allocated for driver operation.
439169689Skan *
440169689Skan *  return 0 on success, positive on failure
441169689Skan *********************************************************************/
442169689Skan
/*
 * Device removal routine (device_detach method).
 *
 * Stops the adapter, drains and frees the per-queue and mailbox
 * taskqueues, deregisters VLAN event handlers, detaches the ifnet,
 * and releases PCI resources, ring structures, and the core lock.
 * Teardown order matters: the interface is stopped before taskqueues
 * are drained so no new work is scheduled while they are freed.
 *
 * Returns 0 on success, EBUSY if VLANs are still configured on top
 * of this interface.
 */
static int
ixv_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ix_queue *que = adapter->queues;

	INIT_DEBUGOUT("ixv_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

	/* Quiesce the hardware under the core lock */
	IXV_CORE_LOCK(adapter);
	ixv_stop(adapter);
	IXV_CORE_UNLOCK(adapter);

	/* Drain and free each queue's deferred-work taskqueue */
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		if (que->tq) {
			taskqueue_drain(que->tq, &que->que_task);
			taskqueue_free(que->tq);
		}
	}

	/* Drain the Link queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->mbx_task);
		taskqueue_free(adapter->tq);
	}

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	/* Detach from the network stack, then release resources */
	ether_ifdetach(adapter->ifp);
	callout_drain(&adapter->timer);
	ixv_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixv_free_transmit_structures(adapter);
	ixv_free_receive_structures(adapter);

	IXV_CORE_LOCK_DESTROY(adapter);
	return (0);
}
492169689Skan
493169689Skan/*********************************************************************
494169689Skan *
495169689Skan *  Shutdown entry point
496169689Skan *
497169689Skan **********************************************************************/
498169689Skanstatic int
499169689Skanixv_shutdown(device_t dev)
500169689Skan{
501169689Skan	struct adapter *adapter = device_get_softc(dev);
502169689Skan	IXV_CORE_LOCK(adapter);
503169689Skan	ixv_stop(adapter);
504169689Skan	IXV_CORE_UNLOCK(adapter);
505169689Skan	return (0);
506169689Skan}
507169689Skan
508169689Skan#if __FreeBSD_version < 800000
509169689Skan/*********************************************************************
510169689Skan *  Transmit entry point
511169689Skan *
512169689Skan *  ixv_start is called by the stack to initiate a transmit.
513169689Skan *  The driver will remain in this routine as long as there are
514169689Skan *  packets to transmit and transmit resources are available.
515169689Skan *  In case resources are not available stack is notified and
516169689Skan *  the packet is requeued.
517169689Skan **********************************************************************/
518169689Skanstatic void
519169689Skanixv_start_locked(struct tx_ring *txr, struct ifnet * ifp)
520169689Skan{
521169689Skan	struct mbuf    *m_head;
522169689Skan	struct adapter *adapter = txr->adapter;
523169689Skan
524169689Skan	IXV_TX_LOCK_ASSERT(txr);
525169689Skan
526169689Skan	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
527169689Skan	    IFF_DRV_RUNNING)
528169689Skan		return;
529169689Skan	if (!adapter->link_active)
530169689Skan		return;
531169689Skan
532169689Skan	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
533169689Skan
534169689Skan		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
535169689Skan		if (m_head == NULL)
536169689Skan			break;
537169689Skan
538169689Skan		if (ixv_xmit(txr, &m_head)) {
539169689Skan			if (m_head == NULL)
540169689Skan				break;
541169689Skan			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
542169689Skan			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
543169689Skan			break;
544169689Skan		}
545169689Skan		/* Send a copy of the frame to the BPF listener */
546169689Skan		ETHER_BPF_MTAP(ifp, m_head);
547169689Skan
548169689Skan		/* Set watchdog on */
549169689Skan		txr->watchdog_check = TRUE;
550169689Skan		txr->watchdog_time = ticks;
551169689Skan
552169689Skan	}
553169689Skan	return;
554169689Skan}
555169689Skan
556169689Skan/*
557169689Skan * Legacy TX start - called by the stack, this
558169689Skan * always uses the first tx ring, and should
559169689Skan * not be used with multiqueue tx enabled.
560169689Skan */
561169689Skanstatic void
562169689Skanixv_start(struct ifnet *ifp)
563169689Skan{
564169689Skan	struct adapter *adapter = ifp->if_softc;
565169689Skan	struct tx_ring	*txr = adapter->tx_rings;
566169689Skan
567169689Skan	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
568169689Skan		IXV_TX_LOCK(txr);
569169689Skan		ixv_start_locked(txr, ifp);
570169689Skan		IXV_TX_UNLOCK(txr);
571169689Skan	}
572169689Skan	return;
573169689Skan}
574169689Skan
575169689Skan#else
576169689Skan
577169689Skan/*
578169689Skan** Multiqueue Transmit driver
579169689Skan**
580169689Skan*/
581169689Skanstatic int
582169689Skanixv_mq_start(struct ifnet *ifp, struct mbuf *m)
583169689Skan{
584169689Skan	struct adapter	*adapter = ifp->if_softc;
585169689Skan	struct ix_queue	*que;
586169689Skan	struct tx_ring	*txr;
587169689Skan	int 		i = 0, err = 0;
588169689Skan
589169689Skan	/* Which queue to use */
590169689Skan	if ((m->m_flags & M_FLOWID) != 0)
591169689Skan		i = m->m_pkthdr.flowid % adapter->num_queues;
592169689Skan
593169689Skan	txr = &adapter->tx_rings[i];
594169689Skan	que = &adapter->queues[i];
595169689Skan
596169689Skan	if (IXV_TX_TRYLOCK(txr)) {
597169689Skan		err = ixv_mq_start_locked(ifp, txr, m);
598169689Skan		IXV_TX_UNLOCK(txr);
599169689Skan	} else {
600169689Skan		err = drbr_enqueue(ifp, txr->br, m);
601169689Skan		taskqueue_enqueue(que->tq, &que->que_task);
602169689Skan	}
603169689Skan
604169689Skan	return (err);
605169689Skan}
606169689Skan
/*
 * Multiqueue transmit, TX lock held by the caller.
 *
 * If the interface is down, marked active, or the link is not up, the
 * mbuf (when non-NULL) is parked on the buf_ring for later.  Otherwise
 * the buf_ring is drained through ixv_xmit, preserving ordering: a new
 * mbuf is enqueued behind any already-queued packets before dequeuing.
 * Passing m == NULL just drains the buf_ring (taskqueue usage).
 *
 * Returns 0 or an errno from drbr_enqueue/ixv_xmit.
 */
static int
ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
{
	struct adapter  *adapter = txr->adapter;
        struct mbuf     *next;
        int             enqueued, err = 0;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || adapter->link_active == 0) {
		/* Can't send now: hold the packet on the buf_ring */
		if (m != NULL)
			err = drbr_enqueue(ifp, txr->br, m);
		return (err);
	}

	/* Do a clean if descriptors are low */
	if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
		ixv_txeof(txr);

	enqueued = 0;
	if (m == NULL) {
		next = drbr_dequeue(ifp, txr->br);
	} else if (drbr_needs_enqueue(ifp, txr->br)) {
		/* Packets already queued: go behind them to keep order */
		if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
			return (err);
		next = drbr_dequeue(ifp, txr->br);
	} else
		next = m;

	/* Process the queue */
	while (next != NULL) {
		if ((err = ixv_xmit(txr, &next)) != 0) {
			/* ixv_xmit may free the mbuf on hard failure */
			if (next != NULL)
				err = drbr_enqueue(ifp, txr->br, next);
			break;
		}
		enqueued++;
		/* NB: mbuf is owned by the ring until txeof completes it */
		drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, next);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
			/* Descriptors getting scarce: stop the stack */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		next = drbr_dequeue(ifp, txr->br);
	}

	if (enqueued > 0) {
		/* Set watchdog on */
		txr->watchdog_check = TRUE;
		txr->watchdog_time = ticks;
	}

	return (err);
}
663169689Skan
664169689Skan/*
665169689Skan** Flush all ring buffers
666169689Skan*/
667169689Skanstatic void
668169689Skanixv_qflush(struct ifnet *ifp)
669169689Skan{
670169689Skan	struct adapter  *adapter = ifp->if_softc;
671169689Skan	struct tx_ring  *txr = adapter->tx_rings;
672169689Skan	struct mbuf     *m;
673169689Skan
674169689Skan	for (int i = 0; i < adapter->num_queues; i++, txr++) {
675169689Skan		IXV_TX_LOCK(txr);
676169689Skan		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
677169689Skan			m_freem(m);
678169689Skan		IXV_TX_UNLOCK(txr);
679169689Skan	}
680169689Skan	if_qflush(ifp);
681169689Skan}
682169689Skan
683169689Skan#endif
684169689Skan
685169689Skan/*********************************************************************
686169689Skan *  Ioctl entry point
687169689Skan *
688169689Skan *  ixv_ioctl is called when the user wants to configure the
689169689Skan *  interface.
690169689Skan *
691169689Skan *  return 0 on success, positive on failure
692169689Skan **********************************************************************/
693169689Skan
694169689Skanstatic int
695169689Skanixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
696169689Skan{
697169689Skan	struct adapter	*adapter = ifp->if_softc;
698169689Skan	struct ifreq	*ifr = (struct ifreq *) data;
699169689Skan#if defined(INET) || defined(INET6)
700169689Skan	struct ifaddr	*ifa = (struct ifaddr *) data;
701169689Skan	bool		avoid_reset = FALSE;
702169689Skan#endif
703169689Skan	int             error = 0;
704169689Skan
705169689Skan	switch (command) {
706169689Skan
707169689Skan	case SIOCSIFADDR:
708169689Skan#ifdef INET
709169689Skan		if (ifa->ifa_addr->sa_family == AF_INET)
710169689Skan			avoid_reset = TRUE;
711169689Skan#endif
712169689Skan#ifdef INET6
713169689Skan		if (ifa->ifa_addr->sa_family == AF_INET6)
714169689Skan			avoid_reset = TRUE;
715169689Skan#endif
716169689Skan#if defined(INET) || defined(INET6)
717169689Skan		/*
718169689Skan		** Calling init results in link renegotiation,
719169689Skan		** so we avoid doing it when possible.
720169689Skan		*/
721169689Skan		if (avoid_reset) {
722169689Skan			ifp->if_flags |= IFF_UP;
723169689Skan			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
724169689Skan				ixv_init(adapter);
725169689Skan			if (!(ifp->if_flags & IFF_NOARP))
726169689Skan				arp_ifinit(ifp, ifa);
727169689Skan		} else
728169689Skan			error = ether_ioctl(ifp, command, data);
729169689Skan		break;
730169689Skan#endif
731169689Skan	case SIOCSIFMTU:
732169689Skan		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
733169689Skan		if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
734169689Skan			error = EINVAL;
735169689Skan		} else {
736169689Skan			IXV_CORE_LOCK(adapter);
737169689Skan			ifp->if_mtu = ifr->ifr_mtu;
738169689Skan			adapter->max_frame_size =
739169689Skan				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
740169689Skan			ixv_init_locked(adapter);
741169689Skan			IXV_CORE_UNLOCK(adapter);
742169689Skan		}
743169689Skan		break;
744169689Skan	case SIOCSIFFLAGS:
745169689Skan		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
746169689Skan		IXV_CORE_LOCK(adapter);
747169689Skan		if (ifp->if_flags & IFF_UP) {
748169689Skan			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
749169689Skan				ixv_init_locked(adapter);
750169689Skan		} else
751169689Skan			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
752169689Skan				ixv_stop(adapter);
753169689Skan		adapter->if_flags = ifp->if_flags;
754169689Skan		IXV_CORE_UNLOCK(adapter);
755169689Skan		break;
756169689Skan	case SIOCADDMULTI:
757169689Skan	case SIOCDELMULTI:
758169689Skan		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
759169689Skan		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
760169689Skan			IXV_CORE_LOCK(adapter);
761169689Skan			ixv_disable_intr(adapter);
762169689Skan			ixv_set_multi(adapter);
763169689Skan			ixv_enable_intr(adapter);
764169689Skan			IXV_CORE_UNLOCK(adapter);
765169689Skan		}
766169689Skan		break;
767169689Skan	case SIOCSIFMEDIA:
768169689Skan	case SIOCGIFMEDIA:
769169689Skan		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
770169689Skan		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
771169689Skan		break;
772169689Skan	case SIOCSIFCAP:
773169689Skan	{
774169689Skan		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
775169689Skan		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
776169689Skan		if (mask & IFCAP_HWCSUM)
777169689Skan			ifp->if_capenable ^= IFCAP_HWCSUM;
778169689Skan		if (mask & IFCAP_TSO4)
779169689Skan			ifp->if_capenable ^= IFCAP_TSO4;
780169689Skan		if (mask & IFCAP_LRO)
781169689Skan			ifp->if_capenable ^= IFCAP_LRO;
782169689Skan		if (mask & IFCAP_VLAN_HWTAGGING)
783169689Skan			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
784169689Skan		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
785169689Skan			IXV_CORE_LOCK(adapter);
786169689Skan			ixv_init_locked(adapter);
787169689Skan			IXV_CORE_UNLOCK(adapter);
788169689Skan		}
789169689Skan		VLAN_CAPABILITIES(ifp);
790169689Skan		break;
791169689Skan	}
792169689Skan
793169689Skan	default:
794169689Skan		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
795169689Skan		error = ether_ioctl(ifp, command, data);
796169689Skan		break;
797169689Skan	}
798169689Skan
799169689Skan	return (error);
800169689Skan}
801169689Skan
802169689Skan/*********************************************************************
803169689Skan *  Init entry point
804169689Skan *
805169689Skan *  This routine is used in two ways. It is used by the stack as
806169689Skan *  init entry point in network interface structure. It is also used
807169689Skan *  by the driver as a hw/sw initialization routine to get to a
808169689Skan *  consistent state.
809169689Skan *
810169689Skan *  return 0 on success, positive on failure
811169689Skan **********************************************************************/
812169689Skan#define IXGBE_MHADD_MFS_SHIFT 16
813169689Skan
814169689Skanstatic void
815169689Skanixv_init_locked(struct adapter *adapter)
816169689Skan{
817169689Skan	struct ifnet	*ifp = adapter->ifp;
818169689Skan	device_t 	dev = adapter->dev;
819169689Skan	struct ixgbe_hw *hw = &adapter->hw;
820169689Skan	u32		mhadd, gpie;
821169689Skan
822169689Skan	INIT_DEBUGOUT("ixv_init: begin");
823169689Skan	mtx_assert(&adapter->core_mtx, MA_OWNED);
824169689Skan	hw->adapter_stopped = FALSE;
825169689Skan	ixgbe_stop_adapter(hw);
826169689Skan        callout_stop(&adapter->timer);
827169689Skan
828169689Skan        /* reprogram the RAR[0] in case user changed it. */
829169689Skan        ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
830169689Skan
831169689Skan	/* Get the latest mac address, User can use a LAA */
832169689Skan	bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
833169689Skan	     IXGBE_ETH_LENGTH_OF_ADDRESS);
834169689Skan        ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
835169689Skan	hw->addr_ctrl.rar_used_count = 1;
836169689Skan
837169689Skan	/* Prepare transmit descriptors and buffers */
838169689Skan	if (ixv_setup_transmit_structures(adapter)) {
839169689Skan		device_printf(dev,"Could not setup transmit structures\n");
840169689Skan		ixv_stop(adapter);
841169689Skan		return;
842169689Skan	}
843169689Skan
844169689Skan	ixgbe_reset_hw(hw);
845169689Skan	ixv_initialize_transmit_units(adapter);
846169689Skan
847169689Skan	/* Setup Multicast table */
848169689Skan	ixv_set_multi(adapter);
849169689Skan
850169689Skan	/*
851169689Skan	** Determine the correct mbuf pool
852169689Skan	** for doing jumbo/headersplit
853169689Skan	*/
854169689Skan	if (ifp->if_mtu > ETHERMTU)
855169689Skan		adapter->rx_mbuf_sz = MJUMPAGESIZE;
856169689Skan	else
857169689Skan		adapter->rx_mbuf_sz = MCLBYTES;
858169689Skan
859169689Skan	/* Prepare receive descriptors and buffers */
860169689Skan	if (ixv_setup_receive_structures(adapter)) {
861169689Skan		device_printf(dev,"Could not setup receive structures\n");
862169689Skan		ixv_stop(adapter);
863169689Skan		return;
864169689Skan	}
865169689Skan
866169689Skan	/* Configure RX settings */
867169689Skan	ixv_initialize_receive_units(adapter);
868169689Skan
869169689Skan	/* Enable Enhanced MSIX mode */
870169689Skan	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
871169689Skan	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
872169689Skan	gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
873169689Skan        IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
874169689Skan
875169689Skan	/* Set the various hardware offload abilities */
876169689Skan	ifp->if_hwassist = 0;
877169689Skan	if (ifp->if_capenable & IFCAP_TSO4)
878169689Skan		ifp->if_hwassist |= CSUM_TSO;
879169689Skan	if (ifp->if_capenable & IFCAP_TXCSUM) {
880169689Skan		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
881169689Skan#if __FreeBSD_version >= 800000
882169689Skan		ifp->if_hwassist |= CSUM_SCTP;
883169689Skan#endif
884169689Skan	}
885169689Skan
886169689Skan	/* Set MTU size */
887169689Skan	if (ifp->if_mtu > ETHERMTU) {
888169689Skan		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
889169689Skan		mhadd &= ~IXGBE_MHADD_MFS_MASK;
890169689Skan		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
891169689Skan		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
892169689Skan	}
893169689Skan
894169689Skan	/* Set up VLAN offload and filter */
895169689Skan	ixv_setup_vlan_support(adapter);
896169689Skan
897169689Skan	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
898169689Skan
899169689Skan	/* Set up MSI/X routing */
900169689Skan	ixv_configure_ivars(adapter);
901169689Skan
902169689Skan	/* Set up auto-mask */
903169689Skan	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
904169689Skan
905169689Skan        /* Set moderation on the Link interrupt */
906169689Skan        IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);
907169689Skan
908169689Skan	/* Stats init */
909169689Skan	ixv_init_stats(adapter);
910169689Skan
911169689Skan	/* Config/Enable Link */
912169689Skan	ixv_config_link(adapter);
913169689Skan
914169689Skan	/* And now turn on interrupts */
915169689Skan	ixv_enable_intr(adapter);
916169689Skan
917169689Skan	/* Now inform the stack we're ready */
918169689Skan	ifp->if_drv_flags |= IFF_DRV_RUNNING;
919169689Skan	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
920169689Skan
921169689Skan	return;
922169689Skan}
923169689Skan
924169689Skanstatic void
925169689Skanixv_init(void *arg)
926169689Skan{
927169689Skan	struct adapter *adapter = arg;
928169689Skan
929169689Skan	IXV_CORE_LOCK(adapter);
930169689Skan	ixv_init_locked(adapter);
931169689Skan	IXV_CORE_UNLOCK(adapter);
932169689Skan	return;
933169689Skan}
934169689Skan
935169689Skan
936169689Skan/*
937169689Skan**
938169689Skan** MSIX Interrupt Handlers and Tasklets
939169689Skan**
940169689Skan*/
941169689Skan
942169689Skanstatic inline void
943169689Skanixv_enable_queue(struct adapter *adapter, u32 vector)
944169689Skan{
945169689Skan	struct ixgbe_hw *hw = &adapter->hw;
946169689Skan	u32	queue = 1 << vector;
947169689Skan	u32	mask;
948169689Skan
949169689Skan	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
950169689Skan	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
951169689Skan}
952169689Skan
953169689Skanstatic inline void
954169689Skanixv_disable_queue(struct adapter *adapter, u32 vector)
955169689Skan{
956169689Skan	struct ixgbe_hw *hw = &adapter->hw;
957169689Skan	u64	queue = (u64)(1 << vector);
958169689Skan	u32	mask;
959169689Skan
960169689Skan	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
961169689Skan	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
962169689Skan}
963169689Skan
964169689Skanstatic inline void
965169689Skanixv_rearm_queues(struct adapter *adapter, u64 queues)
966169689Skan{
967169689Skan	u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
968169689Skan	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
969169689Skan}
970169689Skan
971169689Skan
972169689Skanstatic void
973169689Skanixv_handle_que(void *context, int pending)
974169689Skan{
975169689Skan	struct ix_queue *que = context;
976169689Skan	struct adapter  *adapter = que->adapter;
977169689Skan	struct tx_ring  *txr = que->txr;
978169689Skan	struct ifnet    *ifp = adapter->ifp;
979169689Skan	bool		more;
980169689Skan
981169689Skan	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
982169689Skan		more = ixv_rxeof(que, adapter->rx_process_limit);
983169689Skan		IXV_TX_LOCK(txr);
984169689Skan		ixv_txeof(txr);
985169689Skan#if __FreeBSD_version >= 800000
986169689Skan		if (!drbr_empty(ifp, txr->br))
987169689Skan			ixv_mq_start_locked(ifp, txr, NULL);
988169689Skan#else
989169689Skan		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
990169689Skan			ixv_start_locked(txr, ifp);
991169689Skan#endif
992169689Skan		IXV_TX_UNLOCK(txr);
993169689Skan		if (more) {
994169689Skan			taskqueue_enqueue(que->tq, &que->que_task);
995169689Skan			return;
996169689Skan		}
997169689Skan	}
998169689Skan
999169689Skan	/* Reenable this interrupt */
1000169689Skan	ixv_enable_queue(adapter, que->msix);
1001169689Skan	return;
1002169689Skan}
1003169689Skan
1004169689Skan/*********************************************************************
1005169689Skan *
1006169689Skan *  MSI Queue Interrupt Service routine
1007169689Skan *
1008169689Skan **********************************************************************/
1009169689Skanvoid
1010169689Skanixv_msix_que(void *arg)
1011169689Skan{
1012169689Skan	struct ix_queue	*que = arg;
1013169689Skan	struct adapter  *adapter = que->adapter;
1014169689Skan	struct tx_ring	*txr = que->txr;
1015169689Skan	struct rx_ring	*rxr = que->rxr;
1016169689Skan	bool		more_tx, more_rx;
1017169689Skan	u32		newitr = 0;
1018169689Skan
1019169689Skan	ixv_disable_queue(adapter, que->msix);
1020169689Skan	++que->irqs;
1021169689Skan
1022169689Skan	more_rx = ixv_rxeof(que, adapter->rx_process_limit);
1023169689Skan
1024169689Skan	IXV_TX_LOCK(txr);
1025169689Skan	more_tx = ixv_txeof(txr);
1026169689Skan	IXV_TX_UNLOCK(txr);
1027169689Skan
1028169689Skan	more_rx = ixv_rxeof(que, adapter->rx_process_limit);
1029169689Skan
1030169689Skan	/* Do AIM now? */
1031169689Skan
1032169689Skan	if (ixv_enable_aim == FALSE)
1033169689Skan		goto no_calc;
1034169689Skan	/*
1035169689Skan	** Do Adaptive Interrupt Moderation:
1036169689Skan        **  - Write out last calculated setting
1037169689Skan	**  - Calculate based on average size over
1038169689Skan	**    the last interval.
1039169689Skan	*/
1040169689Skan        if (que->eitr_setting)
1041169689Skan                IXGBE_WRITE_REG(&adapter->hw,
1042169689Skan                    IXGBE_VTEITR(que->msix),
1043169689Skan		    que->eitr_setting);
1044169689Skan
1045169689Skan        que->eitr_setting = 0;
1046169689Skan
1047169689Skan        /* Idle, do nothing */
1048169689Skan        if ((txr->bytes == 0) && (rxr->bytes == 0))
1049169689Skan                goto no_calc;
1050169689Skan
1051169689Skan	if ((txr->bytes) && (txr->packets))
1052169689Skan               	newitr = txr->bytes/txr->packets;
1053169689Skan	if ((rxr->bytes) && (rxr->packets))
1054169689Skan		newitr = max(newitr,
1055169689Skan		    (rxr->bytes / rxr->packets));
1056169689Skan	newitr += 24; /* account for hardware frame, crc */
1057169689Skan
1058169689Skan	/* set an upper boundary */
1059169689Skan	newitr = min(newitr, 3000);
1060169689Skan
1061169689Skan	/* Be nice to the mid range */
1062169689Skan	if ((newitr > 300) && (newitr < 1200))
1063169689Skan		newitr = (newitr / 3);
1064169689Skan	else
1065169689Skan		newitr = (newitr / 2);
1066169689Skan
1067169689Skan	newitr |= newitr << 16;
1068169689Skan
1069169689Skan        /* save for next interrupt */
1070169689Skan        que->eitr_setting = newitr;
1071169689Skan
1072169689Skan        /* Reset state */
1073169689Skan        txr->bytes = 0;
1074169689Skan        txr->packets = 0;
1075169689Skan        rxr->bytes = 0;
1076169689Skan        rxr->packets = 0;
1077169689Skan
1078169689Skanno_calc:
1079169689Skan	if (more_tx || more_rx)
1080169689Skan		taskqueue_enqueue(que->tq, &que->que_task);
1081169689Skan	else /* Reenable this interrupt */
1082169689Skan		ixv_enable_queue(adapter, que->msix);
1083169689Skan	return;
1084169689Skan}
1085169689Skan
1086169689Skanstatic void
1087169689Skanixv_msix_mbx(void *arg)
1088169689Skan{
1089169689Skan	struct adapter	*adapter = arg;
1090169689Skan	struct ixgbe_hw *hw = &adapter->hw;
1091169689Skan	u32		reg;
1092169689Skan
1093169689Skan	++adapter->mbx_irq;
1094169689Skan
1095169689Skan	/* First get the cause */
1096169689Skan	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
1097169689Skan	/* Clear interrupt with write */
1098169689Skan	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
1099169689Skan
1100169689Skan	/* Link status change */
1101169689Skan	if (reg & IXGBE_EICR_LSC)
1102169689Skan		taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
1103169689Skan
1104169689Skan	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
1105169689Skan	return;
1106169689Skan}
1107169689Skan
1108169689Skan/*********************************************************************
1109169689Skan *
1110169689Skan *  Media Ioctl callback
1111169689Skan *
1112169689Skan *  This routine is called whenever the user queries the status of
1113169689Skan *  the interface using ifconfig.
1114169689Skan *
1115169689Skan **********************************************************************/
1116169689Skanstatic void
1117169689Skanixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1118169689Skan{
1119169689Skan	struct adapter *adapter = ifp->if_softc;
1120169689Skan
1121169689Skan	INIT_DEBUGOUT("ixv_media_status: begin");
1122169689Skan	IXV_CORE_LOCK(adapter);
1123169689Skan	ixv_update_link_status(adapter);
1124169689Skan
1125169689Skan	ifmr->ifm_status = IFM_AVALID;
1126169689Skan	ifmr->ifm_active = IFM_ETHER;
1127169689Skan
1128169689Skan	if (!adapter->link_active) {
1129169689Skan		IXV_CORE_UNLOCK(adapter);
1130169689Skan		return;
1131169689Skan	}
1132169689Skan
1133169689Skan	ifmr->ifm_status |= IFM_ACTIVE;
1134169689Skan
1135169689Skan	switch (adapter->link_speed) {
1136169689Skan		case IXGBE_LINK_SPEED_1GB_FULL:
1137169689Skan			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1138169689Skan			break;
1139169689Skan		case IXGBE_LINK_SPEED_10GB_FULL:
1140169689Skan			ifmr->ifm_active |= IFM_FDX;
1141169689Skan			break;
1142169689Skan	}
1143169689Skan
1144169689Skan	IXV_CORE_UNLOCK(adapter);
1145169689Skan
1146169689Skan	return;
1147169689Skan}
1148169689Skan
1149169689Skan/*********************************************************************
1150169689Skan *
1151169689Skan *  Media Ioctl callback
1152169689Skan *
1153169689Skan *  This routine is called when the user changes speed/duplex using
1154169689Skan *  media/mediopt option with ifconfig.
1155169689Skan *
1156169689Skan **********************************************************************/
1157169689Skanstatic int
1158169689Skanixv_media_change(struct ifnet * ifp)
1159169689Skan{
1160169689Skan	struct adapter *adapter = ifp->if_softc;
1161169689Skan	struct ifmedia *ifm = &adapter->media;
1162169689Skan
1163169689Skan	INIT_DEBUGOUT("ixv_media_change: begin");
1164169689Skan
1165169689Skan	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1166169689Skan		return (EINVAL);
1167169689Skan
1168169689Skan        switch (IFM_SUBTYPE(ifm->ifm_media)) {
1169169689Skan        case IFM_AUTO:
1170169689Skan                break;
1171169689Skan        default:
1172169689Skan                device_printf(adapter->dev, "Only auto media type\n");
1173169689Skan		return (EINVAL);
1174169689Skan        }
1175169689Skan
1176169689Skan	return (0);
1177169689Skan}
1178169689Skan
1179169689Skan/*********************************************************************
1180169689Skan *
1181169689Skan *  This routine maps the mbufs to tx descriptors, allowing the
1182169689Skan *  TX engine to transmit the packets.
1183169689Skan *  	- return 0 on success, positive on failure
1184169689Skan *
1185169689Skan **********************************************************************/
1186169689Skan
1187169689Skanstatic int
1188169689Skanixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
1189169689Skan{
1190169689Skan	struct adapter  *adapter = txr->adapter;
1191169689Skan	u32		olinfo_status = 0, cmd_type_len;
1192169689Skan	u32		paylen = 0;
1193169689Skan	int             i, j, error, nsegs;
1194169689Skan	int		first, last = 0;
1195169689Skan	struct mbuf	*m_head;
1196169689Skan	bus_dma_segment_t segs[32];
1197169689Skan	bus_dmamap_t	map;
1198169689Skan	struct ixv_tx_buf *txbuf;
1199169689Skan	union ixgbe_adv_tx_desc *txd = NULL;
1200169689Skan
1201169689Skan	m_head = *m_headp;
1202169689Skan
1203169689Skan	/* Basic descriptor defines */
1204169689Skan        cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
1205169689Skan	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
1206169689Skan
1207169689Skan	if (m_head->m_flags & M_VLANTAG)
1208169689Skan        	cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1209169689Skan
1210169689Skan        /*
1211169689Skan         * Important to capture the first descriptor
1212169689Skan         * used because it will contain the index of
1213169689Skan         * the one we tell the hardware to report back
1214169689Skan         */
1215169689Skan        first = txr->next_avail_desc;
1216169689Skan	txbuf = &txr->tx_buffers[first];
1217169689Skan	map = txbuf->map;
1218169689Skan
1219169689Skan	/*
1220169689Skan	 * Map the packet for DMA.
1221169689Skan	 */
1222169689Skan	error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1223169689Skan	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1224169689Skan
1225169689Skan	if (error == EFBIG) {
1226169689Skan		struct mbuf *m;
1227169689Skan
1228169689Skan		m = m_defrag(*m_headp, M_DONTWAIT);
1229169689Skan		if (m == NULL) {
1230169689Skan			adapter->mbuf_defrag_failed++;
1231169689Skan			m_freem(*m_headp);
1232169689Skan			*m_headp = NULL;
1233169689Skan			return (ENOBUFS);
1234169689Skan		}
1235169689Skan		*m_headp = m;
1236169689Skan
1237169689Skan		/* Try it again */
1238169689Skan		error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1239169689Skan		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1240169689Skan
1241169689Skan		if (error == ENOMEM) {
1242169689Skan			adapter->no_tx_dma_setup++;
1243169689Skan			return (error);
1244169689Skan		} else if (error != 0) {
1245169689Skan			adapter->no_tx_dma_setup++;
1246169689Skan			m_freem(*m_headp);
1247169689Skan			*m_headp = NULL;
1248169689Skan			return (error);
1249169689Skan		}
1250169689Skan	} else if (error == ENOMEM) {
1251169689Skan		adapter->no_tx_dma_setup++;
1252169689Skan		return (error);
1253169689Skan	} else if (error != 0) {
1254169689Skan		adapter->no_tx_dma_setup++;
1255169689Skan		m_freem(*m_headp);
1256169689Skan		*m_headp = NULL;
1257169689Skan		return (error);
1258169689Skan	}
1259169689Skan
1260169689Skan	/* Make certain there are enough descriptors */
1261169689Skan	if (nsegs > txr->tx_avail - 2) {
1262169689Skan		txr->no_desc_avail++;
1263169689Skan		error = ENOBUFS;
1264169689Skan		goto xmit_fail;
1265169689Skan	}
1266169689Skan	m_head = *m_headp;
1267169689Skan
1268169689Skan	/*
1269169689Skan	** Set up the appropriate offload context
1270169689Skan	** this becomes the first descriptor of
1271169689Skan	** a packet.
1272169689Skan	*/
1273169689Skan	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
1274169689Skan		if (ixv_tso_setup(txr, m_head, &paylen)) {
1275169689Skan			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
1276169689Skan			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
1277169689Skan			olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1278169689Skan			olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
1279169689Skan			++adapter->tso_tx;
1280169689Skan		} else
1281169689Skan			return (ENXIO);
1282169689Skan	} else if (ixv_tx_ctx_setup(txr, m_head))
1283169689Skan		olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1284169689Skan
1285169689Skan        /* Record payload length */
1286169689Skan	if (paylen == 0)
1287169689Skan        	olinfo_status |= m_head->m_pkthdr.len <<
1288169689Skan		    IXGBE_ADVTXD_PAYLEN_SHIFT;
1289169689Skan
1290169689Skan	i = txr->next_avail_desc;
1291169689Skan	for (j = 0; j < nsegs; j++) {
1292169689Skan		bus_size_t seglen;
1293169689Skan		bus_addr_t segaddr;
1294169689Skan
1295169689Skan		txbuf = &txr->tx_buffers[i];
1296169689Skan		txd = &txr->tx_base[i];
1297169689Skan		seglen = segs[j].ds_len;
1298169689Skan		segaddr = htole64(segs[j].ds_addr);
1299169689Skan
1300169689Skan		txd->read.buffer_addr = segaddr;
1301169689Skan		txd->read.cmd_type_len = htole32(txr->txd_cmd |
1302169689Skan		    cmd_type_len |seglen);
1303169689Skan		txd->read.olinfo_status = htole32(olinfo_status);
1304169689Skan		last = i; /* descriptor that will get completion IRQ */
1305169689Skan
1306169689Skan		if (++i == adapter->num_tx_desc)
1307169689Skan			i = 0;
1308169689Skan
1309169689Skan		txbuf->m_head = NULL;
1310169689Skan		txbuf->eop_index = -1;
1311169689Skan	}
1312169689Skan
1313169689Skan	txd->read.cmd_type_len |=
1314169689Skan	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1315169689Skan	txr->tx_avail -= nsegs;
1316169689Skan	txr->next_avail_desc = i;
1317169689Skan
1318169689Skan	txbuf->m_head = m_head;
1319169689Skan	txr->tx_buffers[first].map = txbuf->map;
1320169689Skan	txbuf->map = map;
1321169689Skan	bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
1322169689Skan
1323169689Skan        /* Set the index of the descriptor that will be marked done */
1324169689Skan        txbuf = &txr->tx_buffers[first];
1325169689Skan	txbuf->eop_index = last;
1326169689Skan
1327169689Skan        bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1328169689Skan            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1329169689Skan	/*
1330169689Skan	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
1331169689Skan	 * hardware that this frame is available to transmit.
1332169689Skan	 */
1333169689Skan	++txr->total_packets;
1334169689Skan	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);
1335169689Skan
1336169689Skan	return (0);
1337169689Skan
1338169689Skanxmit_fail:
1339169689Skan	bus_dmamap_unload(txr->txtag, txbuf->map);
1340169689Skan	return (error);
1341169689Skan
1342169689Skan}
1343169689Skan
1344169689Skan
1345169689Skan/*********************************************************************
1346169689Skan *  Multicast Update
1347169689Skan *
1348169689Skan *  This routine is called whenever multicast address list is updated.
1349169689Skan *
1350169689Skan **********************************************************************/
1351169689Skan#define IXGBE_RAR_ENTRIES 16
1352169689Skan
1353169689Skanstatic void
1354169689Skanixv_set_multi(struct adapter *adapter)
1355169689Skan{
1356169689Skan	u8	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1357169689Skan	u8	*update_ptr;
1358169689Skan	struct	ifmultiaddr *ifma;
1359169689Skan	int	mcnt = 0;
1360169689Skan	struct ifnet   *ifp = adapter->ifp;
1361169689Skan
1362169689Skan	IOCTL_DEBUGOUT("ixv_set_multi: begin");
1363169689Skan
1364169689Skan#if __FreeBSD_version < 800000
1365169689Skan	IF_ADDR_LOCK(ifp);
1366169689Skan#else
1367169689Skan	if_maddr_rlock(ifp);
1368169689Skan#endif
1369169689Skan	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1370169689Skan		if (ifma->ifma_addr->sa_family != AF_LINK)
1371169689Skan			continue;
1372169689Skan		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1373169689Skan		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1374169689Skan		    IXGBE_ETH_LENGTH_OF_ADDRESS);
1375169689Skan		mcnt++;
1376169689Skan	}
1377169689Skan#if __FreeBSD_version < 800000
1378169689Skan	IF_ADDR_UNLOCK(ifp);
1379169689Skan#else
1380169689Skan	if_maddr_runlock(ifp);
1381169689Skan#endif
1382169689Skan
1383169689Skan	update_ptr = mta;
1384169689Skan
1385169689Skan	ixgbe_update_mc_addr_list(&adapter->hw,
1386169689Skan	    update_ptr, mcnt, ixv_mc_array_itr);
1387169689Skan
1388169689Skan	return;
1389169689Skan}
1390169689Skan
1391169689Skan/*
1392169689Skan * This is an iterator function now needed by the multicast
1393169689Skan * shared code. It simply feeds the shared code routine the
1394169689Skan * addresses in the array of ixv_set_multi() one by one.
1395169689Skan */
1396169689Skanstatic u8 *
1397169689Skanixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1398169689Skan{
1399169689Skan	u8 *addr = *update_ptr;
1400169689Skan	u8 *newptr;
1401169689Skan	*vmdq = 0;
1402169689Skan
1403169689Skan	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1404169689Skan	*update_ptr = newptr;
1405169689Skan	return addr;
1406169689Skan}
1407169689Skan
1408169689Skan/*********************************************************************
1409169689Skan *  Timer routine
1410169689Skan *
1411169689Skan *  This routine checks for link status,updates statistics,
1412169689Skan *  and runs the watchdog check.
1413169689Skan *
1414169689Skan **********************************************************************/
1415169689Skan
1416169689Skanstatic void
1417169689Skanixv_local_timer(void *arg)
1418169689Skan{
1419169689Skan	struct adapter	*adapter = arg;
1420169689Skan	device_t	dev = adapter->dev;
1421169689Skan	struct tx_ring	*txr = adapter->tx_rings;
1422169689Skan	int		i;
1423169689Skan
1424169689Skan	mtx_assert(&adapter->core_mtx, MA_OWNED);
1425169689Skan
1426169689Skan	ixv_update_link_status(adapter);
1427169689Skan
1428169689Skan	/* Stats Update */
1429169689Skan	ixv_update_stats(adapter);
1430169689Skan
1431169689Skan	/*
1432169689Skan	 * If the interface has been paused
1433169689Skan	 * then don't do the watchdog check
1434169689Skan	 */
1435169689Skan	if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
1436169689Skan		goto out;
1437169689Skan	/*
1438169689Skan	** Check for time since any descriptor was cleaned
1439169689Skan	*/
1440169689Skan        for (i = 0; i < adapter->num_queues; i++, txr++) {
1441169689Skan		IXV_TX_LOCK(txr);
1442169689Skan		if (txr->watchdog_check == FALSE) {
1443169689Skan			IXV_TX_UNLOCK(txr);
1444169689Skan			continue;
1445169689Skan		}
1446169689Skan		if ((ticks - txr->watchdog_time) > IXV_WATCHDOG)
1447169689Skan			goto hung;
1448169689Skan		IXV_TX_UNLOCK(txr);
1449169689Skan	}
1450169689Skanout:
1451169689Skan       	ixv_rearm_queues(adapter, adapter->que_mask);
1452169689Skan	callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
1453169689Skan	return;
1454169689Skan
1455169689Skanhung:
1456169689Skan	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1457169689Skan	device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
1458169689Skan	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
1459169689Skan	    IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
1460169689Skan	device_printf(dev,"TX(%d) desc avail = %d,"
1461169689Skan	    "Next TX to Clean = %d\n",
1462169689Skan	    txr->me, txr->tx_avail, txr->next_to_clean);
1463169689Skan	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1464169689Skan	adapter->watchdog_events++;
1465169689Skan	IXV_TX_UNLOCK(txr);
1466169689Skan	ixv_init_locked(adapter);
1467169689Skan}
1468169689Skan
1469169689Skan/*
1470169689Skan** Note: this routine updates the OS on the link state
1471169689Skan**	the real check of the hardware only happens with
1472169689Skan**	a link interrupt.
1473169689Skan*/
1474169689Skanstatic void
1475169689Skanixv_update_link_status(struct adapter *adapter)
1476169689Skan{
1477169689Skan	struct ifnet	*ifp = adapter->ifp;
1478169689Skan	struct tx_ring *txr = adapter->tx_rings;
1479169689Skan	device_t dev = adapter->dev;
1480169689Skan
1481169689Skan
1482169689Skan	if (adapter->link_up){
1483169689Skan		if (adapter->link_active == FALSE) {
1484169689Skan			if (bootverbose)
1485169689Skan				device_printf(dev,"Link is up %d Gbps %s \n",
1486169689Skan				    ((adapter->link_speed == 128)? 10:1),
1487169689Skan				    "Full Duplex");
1488169689Skan			adapter->link_active = TRUE;
1489169689Skan			if_link_state_change(ifp, LINK_STATE_UP);
1490169689Skan		}
1491169689Skan	} else { /* Link down */
1492169689Skan		if (adapter->link_active == TRUE) {
1493169689Skan			if (bootverbose)
1494169689Skan				device_printf(dev,"Link is Down\n");
1495169689Skan			if_link_state_change(ifp, LINK_STATE_DOWN);
1496169689Skan			adapter->link_active = FALSE;
1497169689Skan			for (int i = 0; i < adapter->num_queues;
1498169689Skan			    i++, txr++)
1499169689Skan				txr->watchdog_check = FALSE;
1500169689Skan		}
1501169689Skan	}
1502169689Skan
1503169689Skan	return;
1504169689Skan}
1505169689Skan
1506169689Skan
1507169689Skan/*********************************************************************
1508169689Skan *
1509169689Skan *  This routine disables all traffic on the adapter by issuing a
1510169689Skan *  global reset on the MAC and deallocates TX/RX buffers.
1511169689Skan *
1512169689Skan **********************************************************************/
1513169689Skan
1514169689Skanstatic void
1515169689Skanixv_stop(void *arg)
1516169689Skan{
1517169689Skan	struct ifnet   *ifp;
1518169689Skan	struct adapter *adapter = arg;
1519169689Skan	struct ixgbe_hw *hw = &adapter->hw;
1520169689Skan	ifp = adapter->ifp;
1521169689Skan
1522169689Skan	mtx_assert(&adapter->core_mtx, MA_OWNED);
1523169689Skan
1524169689Skan	INIT_DEBUGOUT("ixv_stop: begin\n");
1525169689Skan	ixv_disable_intr(adapter);
1526169689Skan
1527169689Skan	/* Tell the stack that the interface is no longer active */
1528169689Skan	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1529169689Skan
1530169689Skan	ixgbe_reset_hw(hw);
1531169689Skan	adapter->hw.adapter_stopped = FALSE;
1532169689Skan	ixgbe_stop_adapter(hw);
1533169689Skan	callout_stop(&adapter->timer);
1534169689Skan
1535169689Skan	/* reprogram the RAR[0] in case user changed it. */
1536169689Skan	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1537169689Skan
1538169689Skan	return;
1539169689Skan}
1540169689Skan
1541169689Skan
1542169689Skan/*********************************************************************
1543169689Skan *
1544169689Skan *  Determine hardware revision.
1545169689Skan *
1546169689Skan **********************************************************************/
1547169689Skanstatic void
1548169689Skanixv_identify_hardware(struct adapter *adapter)
1549169689Skan{
1550169689Skan	device_t        dev = adapter->dev;
1551169689Skan	u16		pci_cmd_word;
1552169689Skan
1553169689Skan	/*
1554169689Skan	** Make sure BUSMASTER is set, on a VM under
1555169689Skan	** KVM it may not be and will break things.
1556169689Skan	*/
1557169689Skan	pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1558169689Skan	if (!((pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1559169689Skan	    (pci_cmd_word & PCIM_CMD_MEMEN))) {
1560169689Skan		INIT_DEBUGOUT("Memory Access and/or Bus Master "
1561169689Skan		    "bits were not set!\n");
1562169689Skan		pci_cmd_word |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1563169689Skan		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
1564169689Skan	}
1565169689Skan
1566169689Skan	/* Save off the information about this board */
1567169689Skan	adapter->hw.vendor_id = pci_get_vendor(dev);
1568169689Skan	adapter->hw.device_id = pci_get_device(dev);
1569169689Skan	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1570169689Skan	adapter->hw.subsystem_vendor_id =
1571169689Skan	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
1572169689Skan	adapter->hw.subsystem_device_id =
1573169689Skan	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
1574169689Skan
1575169689Skan	return;
1576169689Skan}
1577169689Skan
1578169689Skan/*********************************************************************
1579169689Skan *
1580169689Skan *  Setup MSIX Interrupt resources and handlers
1581169689Skan *
1582169689Skan **********************************************************************/
1583169689Skanstatic int
1584169689Skanixv_allocate_msix(struct adapter *adapter)
1585169689Skan{
1586169689Skan	device_t        dev = adapter->dev;
1587169689Skan	struct 		ix_queue *que = adapter->queues;
1588169689Skan	int 		error, rid, vector = 0;
1589169689Skan
1590169689Skan	for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
1591169689Skan		rid = vector + 1;
1592169689Skan		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1593169689Skan		    RF_SHAREABLE | RF_ACTIVE);
1594169689Skan		if (que->res == NULL) {
1595169689Skan			device_printf(dev,"Unable to allocate"
1596169689Skan		    	    " bus resource: que interrupt [%d]\n", vector);
1597169689Skan			return (ENXIO);
1598169689Skan		}
1599169689Skan		/* Set the handler function */
1600169689Skan		error = bus_setup_intr(dev, que->res,
1601169689Skan		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1602169689Skan		    ixv_msix_que, que, &que->tag);
1603169689Skan		if (error) {
1604169689Skan			que->res = NULL;
1605169689Skan			device_printf(dev, "Failed to register QUE handler");
1606169689Skan			return (error);
1607169689Skan		}
1608169689Skan#if __FreeBSD_version >= 800504
1609169689Skan		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
1610169689Skan#endif
1611169689Skan		que->msix = vector;
1612169689Skan        	adapter->que_mask |= (u64)(1 << que->msix);
1613169689Skan		/*
1614169689Skan		** Bind the msix vector, and thus the
1615169689Skan		** ring to the corresponding cpu.
1616169689Skan		*/
1617169689Skan		if (adapter->num_queues > 1)
1618169689Skan			bus_bind_intr(dev, que->res, i);
1619169689Skan
1620169689Skan		TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
1621169689Skan		que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
1622169689Skan		    taskqueue_thread_enqueue, &que->tq);
1623169689Skan		taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1624169689Skan		    device_get_nameunit(adapter->dev));
1625169689Skan	}
1626169689Skan
1627169689Skan	/* and Mailbox */
1628169689Skan	rid = vector + 1;
1629169689Skan	adapter->res = bus_alloc_resource_any(dev,
1630169689Skan    	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1631169689Skan	if (!adapter->res) {
1632169689Skan		device_printf(dev,"Unable to allocate"
1633169689Skan    	    " bus resource: MBX interrupt [%d]\n", rid);
1634169689Skan		return (ENXIO);
1635169689Skan	}
1636169689Skan	/* Set the mbx handler function */
1637169689Skan	error = bus_setup_intr(dev, adapter->res,
1638169689Skan	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
1639169689Skan	    ixv_msix_mbx, adapter, &adapter->tag);
1640169689Skan	if (error) {
1641169689Skan		adapter->res = NULL;
1642169689Skan		device_printf(dev, "Failed to register LINK handler");
1643169689Skan		return (error);
1644169689Skan	}
1645169689Skan#if __FreeBSD_version >= 800504
1646169689Skan	bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
1647169689Skan#endif
1648169689Skan	adapter->mbxvec = vector;
1649169689Skan	/* Tasklets for Mailbox */
1650169689Skan	TASK_INIT(&adapter->mbx_task, 0, ixv_handle_mbx, adapter);
1651169689Skan	adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
1652169689Skan	    taskqueue_thread_enqueue, &adapter->tq);
1653169689Skan	taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
1654169689Skan	    device_get_nameunit(adapter->dev));
1655169689Skan	/*
1656169689Skan	** Due to a broken design QEMU will fail to properly
1657169689Skan	** enable the guest for MSIX unless the vectors in
1658169689Skan	** the table are all set up, so we must rewrite the
1659169689Skan	** ENABLE in the MSIX control register again at this
1660169689Skan	** point to cause it to successfully initialize us.
1661169689Skan	*/
1662169689Skan	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1663169689Skan		int msix_ctrl;
1664169689Skan		pci_find_cap(dev, PCIY_MSIX, &rid);
1665169689Skan		rid += PCIR_MSIX_CTRL;
1666169689Skan		msix_ctrl = pci_read_config(dev, rid, 2);
1667169689Skan		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1668169689Skan		pci_write_config(dev, rid, msix_ctrl, 2);
1669169689Skan	}
1670169689Skan
1671169689Skan	return (0);
1672169689Skan}
1673169689Skan
/*
 * Setup MSIX resources, note that the VF
 * device MUST use MSIX, there is no fallback.
 *
 * Returns the number of vectors obtained (2: one queue vector plus
 * the mailbox vector) on success, or ENXIO on failure.  The caller
 * (ixv_allocate_pci_resources) distinguishes the two cases by
 * comparing the return value against ENXIO.
 */
static int
ixv_setup_msix(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	int rid, vectors, want = 2;


	/* First try MSI/X */
	/* The MSIX table lives in BAR 3 on this device. */
	rid = PCIR_BAR(3);
	adapter->msix_mem = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
       	if (!adapter->msix_mem) {
		device_printf(adapter->dev,
		    "Unable to map MSIX table \n");
		goto out;
	}

	/* Need at least two vectors: one queue + one mailbox. */
	vectors = pci_msix_count(dev);
	if (vectors < 2) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rid, adapter->msix_mem);
		adapter->msix_mem = NULL;
		goto out;
	}

	/*
	** Want two vectors: one for a queue,
	** plus an additional for mailbox.
	*/
	/*
	** NOTE(review): on pci_alloc_msix() failure msix_mem stays
	** mapped here; it is released later by ixv_free_pci_resources().
	*/
	if (pci_alloc_msix(dev, &want) == 0) {
               	device_printf(adapter->dev,
		    "Using MSIX interrupts with %d vectors\n", want);
		return (want);
	}
out:
	device_printf(adapter->dev,"MSIX config error\n");
	return (ENXIO);
}
1716169689Skan
1717169689Skan
1718169689Skanstatic int
1719169689Skanixv_allocate_pci_resources(struct adapter *adapter)
1720169689Skan{
1721169689Skan	int             rid;
1722169689Skan	device_t        dev = adapter->dev;
1723169689Skan
1724169689Skan	rid = PCIR_BAR(0);
1725169689Skan	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1726169689Skan	    &rid, RF_ACTIVE);
1727169689Skan
1728169689Skan	if (!(adapter->pci_mem)) {
1729169689Skan		device_printf(dev,"Unable to allocate bus resource: memory\n");
1730169689Skan		return (ENXIO);
1731169689Skan	}
1732169689Skan
1733169689Skan	adapter->osdep.mem_bus_space_tag =
1734169689Skan		rman_get_bustag(adapter->pci_mem);
1735169689Skan	adapter->osdep.mem_bus_space_handle =
1736169689Skan		rman_get_bushandle(adapter->pci_mem);
1737169689Skan	adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
1738169689Skan
1739169689Skan	adapter->num_queues = 1;
1740169689Skan	adapter->hw.back = &adapter->osdep;
1741169689Skan
1742169689Skan	/*
1743169689Skan	** Now setup MSI/X, should
1744169689Skan	** return us the number of
1745169689Skan	** configured vectors.
1746169689Skan	*/
1747169689Skan	adapter->msix = ixv_setup_msix(adapter);
1748169689Skan	if (adapter->msix == ENXIO)
1749169689Skan		return (ENXIO);
1750169689Skan	else
1751169689Skan		return (0);
1752169689Skan}
1753169689Skan
/*
 * Tear down interrupt and memory resources; the inverse of
 * ixv_allocate_pci_resources/ixv_allocate_msix/ixv_setup_msix.
 * Safe to call from a failed attach (see the res == NULL check).
 */
static void
ixv_free_pci_resources(struct adapter * adapter)
{
	struct 		ix_queue *que = adapter->queues;
	device_t	dev = adapter->dev;
	int		rid, memrid;

	/*
	** BAR holding the MSIX table; presumably MSIX_BAR == 3 to
	** match PCIR_BAR(3) used in ixv_setup_msix() -- verify.
	*/
	memrid = PCIR_BAR(MSIX_BAR);

	/*
	** There is a slight possibility of a failure mode
	** in attach that will result in entering this function
	** before interrupt resources have been initialized, and
	** in that case we do not want to execute the loops below
	** We can detect this reliably by the state of the adapter
	** res pointer.
	*/
	if (adapter->res == NULL)
		goto mem;

	/*
	**  Release all msix queue resources:
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* rid mirrors the "vector + 1" used at allocation */
		rid = que->msix + 1;
		if (que->tag != NULL) {
			bus_teardown_intr(dev, que->res, que->tag);
			que->tag = NULL;
		}
		if (que->res != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
	}


	/* Clean the Legacy or Link interrupt last */
	if (adapter->mbxvec) /* we are doing MSIX */
		rid = adapter->mbxvec + 1;
	else
		(adapter->msix != 0) ? (rid = 1):(rid = 0);

	if (adapter->tag != NULL) {
		bus_teardown_intr(dev, adapter->res, adapter->tag);
		adapter->tag = NULL;
	}
	if (adapter->res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);

mem:
	/* Give back the MSIX vectors before unmapping the table */
	if (adapter->msix)
		pci_release_msi(dev);

	if (adapter->msix_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    memrid, adapter->msix_mem);

	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    PCIR_BAR(0), adapter->pci_mem);

	return;
}
1815169689Skan
/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static void
ixv_setup_interface(device_t dev, struct adapter *adapter)
{
	struct ifnet   *ifp;

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = 1000000000;
	ifp->if_init = ixv_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixv_ioctl;
	/* Multiqueue transmit on 8.0+, legacy if_start otherwise */
#if __FreeBSD_version >= 800000
	ifp->if_transmit = ixv_mq_start;
	ifp->if_qflush = ixv_qflush;
#else
	ifp->if_start = ixv_start;
#endif
	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;

	ether_ifattach(ifp, adapter->hw.mac.addr);

	adapter->max_frame_size =
	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
			     |  IFCAP_VLAN_HWTSO
			     |  IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;

	/* Don't enable LRO by default */
	/*
	** Deliberate ordering: IFCAP_LRO is added to if_capabilities
	** AFTER if_capenable was copied from it, so LRO is advertised
	** as available but starts out disabled.
	*/
	ifp->if_capabilities |= IFCAP_LRO;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
		     ixv_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}
1878169689Skan
1879169689Skanstatic void
1880169689Skanixv_config_link(struct adapter *adapter)
1881169689Skan{
1882169689Skan	struct ixgbe_hw *hw = &adapter->hw;
1883169689Skan	u32	autoneg, err = 0;
1884169689Skan	bool	negotiate = TRUE;
1885169689Skan
1886169689Skan	if (hw->mac.ops.check_link)
1887169689Skan		err = hw->mac.ops.check_link(hw, &autoneg,
1888169689Skan		    &adapter->link_up, FALSE);
1889169689Skan	if (err)
1890169689Skan		goto out;
1891169689Skan
1892169689Skan	if (hw->mac.ops.setup_link)
1893169689Skan               	err = hw->mac.ops.setup_link(hw, autoneg,
1894169689Skan		    negotiate, adapter->link_up);
1895169689Skanout:
1896169689Skan	return;
1897169689Skan}
1898169689Skan
1899169689Skan/********************************************************************
1900169689Skan * Manage DMA'able memory.
1901169689Skan *******************************************************************/
1902169689Skanstatic void
1903169689Skanixv_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1904169689Skan{
1905169689Skan	if (error)
1906169689Skan		return;
1907169689Skan	*(bus_addr_t *) arg = segs->ds_addr;
1908169689Skan	return;
1909169689Skan}
1910169689Skan
/*
 * Allocate, and bus-load, one contiguous DMA region of `size` bytes.
 * On success fills in dma->dma_tag/dma_map/dma_vaddr/dma_paddr/dma_size
 * and returns 0; on failure unwinds whatever was created (goto ladder)
 * and returns the busdma error.  Pair with ixv_dma_free().
 */
static int
ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
		struct ixv_dma_alloc *dma, int mapflags)
{
	device_t dev = adapter->dev;
	int             r;

	r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),	/* parent */
			       DBA_ALIGN, 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,	/* filter, filterarg */
			       size,	/* maxsize */
			       1,	/* nsegments */
			       size,	/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL,	/* lockfunc */
			       NULL,	/* lockfuncarg */
			       &dma->dma_tag);
	if (r != 0) {
		device_printf(dev,"ixv_dma_malloc: bus_dma_tag_create failed; "
		       "error %u\n", r);
		goto fail_0;
	}
	r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
			     BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		device_printf(dev,"ixv_dma_malloc: bus_dmamem_alloc failed; "
		       "error %u\n", r);
		goto fail_1;
	}
	/* Resolve the bus address via the callback (single segment). */
	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
			    size,
			    ixv_dmamap_cb,
			    &dma->dma_paddr,
			    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		device_printf(dev,"ixv_dma_malloc: bus_dmamap_load failed; "
		       "error %u\n", r);
		goto fail_2;
	}
	dma->dma_size = size;
	return (0);
	/* Unwind in reverse order of creation */
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return (r);
}
1963169689Skan
/*
 * Release a region created by ixv_dma_malloc().  The order is
 * fixed: sync, unload the map, free the memory, destroy the tag.
 */
static void
ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
{
	bus_dmamap_sync(dma->dma_tag, dma->dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
}
1973169689Skan
1974169689Skan
1975169689Skan/*********************************************************************
1976169689Skan *
1977169689Skan *  Allocate memory for the transmit and receive rings, and then
1978169689Skan *  the descriptors associated with each, called only once at attach.
1979169689Skan *
1980169689Skan **********************************************************************/
static int
ixv_allocate_queues(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct ix_queue	*que;
	struct tx_ring	*txr;
	struct rx_ring	*rxr;
	int rsize, tsize, error = 0;
	int txconf = 0, rxconf = 0;

        /* First allocate the top level queue structs */
        if (!(adapter->queues =
            (struct ix_queue *) malloc(sizeof(struct ix_queue) *
            adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
                device_printf(dev, "Unable to allocate queue memory\n");
                error = ENOMEM;
                goto fail;
        }

	/* First allocate the TX ring struct memory */
	if (!(adapter->tx_rings =
	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate TX ring memory\n");
		error = ENOMEM;
		goto tx_fail;
	}

	/* Next allocate the RX */
	if (!(adapter->rx_rings =
	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
	    adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto rx_fail;
	}

	/* For the ring itself */
	tsize = roundup2(adapter->num_tx_desc *
	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);

	/*
	 * Now set up the TX queues, txconf is needed to handle the
	 * possibility that things fail midcourse and we need to
	 * undo memory gracefully
	 */
	for (int i = 0; i < adapter->num_queues; i++, txconf++) {
		/* Set up some basics */
		txr = &adapter->tx_rings[i];
		txr->adapter = adapter;
		txr->me = i;

		/* Initialize the TX side lock */
		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
		    device_get_nameunit(dev), txr->me);
		mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);

		if (ixv_dma_malloc(adapter, tsize,
			&txr->txdma, BUS_DMA_NOWAIT)) {
			device_printf(dev,
			    "Unable to allocate TX Descriptor memory\n");
			error = ENOMEM;
			goto err_tx_desc;
		}
		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
		bzero((void *)txr->tx_base, tsize);

        	/* Now allocate transmit buffers for the ring */
		/*
		** NOTE(review): on failure here the current ring's txdma
		** (already allocated above) is not counted by txconf and
		** is leaked by the unwind below; also
		** ixv_allocate_transmit_buffers() itself calls
		** ixv_free_transmit_structures() on failure, which frees
		** adapter->tx_rings -- verify before touching this path.
		*/
        	if (ixv_allocate_transmit_buffers(txr)) {
			device_printf(dev,
			    "Critical Failure setting up transmit buffers\n");
			error = ENOMEM;
			goto err_tx_desc;
        	}
#if __FreeBSD_version >= 800000
		/* Allocate a buf ring */
		txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
		    M_WAITOK, &txr->tx_mtx);
		if (txr->br == NULL) {
			device_printf(dev,
			    "Critical Failure setting up buf ring\n");
			error = ENOMEM;
			goto err_tx_desc;
		}
#endif
	}

	/*
	 * Next the RX queues...
	 */
	rsize = roundup2(adapter->num_rx_desc *
	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
	for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
		rxr = &adapter->rx_rings[i];
		/* Set up some basics */
		rxr->adapter = adapter;
		rxr->me = i;

		/* Initialize the RX side lock */
		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
		    device_get_nameunit(dev), rxr->me);
		mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);

		if (ixv_dma_malloc(adapter, rsize,
			&rxr->rxdma, BUS_DMA_NOWAIT)) {
			device_printf(dev,
			    "Unable to allocate RxDescriptor memory\n");
			error = ENOMEM;
			goto err_rx_desc;
		}
		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
		bzero((void *)rxr->rx_base, rsize);

        	/* Allocate receive buffers for the ring*/
		if (ixv_allocate_receive_buffers(rxr)) {
			device_printf(dev,
			    "Critical Failure setting up receive buffers\n");
			error = ENOMEM;
			goto err_rx_desc;
		}
	}

	/*
	** Finally set up the queue holding structs
	*/
	for (int i = 0; i < adapter->num_queues; i++) {
		que = &adapter->queues[i];
		que->adapter = adapter;
		que->txr = &adapter->tx_rings[i];
		que->rxr = &adapter->rx_rings[i];
	}

	return (0);

	/*
	** Unwind: txconf/rxconf count the rings whose descriptor DMA
	** completed, so only those are freed here.
	*/
err_rx_desc:
	for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
		ixv_dma_free(adapter, &rxr->rxdma);
err_tx_desc:
	for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
		ixv_dma_free(adapter, &txr->txdma);
	free(adapter->rx_rings, M_DEVBUF);
rx_fail:
	free(adapter->tx_rings, M_DEVBUF);
tx_fail:
	free(adapter->queues, M_DEVBUF);
fail:
	return (error);
}
2129169689Skan
2130169689Skan
2131169689Skan/*********************************************************************
2132169689Skan *
2133169689Skan *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2134169689Skan *  the information needed to transmit a packet on the wire. This is
2135169689Skan *  called only once at attach, setup is done every reset.
2136169689Skan *
2137169689Skan **********************************************************************/
2138169689Skanstatic int
2139169689Skanixv_allocate_transmit_buffers(struct tx_ring *txr)
2140169689Skan{
2141169689Skan	struct adapter *adapter = txr->adapter;
2142169689Skan	device_t dev = adapter->dev;
2143169689Skan	struct ixv_tx_buf *txbuf;
2144169689Skan	int error, i;
2145169689Skan
2146169689Skan	/*
2147169689Skan	 * Setup DMA descriptor areas.
2148169689Skan	 */
2149169689Skan	if ((error = bus_dma_tag_create(NULL,		/* parent */
2150169689Skan			       1, 0,		/* alignment, bounds */
2151169689Skan			       BUS_SPACE_MAXADDR,	/* lowaddr */
2152169689Skan			       BUS_SPACE_MAXADDR,	/* highaddr */
2153169689Skan			       NULL, NULL,		/* filter, filterarg */
2154169689Skan			       IXV_TSO_SIZE,		/* maxsize */
2155169689Skan			       32,			/* nsegments */
2156169689Skan			       PAGE_SIZE,		/* maxsegsize */
2157169689Skan			       0,			/* flags */
2158169689Skan			       NULL,			/* lockfunc */
2159169689Skan			       NULL,			/* lockfuncarg */
2160169689Skan			       &txr->txtag))) {
2161169689Skan		device_printf(dev,"Unable to allocate TX DMA tag\n");
2162169689Skan		goto fail;
2163169689Skan	}
2164169689Skan
2165169689Skan	if (!(txr->tx_buffers =
2166169689Skan	    (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
2167169689Skan	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2168169689Skan		device_printf(dev, "Unable to allocate tx_buffer memory\n");
2169169689Skan		error = ENOMEM;
2170169689Skan		goto fail;
2171169689Skan	}
2172169689Skan
2173169689Skan        /* Create the descriptor buffer dma maps */
2174169689Skan	txbuf = txr->tx_buffers;
2175169689Skan	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2176169689Skan		error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
2177169689Skan		if (error != 0) {
2178169689Skan			device_printf(dev, "Unable to create TX DMA map\n");
2179169689Skan			goto fail;
2180169689Skan		}
2181169689Skan	}
2182169689Skan
2183169689Skan	return 0;
2184169689Skanfail:
2185169689Skan	/* We free all, it handles case where we are in the middle */
2186169689Skan	ixv_free_transmit_structures(adapter);
2187169689Skan	return (error);
2188169689Skan}
2189169689Skan
2190169689Skan/*********************************************************************
2191169689Skan *
2192169689Skan *  Initialize a transmit ring.
2193169689Skan *
2194169689Skan **********************************************************************/
/*
 * Reset one TX ring to its empty state: zero the descriptors,
 * reset the indices, free any mbufs still attached, and sync the
 * descriptor DMA region.  Runs under the ring's TX lock.
 */
static void
ixv_setup_transmit_ring(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct ixv_tx_buf *txbuf;
	int i;

	/* Clear the old ring contents */
	IXV_TX_LOCK(txr);
	bzero((void *)txr->tx_base,
	      (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
	/* Reset indices */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	/* Free any existing tx buffers. */
        txbuf = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
		if (txbuf->m_head != NULL) {
			bus_dmamap_sync(txr->txtag, txbuf->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txtag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}
		/* Clear the EOP index (-1 presumably means "none" -- verify) */
		txbuf->eop_index = -1;
        }

	/* Set number of descriptors available */
	txr->tx_avail = adapter->num_tx_desc;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	IXV_TX_UNLOCK(txr);
}
2231169689Skan
2232169689Skan/*********************************************************************
2233169689Skan *
2234169689Skan *  Initialize all transmit rings.
2235169689Skan *
2236169689Skan **********************************************************************/
2237169689Skanstatic int
2238169689Skanixv_setup_transmit_structures(struct adapter *adapter)
2239169689Skan{
2240169689Skan	struct tx_ring *txr = adapter->tx_rings;
2241169689Skan
2242169689Skan	for (int i = 0; i < adapter->num_queues; i++, txr++)
2243169689Skan		ixv_setup_transmit_ring(txr);
2244169689Skan
2245169689Skan	return (0);
2246169689Skan}
2247169689Skan
2248169689Skan/*********************************************************************
2249169689Skan *
2250169689Skan *  Enable transmit unit.
2251169689Skan *
2252169689Skan **********************************************************************/
2253169689Skanstatic void
2254169689Skanixv_initialize_transmit_units(struct adapter *adapter)
2255169689Skan{
2256169689Skan	struct tx_ring	*txr = adapter->tx_rings;
2257169689Skan	struct ixgbe_hw	*hw = &adapter->hw;
2258169689Skan
2259169689Skan
2260169689Skan	for (int i = 0; i < adapter->num_queues; i++, txr++) {
2261169689Skan		u64	tdba = txr->txdma.dma_paddr;
2262169689Skan		u32	txctrl, txdctl;
2263169689Skan
2264169689Skan		/* Set WTHRESH to 8, burst writeback */
2265169689Skan		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
2266169689Skan		txdctl |= (8 << 16);
2267169689Skan		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
2268169689Skan		/* Now enable */
2269169689Skan		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
2270169689Skan		txdctl |= IXGBE_TXDCTL_ENABLE;
2271169689Skan		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
2272169689Skan
2273169689Skan		/* Set the HW Tx Head and Tail indices */
2274169689Skan	    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
2275169689Skan	    	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
2276169689Skan
2277169689Skan		/* Setup Transmit Descriptor Cmd Settings */
2278169689Skan		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2279169689Skan		txr->watchdog_check = FALSE;
2280169689Skan
2281169689Skan		/* Set Ring parameters */
2282169689Skan		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
2283169689Skan		       (tdba & 0x00000000ffffffffULL));
2284169689Skan		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
2285169689Skan		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
2286169689Skan		    adapter->num_tx_desc *
2287169689Skan		    sizeof(struct ixgbe_legacy_tx_desc));
2288169689Skan		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
2289169689Skan		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2290169689Skan		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
2291169689Skan		break;
2292169689Skan	}
2293169689Skan
2294169689Skan	return;
2295169689Skan}
2296169689Skan
2297169689Skan/*********************************************************************
2298169689Skan *
2299169689Skan *  Free all transmit rings.
2300169689Skan *
2301169689Skan **********************************************************************/
/*
 * Free every TX ring's buffers, descriptor DMA, and lock, then the
 * ring array itself.  Note the DMA is freed while the ring lock is
 * still held; the lock is destroyed only after release.
 */
static void
ixv_free_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IXV_TX_LOCK(txr);
		ixv_free_transmit_buffers(txr);
		ixv_dma_free(adapter, &txr->txdma);
		IXV_TX_UNLOCK(txr);
		IXV_TX_LOCK_DESTROY(txr);
	}
	free(adapter->tx_rings, M_DEVBUF);
}
2316169689Skan
2317169689Skan/*********************************************************************
2318169689Skan *
2319169689Skan *  Free transmit ring related data structures.
2320169689Skan *
2321169689Skan **********************************************************************/
/*
 * Release everything ixv_allocate_transmit_buffers() created for one
 * ring: per-buffer mbufs and DMA maps, the buf ring, the tx_buffers
 * array, and finally the DMA tag.  Safe to call on a partially
 * constructed ring (NULL checks throughout).
 */
static void
ixv_free_transmit_buffers(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct ixv_tx_buf *tx_buffer;
	int             i;

	INIT_DEBUGOUT("free_transmit_ring: begin");

	if (txr->tx_buffers == NULL)
		return;

	tx_buffer = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
		if (tx_buffer->m_head != NULL) {
			/* Buffer has a loaded mbuf: sync, unload, free all */
			bus_dmamap_sync(txr->txtag, tx_buffer->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txtag,
			    tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
			if (tx_buffer->map != NULL) {
				bus_dmamap_destroy(txr->txtag,
				    tx_buffer->map);
				tx_buffer->map = NULL;
			}
		} else if (tx_buffer->map != NULL) {
			/* Map only, no mbuf attached */
			bus_dmamap_unload(txr->txtag,
			    tx_buffer->map);
			bus_dmamap_destroy(txr->txtag,
			    tx_buffer->map);
			tx_buffer->map = NULL;
		}
	}
#if __FreeBSD_version >= 800000
	if (txr->br != NULL)
		buf_ring_free(txr->br, M_DEVBUF);
#endif
	if (txr->tx_buffers != NULL) {
		free(txr->tx_buffers, M_DEVBUF);
		txr->tx_buffers = NULL;
	}
	if (txr->txtag != NULL) {
		bus_dma_tag_destroy(txr->txtag);
		txr->txtag = NULL;
	}
	return;
}
2370169689Skan
/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN or CSUM
 *
 *  Builds one advanced context descriptor at the next available ring
 *  slot describing the L2/L3 header lengths, VLAN tag and L4 checksum
 *  type for this frame. Always consumes one descriptor once past the
 *  early-return checks.
 *
 *  Returns TRUE when the data descriptors should request offload,
 *  FALSE when no offload applies or the L2+L3 headers are not
 *  contiguous in the first mbuf.
 *
 **********************************************************************/

static boolean_t
ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
{
	struct adapter *adapter = txr->adapter;
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ixv_tx_buf        *tx_buffer;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	int  ehdrlen, ip_hlen = 0;
	u16	etype;
	u8	ipproto = 0;
	bool	offload = TRUE;
	int ctxd = txr->next_avail_desc;
	u16 vtag = 0;


	/* No checksum offload requested for this packet? */
	if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
		offload = FALSE;


	tx_buffer = &txr->tx_buffers[ctxd];
	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

	/*
	** In advanced descriptors the vlan tag must
	** be placed into the descriptor itself.
	*/
	if (mp->m_flags & M_VLANTAG) {
		/*
		 * NOTE(review): the tag is byte-swapped with htole16()
		 * and then shifted into a host-order word; a no-op on
		 * little-endian, but the swap-before-shift ordering looks
		 * suspect for big-endian — confirm against the datasheet.
		 */
		vtag = htole16(mp->m_pkthdr.ether_vtag);
		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	} else if (offload == FALSE)
		return FALSE;	/* no VLAN and no csum: nothing to set up */

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;

	/* L3: note IP version and header length for the hardware */
	switch (etype) {
		case ETHERTYPE_IP:
			ip = (struct ip *)(mp->m_data + ehdrlen);
			ip_hlen = ip->ip_hl << 2;
			/* L2+L3 headers must sit in the first mbuf */
			if (mp->m_len < ehdrlen + ip_hlen)
				return (FALSE);
			ipproto = ip->ip_p;
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
			break;
		case ETHERTYPE_IPV6:
			ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
			ip_hlen = sizeof(struct ip6_hdr);
			if (mp->m_len < ehdrlen + ip_hlen)
				return (FALSE);
			ipproto = ip6->ip6_nxt;
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
			break;
		default:
			offload = FALSE;
			break;
	}

	vlan_macip_lens |= ip_hlen;
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	/* L4: pick the checksum type, gated on what the stack asked for */
	switch (ipproto) {
		case IPPROTO_TCP:
			if (mp->m_pkthdr.csum_flags & CSUM_TCP)
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			break;

		case IPPROTO_UDP:
			if (mp->m_pkthdr.csum_flags & CSUM_UDP)
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
			break;

#if __FreeBSD_version >= 800000
		case IPPROTO_SCTP:
			if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			break;
#endif
		default:
			offload = FALSE;
			break;
	}

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(0);

	/* Context descriptors carry no data buffer of their own */
	tx_buffer->m_head = NULL;
	tx_buffer->eop_index = -1;

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == adapter->num_tx_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

        return (offload);
}
2493169689Skan
/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO) on
 *  adapters using advanced tx descriptors
 *
 *  Builds the TSO context descriptor (MSS, L2/L3/L4 header lengths)
 *  for an IPv4/TCP packet — only IPv4 is handled here. On success,
 *  *paylen is set to the TCP payload length, the in-packet checksum
 *  fields are seeded, and one descriptor is consumed.
 *
 *  Returns FALSE when the packet is not TCP or the full header chain
 *  is not contiguous in the first mbuf.
 *
 **********************************************************************/
static boolean_t
ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
{
	struct adapter *adapter = txr->adapter;
	struct ixgbe_adv_tx_context_desc *TXD;
	struct ixv_tx_buf        *tx_buffer;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	u32 mss_l4len_idx = 0;
	u16 vtag = 0;
	int ctxd, ehdrlen,  hdrlen, ip_hlen, tcp_hlen;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct tcphdr *th;


	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	else
		ehdrlen = ETHER_HDR_LEN;

        /* Ensure we have at least the IP+TCP header in the first mbuf. */
        if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
		return FALSE;

	ctxd = txr->next_avail_desc;
	tx_buffer = &txr->tx_buffers[ctxd];
	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];

	ip = (struct ip *)(mp->m_data + ehdrlen);
	if (ip->ip_p != IPPROTO_TCP)
		return FALSE;   /* 0 */
	/*
	 * Zero the IP checksum and seed th_sum with the pseudo-header
	 * sum; presumably the hardware fills in the rest per segment —
	 * standard TSO convention, confirm against the datasheet.
	 */
	ip->ip_sum = 0;
	ip_hlen = ip->ip_hl << 2;
	th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
	th->th_sum = in_pseudo(ip->ip_src.s_addr,
	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
	tcp_hlen = th->th_off << 2;
	hdrlen = ehdrlen + ip_hlen + tcp_hlen;

	/* This is used in the transmit desc in encap */
	*paylen = mp->m_pkthdr.len - hdrlen;

	/* VLAN MACLEN IPLEN */
	if (mp->m_flags & M_VLANTAG) {
		/* NOTE(review): same htole16-before-shift pattern as
		   ixv_tx_ctx_setup(); benign on little-endian. */
		vtag = htole16(mp->m_pkthdr.ether_vtag);
                vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= ip_hlen;
	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);

	/* ADV DTYPE TUCMD */
	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);


	/* MSS L4LEN IDX */
	mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	TXD->seqnum_seed = htole32(0);
	/* Context descriptors carry no data buffer of their own */
	tx_buffer->m_head = NULL;
	tx_buffer->eop_index = -1;

	if (++ctxd == adapter->num_tx_desc)
		ctxd = 0;

	txr->tx_avail--;
	txr->next_avail_desc = ctxd;
	return TRUE;
}
2580169689Skan
2581169689Skan
2582169689Skan/**********************************************************************
2583169689Skan *
2584169689Skan *  Examine each tx_buffer in the used queue. If the hardware is done
2585169689Skan *  processing the packet then free associated resources. The
2586169689Skan *  tx_buffer is put back on the free queue.
2587169689Skan *
2588169689Skan **********************************************************************/
2589169689Skanstatic boolean_t
2590169689Skanixv_txeof(struct tx_ring *txr)
2591169689Skan{
2592169689Skan	struct adapter	*adapter = txr->adapter;
2593169689Skan	struct ifnet	*ifp = adapter->ifp;
2594169689Skan	u32	first, last, done;
2595169689Skan	struct ixv_tx_buf *tx_buffer;
2596169689Skan	struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
2597169689Skan
2598169689Skan	mtx_assert(&txr->tx_mtx, MA_OWNED);
2599169689Skan
2600169689Skan	if (txr->tx_avail == adapter->num_tx_desc)
2601169689Skan		return FALSE;
2602169689Skan
2603169689Skan	first = txr->next_to_clean;
2604169689Skan	tx_buffer = &txr->tx_buffers[first];
2605169689Skan	/* For cleanup we just use legacy struct */
2606169689Skan	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2607169689Skan	last = tx_buffer->eop_index;
2608169689Skan	if (last == -1)
2609169689Skan		return FALSE;
2610169689Skan	eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2611169689Skan
2612169689Skan	/*
2613169689Skan	** Get the index of the first descriptor
2614169689Skan	** BEYOND the EOP and call that 'done'.
2615169689Skan	** I do this so the comparison in the
2616169689Skan	** inner while loop below can be simple
2617169689Skan	*/
2618169689Skan	if (++last == adapter->num_tx_desc) last = 0;
2619169689Skan	done = last;
2620169689Skan
2621169689Skan        bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2622169689Skan            BUS_DMASYNC_POSTREAD);
2623169689Skan	/*
2624169689Skan	** Only the EOP descriptor of a packet now has the DD
2625169689Skan	** bit set, this is what we look for...
2626169689Skan	*/
2627169689Skan	while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
2628169689Skan		/* We clean the range of the packet */
2629169689Skan		while (first != done) {
2630169689Skan			tx_desc->upper.data = 0;
2631169689Skan			tx_desc->lower.data = 0;
2632169689Skan			tx_desc->buffer_addr = 0;
2633169689Skan			++txr->tx_avail;
2634169689Skan
2635169689Skan			if (tx_buffer->m_head) {
2636169689Skan				bus_dmamap_sync(txr->txtag,
2637169689Skan				    tx_buffer->map,
2638169689Skan				    BUS_DMASYNC_POSTWRITE);
2639169689Skan				bus_dmamap_unload(txr->txtag,
2640169689Skan				    tx_buffer->map);
2641169689Skan				m_freem(tx_buffer->m_head);
2642169689Skan				tx_buffer->m_head = NULL;
2643169689Skan				tx_buffer->map = NULL;
2644169689Skan			}
2645169689Skan			tx_buffer->eop_index = -1;
2646169689Skan			txr->watchdog_time = ticks;
2647169689Skan
2648169689Skan			if (++first == adapter->num_tx_desc)
2649169689Skan				first = 0;
2650169689Skan
2651169689Skan			tx_buffer = &txr->tx_buffers[first];
2652169689Skan			tx_desc =
2653169689Skan			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2654169689Skan		}
2655169689Skan		++ifp->if_opackets;
2656169689Skan		/* See if there is more work now */
2657169689Skan		last = tx_buffer->eop_index;
2658169689Skan		if (last != -1) {
2659169689Skan			eop_desc =
2660169689Skan			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2661169689Skan			/* Get next done point */
2662169689Skan			if (++last == adapter->num_tx_desc) last = 0;
2663169689Skan			done = last;
2664169689Skan		} else
2665169689Skan			break;
2666169689Skan	}
2667169689Skan	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2668169689Skan	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2669169689Skan
2670169689Skan	txr->next_to_clean = first;
2671169689Skan
2672169689Skan	/*
2673169689Skan	 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
2674169689Skan	 * it is OK to send packets. If there are no pending descriptors,
2675169689Skan	 * clear the timeout. Otherwise, if some descriptors have been freed,
2676169689Skan	 * restart the timeout.
2677169689Skan	 */
2678169689Skan	if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
2679169689Skan		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2680169689Skan		if (txr->tx_avail == adapter->num_tx_desc) {
2681169689Skan			txr->watchdog_check = FALSE;
2682169689Skan			return FALSE;
2683169689Skan		}
2684169689Skan	}
2685169689Skan
2686169689Skan	return TRUE;
2687169689Skan}
2688169689Skan
2689169689Skan/*********************************************************************
2690169689Skan *
2691169689Skan *  Refresh mbuf buffers for RX descriptor rings
2692169689Skan *   - now keeps its own state so discards due to resource
2693169689Skan *     exhaustion are unnecessary, if an mbuf cannot be obtained
2694169689Skan *     it just returns, keeping its placeholder, thus it can simply
2695169689Skan *     be recalled to try again.
2696169689Skan *
2697169689Skan **********************************************************************/
2698169689Skanstatic void
2699169689Skanixv_refresh_mbufs(struct rx_ring *rxr, int limit)
2700169689Skan{
2701169689Skan	struct adapter		*adapter = rxr->adapter;
2702169689Skan	bus_dma_segment_t	hseg[1];
2703169689Skan	bus_dma_segment_t	pseg[1];
2704169689Skan	struct ixv_rx_buf	*rxbuf;
2705169689Skan	struct mbuf		*mh, *mp;
2706169689Skan	int			i, nsegs, error, cleaned;
2707169689Skan
2708169689Skan	i = rxr->next_to_refresh;
2709169689Skan	cleaned = -1; /* Signify no completions */
2710169689Skan	while (i != limit) {
2711169689Skan		rxbuf = &rxr->rx_buffers[i];
2712169689Skan		if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
2713169689Skan			mh = m_gethdr(M_DONTWAIT, MT_DATA);
2714169689Skan			if (mh == NULL)
2715169689Skan				goto update;
2716169689Skan			mh->m_pkthdr.len = mh->m_len = MHLEN;
2717169689Skan			mh->m_len = MHLEN;
2718169689Skan			mh->m_flags |= M_PKTHDR;
2719169689Skan			m_adj(mh, ETHER_ALIGN);
2720169689Skan			/* Get the memory mapping */
2721169689Skan			error = bus_dmamap_load_mbuf_sg(rxr->htag,
2722169689Skan			    rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
2723169689Skan			if (error != 0) {
2724169689Skan				printf("GET BUF: dmamap load"
2725169689Skan				    " failure - %d\n", error);
2726169689Skan				m_free(mh);
2727169689Skan				goto update;
2728169689Skan			}
2729169689Skan			rxbuf->m_head = mh;
2730169689Skan			bus_dmamap_sync(rxr->htag, rxbuf->hmap,
2731169689Skan			    BUS_DMASYNC_PREREAD);
2732169689Skan			rxr->rx_base[i].read.hdr_addr =
2733169689Skan			    htole64(hseg[0].ds_addr);
2734169689Skan		}
2735169689Skan
2736169689Skan		if (rxbuf->m_pack == NULL) {
2737169689Skan			mp = m_getjcl(M_DONTWAIT, MT_DATA,
2738169689Skan			    M_PKTHDR, adapter->rx_mbuf_sz);
2739169689Skan			if (mp == NULL)
2740169689Skan				goto update;
2741169689Skan			mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
2742169689Skan			/* Get the memory mapping */
2743169689Skan			error = bus_dmamap_load_mbuf_sg(rxr->ptag,
2744169689Skan			    rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
2745169689Skan			if (error != 0) {
2746169689Skan				printf("GET BUF: dmamap load"
2747169689Skan				    " failure - %d\n", error);
2748169689Skan				m_free(mp);
2749169689Skan				goto update;
2750169689Skan			}
2751169689Skan			rxbuf->m_pack = mp;
2752169689Skan			bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
2753169689Skan			    BUS_DMASYNC_PREREAD);
2754169689Skan			rxr->rx_base[i].read.pkt_addr =
2755169689Skan			    htole64(pseg[0].ds_addr);
2756169689Skan		}
2757169689Skan
2758169689Skan		cleaned = i;
2759169689Skan		/* Calculate next index */
2760169689Skan		if (++i == adapter->num_rx_desc)
2761169689Skan			i = 0;
2762169689Skan		/* This is the work marker for refresh */
2763169689Skan		rxr->next_to_refresh = i;
2764169689Skan	}
2765169689Skanupdate:
2766169689Skan	if (cleaned != -1) /* If we refreshed some, bump tail */
2767169689Skan		IXGBE_WRITE_REG(&adapter->hw,
2768169689Skan		    IXGBE_VFRDT(rxr->me), cleaned);
2769169689Skan	return;
2770169689Skan}
2771169689Skan
2772169689Skan/*********************************************************************
2773169689Skan *
2774169689Skan *  Allocate memory for rx_buffer structures. Since we use one
2775169689Skan *  rx_buffer per received packet, the maximum number of rx_buffer's
2776169689Skan *  that we'll need is equal to the number of receive descriptors
2777169689Skan *  that we've allocated.
2778169689Skan *
2779169689Skan **********************************************************************/
2780169689Skanstatic int
2781169689Skanixv_allocate_receive_buffers(struct rx_ring *rxr)
2782169689Skan{
2783169689Skan	struct	adapter 	*adapter = rxr->adapter;
2784169689Skan	device_t 		dev = adapter->dev;
2785169689Skan	struct ixv_rx_buf 	*rxbuf;
2786169689Skan	int             	i, bsize, error;
2787169689Skan
2788169689Skan	bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
2789169689Skan	if (!(rxr->rx_buffers =
2790169689Skan	    (struct ixv_rx_buf *) malloc(bsize,
2791169689Skan	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
2792169689Skan		device_printf(dev, "Unable to allocate rx_buffer memory\n");
2793169689Skan		error = ENOMEM;
2794169689Skan		goto fail;
2795169689Skan	}
2796169689Skan
2797169689Skan	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
2798169689Skan				   1, 0,	/* alignment, bounds */
2799169689Skan				   BUS_SPACE_MAXADDR,	/* lowaddr */
2800169689Skan				   BUS_SPACE_MAXADDR,	/* highaddr */
2801169689Skan				   NULL, NULL,		/* filter, filterarg */
2802169689Skan				   MSIZE,		/* maxsize */
2803169689Skan				   1,			/* nsegments */
2804169689Skan				   MSIZE,		/* maxsegsize */
2805169689Skan				   0,			/* flags */
2806169689Skan				   NULL,		/* lockfunc */
2807169689Skan				   NULL,		/* lockfuncarg */
2808169689Skan				   &rxr->htag))) {
2809169689Skan		device_printf(dev, "Unable to create RX DMA tag\n");
2810169689Skan		goto fail;
2811169689Skan	}
2812169689Skan
2813169689Skan	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
2814169689Skan				   1, 0,	/* alignment, bounds */
2815169689Skan				   BUS_SPACE_MAXADDR,	/* lowaddr */
2816169689Skan				   BUS_SPACE_MAXADDR,	/* highaddr */
2817169689Skan				   NULL, NULL,		/* filter, filterarg */
2818169689Skan				   MJUMPAGESIZE,	/* maxsize */
2819169689Skan				   1,			/* nsegments */
2820169689Skan				   MJUMPAGESIZE,	/* maxsegsize */
2821169689Skan				   0,			/* flags */
2822169689Skan				   NULL,		/* lockfunc */
2823169689Skan				   NULL,		/* lockfuncarg */
2824169689Skan				   &rxr->ptag))) {
2825169689Skan		device_printf(dev, "Unable to create RX DMA tag\n");
2826169689Skan		goto fail;
2827169689Skan	}
2828169689Skan
2829169689Skan	for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
2830169689Skan		rxbuf = &rxr->rx_buffers[i];
2831169689Skan		error = bus_dmamap_create(rxr->htag,
2832169689Skan		    BUS_DMA_NOWAIT, &rxbuf->hmap);
2833169689Skan		if (error) {
2834169689Skan			device_printf(dev, "Unable to create RX head map\n");
2835169689Skan			goto fail;
2836169689Skan		}
2837169689Skan		error = bus_dmamap_create(rxr->ptag,
2838169689Skan		    BUS_DMA_NOWAIT, &rxbuf->pmap);
2839169689Skan		if (error) {
2840169689Skan			device_printf(dev, "Unable to create RX pkt map\n");
2841169689Skan			goto fail;
2842169689Skan		}
2843169689Skan	}
2844169689Skan
2845169689Skan	return (0);
2846169689Skan
2847169689Skanfail:
2848169689Skan	/* Frees all, but can handle partial completion */
2849169689Skan	ixv_free_receive_structures(adapter);
2850169689Skan	return (error);
2851169689Skan}
2852169689Skan
2853169689Skanstatic void
2854169689Skanixv_free_receive_ring(struct rx_ring *rxr)
2855169689Skan{
2856169689Skan	struct  adapter         *adapter;
2857169689Skan	struct ixv_rx_buf       *rxbuf;
2858169689Skan	int i;
2859169689Skan
2860169689Skan	adapter = rxr->adapter;
2861169689Skan	for (i = 0; i < adapter->num_rx_desc; i++) {
2862169689Skan		rxbuf = &rxr->rx_buffers[i];
2863169689Skan		if (rxbuf->m_head != NULL) {
2864169689Skan			bus_dmamap_sync(rxr->htag, rxbuf->hmap,
2865169689Skan			    BUS_DMASYNC_POSTREAD);
2866169689Skan			bus_dmamap_unload(rxr->htag, rxbuf->hmap);
2867169689Skan			rxbuf->m_head->m_flags |= M_PKTHDR;
2868169689Skan			m_freem(rxbuf->m_head);
2869169689Skan		}
2870169689Skan		if (rxbuf->m_pack != NULL) {
2871169689Skan			bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
2872169689Skan			    BUS_DMASYNC_POSTREAD);
2873169689Skan			bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
2874169689Skan			rxbuf->m_pack->m_flags |= M_PKTHDR;
2875169689Skan			m_freem(rxbuf->m_pack);
2876169689Skan		}
2877169689Skan		rxbuf->m_head = NULL;
2878169689Skan		rxbuf->m_pack = NULL;
2879169689Skan	}
2880169689Skan}
2881169689Skan
2882169689Skan
/*********************************************************************
 *
 *  Initialize a receive ring and its buffers.
 *
 *  Under the RX lock: zeroes the descriptor area, frees any previous
 *  mbufs, populates every slot with fresh header (if header-split is
 *  enabled) and payload mbufs, resets the software indices, and sets
 *  up soft LRO when the interface has IFCAP_LRO enabled.
 *
 *  Returns 0 on success or an errno; on failure the ring's mbufs are
 *  released before returning.
 *
 **********************************************************************/
static int
ixv_setup_receive_ring(struct rx_ring *rxr)
{
	struct	adapter 	*adapter;
	struct ifnet		*ifp;
	device_t		dev;
	struct ixv_rx_buf	*rxbuf;
	bus_dma_segment_t	pseg[1], hseg[1];
	struct lro_ctrl		*lro = &rxr->lro;
	int			rsize, nsegs, error = 0;

	adapter = rxr->adapter;
	ifp = adapter->ifp;
	dev = adapter->dev;

	/* Clear the ring contents */
	IXV_RX_LOCK(rxr);
	rsize = roundup2(adapter->num_rx_desc *
	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
	bzero((void *)rxr->rx_base, rsize);

	/* Free current RX buffer structs and their mbufs */
	ixv_free_receive_ring(rxr);

	/* Configure header split? */
	if (ixv_header_split)
		rxr->hdr_split = TRUE;

	/* Now replenish the mbufs */
	for (int j = 0; j != adapter->num_rx_desc; ++j) {
		struct mbuf	*mh, *mp;

		rxbuf = &rxr->rx_buffers[j];
		/*
		** Dont allocate mbufs if not
		** doing header split, its wasteful
		*/
		if (rxr->hdr_split == FALSE)
			goto skip_head;

		/* First the header */
		rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
		if (rxbuf->m_head == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		/* ETHER_ALIGN keeps the IP header naturally aligned */
		m_adj(rxbuf->m_head, ETHER_ALIGN);
		mh = rxbuf->m_head;
		mh->m_len = mh->m_pkthdr.len = MHLEN;
		mh->m_flags |= M_PKTHDR;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->htag,
		    rxbuf->hmap, rxbuf->m_head, hseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) /* Nothing elegant to do here */
			goto fail;
		bus_dmamap_sync(rxr->htag,
		    rxbuf->hmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);

skip_head:
		/* Now the payload cluster */
		rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
		    M_PKTHDR, adapter->rx_mbuf_sz);
		if (rxbuf->m_pack == NULL) {
			error = ENOBUFS;
                        goto fail;
		}
		mp = rxbuf->m_pack;
		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->ptag,
		    rxbuf->pmap, mp, pseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0)
                        goto fail;
		bus_dmamap_sync(rxr->ptag,
		    rxbuf->pmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
	}


	/* Setup our descriptor indices */
	rxr->next_to_check = 0;
	rxr->next_to_refresh = 0;
	rxr->lro_enabled = FALSE;
	rxr->rx_split_packets = 0;
	rxr->rx_bytes = 0;

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	** Now set up the LRO interface:
	*/
	if (ifp->if_capenable & IFCAP_LRO) {
		int err = tcp_lro_init(lro);
		if (err) {
			device_printf(dev, "LRO Initialization failed!\n");
			goto fail;
		}
		INIT_DEBUGOUT("RX Soft LRO Initialized\n");
		rxr->lro_enabled = TRUE;
		lro->ifp = adapter->ifp;
	}

	IXV_RX_UNLOCK(rxr);
	return (0);

fail:
	/* Release whatever was populated before the failure */
	ixv_free_receive_ring(rxr);
	IXV_RX_UNLOCK(rxr);
	return (error);
}
3004169689Skan
3005169689Skan/*********************************************************************
3006169689Skan *
3007169689Skan *  Initialize all receive rings.
3008169689Skan *
3009169689Skan **********************************************************************/
3010169689Skanstatic int
3011169689Skanixv_setup_receive_structures(struct adapter *adapter)
3012169689Skan{
3013169689Skan	struct rx_ring *rxr = adapter->rx_rings;
3014169689Skan	int j;
3015169689Skan
3016169689Skan	for (j = 0; j < adapter->num_queues; j++, rxr++)
3017169689Skan		if (ixv_setup_receive_ring(rxr))
3018169689Skan			goto fail;
3019169689Skan
3020169689Skan	return (0);
3021169689Skanfail:
3022169689Skan	/*
3023169689Skan	 * Free RX buffers allocated so far, we will only handle
3024169689Skan	 * the rings that completed, the failing case will have
3025169689Skan	 * cleaned up for itself. 'j' failed, so its the terminus.
3026169689Skan	 */
3027169689Skan	for (int i = 0; i < j; ++i) {
3028169689Skan		rxr = &adapter->rx_rings[i];
3029169689Skan		ixv_free_receive_ring(rxr);
3030169689Skan	}
3031169689Skan
3032169689Skan	return (ENOBUFS);
3033169689Skan}
3034169689Skan
/*********************************************************************
 *
 *  Setup receive registers and features.
 *
 *  Programs filter control, jumbo-frame handling, then per-queue:
 *  enables the queue, writes the descriptor base/length registers,
 *  configures SRRCTL (buffer sizes, header-split vs. one-buffer
 *  descriptor type) and the head/tail pointers. Finally sets RX
 *  checksum options.
 *
 *  NOTE(review): FCTRL, HLREG0 and RXCSUM are normally PF-owned
 *  registers; whether a VF write takes effect is hardware-dependent —
 *  verify against the 82599 VF datasheet.
 *
 **********************************************************************/
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void
ixv_initialize_receive_units(struct adapter *adapter)
{
	struct	rx_ring	*rxr = adapter->rx_rings;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ifnet   *ifp = adapter->ifp;
	u32		bufsz, fctrl, rxcsum, hlreg;


	/* Enable broadcasts */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF;
	fctrl |= IXGBE_FCTRL_PMCF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	/* Set for Jumbo Frames? */
	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ifp->if_mtu > ETHERMTU) {
		hlreg |= IXGBE_HLREG0_JUMBOEN;
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	} else {
		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 rdba = rxr->rxdma.dma_paddr;
		u32 reg, rxdctl;

		/* Do the queue enabling first */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
		/* Poll up to 10ms for the enable to latch */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
		    (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		if (rxr->hdr_split) {
			/* Use a standard mbuf for the header */
			reg |= ((IXV_RX_HDR <<
			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
			    & IXGBE_SRRCTL_BSIZEHDR_MASK);
			reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		} else
			reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);

		/* Setup the HW Rx Head and Tail Descriptor Pointers */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
		    adapter->num_rx_desc - 1);
	}

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= IXGBE_RXCSUM_PCSD;

	/* Without packet-split checksumming, enable IP checksum at least */
	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
		rxcsum |= IXGBE_RXCSUM_IPPCSE;

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	return;
}
3127169689Skan
3128169689Skan/*********************************************************************
3129169689Skan *
3130169689Skan *  Free all receive rings.
3131169689Skan *
3132169689Skan **********************************************************************/
3133169689Skanstatic void
3134169689Skanixv_free_receive_structures(struct adapter *adapter)
3135169689Skan{
3136169689Skan	struct rx_ring *rxr = adapter->rx_rings;
3137169689Skan
3138169689Skan	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3139169689Skan		struct lro_ctrl		*lro = &rxr->lro;
3140169689Skan		ixv_free_receive_buffers(rxr);
3141169689Skan		/* Free LRO memory */
3142169689Skan		tcp_lro_free(lro);
3143169689Skan		/* Free the ring memory as well */
3144169689Skan		ixv_dma_free(adapter, &rxr->rxdma);
3145169689Skan	}
3146169689Skan
3147169689Skan	free(adapter->rx_rings, M_DEVBUF);
3148169689Skan}
3149169689Skan
3150169689Skan
3151169689Skan/*********************************************************************
3152169689Skan *
3153169689Skan *  Free receive ring data structures
3154169689Skan *
3155169689Skan **********************************************************************/
static void
ixv_free_receive_buffers(struct rx_ring *rxr)
{
	struct adapter		*adapter = rxr->adapter;
	struct ixv_rx_buf	*rxbuf;

	INIT_DEBUGOUT("free_receive_structures: begin");

	/* Cleanup any existing buffers */
	if (rxr->rx_buffers != NULL) {
		for (int i = 0; i < adapter->num_rx_desc; i++) {
			rxbuf = &rxr->rx_buffers[i];
			/* Header mbuf: sync, unload the DMA map, then free */
			if (rxbuf->m_head != NULL) {
				bus_dmamap_sync(rxr->htag, rxbuf->hmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->htag, rxbuf->hmap);
				/* restore M_PKTHDR so m_freem sees a pkthdr mbuf */
				rxbuf->m_head->m_flags |= M_PKTHDR;
				m_freem(rxbuf->m_head);
			}
			/* Payload mbuf: same treatment with the packet tag/map */
			if (rxbuf->m_pack != NULL) {
				bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
				rxbuf->m_pack->m_flags |= M_PKTHDR;
				m_freem(rxbuf->m_pack);
			}
			rxbuf->m_head = NULL;
			rxbuf->m_pack = NULL;
			/* Per-buffer DMA maps are destroyed after unload */
			if (rxbuf->hmap != NULL) {
				bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
				rxbuf->hmap = NULL;
			}
			if (rxbuf->pmap != NULL) {
				bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
				rxbuf->pmap = NULL;
			}
		}
		if (rxr->rx_buffers != NULL) {
			free(rxr->rx_buffers, M_DEVBUF);
			rxr->rx_buffers = NULL;
		}
	}

	/* Finally tear down the DMA tags themselves */
	if (rxr->htag != NULL) {
		bus_dma_tag_destroy(rxr->htag);
		rxr->htag = NULL;
	}
	if (rxr->ptag != NULL) {
		bus_dma_tag_destroy(rxr->ptag);
		rxr->ptag = NULL;
	}

	return;
}
3210169689Skan
3211169689Skanstatic __inline void
3212169689Skanixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
3213169689Skan{
3214169689Skan
3215169689Skan        /*
3216169689Skan         * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
3217169689Skan         * should be computed by hardware. Also it should not have VLAN tag in
3218169689Skan         * ethernet header.
3219169689Skan         */
3220169689Skan        if (rxr->lro_enabled &&
3221169689Skan            (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
3222169689Skan            (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
3223169689Skan            (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
3224169689Skan            (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
3225169689Skan            (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
3226169689Skan            (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
3227169689Skan                /*
3228169689Skan                 * Send to the stack if:
3229169689Skan                 **  - LRO not enabled, or
3230169689Skan                 **  - no LRO resources, or
3231169689Skan                 **  - lro enqueue fails
3232169689Skan                 */
3233169689Skan                if (rxr->lro.lro_cnt != 0)
3234169689Skan                        if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
3235169689Skan                                return;
3236169689Skan        }
3237169689Skan        (*ifp->if_input)(ifp, m);
3238169689Skan}
3239169689Skan
3240169689Skanstatic __inline void
3241169689Skanixv_rx_discard(struct rx_ring *rxr, int i)
3242169689Skan{
3243169689Skan	struct adapter		*adapter = rxr->adapter;
3244169689Skan	struct ixv_rx_buf	*rbuf;
3245169689Skan	struct mbuf		*mh, *mp;
3246169689Skan
3247169689Skan	rbuf = &rxr->rx_buffers[i];
3248169689Skan        if (rbuf->fmp != NULL) /* Partial chain ? */
3249169689Skan                m_freem(rbuf->fmp);
3250169689Skan
3251169689Skan	mh = rbuf->m_head;
3252169689Skan	mp = rbuf->m_pack;
3253169689Skan
3254169689Skan	/* Reuse loaded DMA map and just update mbuf chain */
3255169689Skan	mh->m_len = MHLEN;
3256169689Skan	mh->m_flags |= M_PKTHDR;
3257169689Skan	mh->m_next = NULL;
3258169689Skan
3259169689Skan	mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
3260169689Skan	mp->m_data = mp->m_ext.ext_buf;
3261169689Skan	mp->m_next = NULL;
3262169689Skan	return;
3263169689Skan}
3264169689Skan
3265169689Skan
3266169689Skan/*********************************************************************
3267169689Skan *
3268169689Skan *  This routine executes in interrupt context. It replenishes
3269169689Skan *  the mbufs in the descriptor and sends data which has been
3270169689Skan *  dma'ed into host memory to upper layer.
3271169689Skan *
3272169689Skan *  We loop at most count times if count is > 0, or until done if
3273169689Skan *  count < 0.
3274169689Skan *
3275169689Skan *  Return TRUE for more work, FALSE for all clean.
3276169689Skan *********************************************************************/
static bool
ixv_rxeof(struct ix_queue *que, int count)
{
	struct adapter		*adapter = que->adapter;
	struct rx_ring		*rxr = que->rxr;
	struct ifnet		*ifp = adapter->ifp;
	struct lro_ctrl		*lro = &rxr->lro;
	struct lro_entry	*queued;
	int			i, nextp, processed = 0;
	u32			staterr = 0;
	union ixgbe_adv_rx_desc	*cur;
	struct ixv_rx_buf	*rbuf, *nbuf;

	IXV_RX_LOCK(rxr);

	/* Walk the ring from the last stopping point, at most `count' slots */
	for (i = rxr->next_to_check; count != 0;) {
		struct mbuf	*sendmp, *mh, *mp;
		u32		rsc, ptype;
		u16		hlen, plen, hdr, vtag;
		bool		eop;

		/* Sync the ring. */
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur = &rxr->rx_base[i];
		staterr = le32toh(cur->wb.upper.status_error);

		/* DD clear: hardware has not completed this descriptor */
		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
			break;
		/* Interface was brought down underneath us */
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;

		count--;
		sendmp = NULL;
		nbuf = NULL;
		rsc = 0;
		cur->wb.upper.status_error = 0;
		rbuf = &rxr->rx_buffers[i];
		mh = rbuf->m_head;
		mp = rbuf->m_pack;

		/* Pull the write-back fields out of the descriptor */
		plen = le16toh(cur->wb.upper.length);
		ptype = le32toh(cur->wb.lower.lo_dword.data) &
		    IXGBE_RXDADV_PKTTYPE_MASK;
		hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
		vtag = le16toh(cur->wb.upper.vlan);
		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);

		/* Make sure all parts of a bad packet are discarded */
		if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
		    (rxr->discard)) {
			ifp->if_ierrors++;
			rxr->rx_discarded++;
			/* keep discarding until EOP of the bad frame */
			if (!eop)
				rxr->discard = TRUE;
			else
				rxr->discard = FALSE;
			ixv_rx_discard(rxr, i);
			goto next_desc;
		}

		/* Prefetch the next slot's buffer struct for mid-chain frames */
		if (!eop) {
			nextp = i + 1;
			if (nextp == adapter->num_rx_desc)
				nextp = 0;
			nbuf = &rxr->rx_buffers[nextp];
			prefetch(nbuf);
		}
		/*
		** The header mbuf is ONLY used when header
		** split is enabled, otherwise we get normal
		** behavior, ie, both header and payload
		** are DMA'd into the payload buffer.
		**
		** Rather than using the fmp/lmp global pointers
		** we now keep the head of a packet chain in the
		** buffer struct and pass this along from one
		** descriptor to the next, until we get EOP.
		*/
		if (rxr->hdr_split && (rbuf->fmp == NULL)) {
			/* This must be an initial descriptor */
			hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hlen > IXV_RX_HDR)
				hlen = IXV_RX_HDR;
			mh->m_len = hlen;
			mh->m_flags |= M_PKTHDR;
			mh->m_next = NULL;
			mh->m_pkthdr.len = mh->m_len;
			/* Null buf pointer so it is refreshed */
			rbuf->m_head = NULL;
			/*
			** Check the payload length, this
			** could be zero if its a small
			** packet.
			*/
			if (plen > 0) {
				mp->m_len = plen;
				mp->m_next = NULL;
				mp->m_flags &= ~M_PKTHDR;
				mh->m_next = mp;
				mh->m_pkthdr.len += mp->m_len;
				/* Null buf pointer so it is refreshed */
				rbuf->m_pack = NULL;
				rxr->rx_split_packets++;
			}
			/*
			** Now create the forward
			** chain so when complete
			** we wont have to.
			*/
                        if (eop == 0) {
				/* stash the chain head */
                                nbuf->fmp = mh;
				/* Make forward chain */
                                if (plen)
                                        mp->m_next = nbuf->m_pack;
                                else
                                        mh->m_next = nbuf->m_pack;
                        } else {
				/* Singlet, prepare to send */
                                sendmp = mh;
                                if (staterr & IXGBE_RXD_STAT_VP) {
                                        sendmp->m_pkthdr.ether_vtag = vtag;
                                        sendmp->m_flags |= M_VLANTAG;
                                }
                        }
		} else {
			/*
			** Either no header split, or a
			** secondary piece of a fragmented
			** split packet.
			*/
			mp->m_len = plen;
			/*
			** See if there is a stored head
			** that determines what we are
			*/
			sendmp = rbuf->fmp;
			rbuf->m_pack = rbuf->fmp = NULL;

			if (sendmp != NULL) /* secondary frag */
				sendmp->m_pkthdr.len += mp->m_len;
			else {
				/* first desc of a non-ps chain */
				sendmp = mp;
				sendmp->m_flags |= M_PKTHDR;
				sendmp->m_pkthdr.len = mp->m_len;
				if (staterr & IXGBE_RXD_STAT_VP) {
					sendmp->m_pkthdr.ether_vtag = vtag;
					sendmp->m_flags |= M_VLANTAG;
				}
                        }
			/* Pass the head pointer on */
			if (eop == 0) {
				nbuf->fmp = sendmp;
				sendmp = NULL;
				mp->m_next = nbuf->m_pack;
			}
		}
		++processed;
		/* Sending this frame? */
		if (eop) {
			sendmp->m_pkthdr.rcvif = ifp;
			ifp->if_ipackets++;
			rxr->rx_packets++;
			/* capture data for AIM */
			rxr->bytes += sendmp->m_pkthdr.len;
			rxr->rx_bytes += sendmp->m_pkthdr.len;
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
				ixv_rx_checksum(staterr, sendmp, ptype);
#if __FreeBSD_version >= 800000
			sendmp->m_pkthdr.flowid = que->msix;
			sendmp->m_flags |= M_FLOWID;
#endif
		}
next_desc:
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == adapter->num_rx_desc)
			i = 0;

		/* Now send to the stack or do LRO */
		if (sendmp != NULL)
			ixv_rx_input(rxr, ifp, sendmp, ptype);

               /* Every 8 descriptors we go to refresh mbufs */
		if (processed == 8) {
			ixv_refresh_mbufs(rxr, i);
			processed = 0;
		}
	}

	/* Refresh any remaining buf structs */
	if (processed != 0) {
		ixv_refresh_mbufs(rxr, i);
		processed = 0;
	}

	/* Remember where to resume on the next call */
	rxr->next_to_check = i;

	/*
	 * Flush any outstanding LRO work
	 */
	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}

	IXV_RX_UNLOCK(rxr);

	/*
	** We still have cleaning to do?
	** Schedule another interrupt if so.
	*/
	if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
		ixv_rearm_queues(adapter, (u64)(1 << que->msix));
		return (TRUE);
	}

	return (FALSE);
}
3502169689Skan
3503169689Skan
3504169689Skan/*********************************************************************
3505169689Skan *
3506169689Skan *  Verify that the hardware indicated that the checksum is valid.
3507169689Skan *  Inform the stack about the status of checksum so that stack
3508169689Skan *  doesn't spend time verifying the checksum.
3509169689Skan *
3510169689Skan *********************************************************************/
3511169689Skanstatic void
3512169689Skanixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
3513169689Skan{
3514169689Skan	u16	status = (u16) staterr;
3515169689Skan	u8	errors = (u8) (staterr >> 24);
3516169689Skan	bool	sctp = FALSE;
3517169689Skan
3518169689Skan	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
3519169689Skan	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
3520169689Skan		sctp = TRUE;
3521169689Skan
3522169689Skan	if (status & IXGBE_RXD_STAT_IPCS) {
3523169689Skan		if (!(errors & IXGBE_RXD_ERR_IPE)) {
3524169689Skan			/* IP Checksum Good */
3525169689Skan			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3526169689Skan			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3527169689Skan
3528169689Skan		} else
3529169689Skan			mp->m_pkthdr.csum_flags = 0;
3530169689Skan	}
3531169689Skan	if (status & IXGBE_RXD_STAT_L4CS) {
3532169689Skan		u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3533169689Skan#if __FreeBSD_version >= 800000
3534169689Skan		if (sctp)
3535169689Skan			type = CSUM_SCTP_VALID;
3536169689Skan#endif
3537169689Skan		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
3538169689Skan			mp->m_pkthdr.csum_flags |= type;
3539169689Skan			if (!sctp)
3540169689Skan				mp->m_pkthdr.csum_data = htons(0xffff);
3541169689Skan		}
3542169689Skan	}
3543169689Skan	return;
3544169689Skan}
3545169689Skan
3546169689Skanstatic void
3547169689Skanixv_setup_vlan_support(struct adapter *adapter)
3548169689Skan{
3549169689Skan	struct ixgbe_hw *hw = &adapter->hw;
3550169689Skan	u32		ctrl, vid, vfta, retry;
3551169689Skan
3552169689Skan
3553169689Skan	/*
3554169689Skan	** We get here thru init_locked, meaning
3555169689Skan	** a soft reset, this has already cleared
3556169689Skan	** the VFTA and other state, so if there
3557169689Skan	** have been no vlan's registered do nothing.
3558169689Skan	*/
3559169689Skan	if (adapter->num_vlans == 0)
3560169689Skan		return;
3561169689Skan
3562169689Skan	/* Enable the queues */
3563169689Skan	for (int i = 0; i < adapter->num_queues; i++) {
3564169689Skan		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
3565169689Skan		ctrl |= IXGBE_RXDCTL_VME;
3566169689Skan		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
3567169689Skan	}
3568169689Skan
3569169689Skan	/*
3570169689Skan	** A soft reset zero's out the VFTA, so
3571169689Skan	** we need to repopulate it now.
3572169689Skan	*/
3573169689Skan	for (int i = 0; i < VFTA_SIZE; i++) {
3574169689Skan		if (ixv_shadow_vfta[i] == 0)
3575169689Skan			continue;
3576169689Skan		vfta = ixv_shadow_vfta[i];
3577169689Skan		/*
3578169689Skan		** Reconstruct the vlan id's
3579169689Skan		** based on the bits set in each
3580169689Skan		** of the array ints.
3581169689Skan		*/
3582169689Skan		for ( int j = 0; j < 32; j++) {
3583169689Skan			retry = 0;
3584169689Skan			if ((vfta & (1 << j)) == 0)
3585169689Skan				continue;
3586169689Skan			vid = (i * 32) + j;
3587169689Skan			/* Call the shared code mailbox routine */
3588169689Skan			while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
3589169689Skan				if (++retry > 5)
3590169689Skan					break;
3591169689Skan			}
3592169689Skan		}
3593169689Skan	}
3594169689Skan}
3595169689Skan
3596169689Skan/*
3597169689Skan** This routine is run via an vlan config EVENT,
3598169689Skan** it enables us to use the HW Filter table since
3599169689Skan** we can get the vlan id. This just creates the
3600169689Skan** entry in the soft version of the VFTA, init will
3601169689Skan** repopulate the real table.
3602169689Skan*/
3603169689Skanstatic void
3604169689Skanixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3605169689Skan{
3606169689Skan	struct adapter	*adapter = ifp->if_softc;
3607169689Skan	u16		index, bit;
3608169689Skan
3609169689Skan	if (ifp->if_softc !=  arg)   /* Not our event */
3610169689Skan		return;
3611169689Skan
3612169689Skan	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3613169689Skan		return;
3614169689Skan
3615169689Skan	index = (vtag >> 5) & 0x7F;
3616169689Skan	bit = vtag & 0x1F;
3617169689Skan	ixv_shadow_vfta[index] |= (1 << bit);
3618169689Skan	++adapter->num_vlans;
3619169689Skan	/* Re-init to load the changes */
3620169689Skan	ixv_init(adapter);
3621169689Skan}
3622169689Skan
3623169689Skan/*
3624169689Skan** This routine is run via an vlan
3625169689Skan** unconfig EVENT, remove our entry
3626169689Skan** in the soft vfta.
3627169689Skan*/
3628169689Skanstatic void
3629169689Skanixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3630169689Skan{
3631169689Skan	struct adapter	*adapter = ifp->if_softc;
3632169689Skan	u16		index, bit;
3633169689Skan
3634169689Skan	if (ifp->if_softc !=  arg)
3635169689Skan		return;
3636169689Skan
3637169689Skan	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
3638169689Skan		return;
3639169689Skan
3640169689Skan	index = (vtag >> 5) & 0x7F;
3641169689Skan	bit = vtag & 0x1F;
3642169689Skan	ixv_shadow_vfta[index] &= ~(1 << bit);
3643169689Skan	--adapter->num_vlans;
3644169689Skan	/* Re-init to load the changes */
3645169689Skan	ixv_init(adapter);
3646169689Skan}
3647169689Skan
3648169689Skanstatic void
3649169689Skanixv_enable_intr(struct adapter *adapter)
3650169689Skan{
3651169689Skan	struct ixgbe_hw *hw = &adapter->hw;
3652169689Skan	struct ix_queue *que = adapter->queues;
3653169689Skan	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3654169689Skan
3655169689Skan
3656169689Skan	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
3657169689Skan
3658169689Skan	mask = IXGBE_EIMS_ENABLE_MASK;
3659169689Skan	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3660169689Skan	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
3661169689Skan
3662169689Skan        for (int i = 0; i < adapter->num_queues; i++, que++)
3663169689Skan		ixv_enable_queue(adapter, que->msix);
3664169689Skan
3665169689Skan	IXGBE_WRITE_FLUSH(hw);
3666169689Skan
3667169689Skan	return;
3668169689Skan}
3669169689Skan
3670169689Skanstatic void
3671169689Skanixv_disable_intr(struct adapter *adapter)
3672169689Skan{
3673169689Skan	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
3674169689Skan	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
3675169689Skan	IXGBE_WRITE_FLUSH(&adapter->hw);
3676169689Skan	return;
3677169689Skan}
3678169689Skan
3679169689Skan/*
3680169689Skan** Setup the correct IVAR register for a particular MSIX interrupt
3681169689Skan**  - entry is the register array entry
3682169689Skan**  - vector is the MSIX vector for this queue
3683169689Skan**  - type is RX/TX/MISC
3684169689Skan*/
3685169689Skanstatic void
3686169689Skanixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3687169689Skan{
3688169689Skan	struct ixgbe_hw *hw = &adapter->hw;
3689169689Skan	u32 ivar, index;
3690169689Skan
3691169689Skan	vector |= IXGBE_IVAR_ALLOC_VAL;
3692169689Skan
3693169689Skan	if (type == -1) { /* MISC IVAR */
3694169689Skan		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
3695169689Skan		ivar &= ~0xFF;
3696169689Skan		ivar |= vector;
3697169689Skan		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
3698169689Skan	} else {	/* RX/TX IVARS */
3699169689Skan		index = (16 * (entry & 1)) + (8 * type);
3700169689Skan		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
3701169689Skan		ivar &= ~(0xFF << index);
3702169689Skan		ivar |= (vector << index);
3703169689Skan		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
3704169689Skan	}
3705169689Skan}
3706169689Skan
3707169689Skanstatic void
3708169689Skanixv_configure_ivars(struct adapter *adapter)
3709169689Skan{
3710169689Skan	struct  ix_queue *que = adapter->queues;
3711169689Skan
3712169689Skan        for (int i = 0; i < adapter->num_queues; i++, que++) {
3713169689Skan		/* First the RX queue entry */
3714169689Skan                ixv_set_ivar(adapter, i, que->msix, 0);
3715169689Skan		/* ... and the TX */
3716169689Skan		ixv_set_ivar(adapter, i, que->msix, 1);
3717169689Skan		/* Set an initial value in EITR */
3718169689Skan                IXGBE_WRITE_REG(&adapter->hw,
3719169689Skan                    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
3720169689Skan	}
3721169689Skan
3722169689Skan	/* For the Link interrupt */
3723169689Skan        ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
3724169689Skan}
3725169689Skan
3726169689Skan
3727169689Skan/*
3728169689Skan** Tasklet handler for MSIX MBX interrupts
3729169689Skan**  - do outside interrupt since it might sleep
3730169689Skan*/
3731169689Skanstatic void
3732169689Skanixv_handle_mbx(void *context, int pending)
3733169689Skan{
3734169689Skan	struct adapter  *adapter = context;
3735169689Skan
3736169689Skan	ixgbe_check_link(&adapter->hw,
3737169689Skan	    &adapter->link_speed, &adapter->link_up, 0);
3738169689Skan	ixv_update_link_status(adapter);
3739169689Skan}
3740169689Skan
3741169689Skan/*
3742169689Skan** The VF stats registers never have a truely virgin
3743169689Skan** starting point, so this routine tries to make an
3744169689Skan** artificial one, marking ground zero on attach as
3745169689Skan** it were.
3746169689Skan*/
3747169689Skanstatic void
3748169689Skanixv_save_stats(struct adapter *adapter)
3749169689Skan{
3750169689Skan	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
3751169689Skan		adapter->stats.saved_reset_vfgprc +=
3752169689Skan		    adapter->stats.vfgprc - adapter->stats.base_vfgprc;
3753169689Skan		adapter->stats.saved_reset_vfgptc +=
3754169689Skan		    adapter->stats.vfgptc - adapter->stats.base_vfgptc;
3755169689Skan		adapter->stats.saved_reset_vfgorc +=
3756169689Skan		    adapter->stats.vfgorc - adapter->stats.base_vfgorc;
3757169689Skan		adapter->stats.saved_reset_vfgotc +=
3758169689Skan		    adapter->stats.vfgotc - adapter->stats.base_vfgotc;
3759169689Skan		adapter->stats.saved_reset_vfmprc +=
3760169689Skan		    adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3761169689Skan	}
3762169689Skan}
3763169689Skan
3764169689Skanstatic void
3765169689Skanixv_init_stats(struct adapter *adapter)
3766169689Skan{
3767169689Skan	struct ixgbe_hw *hw = &adapter->hw;
3768169689Skan
3769169689Skan	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
3770169689Skan	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
3771169689Skan	adapter->stats.last_vfgorc |=
3772169689Skan	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
3773169689Skan
3774169689Skan	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
3775169689Skan	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
3776169689Skan	adapter->stats.last_vfgotc |=
3777169689Skan	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
3778169689Skan
3779169689Skan	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
3780169689Skan
3781169689Skan	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
3782169689Skan	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
3783169689Skan	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
3784169689Skan	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
3785169689Skan	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
3786169689Skan}
3787169689Skan
/*
** Fold the current value of a 32-bit hardware counter into a wider
** software accumulator, crediting one wrap when the register went
** backwards since the last read.  Requires a local `hw' in scope.
** Wrapped in do/while(0) so the macro is a single statement and is
** safe inside un-braced if/else bodies.
*/
#define UPDATE_STAT_32(reg, last, count)		\
do {							\
	u32 current = IXGBE_READ_REG(hw, reg);		\
	if (current < last)				\
		count += 0x100000000LL;			\
	last = current;					\
	count &= 0xFFFFFFFF00000000LL;			\
	count |= current;				\
} while (0)
3797169689Skan
/*
** Same as UPDATE_STAT_32 but for a 36-bit counter split across an
** LSB/MSB register pair; a wrap credits 2^36.  Requires a local `hw'
** in scope.  do/while(0) keeps the macro a single statement.
*/
#define UPDATE_STAT_36(lsb, msb, last, count) 		\
do {							\
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);		\
	u64 cur_msb = IXGBE_READ_REG(hw, msb);		\
	u64 current = ((cur_msb << 32) | cur_lsb);	\
	if (current < last)				\
		count += 0x1000000000LL;		\
	last = current;					\
	count &= 0xFFFFFFF000000000LL;			\
	count |= current;				\
} while (0)
3809169689Skan
3810169689Skan/*
3811169689Skan** ixv_update_stats - Update the board statistics counters.
3812169689Skan*/
void
ixv_update_stats(struct adapter *adapter)
{
        /* NOTE: `hw' is referenced implicitly by the UPDATE_STAT_* macros */
        struct ixgbe_hw *hw = &adapter->hw;

        /* 32-bit good packet counters (RX/TX) */
        UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
	    adapter->stats.vfgprc);
        UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
	    adapter->stats.vfgptc);
        /* 36-bit octet counters, split across LSB/MSB register pairs */
        UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    adapter->stats.last_vfgorc, adapter->stats.vfgorc);
        UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    adapter->stats.last_vfgotc, adapter->stats.vfgotc);
        /* 32-bit multicast packets received */
        UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
	    adapter->stats.vfmprc);
}
3829169689Skan
3830169689Skan/**********************************************************************
3831169689Skan *
3832169689Skan *  This routine is called only when ixgbe_display_debug_stats is enabled.
3833169689Skan *  This routine provides a way to take a look at important statistics
3834169689Skan *  maintained by the driver and hardware.
3835169689Skan *
3836169689Skan **********************************************************************/
static void
ixv_print_hw_stats(struct adapter * adapter)
{
        device_t dev = adapter->dev;

        /* Driver-maintained software counters */
        device_printf(dev,"Std Mbuf Failed = %lu\n",
               adapter->mbuf_defrag_failed);
        device_printf(dev,"Driver dropped packets = %lu\n",
               adapter->dropped_pkts);
        device_printf(dev, "watchdog timeouts = %ld\n",
               adapter->watchdog_events);

        /* Hardware counters accumulated by ixv_update_stats() */
        device_printf(dev,"Good Packets Rcvd = %llu\n",
               (long long)adapter->stats.vfgprc);
        device_printf(dev,"Good Packets Xmtd = %llu\n",
               (long long)adapter->stats.vfgptc);
        device_printf(dev,"TSO Transmissions = %lu\n",
               adapter->tso_tx);

}
3857169689Skan
3858169689Skan/**********************************************************************
3859169689Skan *
3860169689Skan *  This routine is called only when em_display_debug_stats is enabled.
3861169689Skan *  This routine provides a way to take a look at important statistics
3862169689Skan *  maintained by the driver and hardware.
3863169689Skan *
3864169689Skan **********************************************************************/
static void
ixv_print_debug_info(struct adapter *adapter)
{
        device_t dev = adapter->dev;
        struct ixgbe_hw         *hw = &adapter->hw;
        struct ix_queue         *que = adapter->queues;
        struct rx_ring          *rxr;
        struct tx_ring          *txr;
        struct lro_ctrl         *lro;

        device_printf(dev,"Error Byte Count = %u \n",
            IXGBE_READ_REG(hw, IXGBE_ERRBC));

        /* Per-queue IRQ, RX, LRO and TX statistics */
        for (int i = 0; i < adapter->num_queues; i++, que++) {
                txr = que->txr;
                rxr = que->rxr;
                lro = &rxr->lro;
                device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
                    que->msix, (long)que->irqs);
                device_printf(dev,"RX(%d) Packets Received: %lld\n",
                    rxr->me, (long long)rxr->rx_packets);
                device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
                    rxr->me, (long long)rxr->rx_split_packets);
                device_printf(dev,"RX(%d) Bytes Received: %lu\n",
                    rxr->me, (long)rxr->rx_bytes);
                device_printf(dev,"RX(%d) LRO Queued= %d\n",
                    rxr->me, lro->lro_queued);
                device_printf(dev,"RX(%d) LRO Flushed= %d\n",
                    rxr->me, lro->lro_flushed);
                device_printf(dev,"TX(%d) Packets Sent: %lu\n",
                    txr->me, (long)txr->total_packets);
                device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
                    txr->me, (long)txr->no_desc_avail);
        }

        /* Mailbox (link) interrupt count */
        device_printf(dev,"MBX IRQ Handled: %lu\n",
            (long)adapter->mbx_irq);
        return;
}
3904169689Skan
3905169689Skanstatic int
3906169689Skanixv_sysctl_stats(SYSCTL_HANDLER_ARGS)
3907169689Skan{
3908169689Skan	int             error;
3909169689Skan	int             result;
3910169689Skan	struct adapter *adapter;
3911169689Skan
3912169689Skan	result = -1;
3913169689Skan	error = sysctl_handle_int(oidp, &result, 0, req);
3914169689Skan
3915169689Skan	if (error || !req->newptr)
3916169689Skan		return (error);
3917169689Skan
3918169689Skan	if (result == 1) {
3919169689Skan		adapter = (struct adapter *) arg1;
3920169689Skan		ixv_print_hw_stats(adapter);
3921169689Skan	}
3922169689Skan	return error;
3923169689Skan}
3924169689Skan
3925169689Skanstatic int
3926169689Skanixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
3927169689Skan{
3928169689Skan	int error, result;
3929169689Skan	struct adapter *adapter;
3930169689Skan
3931169689Skan	result = -1;
3932169689Skan	error = sysctl_handle_int(oidp, &result, 0, req);
3933169689Skan
3934169689Skan	if (error || !req->newptr)
3935169689Skan		return (error);
3936169689Skan
3937169689Skan	if (result == 1) {
3938169689Skan		adapter = (struct adapter *) arg1;
3939169689Skan		ixv_print_debug_info(adapter);
3940169689Skan	}
3941169689Skan	return error;
3942169689Skan}
3943169689Skan
3944169689Skan/*
3945169689Skan** Set flow control using sysctl:
3946169689Skan** Flow control values:
3947169689Skan** 	0 - off
3948169689Skan**	1 - rx pause
3949169689Skan**	2 - tx pause
3950169689Skan**	3 - full
3951169689Skan*/
3952169689Skanstatic int
3953169689Skanixv_set_flowcntl(SYSCTL_HANDLER_ARGS)
3954169689Skan{
3955169689Skan	int error;
3956169689Skan	struct adapter *adapter;
3957169689Skan
3958169689Skan	error = sysctl_handle_int(oidp, &ixv_flow_control, 0, req);
3959169689Skan
3960169689Skan	if (error)
3961169689Skan		return (error);
3962169689Skan
3963169689Skan	adapter = (struct adapter *) arg1;
3964169689Skan	switch (ixv_flow_control) {
3965169689Skan		case ixgbe_fc_rx_pause:
3966169689Skan		case ixgbe_fc_tx_pause:
3967169689Skan		case ixgbe_fc_full:
3968169689Skan			adapter->hw.fc.requested_mode = ixv_flow_control;
3969169689Skan			break;
3970169689Skan		case ixgbe_fc_none:
3971169689Skan		default:
3972169689Skan			adapter->hw.fc.requested_mode = ixgbe_fc_none;
3973169689Skan	}
3974169689Skan
3975169689Skan	ixgbe_fc_enable(&adapter->hw, 0);
3976169689Skan	return error;
3977169689Skan}
3978169689Skan
3979169689Skanstatic void
3980169689Skanixv_add_rx_process_limit(struct adapter *adapter, const char *name,
3981169689Skan        const char *description, int *limit, int value)
3982169689Skan{
3983169689Skan        *limit = value;
3984169689Skan        SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
3985169689Skan            SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
3986169689Skan            OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
3987169689Skan}
3988169689Skan
3989169689Skan