ixv.c (241856) → ixv.c (241885)
1/******************************************************************************
2
3 Copyright (c) 2001-2012, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD: head/sys/dev/ixgbe/ixv.c 241856 2012-10-22 03:41:14Z eadler $*/
33/*$FreeBSD: head/sys/dev/ixgbe/ixv.c 241885 2012-10-22 13:06:09Z eadler $*/
34
35#ifdef HAVE_KERNEL_OPTION_HEADERS
36#include "opt_inet.h"
37#include "opt_inet6.h"
38#endif
39
40#include "ixv.h"
41
42/*********************************************************************
43 * Driver version
44 *********************************************************************/
45char ixv_driver_version[] = "1.1.4";
46
47/*********************************************************************
48 * PCI Device ID Table
49 *
50 * Used by probe to select devices to load on
51 * Last field stores an index into ixv_strings
52 * Last entry must be all 0s
53 *
54 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
55 *********************************************************************/
56
57static ixv_vendor_info_t ixv_vendor_info_array[] =
58{
59 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
60 {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
61 /* required last entry */
62 {0, 0, 0, 0, 0}
63};
64
65/*********************************************************************
66 * Table of branding strings
67 *********************************************************************/
68
69static char *ixv_strings[] = {
70 "Intel(R) PRO/10GbE Virtual Function Network Driver"
71};
72
73/*********************************************************************
74 * Function prototypes
75 *********************************************************************/
76static int ixv_probe(device_t);
77static int ixv_attach(device_t);
78static int ixv_detach(device_t);
79static int ixv_shutdown(device_t);
80#if __FreeBSD_version < 800000
81static void ixv_start(struct ifnet *);
82static void ixv_start_locked(struct tx_ring *, struct ifnet *);
83#else
84static int ixv_mq_start(struct ifnet *, struct mbuf *);
85static int ixv_mq_start_locked(struct ifnet *,
86 struct tx_ring *, struct mbuf *);
87static void ixv_qflush(struct ifnet *);
88#endif
89static int ixv_ioctl(struct ifnet *, u_long, caddr_t);
90static void ixv_init(void *);
91static void ixv_init_locked(struct adapter *);
92static void ixv_stop(void *);
93static void ixv_media_status(struct ifnet *, struct ifmediareq *);
94static int ixv_media_change(struct ifnet *);
95static void ixv_identify_hardware(struct adapter *);
96static int ixv_allocate_pci_resources(struct adapter *);
97static int ixv_allocate_msix(struct adapter *);
98static int ixv_allocate_queues(struct adapter *);
99static int ixv_setup_msix(struct adapter *);
100static void ixv_free_pci_resources(struct adapter *);
101static void ixv_local_timer(void *);
102static void ixv_setup_interface(device_t, struct adapter *);
103static void ixv_config_link(struct adapter *);
104
105static int ixv_allocate_transmit_buffers(struct tx_ring *);
106static int ixv_setup_transmit_structures(struct adapter *);
107static void ixv_setup_transmit_ring(struct tx_ring *);
108static void ixv_initialize_transmit_units(struct adapter *);
109static void ixv_free_transmit_structures(struct adapter *);
110static void ixv_free_transmit_buffers(struct tx_ring *);
111
112static int ixv_allocate_receive_buffers(struct rx_ring *);
113static int ixv_setup_receive_structures(struct adapter *);
114static int ixv_setup_receive_ring(struct rx_ring *);
115static void ixv_initialize_receive_units(struct adapter *);
116static void ixv_free_receive_structures(struct adapter *);
117static void ixv_free_receive_buffers(struct rx_ring *);
118
119static void ixv_enable_intr(struct adapter *);
120static void ixv_disable_intr(struct adapter *);
121static bool ixv_txeof(struct tx_ring *);
122static bool ixv_rxeof(struct ix_queue *, int);
123static void ixv_rx_checksum(u32, struct mbuf *, u32);
124static void ixv_set_multi(struct adapter *);
125static void ixv_update_link_status(struct adapter *);
126static void ixv_refresh_mbufs(struct rx_ring *, int);
127static int ixv_xmit(struct tx_ring *, struct mbuf **);
128static int ixv_sysctl_stats(SYSCTL_HANDLER_ARGS);
129static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
130static int ixv_set_flowcntl(SYSCTL_HANDLER_ARGS);
131static int ixv_dma_malloc(struct adapter *, bus_size_t,
132 struct ixv_dma_alloc *, int);
133static void ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
134static void ixv_add_rx_process_limit(struct adapter *, const char *,
135 const char *, int *, int);
136static bool ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
137static bool ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
138static void ixv_set_ivar(struct adapter *, u8, u8, s8);
139static void ixv_configure_ivars(struct adapter *);
140static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
141
142static void ixv_setup_vlan_support(struct adapter *);
143static void ixv_register_vlan(void *, struct ifnet *, u16);
144static void ixv_unregister_vlan(void *, struct ifnet *, u16);
145
146static void ixv_save_stats(struct adapter *);
147static void ixv_init_stats(struct adapter *);
148static void ixv_update_stats(struct adapter *);
149
150static __inline void ixv_rx_discard(struct rx_ring *, int);
151static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
152 struct mbuf *, u32);
153
154/* The MSI/X Interrupt handlers */
155static void ixv_msix_que(void *);
156static void ixv_msix_mbx(void *);
157
158/* Deferred interrupt tasklets */
159static void ixv_handle_que(void *, int);
160static void ixv_handle_mbx(void *, int);
161
162/*********************************************************************
163 * FreeBSD Device Interface Entry Points
164 *********************************************************************/
165
166static device_method_t ixv_methods[] = {
167 /* Device interface */
168 DEVMETHOD(device_probe, ixv_probe),
169 DEVMETHOD(device_attach, ixv_attach),
170 DEVMETHOD(device_detach, ixv_detach),
171 DEVMETHOD(device_shutdown, ixv_shutdown),
172 {0, 0}
173};
174
175static driver_t ixv_driver = {
176 "ix", ixv_methods, sizeof(struct adapter),
177};
178
179extern devclass_t ixgbe_devclass;
180DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
181MODULE_DEPEND(ixv, pci, 1, 1, 1);
182MODULE_DEPEND(ixv, ether, 1, 1, 1);
183
184/*
185** TUNEABLE PARAMETERS:
186*/
187
188/*
189** AIM: Adaptive Interrupt Moderation
190** which means that the interrupt rate
191** is varied over time based on the
192** traffic for that interrupt vector
193*/
194static int ixv_enable_aim = FALSE;
195TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
196
197/* How many packets rxeof tries to clean at a time */
198static int ixv_rx_process_limit = 128;
199TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
200
201/* Flow control setting, default to full */
202static int ixv_flow_control = ixgbe_fc_full;
203TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
204
205/*
206 * Header split: this causes the hardware to DMA
 207 * the header into a separate mbuf from the payload;
 208 * it can be a performance win in some workloads, but
 209 * in others it actually hurts, so it is off by default.
210 */
211static int ixv_header_split = FALSE;
212TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
213
214/*
 215** Number of TX descriptors per ring;
 216** set higher than RX as this seems to be
 217** the better-performing choice.
218*/
219static int ixv_txd = DEFAULT_TXD;
220TUNABLE_INT("hw.ixv.txd", &ixv_txd);
221
222/* Number of RX descriptors per ring */
223static int ixv_rxd = DEFAULT_RXD;
224TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
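/*
** Example (hypothetical values): TUNABLE_INT knobs are read from the
** kernel environment, so these would normally be set in
** /boot/loader.conf before the module loads, e.g.:
**	hw.ixv.enable_aim=1
**	hw.ixv.rx_process_limit=256
**	hw.ixv.txd=2048
**	hw.ixv.rxd=2048
*/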
225
226/*
 227** Shadow VFTA table; this is needed because
228** the real filter table gets cleared during
229** a soft reset and we need to repopulate it.
230*/
231static u32 ixv_shadow_vfta[VFTA_SIZE];
232
233/*********************************************************************
234 * Device identification routine
235 *
 236 * ixv_probe determines if the driver should be loaded on an
 237 * adapter, based on the PCI vendor/device ID of the adapter.
238 *
239 * return BUS_PROBE_DEFAULT on success, positive on failure
240 *********************************************************************/
241
242static int
243ixv_probe(device_t dev)
244{
245 ixv_vendor_info_t *ent;
246
247 u16 pci_vendor_id = 0;
248 u16 pci_device_id = 0;
249 u16 pci_subvendor_id = 0;
250 u16 pci_subdevice_id = 0;
251 char adapter_name[256];
252
253
254 pci_vendor_id = pci_get_vendor(dev);
255 if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
256 return (ENXIO);
257
258 pci_device_id = pci_get_device(dev);
259 pci_subvendor_id = pci_get_subvendor(dev);
260 pci_subdevice_id = pci_get_subdevice(dev);
261
262 ent = ixv_vendor_info_array;
263 while (ent->vendor_id != 0) {
264 if ((pci_vendor_id == ent->vendor_id) &&
265 (pci_device_id == ent->device_id) &&
266
267 ((pci_subvendor_id == ent->subvendor_id) ||
268 (ent->subvendor_id == 0)) &&
269
270 ((pci_subdevice_id == ent->subdevice_id) ||
271 (ent->subdevice_id == 0))) {
272 sprintf(adapter_name, "%s, Version - %s",
273 ixv_strings[ent->index],
274 ixv_driver_version);
275 device_set_desc_copy(dev, adapter_name);
276 return (BUS_PROBE_DEFAULT);
277 }
278 ent++;
279 }
280 return (ENXIO);
281}
282
283/*********************************************************************
284 * Device initialization routine
285 *
286 * The attach entry point is called when the driver is being loaded.
287 * This routine identifies the type of hardware, allocates all resources
288 * and initializes the hardware.
289 *
290 * return 0 on success, positive on failure
291 *********************************************************************/
292
293static int
294ixv_attach(device_t dev)
295{
296 struct adapter *adapter;
297 struct ixgbe_hw *hw;
298 int error = 0;
299
300 INIT_DEBUGOUT("ixv_attach: begin");
301
302 if (resource_disabled("ixgbe", device_get_unit(dev))) {
303 device_printf(dev, "Disabled by device hint\n");
304 return (ENXIO);
305 }
306
302 /* Allocate, clear, and link in our adapter structure */
303 adapter = device_get_softc(dev);
304 adapter->dev = adapter->osdep.dev = dev;
305 hw = &adapter->hw;
306
307 /* Core Lock Init*/
308 IXV_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
309
310 /* SYSCTL APIs */
311 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
312 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
313 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
314 adapter, 0, ixv_sysctl_stats, "I", "Statistics");
315
316 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
317 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
318 OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
319 adapter, 0, ixv_sysctl_debug, "I", "Debug Info");
320
321 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
322 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
323 OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
324 adapter, 0, ixv_set_flowcntl, "I", "Flow Control");
325
326 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
327 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
328 OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
329 &ixv_enable_aim, 1, "Interrupt Moderation");
330
331 /* Set up the timer callout */
332 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
333
334 /* Determine hardware revision */
335 ixv_identify_hardware(adapter);
336
337 /* Do base PCI setup - map BAR0 */
338 if (ixv_allocate_pci_resources(adapter)) {
339 device_printf(dev, "Allocation of PCI resources failed\n");
340 error = ENXIO;
341 goto err_out;
342 }
343
344 /* Do descriptor calc and sanity checks */
345 if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
346 ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
347 device_printf(dev, "TXD config issue, using default!\n");
348 adapter->num_tx_desc = DEFAULT_TXD;
349 } else
350 adapter->num_tx_desc = ixv_txd;
351
352 if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
 353	    ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
354 device_printf(dev, "RXD config issue, using default!\n");
355 adapter->num_rx_desc = DEFAULT_RXD;
356 } else
357 adapter->num_rx_desc = ixv_rxd;
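	/*
	** Worked example (hypothetical, assuming the usual 16-byte
	** advanced descriptor and a DBA_ALIGN of 128): ixv_txd = 2048
	** gives a 2048 * 16 = 32768 byte ring, and 32768 % 128 == 0,
	** so the requested value would be accepted.
	*/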
358
359 /* Allocate our TX/RX Queues */
360 if (ixv_allocate_queues(adapter)) {
361 error = ENOMEM;
362 goto err_out;
363 }
364
365 /*
 366	** Initialize the shared code: it is
 367	** at this point that the mac type is set.
368 */
369 error = ixgbe_init_shared_code(hw);
370 if (error) {
371 device_printf(dev,"Shared Code Initialization Failure\n");
372 error = EIO;
373 goto err_late;
374 }
375
376 /* Setup the mailbox */
377 ixgbe_init_mbx_params_vf(hw);
378
379 ixgbe_reset_hw(hw);
380
 381	/* Set the default Hardware Flow Control settings */
382 hw->fc.requested_mode = ixgbe_fc_full;
383 hw->fc.pause_time = IXV_FC_PAUSE;
384 hw->fc.low_water[0] = IXV_FC_LO;
385 hw->fc.high_water[0] = IXV_FC_HI;
386 hw->fc.send_xon = TRUE;
387
388 error = ixgbe_init_hw(hw);
389 if (error) {
390 device_printf(dev,"Hardware Initialization Failure\n");
391 error = EIO;
392 goto err_late;
393 }
394
395 error = ixv_allocate_msix(adapter);
396 if (error)
397 goto err_late;
398
399 /* Setup OS specific network interface */
400 ixv_setup_interface(dev, adapter);
401
402 /* Sysctl for limiting the amount of work done in the taskqueue */
403 ixv_add_rx_process_limit(adapter, "rx_processing_limit",
404 "max number of rx packets to process", &adapter->rx_process_limit,
405 ixv_rx_process_limit);
406
407 /* Do the stats setup */
408 ixv_save_stats(adapter);
409 ixv_init_stats(adapter);
410
411 /* Register for VLAN events */
412 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
413 ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
414 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
415 ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
416
417 INIT_DEBUGOUT("ixv_attach: end");
418 return (0);
419
420err_late:
421 ixv_free_transmit_structures(adapter);
422 ixv_free_receive_structures(adapter);
423err_out:
424 ixv_free_pci_resources(adapter);
425 return (error);
426
427}
428
429/*********************************************************************
430 * Device removal routine
431 *
432 * The detach entry point is called when the driver is being removed.
433 * This routine stops the adapter and deallocates all the resources
434 * that were allocated for driver operation.
435 *
436 * return 0 on success, positive on failure
437 *********************************************************************/
438
439static int
440ixv_detach(device_t dev)
441{
442 struct adapter *adapter = device_get_softc(dev);
443 struct ix_queue *que = adapter->queues;
444
445 INIT_DEBUGOUT("ixv_detach: begin");
446
447 /* Make sure VLANS are not using driver */
448 if (adapter->ifp->if_vlantrunk != NULL) {
449 device_printf(dev,"Vlan in use, detach first\n");
450 return (EBUSY);
451 }
452
453 IXV_CORE_LOCK(adapter);
454 ixv_stop(adapter);
455 IXV_CORE_UNLOCK(adapter);
456
457 for (int i = 0; i < adapter->num_queues; i++, que++) {
458 if (que->tq) {
459 taskqueue_drain(que->tq, &que->que_task);
460 taskqueue_free(que->tq);
461 }
462 }
463
464 /* Drain the Link queue */
465 if (adapter->tq) {
466 taskqueue_drain(adapter->tq, &adapter->mbx_task);
467 taskqueue_free(adapter->tq);
468 }
469
470 /* Unregister VLAN events */
471 if (adapter->vlan_attach != NULL)
472 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
473 if (adapter->vlan_detach != NULL)
474 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
475
476 ether_ifdetach(adapter->ifp);
477 callout_drain(&adapter->timer);
478 ixv_free_pci_resources(adapter);
479 bus_generic_detach(dev);
480 if_free(adapter->ifp);
481
482 ixv_free_transmit_structures(adapter);
483 ixv_free_receive_structures(adapter);
484
485 IXV_CORE_LOCK_DESTROY(adapter);
486 return (0);
487}
488
489/*********************************************************************
490 *
491 * Shutdown entry point
492 *
493 **********************************************************************/
494static int
495ixv_shutdown(device_t dev)
496{
497 struct adapter *adapter = device_get_softc(dev);
498 IXV_CORE_LOCK(adapter);
499 ixv_stop(adapter);
500 IXV_CORE_UNLOCK(adapter);
501 return (0);
502}
503
504#if __FreeBSD_version < 800000
505/*********************************************************************
506 * Transmit entry point
507 *
508 * ixv_start is called by the stack to initiate a transmit.
509 * The driver will remain in this routine as long as there are
510 * packets to transmit and transmit resources are available.
 511 * In case resources are not available, the stack is notified
 512 * and the packet is requeued.
513 **********************************************************************/
514static void
515ixv_start_locked(struct tx_ring *txr, struct ifnet * ifp)
516{
517 struct mbuf *m_head;
518 struct adapter *adapter = txr->adapter;
519
520 IXV_TX_LOCK_ASSERT(txr);
521
522 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
523 IFF_DRV_RUNNING)
524 return;
525 if (!adapter->link_active)
526 return;
527
528 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
529
530 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
531 if (m_head == NULL)
532 break;
533
534 if (ixv_xmit(txr, &m_head)) {
535 if (m_head == NULL)
536 break;
537 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
538 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
539 break;
540 }
541 /* Send a copy of the frame to the BPF listener */
542 ETHER_BPF_MTAP(ifp, m_head);
543
544 /* Set watchdog on */
545 txr->watchdog_check = TRUE;
546 txr->watchdog_time = ticks;
547
548 }
549 return;
550}
551
552/*
553 * Legacy TX start - called by the stack, this
554 * always uses the first tx ring, and should
555 * not be used with multiqueue tx enabled.
556 */
557static void
558ixv_start(struct ifnet *ifp)
559{
560 struct adapter *adapter = ifp->if_softc;
561 struct tx_ring *txr = adapter->tx_rings;
562
563 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
564 IXV_TX_LOCK(txr);
565 ixv_start_locked(txr, ifp);
566 IXV_TX_UNLOCK(txr);
567 }
568 return;
569}
570
571#else
572
573/*
574** Multiqueue Transmit driver
575**
576*/
577static int
578ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
579{
580 struct adapter *adapter = ifp->if_softc;
581 struct ix_queue *que;
582 struct tx_ring *txr;
583 int i = 0, err = 0;
584
585 /* Which queue to use */
586 if ((m->m_flags & M_FLOWID) != 0)
587 i = m->m_pkthdr.flowid % adapter->num_queues;
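		/* e.g. (hypothetical) a flowid of 43 with 4 queues selects ring 3 */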
588
589 txr = &adapter->tx_rings[i];
590 que = &adapter->queues[i];
591
592 if (IXV_TX_TRYLOCK(txr)) {
593 err = ixv_mq_start_locked(ifp, txr, m);
594 IXV_TX_UNLOCK(txr);
595 } else {
596 err = drbr_enqueue(ifp, txr->br, m);
597 taskqueue_enqueue(que->tq, &que->que_task);
598 }
599
600 return (err);
601}
602
603static int
604ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
605{
606 struct adapter *adapter = txr->adapter;
607 struct mbuf *next;
608 int enqueued, err = 0;
609
610 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
611 IFF_DRV_RUNNING || adapter->link_active == 0) {
612 if (m != NULL)
613 err = drbr_enqueue(ifp, txr->br, m);
614 return (err);
615 }
616
617 /* Do a clean if descriptors are low */
618 if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
619 ixv_txeof(txr);
620
621 enqueued = 0;
622 if (m == NULL) {
623 next = drbr_dequeue(ifp, txr->br);
624 } else if (drbr_needs_enqueue(ifp, txr->br)) {
625 if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
626 return (err);
627 next = drbr_dequeue(ifp, txr->br);
628 } else
629 next = m;
630
631 /* Process the queue */
632 while (next != NULL) {
633 if ((err = ixv_xmit(txr, &next)) != 0) {
634 if (next != NULL)
635 err = drbr_enqueue(ifp, txr->br, next);
636 break;
637 }
638 enqueued++;
639 ifp->if_obytes += next->m_pkthdr.len;
640 if (next->m_flags & M_MCAST)
641 ifp->if_omcasts++;
642 /* Send a copy of the frame to the BPF listener */
643 ETHER_BPF_MTAP(ifp, next);
644 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
645 break;
646 if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
647 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
648 break;
649 }
650 next = drbr_dequeue(ifp, txr->br);
651 }
652
653 if (enqueued > 0) {
654 /* Set watchdog on */
655 txr->watchdog_check = TRUE;
656 txr->watchdog_time = ticks;
657 }
658
659 return (err);
660}
661
662/*
663** Flush all ring buffers
664*/
665static void
666ixv_qflush(struct ifnet *ifp)
667{
668 struct adapter *adapter = ifp->if_softc;
669 struct tx_ring *txr = adapter->tx_rings;
670 struct mbuf *m;
671
672 for (int i = 0; i < adapter->num_queues; i++, txr++) {
673 IXV_TX_LOCK(txr);
674 while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
675 m_freem(m);
676 IXV_TX_UNLOCK(txr);
677 }
678 if_qflush(ifp);
679}
680
681#endif
682
683/*********************************************************************
684 * Ioctl entry point
685 *
686 * ixv_ioctl is called when the user wants to configure the
687 * interface.
688 *
689 * return 0 on success, positive on failure
690 **********************************************************************/
691
692static int
693ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
694{
695 struct adapter *adapter = ifp->if_softc;
696 struct ifreq *ifr = (struct ifreq *) data;
697#if defined(INET) || defined(INET6)
698 struct ifaddr *ifa = (struct ifaddr *) data;
699 bool avoid_reset = FALSE;
700#endif
701 int error = 0;
702
703 switch (command) {
704
705 case SIOCSIFADDR:
706#ifdef INET
707 if (ifa->ifa_addr->sa_family == AF_INET)
708 avoid_reset = TRUE;
709#endif
710#ifdef INET6
711 if (ifa->ifa_addr->sa_family == AF_INET6)
712 avoid_reset = TRUE;
713#endif
714#if defined(INET) || defined(INET6)
715 /*
716 ** Calling init results in link renegotiation,
717 ** so we avoid doing it when possible.
718 */
719 if (avoid_reset) {
720 ifp->if_flags |= IFF_UP;
721 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
722 ixv_init(adapter);
723 if (!(ifp->if_flags & IFF_NOARP))
724 arp_ifinit(ifp, ifa);
725 } else
726 error = ether_ioctl(ifp, command, data);
727 break;
728#endif
729 case SIOCSIFMTU:
730 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
731 if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
732 error = EINVAL;
733 } else {
734 IXV_CORE_LOCK(adapter);
735 ifp->if_mtu = ifr->ifr_mtu;
736 adapter->max_frame_size =
737 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
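			/*
			** e.g. (hypothetical) an MTU of 1500 yields a
			** max_frame_size of 1500 + 14 + 4 = 1518 bytes
			*/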
738 ixv_init_locked(adapter);
739 IXV_CORE_UNLOCK(adapter);
740 }
741 break;
742 case SIOCSIFFLAGS:
743 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
744 IXV_CORE_LOCK(adapter);
745 if (ifp->if_flags & IFF_UP) {
746 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
747 ixv_init_locked(adapter);
748 } else
749 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
750 ixv_stop(adapter);
751 adapter->if_flags = ifp->if_flags;
752 IXV_CORE_UNLOCK(adapter);
753 break;
754 case SIOCADDMULTI:
755 case SIOCDELMULTI:
756 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
757 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
758 IXV_CORE_LOCK(adapter);
759 ixv_disable_intr(adapter);
760 ixv_set_multi(adapter);
761 ixv_enable_intr(adapter);
762 IXV_CORE_UNLOCK(adapter);
763 }
764 break;
765 case SIOCSIFMEDIA:
766 case SIOCGIFMEDIA:
767 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
768 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
769 break;
770 case SIOCSIFCAP:
771 {
772 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
773 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
774 if (mask & IFCAP_HWCSUM)
775 ifp->if_capenable ^= IFCAP_HWCSUM;
776 if (mask & IFCAP_TSO4)
777 ifp->if_capenable ^= IFCAP_TSO4;
778 if (mask & IFCAP_LRO)
779 ifp->if_capenable ^= IFCAP_LRO;
780 if (mask & IFCAP_VLAN_HWTAGGING)
781 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
782 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
783 IXV_CORE_LOCK(adapter);
784 ixv_init_locked(adapter);
785 IXV_CORE_UNLOCK(adapter);
786 }
787 VLAN_CAPABILITIES(ifp);
788 break;
789 }
790
791 default:
792 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
793 error = ether_ioctl(ifp, command, data);
794 break;
795 }
796
797 return (error);
798}
799
800/*********************************************************************
801 * Init entry point
802 *
803 * This routine is used in two ways. It is used by the stack as
 804 * the init entry point in the network interface structure. It is also used
805 * by the driver as a hw/sw initialization routine to get to a
806 * consistent state.
807 *
808 * return 0 on success, positive on failure
809 **********************************************************************/
810#define IXGBE_MHADD_MFS_SHIFT 16
811
812static void
813ixv_init_locked(struct adapter *adapter)
814{
815 struct ifnet *ifp = adapter->ifp;
816 device_t dev = adapter->dev;
817 struct ixgbe_hw *hw = &adapter->hw;
818 u32 mhadd, gpie;
819
820 INIT_DEBUGOUT("ixv_init: begin");
821 mtx_assert(&adapter->core_mtx, MA_OWNED);
822 hw->adapter_stopped = FALSE;
823 ixgbe_stop_adapter(hw);
824 callout_stop(&adapter->timer);
825
826 /* reprogram the RAR[0] in case user changed it. */
827 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
828
829 /* Get the latest mac address, User can use a LAA */
830 bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
831 IXGBE_ETH_LENGTH_OF_ADDRESS);
832 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
833 hw->addr_ctrl.rar_used_count = 1;
834
835 /* Prepare transmit descriptors and buffers */
836 if (ixv_setup_transmit_structures(adapter)) {
837 device_printf(dev,"Could not setup transmit structures\n");
838 ixv_stop(adapter);
839 return;
840 }
841
842 ixgbe_reset_hw(hw);
843 ixv_initialize_transmit_units(adapter);
844
845 /* Setup Multicast table */
846 ixv_set_multi(adapter);
847
848 /*
849 ** Determine the correct mbuf pool
850 ** for doing jumbo/headersplit
851 */
852 if (ifp->if_mtu > ETHERMTU)
853 adapter->rx_mbuf_sz = MJUMPAGESIZE;
854 else
855 adapter->rx_mbuf_sz = MCLBYTES;
856
857 /* Prepare receive descriptors and buffers */
858 if (ixv_setup_receive_structures(adapter)) {
859 device_printf(dev,"Could not setup receive structures\n");
860 ixv_stop(adapter);
861 return;
862 }
863
864 /* Configure RX settings */
865 ixv_initialize_receive_units(adapter);
866
867 /* Enable Enhanced MSIX mode */
868 gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
869 gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
870 gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
871 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
872
873 /* Set the various hardware offload abilities */
874 ifp->if_hwassist = 0;
875 if (ifp->if_capenable & IFCAP_TSO4)
876 ifp->if_hwassist |= CSUM_TSO;
877 if (ifp->if_capenable & IFCAP_TXCSUM) {
878 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
879#if __FreeBSD_version >= 800000
880 ifp->if_hwassist |= CSUM_SCTP;
881#endif
882 }
883
884 /* Set MTU size */
885 if (ifp->if_mtu > ETHERMTU) {
886 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
887 mhadd &= ~IXGBE_MHADD_MFS_MASK;
888 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
889 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
890 }
891
892 /* Set up VLAN offload and filter */
893 ixv_setup_vlan_support(adapter);
894
895 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
896
897 /* Set up MSI/X routing */
898 ixv_configure_ivars(adapter);
899
900 /* Set up auto-mask */
901 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
902
903 /* Set moderation on the Link interrupt */
904 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);
905
906 /* Stats init */
907 ixv_init_stats(adapter);
908
909 /* Config/Enable Link */
910 ixv_config_link(adapter);
911
912 /* And now turn on interrupts */
913 ixv_enable_intr(adapter);
914
915 /* Now inform the stack we're ready */
916 ifp->if_drv_flags |= IFF_DRV_RUNNING;
917 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
918
919 return;
920}
921
922static void
923ixv_init(void *arg)
924{
925 struct adapter *adapter = arg;
926
927 IXV_CORE_LOCK(adapter);
928 ixv_init_locked(adapter);
929 IXV_CORE_UNLOCK(adapter);
930 return;
931}
932
933
934/*
935**
936** MSIX Interrupt Handlers and Tasklets
937**
938*/
939
940static inline void
941ixv_enable_queue(struct adapter *adapter, u32 vector)
942{
943 struct ixgbe_hw *hw = &adapter->hw;
944 u32 queue = 1 << vector;
945 u32 mask;
946
947 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
948 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
949}
950
951static inline void
952ixv_disable_queue(struct adapter *adapter, u32 vector)
953{
954 struct ixgbe_hw *hw = &adapter->hw;
955 u64 queue = (u64)(1 << vector);
956 u32 mask;
957
958 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
959 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
960}
961
962static inline void
963ixv_rearm_queues(struct adapter *adapter, u64 queues)
964{
965 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
966 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
967}
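/*
** Example (hypothetical): for MSIX vector 1 the queue bit is 1 << 1,
** so the mask written to VTEIMS/VTEIMC/VTEICS above is 0x2, assuming
** that bit is set in IXGBE_EIMS_RTX_QUEUE.
*/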
968
969
970static void
971ixv_handle_que(void *context, int pending)
972{
973 struct ix_queue *que = context;
974 struct adapter *adapter = que->adapter;
975 struct tx_ring *txr = que->txr;
976 struct ifnet *ifp = adapter->ifp;
977 bool more;
978
979 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
980 more = ixv_rxeof(que, adapter->rx_process_limit);
981 IXV_TX_LOCK(txr);
982 ixv_txeof(txr);
983#if __FreeBSD_version >= 800000
984 if (!drbr_empty(ifp, txr->br))
985 ixv_mq_start_locked(ifp, txr, NULL);
986#else
987 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
988 ixv_start_locked(txr, ifp);
989#endif
990 IXV_TX_UNLOCK(txr);
991 if (more) {
992 taskqueue_enqueue(que->tq, &que->que_task);
993 return;
994 }
995 }
996
997 /* Reenable this interrupt */
998 ixv_enable_queue(adapter, que->msix);
999 return;
1000}
1001
1002/*********************************************************************
1003 *
 1004 * MSIX Queue Interrupt Service routine
1005 *
1006 **********************************************************************/
1007void
1008ixv_msix_que(void *arg)
1009{
1010 struct ix_queue *que = arg;
1011 struct adapter *adapter = que->adapter;
1012 struct tx_ring *txr = que->txr;
1013 struct rx_ring *rxr = que->rxr;
1014 bool more_tx, more_rx;
1015 u32 newitr = 0;
1016
1017 ixv_disable_queue(adapter, que->msix);
1018 ++que->irqs;
1019
1020 more_rx = ixv_rxeof(que, adapter->rx_process_limit);
1021
1022 IXV_TX_LOCK(txr);
1023 more_tx = ixv_txeof(txr);
1024 /*
1025 ** Make certain that if the stack
1026 ** has anything queued the task gets
1027 ** scheduled to handle it.
1028 */
1029#if __FreeBSD_version < 800000
1030 if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
1031#else
1032 if (!drbr_empty(adapter->ifp, txr->br))
1033#endif
1034 more_tx = 1;
1035 IXV_TX_UNLOCK(txr);
1036
1037 more_rx = ixv_rxeof(que, adapter->rx_process_limit);
1038
1039 /* Do AIM now? */
1040
1041 if (ixv_enable_aim == FALSE)
1042 goto no_calc;
1043 /*
1044 ** Do Adaptive Interrupt Moderation:
1045 ** - Write out last calculated setting
1046 ** - Calculate based on average size over
1047 ** the last interval.
1048 */
1049 if (que->eitr_setting)
1050 IXGBE_WRITE_REG(&adapter->hw,
1051 IXGBE_VTEITR(que->msix),
1052 que->eitr_setting);
1053
1054 que->eitr_setting = 0;
1055
1056 /* Idle, do nothing */
1057 if ((txr->bytes == 0) && (rxr->bytes == 0))
1058 goto no_calc;
1059
1060 if ((txr->bytes) && (txr->packets))
1061 newitr = txr->bytes/txr->packets;
1062 if ((rxr->bytes) && (rxr->packets))
1063 newitr = max(newitr,
1064 (rxr->bytes / rxr->packets));
1065 newitr += 24; /* account for hardware frame, crc */
1066
1067 /* set an upper boundary */
1068 newitr = min(newitr, 3000);
1069
1070 /* Be nice to the mid range */
1071 if ((newitr > 300) && (newitr < 1200))
1072 newitr = (newitr / 3);
1073 else
1074 newitr = (newitr / 2);
1075
1076 newitr |= newitr << 16;
1077
1078 /* save for next interrupt */
1079 que->eitr_setting = newitr;
1080
1081 /* Reset state */
1082 txr->bytes = 0;
1083 txr->packets = 0;
1084 rxr->bytes = 0;
1085 rxr->packets = 0;
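	/*
	** Worked example (hypothetical traffic): an average frame of
	** 1024 bytes gives newitr = 1024 + 24 = 1048; that falls in the
	** 300-1200 mid range, so it becomes 1048 / 3 = 349, which is
	** then replicated into the upper 16 bits and written to VTEITR
	** on the next interrupt.
	*/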
1086
1087no_calc:
1088 if (more_tx || more_rx)
1089 taskqueue_enqueue(que->tq, &que->que_task);
1090 else /* Reenable this interrupt */
1091 ixv_enable_queue(adapter, que->msix);
1092 return;
1093}
1094
1095static void
1096ixv_msix_mbx(void *arg)
1097{
1098 struct adapter *adapter = arg;
1099 struct ixgbe_hw *hw = &adapter->hw;
1100 u32 reg;
1101
1102 ++adapter->mbx_irq;
1103
1104 /* First get the cause */
1105 reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
1106 /* Clear interrupt with write */
1107 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
1108
1109 /* Link status change */
1110 if (reg & IXGBE_EICR_LSC)
1111 taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
1112
1113 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
1114 return;
1115}
1116
1117/*********************************************************************
1118 *
1119 * Media Ioctl callback
1120 *
1121 * This routine is called whenever the user queries the status of
1122 * the interface using ifconfig.
1123 *
1124 **********************************************************************/
1125static void
1126ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1127{
1128 struct adapter *adapter = ifp->if_softc;
1129
1130 INIT_DEBUGOUT("ixv_media_status: begin");
1131 IXV_CORE_LOCK(adapter);
1132 ixv_update_link_status(adapter);
1133
1134 ifmr->ifm_status = IFM_AVALID;
1135 ifmr->ifm_active = IFM_ETHER;
1136
1137 if (!adapter->link_active) {
1138 IXV_CORE_UNLOCK(adapter);
1139 return;
1140 }
1141
1142 ifmr->ifm_status |= IFM_ACTIVE;
1143
1144 switch (adapter->link_speed) {
1145 case IXGBE_LINK_SPEED_1GB_FULL:
1146 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1147 break;
1148 case IXGBE_LINK_SPEED_10GB_FULL:
1149 ifmr->ifm_active |= IFM_FDX;
1150 break;
1151 }
1152
1153 IXV_CORE_UNLOCK(adapter);
1154
1155 return;
1156}
1157
1158/*********************************************************************
1159 *
1160 * Media Ioctl callback
1161 *
1162 * This routine is called when the user changes speed/duplex using
 1163 * media/mediaopt option with ifconfig.
1164 *
1165 **********************************************************************/
1166static int
1167ixv_media_change(struct ifnet * ifp)
1168{
1169 struct adapter *adapter = ifp->if_softc;
1170 struct ifmedia *ifm = &adapter->media;
1171
1172 INIT_DEBUGOUT("ixv_media_change: begin");
1173
1174 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1175 return (EINVAL);
1176
1177 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1178 case IFM_AUTO:
1179 break;
1180 default:
1181 device_printf(adapter->dev, "Only auto media type\n");
1182 return (EINVAL);
1183 }
1184
1185 return (0);
1186}
1187
1188/*********************************************************************
1189 *
1190 * This routine maps the mbufs to tx descriptors, allowing the
1191 * TX engine to transmit the packets.
1192 * - return 0 on success, positive on failure
1193 *
1194 **********************************************************************/
1195
1196static int
1197ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
1198{
1199 struct adapter *adapter = txr->adapter;
1200 u32 olinfo_status = 0, cmd_type_len;
1201 u32 paylen = 0;
1202 int i, j, error, nsegs;
1203 int first, last = 0;
1204 struct mbuf *m_head;
1205 bus_dma_segment_t segs[32];
1206 bus_dmamap_t map;
1207 struct ixv_tx_buf *txbuf, *txbuf_mapped;
1208 union ixgbe_adv_tx_desc *txd = NULL;
1209
1210 m_head = *m_headp;
1211
1212 /* Basic descriptor defines */
1213 cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
1214 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
1215
1216 if (m_head->m_flags & M_VLANTAG)
1217 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1218
1219 /*
1220 * Important to capture the first descriptor
1221 * used because it will contain the index of
1222 * the one we tell the hardware to report back
1223 */
1224 first = txr->next_avail_desc;
1225 txbuf = &txr->tx_buffers[first];
1226 txbuf_mapped = txbuf;
1227 map = txbuf->map;
1228
1229 /*
1230 * Map the packet for DMA.
1231 */
1232 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1233 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1234
1235 if (error == EFBIG) {
1236 struct mbuf *m;
1237
1238 m = m_defrag(*m_headp, M_DONTWAIT);
1239 if (m == NULL) {
1240 adapter->mbuf_defrag_failed++;
1241 m_freem(*m_headp);
1242 *m_headp = NULL;
1243 return (ENOBUFS);
1244 }
1245 *m_headp = m;
1246
1247 /* Try it again */
1248 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1249 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1250
1251 if (error == ENOMEM) {
1252 adapter->no_tx_dma_setup++;
1253 return (error);
1254 } else if (error != 0) {
1255 adapter->no_tx_dma_setup++;
1256 m_freem(*m_headp);
1257 *m_headp = NULL;
1258 return (error);
1259 }
1260 } else if (error == ENOMEM) {
1261 adapter->no_tx_dma_setup++;
1262 return (error);
1263 } else if (error != 0) {
1264 adapter->no_tx_dma_setup++;
1265 m_freem(*m_headp);
1266 *m_headp = NULL;
1267 return (error);
1268 }
1269
1270 /* Make certain there are enough descriptors */
1271 if (nsegs > txr->tx_avail - 2) {
1272 txr->no_desc_avail++;
1273 error = ENOBUFS;
1274 goto xmit_fail;
1275 }
1276 m_head = *m_headp;
1277
1278 /*
 1279	** Set up the appropriate offload context;
1280 ** this becomes the first descriptor of
1281 ** a packet.
1282 */
1283 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
1284 if (ixv_tso_setup(txr, m_head, &paylen)) {
1285 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
1286 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
1287 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1288 olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
1289 ++adapter->tso_tx;
1290 } else
1291 return (ENXIO);
1292 } else if (ixv_tx_ctx_setup(txr, m_head))
1293 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1294
1295 /* Record payload length */
1296 if (paylen == 0)
1297 olinfo_status |= m_head->m_pkthdr.len <<
1298 IXGBE_ADVTXD_PAYLEN_SHIFT;
1299
1300 i = txr->next_avail_desc;
1301 for (j = 0; j < nsegs; j++) {
1302 bus_size_t seglen;
1303 bus_addr_t segaddr;
1304
1305 txbuf = &txr->tx_buffers[i];
1306 txd = &txr->tx_base[i];
1307 seglen = segs[j].ds_len;
1308 segaddr = htole64(segs[j].ds_addr);
1309
1310 txd->read.buffer_addr = segaddr;
1311 txd->read.cmd_type_len = htole32(txr->txd_cmd |
1312 cmd_type_len |seglen);
1313 txd->read.olinfo_status = htole32(olinfo_status);
1314 last = i; /* descriptor that will get completion IRQ */
1315
1316 if (++i == adapter->num_tx_desc)
1317 i = 0;
1318
1319 txbuf->m_head = NULL;
1320 txbuf->eop_index = -1;
1321 }
1322
1323 txd->read.cmd_type_len |=
1324 htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1325 txr->tx_avail -= nsegs;
1326 txr->next_avail_desc = i;
1327
1328 txbuf->m_head = m_head;
1329 txr->tx_buffers[first].map = txbuf->map;
1330 txbuf->map = map;
1331 bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
1332
1333 /* Set the index of the descriptor that will be marked done */
1334 txbuf = &txr->tx_buffers[first];
1335 txbuf->eop_index = last;
1336
1337 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1338 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1339 /*
 1340	 * Advance the Transmit Descriptor Tail (TDT); this tells the
1341 * hardware that this frame is available to transmit.
1342 */
1343 ++txr->total_packets;
1344 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);
1345
1346 return (0);
1347
1348xmit_fail:
1349 bus_dmamap_unload(txr->txtag, txbuf->map);
1350 return (error);
1351
1352}
1353
1354
1355/*********************************************************************
1356 * Multicast Update
1357 *
 1358 * This routine is called whenever the multicast address list is updated.
1359 *
1360 **********************************************************************/
1361#define IXGBE_RAR_ENTRIES 16
1362
1363static void
1364ixv_set_multi(struct adapter *adapter)
1365{
1366 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1367 u8 *update_ptr;
1368 struct ifmultiaddr *ifma;
1369 int mcnt = 0;
1370 struct ifnet *ifp = adapter->ifp;
1371
1372 IOCTL_DEBUGOUT("ixv_set_multi: begin");
1373
1374#if __FreeBSD_version < 800000
1375 IF_ADDR_LOCK(ifp);
1376#else
1377 if_maddr_rlock(ifp);
1378#endif
1379 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1380 if (ifma->ifma_addr->sa_family != AF_LINK)
1381 continue;
1382 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1383 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1384 IXGBE_ETH_LENGTH_OF_ADDRESS);
1385 mcnt++;
1386 }
1387#if __FreeBSD_version < 800000
1388 IF_ADDR_UNLOCK(ifp);
1389#else
1390 if_maddr_runlock(ifp);
1391#endif
1392
1393 update_ptr = mta;
1394
1395 ixgbe_update_mc_addr_list(&adapter->hw,
1396 update_ptr, mcnt, ixv_mc_array_itr, TRUE);
1397
1398 return;
1399}
1400
1401/*
1402 * This is an iterator function now needed by the multicast
1403 * shared code. It simply feeds the shared code routine the
1404 * addresses in the array of ixv_set_multi() one by one.
1405 */
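/*
** Example (hypothetical): with two multicast addresses collected by
** ixv_set_multi(), the shared code calls this iterator twice and is
** handed mta[0..5] and then mta[6..11] (6-byte Ethernet addresses).
*/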
1406static u8 *
1407ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1408{
1409 u8 *addr = *update_ptr;
1410 u8 *newptr;
1411 *vmdq = 0;
1412
1413 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1414 *update_ptr = newptr;
1415 return addr;
1416}
1417
1418/*********************************************************************
1419 * Timer routine
1420 *
 1421 * This routine checks for link status, updates statistics,
1422 * and runs the watchdog check.
1423 *
1424 **********************************************************************/
1425
1426static void
1427ixv_local_timer(void *arg)
1428{
1429 struct adapter *adapter = arg;
1430 device_t dev = adapter->dev;
1431 struct tx_ring *txr = adapter->tx_rings;
1432 int i;
1433
1434 mtx_assert(&adapter->core_mtx, MA_OWNED);
1435
1436 ixv_update_link_status(adapter);
1437
1438 /* Stats Update */
1439 ixv_update_stats(adapter);
1440
1441 /*
1442 * If the interface has been paused
1443 * then don't do the watchdog check
1444 */
1445 if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
1446 goto out;
1447 /*
1448 ** Check for time since any descriptor was cleaned
1449 */
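	/*
	** (Hypothetical example) assuming IXV_WATCHDOG is defined as
	** 10 * hz, a ring is considered hung when no descriptor has
	** been cleaned for roughly ten seconds of ticks.
	*/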
1450 for (i = 0; i < adapter->num_queues; i++, txr++) {
1451 IXV_TX_LOCK(txr);
1452 if (txr->watchdog_check == FALSE) {
1453 IXV_TX_UNLOCK(txr);
1454 continue;
1455 }
1456 if ((ticks - txr->watchdog_time) > IXV_WATCHDOG)
1457 goto hung;
1458 IXV_TX_UNLOCK(txr);
1459 }
1460out:
1461 ixv_rearm_queues(adapter, adapter->que_mask);
1462 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
1463 return;
1464
1465hung:
1466 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1467 device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
1468 IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
1469 IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
1470 device_printf(dev,"TX(%d) desc avail = %d,"
1471 "Next TX to Clean = %d\n",
1472 txr->me, txr->tx_avail, txr->next_to_clean);
1473 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1474 adapter->watchdog_events++;
1475 IXV_TX_UNLOCK(txr);
1476 ixv_init_locked(adapter);
1477}
1478
1479/*
1480** Note: this routine updates the OS on the link state
1481** the real check of the hardware only happens with
1482** a link interrupt.
1483*/
1484static void
1485ixv_update_link_status(struct adapter *adapter)
1486{
1487 struct ifnet *ifp = adapter->ifp;
1488 struct tx_ring *txr = adapter->tx_rings;
1489 device_t dev = adapter->dev;
1490
1491
1492 if (adapter->link_up){
1493 if (adapter->link_active == FALSE) {
1494 if (bootverbose)
1495 device_printf(dev,"Link is up %d Gbps %s \n",
1496 ((adapter->link_speed == 128)? 10:1),
1497 "Full Duplex");
1498 adapter->link_active = TRUE;
1499 if_link_state_change(ifp, LINK_STATE_UP);
1500 }
1501 } else { /* Link down */
1502 if (adapter->link_active == TRUE) {
1503 if (bootverbose)
1504 device_printf(dev,"Link is Down\n");
1505 if_link_state_change(ifp, LINK_STATE_DOWN);
1506 adapter->link_active = FALSE;
1507 for (int i = 0; i < adapter->num_queues;
1508 i++, txr++)
1509 txr->watchdog_check = FALSE;
1510 }
1511 }
1512
1513 return;
1514}
1515
1516
1517/*********************************************************************
1518 *
1519 * This routine disables all traffic on the adapter by issuing a
1520 * global reset on the MAC and deallocates TX/RX buffers.
1521 *
1522 **********************************************************************/
1523
1524static void
1525ixv_stop(void *arg)
1526{
1527 struct ifnet *ifp;
1528 struct adapter *adapter = arg;
1529 struct ixgbe_hw *hw = &adapter->hw;
1530 ifp = adapter->ifp;
1531
1532 mtx_assert(&adapter->core_mtx, MA_OWNED);
1533
1534 INIT_DEBUGOUT("ixv_stop: begin\n");
1535 ixv_disable_intr(adapter);
1536
1537 /* Tell the stack that the interface is no longer active */
1538 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1539
1540 ixgbe_reset_hw(hw);
1541 adapter->hw.adapter_stopped = FALSE;
1542 ixgbe_stop_adapter(hw);
1543 callout_stop(&adapter->timer);
1544
1545 /* reprogram the RAR[0] in case user changed it. */
1546 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1547
1548 return;
1549}
1550
1551
1552/*********************************************************************
1553 *
1554 * Determine hardware revision.
1555 *
1556 **********************************************************************/
1557static void
1558ixv_identify_hardware(struct adapter *adapter)
1559{
1560 device_t dev = adapter->dev;
1561 u16 pci_cmd_word;
1562
1563 /*
1564	** Make sure BUSMASTER is set; on a VM under
1565	** KVM it may not be, and that will break things.
1566 */
1567 pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1568 if (!((pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1569 (pci_cmd_word & PCIM_CMD_MEMEN))) {
1570 INIT_DEBUGOUT("Memory Access and/or Bus Master "
1571 "bits were not set!\n");
1572 pci_cmd_word |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1573 pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
1574 }
1575
1576 /* Save off the information about this board */
1577 adapter->hw.vendor_id = pci_get_vendor(dev);
1578 adapter->hw.device_id = pci_get_device(dev);
1579 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1580 adapter->hw.subsystem_vendor_id =
1581 pci_read_config(dev, PCIR_SUBVEND_0, 2);
1582 adapter->hw.subsystem_device_id =
1583 pci_read_config(dev, PCIR_SUBDEV_0, 2);
1584
1585 return;
1586}
1587
1588/*********************************************************************
1589 *
1590 * Setup MSIX Interrupt resources and handlers
1591 *
1592 **********************************************************************/
1593static int
1594ixv_allocate_msix(struct adapter *adapter)
1595{
1596 device_t dev = adapter->dev;
1597 struct ix_queue *que = adapter->queues;
1598 int error, rid, vector = 0;
1599
1600 for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
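		/* SYS_RES_IRQ rids for MSI-X vectors are 1-based, hence vector + 1. */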
1601 rid = vector + 1;
1602 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1603 RF_SHAREABLE | RF_ACTIVE);
1604 if (que->res == NULL) {
1605 device_printf(dev,"Unable to allocate"
1606 " bus resource: que interrupt [%d]\n", vector);
1607 return (ENXIO);
1608 }
1609 /* Set the handler function */
1610 error = bus_setup_intr(dev, que->res,
1611 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1612 ixv_msix_que, que, &que->tag);
1613 if (error) {
1614 que->res = NULL;
1615 device_printf(dev, "Failed to register QUE handler");
1616 return (error);
1617 }
1618#if __FreeBSD_version >= 800504
1619 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
1620#endif
1621 que->msix = vector;
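		/* Record this vector in the queue mask used later to rearm the queue interrupts. */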
1622 adapter->que_mask |= (u64)(1 << que->msix);
1623 /*
1624	** Bind the MSIX vector, and thus the
1625	** ring, to the corresponding CPU.
1626 */
1627 if (adapter->num_queues > 1)
1628 bus_bind_intr(dev, que->res, i);
1629
1630 TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
1631 que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
1632 taskqueue_thread_enqueue, &que->tq);
1633 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1634 device_get_nameunit(adapter->dev));
1635 }
1636
1637 /* and Mailbox */
1638 rid = vector + 1;
1639 adapter->res = bus_alloc_resource_any(dev,
1640 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1641 if (!adapter->res) {
1642 device_printf(dev,"Unable to allocate"
1643 " bus resource: MBX interrupt [%d]\n", rid);
1644 return (ENXIO);
1645 }
1646 /* Set the mbx handler function */
1647 error = bus_setup_intr(dev, adapter->res,
1648 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1649 ixv_msix_mbx, adapter, &adapter->tag);
1650 if (error) {
1651 adapter->res = NULL;
1652 device_printf(dev, "Failed to register LINK handler");
1653 return (error);
1654 }
1655#if __FreeBSD_version >= 800504
1656 bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
1657#endif
1658 adapter->mbxvec = vector;
1659 /* Tasklets for Mailbox */
1660 TASK_INIT(&adapter->mbx_task, 0, ixv_handle_mbx, adapter);
1661 adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
1662 taskqueue_thread_enqueue, &adapter->tq);
1663 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
1664 device_get_nameunit(adapter->dev));
1665 /*
1666	** Due to a broken design, QEMU will fail to properly
1667	** enable the guest for MSIX unless the vectors in
1668	** the table are all set up, so we must rewrite the
1669	** ENABLE bit in the MSIX control register again at
1670	** this point to get it to initialize us successfully.
1671 */
1672 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1673 int msix_ctrl;
1674 pci_find_cap(dev, PCIY_MSIX, &rid);
1675 rid += PCIR_MSIX_CTRL;
1676 msix_ctrl = pci_read_config(dev, rid, 2);
1677 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1678 pci_write_config(dev, rid, msix_ctrl, 2);
1679 }
1680
1681 return (0);
1682}
1683
1684/*
1685 * Setup MSIX resources, note that the VF
1686 * device MUST use MSIX, there is no fallback.
1687 */
1688static int
1689ixv_setup_msix(struct adapter *adapter)
1690{
1691 device_t dev = adapter->dev;
1692 int rid, vectors, want = 2;
1693
1694
1695 /* First try MSI/X */
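	/* The VF's MSIX table lives in BAR 3 (released as PCIR_BAR(MSIX_BAR) in ixv_free_pci_resources). */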
1696 rid = PCIR_BAR(3);
1697 adapter->msix_mem = bus_alloc_resource_any(dev,
1698 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1699 if (!adapter->msix_mem) {
1700 device_printf(adapter->dev,
1701 "Unable to map MSIX table \n");
1702 goto out;
1703 }
1704
1705 vectors = pci_msix_count(dev);
1706 if (vectors < 2) {
1707 bus_release_resource(dev, SYS_RES_MEMORY,
1708 rid, adapter->msix_mem);
1709 adapter->msix_mem = NULL;
1710 goto out;
1711 }
1712
1713 /*
1714 ** Want two vectors: one for a queue,
1715	** plus an additional one for the mailbox.
1716 */
1717 if (pci_alloc_msix(dev, &want) == 0) {
1718 device_printf(adapter->dev,
1719 "Using MSIX interrupts with %d vectors\n", want);
1720 return (want);
1721 }
1722out:
1723 device_printf(adapter->dev,"MSIX config error\n");
1724 return (ENXIO);
1725}
1726
1727
1728static int
1729ixv_allocate_pci_resources(struct adapter *adapter)
1730{
1731 int rid;
1732 device_t dev = adapter->dev;
1733
1734 rid = PCIR_BAR(0);
1735 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1736 &rid, RF_ACTIVE);
1737
1738 if (!(adapter->pci_mem)) {
1739 device_printf(dev,"Unable to allocate bus resource: memory\n");
1740 return (ENXIO);
1741 }
1742
1743 adapter->osdep.mem_bus_space_tag =
1744 rman_get_bustag(adapter->pci_mem);
1745 adapter->osdep.mem_bus_space_handle =
1746 rman_get_bushandle(adapter->pci_mem);
1747 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
1748
1749 adapter->num_queues = 1;
1750 adapter->hw.back = &adapter->osdep;
1751
1752 /*
1753	** Now set up MSI/X; this should
1754	** return the number of
1755	** configured vectors.
1756 */
1757 adapter->msix = ixv_setup_msix(adapter);
1758 if (adapter->msix == ENXIO)
1759 return (ENXIO);
1760 else
1761 return (0);
1762}
1763
1764static void
1765ixv_free_pci_resources(struct adapter * adapter)
1766{
1767 struct ix_queue *que = adapter->queues;
1768 device_t dev = adapter->dev;
1769 int rid, memrid;
1770
1771 memrid = PCIR_BAR(MSIX_BAR);
1772
1773 /*
1774 ** There is a slight possibility of a failure mode
1775 ** in attach that will result in entering this function
1776 ** before interrupt resources have been initialized, and
1777	** in that case we do not want to execute the loops below.
1778	** We can detect this reliably from the state of the adapter's
1779	** res pointer.
1780 */
1781 if (adapter->res == NULL)
1782 goto mem;
1783
1784 /*
1785 ** Release all msix queue resources:
1786 */
1787 for (int i = 0; i < adapter->num_queues; i++, que++) {
1788 rid = que->msix + 1;
1789 if (que->tag != NULL) {
1790 bus_teardown_intr(dev, que->res, que->tag);
1791 que->tag = NULL;
1792 }
1793 if (que->res != NULL)
1794 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1795 }
1796
1797
1798	/* Clean the Mailbox/Link interrupt last */
1799 if (adapter->mbxvec) /* we are doing MSIX */
1800 rid = adapter->mbxvec + 1;
1801 else
1802 (adapter->msix != 0) ? (rid = 1):(rid = 0);
1803
1804 if (adapter->tag != NULL) {
1805 bus_teardown_intr(dev, adapter->res, adapter->tag);
1806 adapter->tag = NULL;
1807 }
1808 if (adapter->res != NULL)
1809 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
1810
1811mem:
1812 if (adapter->msix)
1813 pci_release_msi(dev);
1814
1815 if (adapter->msix_mem != NULL)
1816 bus_release_resource(dev, SYS_RES_MEMORY,
1817 memrid, adapter->msix_mem);
1818
1819 if (adapter->pci_mem != NULL)
1820 bus_release_resource(dev, SYS_RES_MEMORY,
1821 PCIR_BAR(0), adapter->pci_mem);
1822
1823 return;
1824}
1825
1826/*********************************************************************
1827 *
1828 * Setup networking device structure and register an interface.
1829 *
1830 **********************************************************************/
1831static void
1832ixv_setup_interface(device_t dev, struct adapter *adapter)
1833{
1834 struct ifnet *ifp;
1835
1836 INIT_DEBUGOUT("ixv_setup_interface: begin");
1837
1838 ifp = adapter->ifp = if_alloc(IFT_ETHER);
1839 if (ifp == NULL)
1840 panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
1841 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1842 ifp->if_baudrate = 1000000000;
1843 ifp->if_init = ixv_init;
1844 ifp->if_softc = adapter;
1845 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1846 ifp->if_ioctl = ixv_ioctl;
1847#if __FreeBSD_version >= 800000
1848 ifp->if_transmit = ixv_mq_start;
1849 ifp->if_qflush = ixv_qflush;
1850#else
1851 ifp->if_start = ixv_start;
1852#endif
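	/* Size the legacy send queue to roughly the TX descriptor count. */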
1853 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
1854
1855 ether_ifattach(ifp, adapter->hw.mac.addr);
1856
1857 adapter->max_frame_size =
1858 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1859
1860 /*
1861 * Tell the upper layer(s) we support long frames.
1862 */
1863 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1864
1865 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
1866 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1867 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1868 | IFCAP_VLAN_HWTSO
1869 | IFCAP_VLAN_MTU;
1870 ifp->if_capenable = ifp->if_capabilities;
1871
1872	/* Advertise LRO capability, but don't enable it by default */
1873 ifp->if_capabilities |= IFCAP_LRO;
1874
1875 /*
1876 * Specify the media types supported by this adapter and register
1877 * callbacks to update media and link information
1878 */
1879 ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
1880 ixv_media_status);
1881 ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
1882 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1883 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1884
1885 return;
1886}
1887
1888static void
1889ixv_config_link(struct adapter *adapter)
1890{
1891 struct ixgbe_hw *hw = &adapter->hw;
1892 u32 autoneg, err = 0;
1893 bool negotiate = TRUE;
1894
1895 if (hw->mac.ops.check_link)
1896 err = hw->mac.ops.check_link(hw, &autoneg,
1897 &adapter->link_up, FALSE);
1898 if (err)
1899 goto out;
1900
1901 if (hw->mac.ops.setup_link)
1902 err = hw->mac.ops.setup_link(hw, autoneg,
1903 negotiate, adapter->link_up);
1904out:
1905 return;
1906}
1907
1908/********************************************************************
1909 * Manage DMA'able memory.
1910 *******************************************************************/
1911static void
1912ixv_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1913{
1914 if (error)
1915 return;
1916 *(bus_addr_t *) arg = segs->ds_addr;
1917 return;
1918}
1919
1920static int
1921ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
1922 struct ixv_dma_alloc *dma, int mapflags)
1923{
1924 device_t dev = adapter->dev;
1925 int r;
1926
1927 r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
1928 DBA_ALIGN, 0, /* alignment, bounds */
1929 BUS_SPACE_MAXADDR, /* lowaddr */
1930 BUS_SPACE_MAXADDR, /* highaddr */
1931 NULL, NULL, /* filter, filterarg */
1932 size, /* maxsize */
1933 1, /* nsegments */
1934 size, /* maxsegsize */
1935 BUS_DMA_ALLOCNOW, /* flags */
1936 NULL, /* lockfunc */
1937 NULL, /* lockfuncarg */
1938 &dma->dma_tag);
1939 if (r != 0) {
1940 device_printf(dev,"ixv_dma_malloc: bus_dma_tag_create failed; "
1941 "error %u\n", r);
1942 goto fail_0;
1943 }
1944 r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
1945 BUS_DMA_NOWAIT, &dma->dma_map);
1946 if (r != 0) {
1947 device_printf(dev,"ixv_dma_malloc: bus_dmamem_alloc failed; "
1948 "error %u\n", r);
1949 goto fail_1;
1950 }
1951 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1952 size,
1953 ixv_dmamap_cb,
1954 &dma->dma_paddr,
1955 mapflags | BUS_DMA_NOWAIT);
1956 if (r != 0) {
1957 device_printf(dev,"ixv_dma_malloc: bus_dmamap_load failed; "
1958 "error %u\n", r);
1959 goto fail_2;
1960 }
1961 dma->dma_size = size;
1962 return (0);
1963fail_2:
1964 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1965fail_1:
1966 bus_dma_tag_destroy(dma->dma_tag);
1967fail_0:
1968 dma->dma_map = NULL;
1969 dma->dma_tag = NULL;
1970 return (r);
1971}
1972
1973static void
1974ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
1975{
1976 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
1977 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1978 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1979 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1980 bus_dma_tag_destroy(dma->dma_tag);
1981}
1982
1983
1984/*********************************************************************
1985 *
1986 * Allocate memory for the transmit and receive rings, and then
1987 * the descriptors associated with each, called only once at attach.
1988 *
1989 **********************************************************************/
1990static int
1991ixv_allocate_queues(struct adapter *adapter)
1992{
1993 device_t dev = adapter->dev;
1994 struct ix_queue *que;
1995 struct tx_ring *txr;
1996 struct rx_ring *rxr;
1997 int rsize, tsize, error = 0;
1998 int txconf = 0, rxconf = 0;
1999
2000 /* First allocate the top level queue structs */
2001 if (!(adapter->queues =
2002 (struct ix_queue *) malloc(sizeof(struct ix_queue) *
2003 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2004 device_printf(dev, "Unable to allocate queue memory\n");
2005 error = ENOMEM;
2006 goto fail;
2007 }
2008
2009 /* First allocate the TX ring struct memory */
2010 if (!(adapter->tx_rings =
2011 (struct tx_ring *) malloc(sizeof(struct tx_ring) *
2012 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2013 device_printf(dev, "Unable to allocate TX ring memory\n");
2014 error = ENOMEM;
2015 goto tx_fail;
2016 }
2017
2018 /* Next allocate the RX */
2019 if (!(adapter->rx_rings =
2020 (struct rx_ring *) malloc(sizeof(struct rx_ring) *
2021 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2022 device_printf(dev, "Unable to allocate RX ring memory\n");
2023 error = ENOMEM;
2024 goto rx_fail;
2025 }
2026
2027	/* Size of the TX descriptor ring itself */
2028 tsize = roundup2(adapter->num_tx_desc *
2029 sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
2030
2031 /*
2032	 * Now set up the TX queues; txconf is needed to handle the
2033	 * possibility that things fail midcourse and we need to
2034	 * undo the memory allocations gracefully.
2035 */
2036 for (int i = 0; i < adapter->num_queues; i++, txconf++) {
2037 /* Set up some basics */
2038 txr = &adapter->tx_rings[i];
2039 txr->adapter = adapter;
2040 txr->me = i;
2041
2042 /* Initialize the TX side lock */
2043 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2044 device_get_nameunit(dev), txr->me);
2045 mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
2046
2047 if (ixv_dma_malloc(adapter, tsize,
2048 &txr->txdma, BUS_DMA_NOWAIT)) {
2049 device_printf(dev,
2050 "Unable to allocate TX Descriptor memory\n");
2051 error = ENOMEM;
2052 goto err_tx_desc;
2053 }
2054 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2055 bzero((void *)txr->tx_base, tsize);
2056
2057 /* Now allocate transmit buffers for the ring */
2058 if (ixv_allocate_transmit_buffers(txr)) {
2059 device_printf(dev,
2060 "Critical Failure setting up transmit buffers\n");
2061 error = ENOMEM;
2062 goto err_tx_desc;
2063 }
2064#if __FreeBSD_version >= 800000
2065 /* Allocate a buf ring */
2066 txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
2067 M_WAITOK, &txr->tx_mtx);
2068 if (txr->br == NULL) {
2069 device_printf(dev,
2070 "Critical Failure setting up buf ring\n");
2071 error = ENOMEM;
2072 goto err_tx_desc;
2073 }
2074#endif
2075 }
2076
2077 /*
2078 * Next the RX queues...
2079 */
2080 rsize = roundup2(adapter->num_rx_desc *
2081 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
2082 for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
2083 rxr = &adapter->rx_rings[i];
2084 /* Set up some basics */
2085 rxr->adapter = adapter;
2086 rxr->me = i;
2087
2088 /* Initialize the RX side lock */
2089 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2090 device_get_nameunit(dev), rxr->me);
2091 mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
2092
2093 if (ixv_dma_malloc(adapter, rsize,
2094 &rxr->rxdma, BUS_DMA_NOWAIT)) {
2095 device_printf(dev,
2096	    "Unable to allocate RX Descriptor memory\n");
2097 error = ENOMEM;
2098 goto err_rx_desc;
2099 }
2100 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2101 bzero((void *)rxr->rx_base, rsize);
2102
2103		/* Allocate receive buffers for the ring */
2104 if (ixv_allocate_receive_buffers(rxr)) {
2105 device_printf(dev,
2106 "Critical Failure setting up receive buffers\n");
2107 error = ENOMEM;
2108 goto err_rx_desc;
2109 }
2110 }
2111
2112 /*
2113 ** Finally set up the queue holding structs
2114 */
2115 for (int i = 0; i < adapter->num_queues; i++) {
2116 que = &adapter->queues[i];
2117 que->adapter = adapter;
2118 que->txr = &adapter->tx_rings[i];
2119 que->rxr = &adapter->rx_rings[i];
2120 }
2121
2122 return (0);
2123
2124err_rx_desc:
2125 for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2126 ixv_dma_free(adapter, &rxr->rxdma);
2127err_tx_desc:
2128 for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2129 ixv_dma_free(adapter, &txr->txdma);
2130 free(adapter->rx_rings, M_DEVBUF);
2131rx_fail:
2132 free(adapter->tx_rings, M_DEVBUF);
2133tx_fail:
2134 free(adapter->queues, M_DEVBUF);
2135fail:
2136 return (error);
2137}
2138
2139
2140/*********************************************************************
2141 *
2142 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2143 * the information needed to transmit a packet on the wire. This is
2144 * called only once at attach; setup is done on every reset.
2145 *
2146 **********************************************************************/
2147static int
2148ixv_allocate_transmit_buffers(struct tx_ring *txr)
2149{
2150 struct adapter *adapter = txr->adapter;
2151 device_t dev = adapter->dev;
2152 struct ixv_tx_buf *txbuf;
2153 int error, i;
2154
2155 /*
2156 * Setup DMA descriptor areas.
2157 */
2158 if ((error = bus_dma_tag_create(
2159 bus_get_dma_tag(adapter->dev), /* parent */
2160 1, 0, /* alignment, bounds */
2161 BUS_SPACE_MAXADDR, /* lowaddr */
2162 BUS_SPACE_MAXADDR, /* highaddr */
2163 NULL, NULL, /* filter, filterarg */
2164 IXV_TSO_SIZE, /* maxsize */
2165 32, /* nsegments */
2166 PAGE_SIZE, /* maxsegsize */
2167 0, /* flags */
2168 NULL, /* lockfunc */
2169 NULL, /* lockfuncarg */
2170 &txr->txtag))) {
2171 device_printf(dev,"Unable to allocate TX DMA tag\n");
2172 goto fail;
2173 }
2174
2175 if (!(txr->tx_buffers =
2176 (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
2177 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2178 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2179 error = ENOMEM;
2180 goto fail;
2181 }
2182
2183 /* Create the descriptor buffer dma maps */
2184 txbuf = txr->tx_buffers;
2185 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2186 error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
2187 if (error != 0) {
2188 device_printf(dev, "Unable to create TX DMA map\n");
2189 goto fail;
2190 }
2191 }
2192
2193 return 0;
2194fail:
2195	/* We free everything; this handles the case where we failed partway through */
2196 ixv_free_transmit_structures(adapter);
2197 return (error);
2198}
2199
2200/*********************************************************************
2201 *
2202 * Initialize a transmit ring.
2203 *
2204 **********************************************************************/
2205static void
2206ixv_setup_transmit_ring(struct tx_ring *txr)
2207{
2208 struct adapter *adapter = txr->adapter;
2209 struct ixv_tx_buf *txbuf;
2210 int i;
2211
2212 /* Clear the old ring contents */
2213 IXV_TX_LOCK(txr);
2214 bzero((void *)txr->tx_base,
2215 (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
2216 /* Reset indices */
2217 txr->next_avail_desc = 0;
2218 txr->next_to_clean = 0;
2219
2220 /* Free any existing tx buffers. */
2221 txbuf = txr->tx_buffers;
2222 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2223 if (txbuf->m_head != NULL) {
2224 bus_dmamap_sync(txr->txtag, txbuf->map,
2225 BUS_DMASYNC_POSTWRITE);
2226 bus_dmamap_unload(txr->txtag, txbuf->map);
2227 m_freem(txbuf->m_head);
2228 txbuf->m_head = NULL;
2229 }
2230 /* Clear the EOP index */
2231 txbuf->eop_index = -1;
2232 }
2233
2234 /* Set number of descriptors available */
2235 txr->tx_avail = adapter->num_tx_desc;
2236
2237 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2238 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2239 IXV_TX_UNLOCK(txr);
2240}
2241
2242/*********************************************************************
2243 *
2244 * Initialize all transmit rings.
2245 *
2246 **********************************************************************/
2247static int
2248ixv_setup_transmit_structures(struct adapter *adapter)
2249{
2250 struct tx_ring *txr = adapter->tx_rings;
2251
2252 for (int i = 0; i < adapter->num_queues; i++, txr++)
2253 ixv_setup_transmit_ring(txr);
2254
2255 return (0);
2256}
2257
2258/*********************************************************************
2259 *
2260 * Enable transmit unit.
2261 *
2262 **********************************************************************/
2263static void
2264ixv_initialize_transmit_units(struct adapter *adapter)
2265{
2266 struct tx_ring *txr = adapter->tx_rings;
2267 struct ixgbe_hw *hw = &adapter->hw;
2268
2269
2270 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2271 u64 tdba = txr->txdma.dma_paddr;
2272 u32 txctrl, txdctl;
2273
2274 /* Set WTHRESH to 8, burst writeback */
2275 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
2276 txdctl |= (8 << 16);
2277 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
2278 /* Now enable */
2279 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
2280 txdctl |= IXGBE_TXDCTL_ENABLE;
2281 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
2282
2283 /* Set the HW Tx Head and Tail indices */
2284 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
2285 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
2286
2287 /* Setup Transmit Descriptor Cmd Settings */
2288 txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2289 txr->watchdog_check = FALSE;
2290
2291 /* Set Ring parameters */
2292 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
2293 (tdba & 0x00000000ffffffffULL));
2294 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
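		/* Legacy and advanced TX descriptors are the same size, so the legacy struct yields the correct ring length. */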
2295 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
2296 adapter->num_tx_desc *
2297 sizeof(struct ixgbe_legacy_tx_desc));
2298 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
2299 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2300 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
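		/* Only the first ring is initialized; the VF uses a single queue (num_queues is set to 1 at attach). */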
2301 break;
2302 }
2303
2304 return;
2305}
2306
2307/*********************************************************************
2308 *
2309 * Free all transmit rings.
2310 *
2311 **********************************************************************/
2312static void
2313ixv_free_transmit_structures(struct adapter *adapter)
2314{
2315 struct tx_ring *txr = adapter->tx_rings;
2316
2317 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2318 IXV_TX_LOCK(txr);
2319 ixv_free_transmit_buffers(txr);
2320 ixv_dma_free(adapter, &txr->txdma);
2321 IXV_TX_UNLOCK(txr);
2322 IXV_TX_LOCK_DESTROY(txr);
2323 }
2324 free(adapter->tx_rings, M_DEVBUF);
2325}
2326
2327/*********************************************************************
2328 *
2329 * Free transmit ring related data structures.
2330 *
2331 **********************************************************************/
2332static void
2333ixv_free_transmit_buffers(struct tx_ring *txr)
2334{
2335 struct adapter *adapter = txr->adapter;
2336 struct ixv_tx_buf *tx_buffer;
2337 int i;
2338
2339 INIT_DEBUGOUT("free_transmit_ring: begin");
2340
2341 if (txr->tx_buffers == NULL)
2342 return;
2343
2344 tx_buffer = txr->tx_buffers;
2345 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2346 if (tx_buffer->m_head != NULL) {
2347 bus_dmamap_sync(txr->txtag, tx_buffer->map,
2348 BUS_DMASYNC_POSTWRITE);
2349 bus_dmamap_unload(txr->txtag,
2350 tx_buffer->map);
2351 m_freem(tx_buffer->m_head);
2352 tx_buffer->m_head = NULL;
2353 if (tx_buffer->map != NULL) {
2354 bus_dmamap_destroy(txr->txtag,
2355 tx_buffer->map);
2356 tx_buffer->map = NULL;
2357 }
2358 } else if (tx_buffer->map != NULL) {
2359 bus_dmamap_unload(txr->txtag,
2360 tx_buffer->map);
2361 bus_dmamap_destroy(txr->txtag,
2362 tx_buffer->map);
2363 tx_buffer->map = NULL;
2364 }
2365 }
2366#if __FreeBSD_version >= 800000
2367 if (txr->br != NULL)
2368 buf_ring_free(txr->br, M_DEVBUF);
2369#endif
2370 if (txr->tx_buffers != NULL) {
2371 free(txr->tx_buffers, M_DEVBUF);
2372 txr->tx_buffers = NULL;
2373 }
2374 if (txr->txtag != NULL) {
2375 bus_dma_tag_destroy(txr->txtag);
2376 txr->txtag = NULL;
2377 }
2378 return;
2379}
2380
2381/*********************************************************************
2382 *
2383 * Advanced Context Descriptor setup for VLAN or CSUM
2384 *
2385 **********************************************************************/
2386
2387static bool
2388ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
2389{
2390 struct adapter *adapter = txr->adapter;
2391 struct ixgbe_adv_tx_context_desc *TXD;
2392 struct ixv_tx_buf *tx_buffer;
2393 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2394 struct ether_vlan_header *eh;
2395 struct ip *ip;
2396 struct ip6_hdr *ip6;
2397 int ehdrlen, ip_hlen = 0;
2398 u16 etype;
2399 u8 ipproto = 0;
2400 bool offload = TRUE;
2401 int ctxd = txr->next_avail_desc;
2402 u16 vtag = 0;
2403
2404
2405 if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
2406 offload = FALSE;
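	/* Even without checksum offload we may still need a context descriptor to carry a VLAN tag, so don't bail out yet. */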
2407
2408
2409 tx_buffer = &txr->tx_buffers[ctxd];
2410 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2411
2412 /*
2413 ** In advanced descriptors the vlan tag must
2414 ** be placed into the descriptor itself.
2415 */
2416 if (mp->m_flags & M_VLANTAG) {
2417 vtag = htole16(mp->m_pkthdr.ether_vtag);
2418 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2419 } else if (offload == FALSE)
2420 return FALSE;
2421
2422 /*
2423 * Determine where frame payload starts.
2424 * Jump over vlan headers if already present,
2425 * helpful for QinQ too.
2426 */
2427 eh = mtod(mp, struct ether_vlan_header *);
2428 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2429 etype = ntohs(eh->evl_proto);
2430 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2431 } else {
2432 etype = ntohs(eh->evl_encap_proto);
2433 ehdrlen = ETHER_HDR_LEN;
2434 }
2435
2436 /* Set the ether header length */
2437 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2438
2439 switch (etype) {
2440 case ETHERTYPE_IP:
2441 ip = (struct ip *)(mp->m_data + ehdrlen);
2442 ip_hlen = ip->ip_hl << 2;
2443 if (mp->m_len < ehdrlen + ip_hlen)
2444 return (FALSE);
2445 ipproto = ip->ip_p;
2446 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2447 break;
2448 case ETHERTYPE_IPV6:
2449 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2450 ip_hlen = sizeof(struct ip6_hdr);
2451 if (mp->m_len < ehdrlen + ip_hlen)
2452 return (FALSE);
2453 ipproto = ip6->ip6_nxt;
2454 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
2455 break;
2456 default:
2457 offload = FALSE;
2458 break;
2459 }
2460
2461 vlan_macip_lens |= ip_hlen;
2462 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2463
2464 switch (ipproto) {
2465 case IPPROTO_TCP:
2466 if (mp->m_pkthdr.csum_flags & CSUM_TCP)
2467 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2468 break;
2469
2470 case IPPROTO_UDP:
2471 if (mp->m_pkthdr.csum_flags & CSUM_UDP)
2472 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
2473 break;
2474
2475#if __FreeBSD_version >= 800000
2476 case IPPROTO_SCTP:
2477 if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
2478 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2479 break;
2480#endif
2481 default:
2482 offload = FALSE;
2483 break;
2484 }
2485
2486 /* Now copy bits into descriptor */
2487 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2488 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2489 TXD->seqnum_seed = htole32(0);
2490 TXD->mss_l4len_idx = htole32(0);
2491
2492 tx_buffer->m_head = NULL;
2493 tx_buffer->eop_index = -1;
2494
2495 /* We've consumed the first desc, adjust counters */
2496 if (++ctxd == adapter->num_tx_desc)
2497 ctxd = 0;
2498 txr->next_avail_desc = ctxd;
2499 --txr->tx_avail;
2500
2501 return (offload);
2502}
2503
2504/**********************************************************************
2505 *
2506 * Setup work for hardware segmentation offload (TSO) on
2507 * adapters using advanced tx descriptors
2508 *
2509 **********************************************************************/
2510static bool
2511ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
2512{
2513 struct adapter *adapter = txr->adapter;
2514 struct ixgbe_adv_tx_context_desc *TXD;
2515 struct ixv_tx_buf *tx_buffer;
2516 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2517 u32 mss_l4len_idx = 0;
2518 u16 vtag = 0;
2519 int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
2520 struct ether_vlan_header *eh;
2521 struct ip *ip;
2522 struct tcphdr *th;
2523
2524
2525 /*
2526 * Determine where frame payload starts.
2527 * Jump over vlan headers if already present
2528 */
2529 eh = mtod(mp, struct ether_vlan_header *);
2530 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
2531 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2532 else
2533 ehdrlen = ETHER_HDR_LEN;
2534
2535 /* Ensure we have at least the IP+TCP header in the first mbuf. */
2536 if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
2537 return FALSE;
2538
2539 ctxd = txr->next_avail_desc;
2540 tx_buffer = &txr->tx_buffers[ctxd];
2541 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2542
2543 ip = (struct ip *)(mp->m_data + ehdrlen);
2544 if (ip->ip_p != IPPROTO_TCP)
2545 return FALSE; /* 0 */
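	/* Zero the IP checksum and seed the TCP checksum with the pseudo-header sum so the hardware can complete them during TSO. */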
2546 ip->ip_sum = 0;
2547 ip_hlen = ip->ip_hl << 2;
2548 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
2549 th->th_sum = in_pseudo(ip->ip_src.s_addr,
2550 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2551 tcp_hlen = th->th_off << 2;
2552 hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2553
2554 /* This is used in the transmit desc in encap */
2555 *paylen = mp->m_pkthdr.len - hdrlen;
2556
2557 /* VLAN MACLEN IPLEN */
2558 if (mp->m_flags & M_VLANTAG) {
2559 vtag = htole16(mp->m_pkthdr.ether_vtag);
2560 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2561 }
2562
2563 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2564 vlan_macip_lens |= ip_hlen;
2565 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2566
2567 /* ADV DTYPE TUCMD */
2568 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2569 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2570 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2571 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2572
2573
2574 /* MSS L4LEN IDX */
2575 mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
2576 mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
2577 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
2578
2579 TXD->seqnum_seed = htole32(0);
2580 tx_buffer->m_head = NULL;
2581 tx_buffer->eop_index = -1;
2582
2583 if (++ctxd == adapter->num_tx_desc)
2584 ctxd = 0;
2585
2586 txr->tx_avail--;
2587 txr->next_avail_desc = ctxd;
2588 return TRUE;
2589}
2590
2591
2592/**********************************************************************
2593 *
2594 * Examine each tx_buffer in the used queue. If the hardware is done
2595 * processing the packet then free associated resources. The
2596 * tx_buffer is put back on the free queue.
2597 *
2598 **********************************************************************/
2599static bool
2600ixv_txeof(struct tx_ring *txr)
2601{
2602 struct adapter *adapter = txr->adapter;
2603 struct ifnet *ifp = adapter->ifp;
2604 u32 first, last, done;
2605 struct ixv_tx_buf *tx_buffer;
2606 struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
2607
2608 mtx_assert(&txr->tx_mtx, MA_OWNED);
2609
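	/* If every descriptor is free there is nothing to clean. */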
2610 if (txr->tx_avail == adapter->num_tx_desc)
2611 return FALSE;
2612
2613 first = txr->next_to_clean;
2614 tx_buffer = &txr->tx_buffers[first];
2615 /* For cleanup we just use legacy struct */
2616 tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2617 last = tx_buffer->eop_index;
2618 if (last == -1)
2619 return FALSE;
2620 eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2621
2622 /*
2623 ** Get the index of the first descriptor
2624 ** BEYOND the EOP and call that 'done'.
2625	** This is done so the comparison in the
2626	** inner while loop below can be simple.
2627 */
2628 if (++last == adapter->num_tx_desc) last = 0;
2629 done = last;
2630
2631 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2632 BUS_DMASYNC_POSTREAD);
2633 /*
2634 ** Only the EOP descriptor of a packet now has the DD
2635	** bit set; this is what we look for...
2636 */
2637 while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
2638 /* We clean the range of the packet */
2639 while (first != done) {
2640 tx_desc->upper.data = 0;
2641 tx_desc->lower.data = 0;
2642 tx_desc->buffer_addr = 0;
2643 ++txr->tx_avail;
2644
2645 if (tx_buffer->m_head) {
2646 bus_dmamap_sync(txr->txtag,
2647 tx_buffer->map,
2648 BUS_DMASYNC_POSTWRITE);
2649 bus_dmamap_unload(txr->txtag,
2650 tx_buffer->map);
2651 m_freem(tx_buffer->m_head);
2652 tx_buffer->m_head = NULL;
2653 tx_buffer->map = NULL;
2654 }
2655 tx_buffer->eop_index = -1;
2656 txr->watchdog_time = ticks;
2657
2658 if (++first == adapter->num_tx_desc)
2659 first = 0;
2660
2661 tx_buffer = &txr->tx_buffers[first];
2662 tx_desc =
2663 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2664 }
2665 ++ifp->if_opackets;
2666 /* See if there is more work now */
2667 last = tx_buffer->eop_index;
2668 if (last != -1) {
2669 eop_desc =
2670 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2671 /* Get next done point */
2672 if (++last == adapter->num_tx_desc) last = 0;
2673 done = last;
2674 } else
2675 break;
2676 }
2677 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2678 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2679
2680 txr->next_to_clean = first;
2681
2682 /*
2683 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
2684 * it is OK to send packets. If there are no pending descriptors,
2685 * clear the timeout. Otherwise, if some descriptors have been freed,
2686 * restart the timeout.
2687 */
2688 if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
2689 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2690 if (txr->tx_avail == adapter->num_tx_desc) {
2691 txr->watchdog_check = FALSE;
2692 return FALSE;
2693 }
2694 }
2695
2696 return TRUE;
2697}
2698
2699/*********************************************************************
2700 *
2701 * Refresh mbuf buffers for RX descriptor rings
2702 *  - now keeps its own state so discards due to resource
2703 *    exhaustion are unnecessary; if an mbuf cannot be obtained
2704 *    it just returns, keeping its placeholder, so it can simply
2705 *    be called again later to retry.
2706 *
2707 **********************************************************************/
2708static void
2709ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
2710{
2711 struct adapter *adapter = rxr->adapter;
2712 bus_dma_segment_t hseg[1];
2713 bus_dma_segment_t pseg[1];
2714 struct ixv_rx_buf *rxbuf;
2715 struct mbuf *mh, *mp;
2716 int i, j, nsegs, error;
2717 bool refreshed = FALSE;
2718
2719 i = j = rxr->next_to_refresh;
2720 /* Get the control variable, one beyond refresh point */
2721 if (++j == adapter->num_rx_desc)
2722 j = 0;
2723 while (j != limit) {
2724 rxbuf = &rxr->rx_buffers[i];
2725 if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
2726 mh = m_gethdr(M_DONTWAIT, MT_DATA);
2727 if (mh == NULL)
2728 goto update;
2729 mh->m_pkthdr.len = mh->m_len = MHLEN;
2730 mh->m_len = MHLEN;
2731 mh->m_flags |= M_PKTHDR;
2732 m_adj(mh, ETHER_ALIGN);
2733 /* Get the memory mapping */
2734 error = bus_dmamap_load_mbuf_sg(rxr->htag,
2735 rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
2736 if (error != 0) {
2737 printf("GET BUF: dmamap load"
2738 " failure - %d\n", error);
2739 m_free(mh);
2740 goto update;
2741 }
2742 rxbuf->m_head = mh;
2743 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
2744 BUS_DMASYNC_PREREAD);
2745 rxr->rx_base[i].read.hdr_addr =
2746 htole64(hseg[0].ds_addr);
2747 }
2748
2749 if (rxbuf->m_pack == NULL) {
2750 mp = m_getjcl(M_DONTWAIT, MT_DATA,
2751 M_PKTHDR, adapter->rx_mbuf_sz);
2752 if (mp == NULL)
2753 goto update;
2754 } else
2755 mp = rxbuf->m_pack;
2756
2757 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
2758 /* Get the memory mapping */
2759 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
2760 rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
2761 if (error != 0) {
2762 printf("GET BUF: dmamap load"
2763 " failure - %d\n", error);
2764 m_free(mp);
2765 rxbuf->m_pack = NULL;
2766 goto update;
2767 }
2768 rxbuf->m_pack = mp;
2769 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
2770 BUS_DMASYNC_PREREAD);
2771 rxr->rx_base[i].read.pkt_addr =
2772 htole64(pseg[0].ds_addr);
2773
2774 refreshed = TRUE;
2775 rxr->next_to_refresh = i = j;
2776 /* Calculate next index */
2777 if (++j == adapter->num_rx_desc)
2778 j = 0;
2779 }
2780update:
2781 if (refreshed) /* update tail index */
2782 IXGBE_WRITE_REG(&adapter->hw,
2783 IXGBE_VFRDT(rxr->me), rxr->next_to_refresh);
2784 return;
2785}
2786
2787/*********************************************************************
2788 *
2789 * Allocate memory for rx_buffer structures. Since we use one
2790 * rx_buffer per received packet, the maximum number of rx_buffers
2791 * that we'll need is equal to the number of receive descriptors
2792 * that we've allocated.
2793 *
2794 **********************************************************************/
2795static int
2796ixv_allocate_receive_buffers(struct rx_ring *rxr)
2797{
2798 struct adapter *adapter = rxr->adapter;
2799 device_t dev = adapter->dev;
2800 struct ixv_rx_buf *rxbuf;
2801 int i, bsize, error;
2802
2803 bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
2804 if (!(rxr->rx_buffers =
2805 (struct ixv_rx_buf *) malloc(bsize,
2806 M_DEVBUF, M_NOWAIT | M_ZERO))) {
2807 device_printf(dev, "Unable to allocate rx_buffer memory\n");
2808 error = ENOMEM;
2809 goto fail;
2810 }
2811
2812 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2813 1, 0, /* alignment, bounds */
2814 BUS_SPACE_MAXADDR, /* lowaddr */
2815 BUS_SPACE_MAXADDR, /* highaddr */
2816 NULL, NULL, /* filter, filterarg */
2817 MSIZE, /* maxsize */
2818 1, /* nsegments */
2819 MSIZE, /* maxsegsize */
2820 0, /* flags */
2821 NULL, /* lockfunc */
2822 NULL, /* lockfuncarg */
2823 &rxr->htag))) {
2824 device_printf(dev, "Unable to create RX DMA tag\n");
2825 goto fail;
2826 }
2827
2828 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2829 1, 0, /* alignment, bounds */
2830 BUS_SPACE_MAXADDR, /* lowaddr */
2831 BUS_SPACE_MAXADDR, /* highaddr */
2832 NULL, NULL, /* filter, filterarg */
2833 MJUMPAGESIZE, /* maxsize */
2834 1, /* nsegments */
2835 MJUMPAGESIZE, /* maxsegsize */
2836 0, /* flags */
2837 NULL, /* lockfunc */
2838 NULL, /* lockfuncarg */
2839 &rxr->ptag))) {
2840 device_printf(dev, "Unable to create RX DMA tag\n");
2841 goto fail;
2842 }
2843
2844 for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
2845 rxbuf = &rxr->rx_buffers[i];
2846 error = bus_dmamap_create(rxr->htag,
2847 BUS_DMA_NOWAIT, &rxbuf->hmap);
2848 if (error) {
2849 device_printf(dev, "Unable to create RX head map\n");
2850 goto fail;
2851 }
2852 error = bus_dmamap_create(rxr->ptag,
2853 BUS_DMA_NOWAIT, &rxbuf->pmap);
2854 if (error) {
2855 device_printf(dev, "Unable to create RX pkt map\n");
2856 goto fail;
2857 }
2858 }
2859
2860 return (0);
2861
2862fail:
2863 /* Frees all, but can handle partial completion */
2864 ixv_free_receive_structures(adapter);
2865 return (error);
2866}
2867
2868static void
2869ixv_free_receive_ring(struct rx_ring *rxr)
2870{
2871 struct adapter *adapter;
2872 struct ixv_rx_buf *rxbuf;
2873 int i;
2874
2875 adapter = rxr->adapter;
2876 for (i = 0; i < adapter->num_rx_desc; i++) {
2877 rxbuf = &rxr->rx_buffers[i];
2878 if (rxbuf->m_head != NULL) {
2879 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
2880 BUS_DMASYNC_POSTREAD);
2881 bus_dmamap_unload(rxr->htag, rxbuf->hmap);
2882 rxbuf->m_head->m_flags |= M_PKTHDR;
2883 m_freem(rxbuf->m_head);
2884 }
2885 if (rxbuf->m_pack != NULL) {
2886 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
2887 BUS_DMASYNC_POSTREAD);
2888 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
2889 rxbuf->m_pack->m_flags |= M_PKTHDR;
2890 m_freem(rxbuf->m_pack);
2891 }
2892 rxbuf->m_head = NULL;
2893 rxbuf->m_pack = NULL;
2894 }
2895}
2896
2897
2898/*********************************************************************
2899 *
2900 * Initialize a receive ring and its buffers.
2901 *
2902 **********************************************************************/
2903static int
2904ixv_setup_receive_ring(struct rx_ring *rxr)
2905{
2906 struct adapter *adapter;
2907 struct ifnet *ifp;
2908 device_t dev;
2909 struct ixv_rx_buf *rxbuf;
2910 bus_dma_segment_t pseg[1], hseg[1];
2911 struct lro_ctrl *lro = &rxr->lro;
2912 int rsize, nsegs, error = 0;
2913
2914 adapter = rxr->adapter;
2915 ifp = adapter->ifp;
2916 dev = adapter->dev;
2917
2918 /* Clear the ring contents */
2919 IXV_RX_LOCK(rxr);
2920 rsize = roundup2(adapter->num_rx_desc *
2921 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
2922 bzero((void *)rxr->rx_base, rsize);
2923
2924 /* Free current RX buffer structs and their mbufs */
2925 ixv_free_receive_ring(rxr);
2926
2927 /* Configure header split? */
2928 if (ixv_header_split)
2929 rxr->hdr_split = TRUE;
2930
2931 /* Now replenish the mbufs */
2932 for (int j = 0; j != adapter->num_rx_desc; ++j) {
2933 struct mbuf *mh, *mp;
2934
2935 rxbuf = &rxr->rx_buffers[j];
2936 /*
2937		** Don't allocate mbufs if we are not
2938		** doing header split; it's wasteful.
2939 */
2940 if (rxr->hdr_split == FALSE)
2941 goto skip_head;
2942
2943 /* First the header */
2944 rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
2945 if (rxbuf->m_head == NULL) {
2946 error = ENOBUFS;
2947 goto fail;
2948 }
2949 m_adj(rxbuf->m_head, ETHER_ALIGN);
2950 mh = rxbuf->m_head;
2951 mh->m_len = mh->m_pkthdr.len = MHLEN;
2952 mh->m_flags |= M_PKTHDR;
2953 /* Get the memory mapping */
2954 error = bus_dmamap_load_mbuf_sg(rxr->htag,
2955 rxbuf->hmap, rxbuf->m_head, hseg,
2956 &nsegs, BUS_DMA_NOWAIT);
2957 if (error != 0) /* Nothing elegant to do here */
2958 goto fail;
2959 bus_dmamap_sync(rxr->htag,
2960 rxbuf->hmap, BUS_DMASYNC_PREREAD);
2961 /* Update descriptor */
2962 rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
2963
2964skip_head:
2965 /* Now the payload cluster */
2966 rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
2967 M_PKTHDR, adapter->rx_mbuf_sz);
2968 if (rxbuf->m_pack == NULL) {
2969 error = ENOBUFS;
2970 goto fail;
2971 }
2972 mp = rxbuf->m_pack;
2973 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
2974 /* Get the memory mapping */
2975 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
2976 rxbuf->pmap, mp, pseg,
2977 &nsegs, BUS_DMA_NOWAIT);
2978 if (error != 0)
2979 goto fail;
2980 bus_dmamap_sync(rxr->ptag,
2981 rxbuf->pmap, BUS_DMASYNC_PREREAD);
2982 /* Update descriptor */
2983 rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
2984 }
2985
2986
2987 /* Setup our descriptor indices */
2988 rxr->next_to_check = 0;
2989 rxr->next_to_refresh = 0;
2990 rxr->lro_enabled = FALSE;
2991 rxr->rx_split_packets = 0;
2992 rxr->rx_bytes = 0;
2993 rxr->discard = FALSE;
2994
2995 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2996 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2997
2998 /*
2999 ** Now set up the LRO interface:
3000 */
3001 if (ifp->if_capenable & IFCAP_LRO) {
3002 int err = tcp_lro_init(lro);
3003 if (err) {
3004 device_printf(dev, "LRO Initialization failed!\n");
3005 goto fail;
3006 }
3007 INIT_DEBUGOUT("RX Soft LRO Initialized\n");
3008 rxr->lro_enabled = TRUE;
3009 lro->ifp = adapter->ifp;
3010 }
3011
3012 IXV_RX_UNLOCK(rxr);
3013 return (0);
3014
3015fail:
3016 ixv_free_receive_ring(rxr);
3017 IXV_RX_UNLOCK(rxr);
3018 return (error);
3019}
3020
3021/*********************************************************************
3022 *
3023 * Initialize all receive rings.
3024 *
3025 **********************************************************************/
3026static int
3027ixv_setup_receive_structures(struct adapter *adapter)
3028{
3029 struct rx_ring *rxr = adapter->rx_rings;
3030 int j;
3031
3032 for (j = 0; j < adapter->num_queues; j++, rxr++)
3033 if (ixv_setup_receive_ring(rxr))
3034 goto fail;
3035
3036 return (0);
3037fail:
3038 /*
3039	 * Free RX buffers allocated so far; we only need to handle
3040	 * the rings that completed, since the failing case will have
3041	 * cleaned up after itself. Ring 'j' failed, so it is the terminus.
3042 */
3043 for (int i = 0; i < j; ++i) {
3044 rxr = &adapter->rx_rings[i];
3045 ixv_free_receive_ring(rxr);
3046 }
3047
3048 return (ENOBUFS);
3049}
3050
3051/*********************************************************************
3052 *
3053 * Setup receive registers and features.
3054 *
3055 **********************************************************************/
3056#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3057
3058static void
3059ixv_initialize_receive_units(struct adapter *adapter)
3060{
3061 struct rx_ring *rxr = adapter->rx_rings;
3062 struct ixgbe_hw *hw = &adapter->hw;
3063 struct ifnet *ifp = adapter->ifp;
3064 u32 bufsz, fctrl, rxcsum, hlreg;
3065
3066
3067 /* Enable broadcasts */
3068 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3069 fctrl |= IXGBE_FCTRL_BAM;
3070 fctrl |= IXGBE_FCTRL_DPF;
3071 fctrl |= IXGBE_FCTRL_PMCF;
3072 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3073
3074 /* Set for Jumbo Frames? */
3075 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
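	/* bufsz is in the 1KB units the SRRCTL BSIZEPKT field expects: 4KB clusters for jumbo MTUs, 2KB otherwise. */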
3076 if (ifp->if_mtu > ETHERMTU) {
3077 hlreg |= IXGBE_HLREG0_JUMBOEN;
3078 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3079 } else {
3080 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3081 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3082 }
3083 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
3084
3085 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3086 u64 rdba = rxr->rxdma.dma_paddr;
3087 u32 reg, rxdctl;
3088
3089 /* Do the queue enabling first */
3090 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
3091 rxdctl |= IXGBE_RXDCTL_ENABLE;
3092 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
3093 for (int k = 0; k < 10; k++) {
3094 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
3095 IXGBE_RXDCTL_ENABLE)
3096 break;
3097 else
3098 msec_delay(1);
3099 }
3100 wmb();
3101
3102 /* Setup the Base and Length of the Rx Descriptor Ring */
3103 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
3104 (rdba & 0x00000000ffffffffULL));
3105 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
3106 (rdba >> 32));
3107 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
3108 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3109
3110 /* Set up the SRRCTL register */
3111 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
3112 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3113 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3114 reg |= bufsz;
3115 if (rxr->hdr_split) {
3116 /* Use a standard mbuf for the header */
3117 reg |= ((IXV_RX_HDR <<
3118 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
3119 & IXGBE_SRRCTL_BSIZEHDR_MASK);
3120 reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3121 } else
3122 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3123 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
3124
3125 /* Setup the HW Rx Head and Tail Descriptor Pointers */
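		/* The tail starts at the last descriptor, so the ring begins fully stocked with buffers. */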
3126 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
3127 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
3128 adapter->num_rx_desc - 1);
3129 }
3130
3131 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3132
3133 if (ifp->if_capenable & IFCAP_RXCSUM)
3134 rxcsum |= IXGBE_RXCSUM_PCSD;
3135
3136 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3137 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3138
3139 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3140
3141 return;
3142}
3143
3144/*********************************************************************
3145 *
3146 * Free all receive rings.
3147 *
3148 **********************************************************************/
3149static void
3150ixv_free_receive_structures(struct adapter *adapter)
3151{
3152 struct rx_ring *rxr = adapter->rx_rings;
3153
3154 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3155 struct lro_ctrl *lro = &rxr->lro;
3156 ixv_free_receive_buffers(rxr);
3157 /* Free LRO memory */
3158 tcp_lro_free(lro);
3159 /* Free the ring memory as well */
3160 ixv_dma_free(adapter, &rxr->rxdma);
3161 }
3162
3163 free(adapter->rx_rings, M_DEVBUF);
3164}
3165
3166
3167/*********************************************************************
3168 *
3169 * Free receive ring data structures
3170 *
3171 **********************************************************************/
3172static void
3173ixv_free_receive_buffers(struct rx_ring *rxr)
3174{
3175 struct adapter *adapter = rxr->adapter;
3176 struct ixv_rx_buf *rxbuf;
3177
3178 INIT_DEBUGOUT("free_receive_structures: begin");
3179
3180 /* Cleanup any existing buffers */
3181 if (rxr->rx_buffers != NULL) {
3182 for (int i = 0; i < adapter->num_rx_desc; i++) {
3183 rxbuf = &rxr->rx_buffers[i];
3184 if (rxbuf->m_head != NULL) {
3185 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
3186 BUS_DMASYNC_POSTREAD);
3187 bus_dmamap_unload(rxr->htag, rxbuf->hmap);
3188 rxbuf->m_head->m_flags |= M_PKTHDR;
3189 m_freem(rxbuf->m_head);
3190 }
3191 if (rxbuf->m_pack != NULL) {
3192 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
3193 BUS_DMASYNC_POSTREAD);
3194 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
3195 rxbuf->m_pack->m_flags |= M_PKTHDR;
3196 m_freem(rxbuf->m_pack);
3197 }
3198 rxbuf->m_head = NULL;
3199 rxbuf->m_pack = NULL;
3200 if (rxbuf->hmap != NULL) {
3201 bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
3202 rxbuf->hmap = NULL;
3203 }
3204 if (rxbuf->pmap != NULL) {
3205 bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
3206 rxbuf->pmap = NULL;
3207 }
3208 }
3209 if (rxr->rx_buffers != NULL) {
3210 free(rxr->rx_buffers, M_DEVBUF);
3211 rxr->rx_buffers = NULL;
3212 }
3213 }
3214
3215 if (rxr->htag != NULL) {
3216 bus_dma_tag_destroy(rxr->htag);
3217 rxr->htag = NULL;
3218 }
3219 if (rxr->ptag != NULL) {
3220 bus_dma_tag_destroy(rxr->ptag);
3221 rxr->ptag = NULL;
3222 }
3223
3224 return;
3225}
3226
3227static __inline void
3228ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
3229{
3230
3231 /*
3232	 * At the moment LRO is only for IPv4/TCP packets whose TCP checksum
3233	 * has been computed by hardware. The packet also must not have a
3234	 * VLAN tag in the ethernet header.
3235 */
3236 if (rxr->lro_enabled &&
3237 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
3238 (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
3239 (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
3240 (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
3241 (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
3242 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
3243 /*
3244 * Send to the stack if:
3245 ** - LRO not enabled, or
3246 ** - no LRO resources, or
3247 ** - lro enqueue fails
3248 */
3249 if (rxr->lro.lro_cnt != 0)
3250 if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
3251 return;
3252 }
3253 IXV_RX_UNLOCK(rxr);
3254 (*ifp->if_input)(ifp, m);
3255 IXV_RX_LOCK(rxr);
3256}
3257
3258static __inline void
3259ixv_rx_discard(struct rx_ring *rxr, int i)
3260{
3261 struct ixv_rx_buf *rbuf;
3262
3263 rbuf = &rxr->rx_buffers[i];
3264
3265 if (rbuf->fmp != NULL) {/* Partial chain ? */
3266 rbuf->fmp->m_flags |= M_PKTHDR;
3267 m_freem(rbuf->fmp);
3268 rbuf->fmp = NULL;
3269 }
3270
3271 /*
3272 ** With advanced descriptors the writeback
3273	** clobbers the buffer addresses, so it's easier
3274 ** to just free the existing mbufs and take
3275 ** the normal refresh path to get new buffers
3276 ** and mapping.
3277 */
3278 if (rbuf->m_head) {
3279 m_free(rbuf->m_head);
3280 rbuf->m_head = NULL;
3281 }
3282
3283 if (rbuf->m_pack) {
3284 m_free(rbuf->m_pack);
3285 rbuf->m_pack = NULL;
3286 }
3287
3288 return;
3289}
3290
3291
3292/*********************************************************************
3293 *
3294 * This routine executes in interrupt context. It replenishes
3295 * the mbufs in the descriptor ring and sends data which has been
3296 * DMA'd into host memory up to the upper layer.
3297 *
3298 * We loop at most count times if count is > 0, or until done if
3299 * count < 0.
3300 *
3301 * Return TRUE for more work, FALSE for all clean.
3302 *********************************************************************/
3303static bool
3304ixv_rxeof(struct ix_queue *que, int count)
3305{
3306 struct adapter *adapter = que->adapter;
3307 struct rx_ring *rxr = que->rxr;
3308 struct ifnet *ifp = adapter->ifp;
3309 struct lro_ctrl *lro = &rxr->lro;
3310 struct lro_entry *queued;
3311 int i, nextp, processed = 0;
3312 u32 staterr = 0;
3313 union ixgbe_adv_rx_desc *cur;
3314 struct ixv_rx_buf *rbuf, *nbuf;
3315
3316 IXV_RX_LOCK(rxr);
3317
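	/* Main RX loop: process completed descriptors until the count budget runs out or none are ready. */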
3318 for (i = rxr->next_to_check; count != 0;) {
3319 struct mbuf *sendmp, *mh, *mp;
3320 u32 rsc, ptype;
3321 u16 hlen, plen, hdr, vtag;
3322 bool eop;
3323
3324 /* Sync the ring. */
3325 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3326 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3327
3328 cur = &rxr->rx_base[i];
3329 staterr = le32toh(cur->wb.upper.status_error);
3330
3331 if ((staterr & IXGBE_RXD_STAT_DD) == 0)
3332 break;
3333 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
3334 break;
3335
3336 count--;
3337 sendmp = NULL;
3338 nbuf = NULL;
3339 rsc = 0;
3340 cur->wb.upper.status_error = 0;
3341 rbuf = &rxr->rx_buffers[i];
3342 mh = rbuf->m_head;
3343 mp = rbuf->m_pack;
3344
3345 plen = le16toh(cur->wb.upper.length);
3346 ptype = le32toh(cur->wb.lower.lo_dword.data) &
3347 IXGBE_RXDADV_PKTTYPE_MASK;
3348 hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
3349 vtag = le16toh(cur->wb.upper.vlan);
3350 eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
3351
3352 /* Make sure all parts of a bad packet are discarded */
3353 if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
3354 (rxr->discard)) {
3355 ifp->if_ierrors++;
3356 rxr->rx_discarded++;
3357 if (!eop)
3358 rxr->discard = TRUE;
3359 else
3360 rxr->discard = FALSE;
3361 ixv_rx_discard(rxr, i);
3362 goto next_desc;
3363 }
3364
3365 if (!eop) {
3366 nextp = i + 1;
3367 if (nextp == adapter->num_rx_desc)
3368 nextp = 0;
3369 nbuf = &rxr->rx_buffers[nextp];
3370 prefetch(nbuf);
3371 }
3372 /*
3373 ** The header mbuf is ONLY used when header
3374		** split is enabled; otherwise we get normal
3375		** behavior, i.e., both header and payload
3376		** are DMA'd into the payload buffer.
3377 **
3378 ** Rather than using the fmp/lmp global pointers
3379 ** we now keep the head of a packet chain in the
3380 ** buffer struct and pass this along from one
3381 ** descriptor to the next, until we get EOP.
3382 */
3383 if (rxr->hdr_split && (rbuf->fmp == NULL)) {
3384 /* This must be an initial descriptor */
3385 hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
3386 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
3387 if (hlen > IXV_RX_HDR)
3388 hlen = IXV_RX_HDR;
3389 mh->m_len = hlen;
3390 mh->m_flags |= M_PKTHDR;
3391 mh->m_next = NULL;
3392 mh->m_pkthdr.len = mh->m_len;
3393 /* Null buf pointer so it is refreshed */
3394 rbuf->m_head = NULL;
3395 /*
3396			** Check the payload length; this
3397			** could be zero if it is a small
3398			** packet.
3399 */
3400 if (plen > 0) {
3401 mp->m_len = plen;
3402 mp->m_next = NULL;
3403 mp->m_flags &= ~M_PKTHDR;
3404 mh->m_next = mp;
3405 mh->m_pkthdr.len += mp->m_len;
3406 /* Null buf pointer so it is refreshed */
3407 rbuf->m_pack = NULL;
3408 rxr->rx_split_packets++;
3409 }
3410 /*
3411			** Now create the forward
3412			** chain so that when the packet completes
3413			** we won't have to.
3414 */
3415 if (eop == 0) {
3416 /* stash the chain head */
3417 nbuf->fmp = mh;
3418 /* Make forward chain */
3419 if (plen)
3420 mp->m_next = nbuf->m_pack;
3421 else
3422 mh->m_next = nbuf->m_pack;
3423 } else {
3424 /* Singlet, prepare to send */
3425 sendmp = mh;
3426 if ((adapter->num_vlans) &&
3427 (staterr & IXGBE_RXD_STAT_VP)) {
3428 sendmp->m_pkthdr.ether_vtag = vtag;
3429 sendmp->m_flags |= M_VLANTAG;
3430 }
3431 }
3432 } else {
3433 /*
3434 ** Either no header split, or a
3435 ** secondary piece of a fragmented
3436 ** split packet.
3437 */
3438 mp->m_len = plen;
3439 /*
3440			** See if there is a stored head; if so,
3441			** this descriptor continues that packet
3442 */
3443 sendmp = rbuf->fmp;
3444 rbuf->m_pack = rbuf->fmp = NULL;
3445
3446 if (sendmp != NULL) /* secondary frag */
3447 sendmp->m_pkthdr.len += mp->m_len;
3448 else {
3449 /* first desc of a non-ps chain */
3450 sendmp = mp;
3451 sendmp->m_flags |= M_PKTHDR;
3452 sendmp->m_pkthdr.len = mp->m_len;
3453 if (staterr & IXGBE_RXD_STAT_VP) {
3454 sendmp->m_pkthdr.ether_vtag = vtag;
3455 sendmp->m_flags |= M_VLANTAG;
3456 }
3457 }
3458 /* Pass the head pointer on */
3459 if (eop == 0) {
3460 nbuf->fmp = sendmp;
3461 sendmp = NULL;
3462 mp->m_next = nbuf->m_pack;
3463 }
3464 }
3465 ++processed;
3466 /* Sending this frame? */
3467 if (eop) {
3468 sendmp->m_pkthdr.rcvif = ifp;
3469 ifp->if_ipackets++;
3470 rxr->rx_packets++;
3471 /* capture data for AIM */
3472 rxr->bytes += sendmp->m_pkthdr.len;
3473 rxr->rx_bytes += sendmp->m_pkthdr.len;
3474 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3475 ixv_rx_checksum(staterr, sendmp, ptype);
3476#if __FreeBSD_version >= 800000
3477 sendmp->m_pkthdr.flowid = que->msix;
3478 sendmp->m_flags |= M_FLOWID;
3479#endif
3480 }
3481next_desc:
3482 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3483 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3484
3485 /* Advance our pointers to the next descriptor. */
3486 if (++i == adapter->num_rx_desc)
3487 i = 0;
3488
3489 /* Now send to the stack or do LRO */
3490 if (sendmp != NULL)
3491 ixv_rx_input(rxr, ifp, sendmp, ptype);
3492
3493 /* Every 8 descriptors we go to refresh mbufs */
3494 if (processed == 8) {
3495 ixv_refresh_mbufs(rxr, i);
3496 processed = 0;
3497 }
3498 }
3499
3500 /* Refresh any remaining buf structs */
3501 if (ixv_rx_unrefreshed(rxr))
3502 ixv_refresh_mbufs(rxr, i);
3503
3504 rxr->next_to_check = i;
3505
3506 /*
3507 * Flush any outstanding LRO work
3508 */
3509 while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
3510 SLIST_REMOVE_HEAD(&lro->lro_active, next);
3511 tcp_lro_flush(lro, queued);
3512 }
3513
3514 IXV_RX_UNLOCK(rxr);
3515
3516 /*
3517	** Do we still have cleaning to do?
3518	** If so, schedule another interrupt.
3519 */
3520 if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
3521 ixv_rearm_queues(adapter, (u64)(1 << que->msix));
3522 return (TRUE);
3523 }
3524
3525 return (FALSE);
3526}
3527
3528
3529/*********************************************************************
3530 *
3531 * Verify that the hardware indicated that the checksum is valid.
3532 * Inform the stack about the status of the checksum so that the
3533 * stack doesn't spend time verifying it again.
3534 *
3535 *********************************************************************/
3536static void
3537ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
3538{
3539 u16 status = (u16) staterr;
3540 u8 errors = (u8) (staterr >> 24);
3541 bool sctp = FALSE;
3542
3543 if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
3544 (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
3545 sctp = TRUE;
3546
3547 if (status & IXGBE_RXD_STAT_IPCS) {
3548 if (!(errors & IXGBE_RXD_ERR_IPE)) {
3549 /* IP Checksum Good */
3550 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3551 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3552
3553 } else
3554 mp->m_pkthdr.csum_flags = 0;
3555 }
3556 if (status & IXGBE_RXD_STAT_L4CS) {
3557 u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3558#if __FreeBSD_version >= 800000
3559 if (sctp)
3560 type = CSUM_SCTP_VALID;
3561#endif
3562 if (!(errors & IXGBE_RXD_ERR_TCPE)) {
3563 mp->m_pkthdr.csum_flags |= type;
3564 if (!sctp)
3565 mp->m_pkthdr.csum_data = htons(0xffff);
3566 }
3567 }
3568 return;
3569}
3570
3571static void
3572ixv_setup_vlan_support(struct adapter *adapter)
3573{
3574 struct ixgbe_hw *hw = &adapter->hw;
3575 u32 ctrl, vid, vfta, retry;
3576
3577
3578 /*
3579	** We get here through init_locked, meaning
3580	** a soft reset; that has already cleared
3581	** the VFTA and other state, so if no
3582	** VLANs have been registered, do nothing.
3583 */
3584 if (adapter->num_vlans == 0)
3585 return;
3586
3587	/* Enable VLAN stripping (VME) on the queues */
3588 for (int i = 0; i < adapter->num_queues; i++) {
3589 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
3590 ctrl |= IXGBE_RXDCTL_VME;
3591 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
3592 }
3593
3594 /*
3595	** A soft reset zeroes out the VFTA, so
3596 ** we need to repopulate it now.
3597 */
3598 for (int i = 0; i < VFTA_SIZE; i++) {
3599 if (ixv_shadow_vfta[i] == 0)
3600 continue;
3601 vfta = ixv_shadow_vfta[i];
3602 /*
3603		** Reconstruct the VLAN IDs
3604		** based on the bits set in each
3605		** of the array entries.
3606 */
3607 for ( int j = 0; j < 32; j++) {
3608 retry = 0;
3609 if ((vfta & (1 << j)) == 0)
3610 continue;
3611 vid = (i * 32) + j;
3612 /* Call the shared code mailbox routine */
3613 while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
3614 if (++retry > 5)
3615 break;
3616 }
3617 }
3618 }
3619}
3620
3621/*
3622** This routine is run via a VLAN config EVENT;
3623** it enables us to use the HW Filter table since
3624** we can get the VLAN ID.  This just creates the
3625** entry in the soft version of the VFTA; init will
3626** repopulate the real table.
3627*/
3628static void
3629ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3630{
3631 struct adapter *adapter = ifp->if_softc;
3632 u16 index, bit;
3633
3634 if (ifp->if_softc != arg) /* Not our event */
3635 return;
3636
3637 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3638 return;
3639
3640 IXV_CORE_LOCK(adapter);
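	/*
	** The shadow VFTA is an array of 32-bit words: bits 5 and up
	** of the tag select the word, the low 5 bits select the bit
	** within it.  For example, VLAN 100 sets bit 4 of word 3.
	*/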
3641 index = (vtag >> 5) & 0x7F;
3642 bit = vtag & 0x1F;
3643 ixv_shadow_vfta[index] |= (1 << bit);
3644 ++adapter->num_vlans;
3645 /* Re-init to load the changes */
3646 ixv_init_locked(adapter);
3647 IXV_CORE_UNLOCK(adapter);
3648}
3649
3650/*
3651** This routine is run via a VLAN
3652** unconfig EVENT; it removes our entry
3653** from the soft VFTA.
3654*/
3655static void
3656ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3657{
3658 struct adapter *adapter = ifp->if_softc;
3659 u16 index, bit;
3660
3661 if (ifp->if_softc != arg)
3662 return;
3663
3664 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3665 return;
3666
3667 IXV_CORE_LOCK(adapter);
3668 index = (vtag >> 5) & 0x7F;
3669 bit = vtag & 0x1F;
3670 ixv_shadow_vfta[index] &= ~(1 << bit);
3671 --adapter->num_vlans;
3672 /* Re-init to load the changes */
3673 ixv_init_locked(adapter);
3674 IXV_CORE_UNLOCK(adapter);
3675}
3676
3677static void
3678ixv_enable_intr(struct adapter *adapter)
3679{
3680 struct ixgbe_hw *hw = &adapter->hw;
3681 struct ix_queue *que = adapter->queues;
3682 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3683
3684
3685 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
3686
3687 mask = IXGBE_EIMS_ENABLE_MASK;
3688 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3689 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
3690
3691 for (int i = 0; i < adapter->num_queues; i++, que++)
3692 ixv_enable_queue(adapter, que->msix);
3693
3694 IXGBE_WRITE_FLUSH(hw);
3695
3696 return;
3697}
3698
3699static void
3700ixv_disable_intr(struct adapter *adapter)
3701{
3702 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
3703 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
3704 IXGBE_WRITE_FLUSH(&adapter->hw);
3705 return;
3706}
3707
3708/*
3709** Setup the correct IVAR register for a particular MSIX interrupt
3710** - entry is the register array entry
3711** - vector is the MSIX vector for this queue
3712** - type is RX/TX/MISC
3713*/
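/*
** As the code below implies, each VTIVAR register covers two queue
** entries: (entry >> 1) selects the register and (entry & 1) the
** 16-bit half, with the RX vector in bits [7:0] of that half and
** the TX vector in bits [15:8].  For example, entry 1/type TX lands
** at bit offset (16 * 1) + (8 * 1) = 24 of VTIVAR(0).
*/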
3714static void
3715ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3716{
3717 struct ixgbe_hw *hw = &adapter->hw;
3718 u32 ivar, index;
3719
3720 vector |= IXGBE_IVAR_ALLOC_VAL;
3721
3722 if (type == -1) { /* MISC IVAR */
3723 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
3724 ivar &= ~0xFF;
3725 ivar |= vector;
3726 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
3727 } else { /* RX/TX IVARS */
3728 index = (16 * (entry & 1)) + (8 * type);
3729 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
3730 ivar &= ~(0xFF << index);
3731 ivar |= (vector << index);
3732 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
3733 }
3734}
3735
3736static void
3737ixv_configure_ivars(struct adapter *adapter)
3738{
3739 struct ix_queue *que = adapter->queues;
3740
3741 for (int i = 0; i < adapter->num_queues; i++, que++) {
3742 /* First the RX queue entry */
3743 ixv_set_ivar(adapter, i, que->msix, 0);
3744 /* ... and the TX */
3745 ixv_set_ivar(adapter, i, que->msix, 1);
3746 /* Set an initial value in EITR */
3747 IXGBE_WRITE_REG(&adapter->hw,
3748 IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
3749 }
3750
3751 /* For the Link interrupt */
3752 ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
3753}
3754
3755
3756/*
3757** Tasklet handler for MSIX MBX interrupts
3758** - runs outside interrupt context since it might sleep
3759*/
3760static void
3761ixv_handle_mbx(void *context, int pending)
3762{
3763 struct adapter *adapter = context;
3764
3765 ixgbe_check_link(&adapter->hw,
3766 &adapter->link_speed, &adapter->link_up, 0);
3767 ixv_update_link_status(adapter);
3768}
3769
3770/*
3771** The VF stats registers never have a truly virgin
3772** starting point, so this routine tries to make an
3773** artificial one, marking ground zero at attach time,
3774** as it were.
3775*/
3776static void
3777ixv_save_stats(struct adapter *adapter)
3778{
3779 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
3780 adapter->stats.saved_reset_vfgprc +=
3781 adapter->stats.vfgprc - adapter->stats.base_vfgprc;
3782 adapter->stats.saved_reset_vfgptc +=
3783 adapter->stats.vfgptc - adapter->stats.base_vfgptc;
3784 adapter->stats.saved_reset_vfgorc +=
3785 adapter->stats.vfgorc - adapter->stats.base_vfgorc;
3786 adapter->stats.saved_reset_vfgotc +=
3787 adapter->stats.vfgotc - adapter->stats.base_vfgotc;
3788 adapter->stats.saved_reset_vfmprc +=
3789 adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3790 }
3791}
3792
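/*
** Snapshot the current hardware counters; these become both the
** 'last seen' and the 'base' values that ixv_update_stats() and
** ixv_save_stats() work from.
*/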
3793static void
3794ixv_init_stats(struct adapter *adapter)
3795{
3796 struct ixgbe_hw *hw = &adapter->hw;
3797
3798 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
3799 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
3800 adapter->stats.last_vfgorc |=
3801 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
3802
3803 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
3804 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
3805 adapter->stats.last_vfgotc |=
3806 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
3807
3808 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
3809
3810 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
3811 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
3812 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
3813 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
3814 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
3815}
3816
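/*
** The two macros below maintain 64-bit running counters from the
** hardware's 32-bit and 36-bit registers: when the current reading is
** lower than the previous one the register is assumed to have wrapped,
** so 2^32 (or 2^36) is added to the saved upper bits before the low
** bits are replaced with the current reading.
*/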
3817#define UPDATE_STAT_32(reg, last, count) \
3818{ \
3819 u32 current = IXGBE_READ_REG(hw, reg); \
3820 if (current < last) \
3821 count += 0x100000000LL; \
3822 last = current; \
3823 count &= 0xFFFFFFFF00000000LL; \
3824 count |= current; \
3825}
3826
3827#define UPDATE_STAT_36(lsb, msb, last, count) \
3828{ \
3829 u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \
3830 u64 cur_msb = IXGBE_READ_REG(hw, msb); \
3831 u64 current = ((cur_msb << 32) | cur_lsb); \
3832 if (current < last) \
3833 count += 0x1000000000LL; \
3834 last = current; \
3835 count &= 0xFFFFFFF000000000LL; \
3836 count |= current; \
3837}
3838
3839/*
3840** ixv_update_stats - Update the board statistics counters.
3841*/
3842void
3843ixv_update_stats(struct adapter *adapter)
3844{
3845 struct ixgbe_hw *hw = &adapter->hw;
3846
3847 UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
3848 adapter->stats.vfgprc);
3849 UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
3850 adapter->stats.vfgptc);
3851 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3852 adapter->stats.last_vfgorc, adapter->stats.vfgorc);
3853 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3854 adapter->stats.last_vfgotc, adapter->stats.vfgotc);
3855 UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
3856 adapter->stats.vfmprc);
3857}
3858
3859/**********************************************************************
3860 *
3861 * This routine is called via the 'stats' sysctl handler.
3862 * This routine provides a way to take a look at important statistics
3863 * maintained by the driver and hardware.
3864 *
3865 **********************************************************************/
3866static void
3867ixv_print_hw_stats(struct adapter * adapter)
3868{
3869 device_t dev = adapter->dev;
3870
3871 device_printf(dev,"Std Mbuf Failed = %lu\n",
3872 adapter->mbuf_defrag_failed);
3873 device_printf(dev,"Driver dropped packets = %lu\n",
3874 adapter->dropped_pkts);
3875 device_printf(dev, "watchdog timeouts = %ld\n",
3876 adapter->watchdog_events);
3877
3878 device_printf(dev,"Good Packets Rcvd = %llu\n",
3879 (long long)adapter->stats.vfgprc);
3880 device_printf(dev,"Good Packets Xmtd = %llu\n",
3881 (long long)adapter->stats.vfgptc);
3882 device_printf(dev,"TSO Transmissions = %lu\n",
3883 adapter->tso_tx);
3884
3885}
3886
3887/**********************************************************************
3888 *
3889 * This routine is called via the 'debug' sysctl handler.
3890 * This routine provides a way to take a look at important statistics
3891 * maintained by the driver and hardware.
3892 *
3893 **********************************************************************/
3894static void
3895ixv_print_debug_info(struct adapter *adapter)
3896{
3897 device_t dev = adapter->dev;
3898 struct ixgbe_hw *hw = &adapter->hw;
3899 struct ix_queue *que = adapter->queues;
3900 struct rx_ring *rxr;
3901 struct tx_ring *txr;
3902 struct lro_ctrl *lro;
3903
3904 device_printf(dev,"Error Byte Count = %u \n",
3905 IXGBE_READ_REG(hw, IXGBE_ERRBC));
3906
3907 for (int i = 0; i < adapter->num_queues; i++, que++) {
3908 txr = que->txr;
3909 rxr = que->rxr;
3910 lro = &rxr->lro;
3911 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
3912 que->msix, (long)que->irqs);
3913 device_printf(dev,"RX(%d) Packets Received: %lld\n",
3914 rxr->me, (long long)rxr->rx_packets);
3915 device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
3916 rxr->me, (long long)rxr->rx_split_packets);
3917 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
3918 rxr->me, (long)rxr->rx_bytes);
3919 device_printf(dev,"RX(%d) LRO Queued= %d\n",
3920 rxr->me, lro->lro_queued);
3921 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
3922 rxr->me, lro->lro_flushed);
3923 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
3924 txr->me, (long)txr->total_packets);
3925 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
3926 txr->me, (long)txr->no_desc_avail);
3927 }
3928
3929 device_printf(dev,"MBX IRQ Handled: %lu\n",
3930 (long)adapter->mbx_irq);
3931 return;
3932}
3933
3934static int
3935ixv_sysctl_stats(SYSCTL_HANDLER_ARGS)
3936{
3937 int error;
3938 int result;
3939 struct adapter *adapter;
3940
3941 result = -1;
3942 error = sysctl_handle_int(oidp, &result, 0, req);
3943
3944 if (error || !req->newptr)
3945 return (error);
3946
3947 if (result == 1) {
3948 adapter = (struct adapter *) arg1;
3949 ixv_print_hw_stats(adapter);
3950 }
3951 return error;
3952}
3953
3954static int
3955ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
3956{
3957 int error, result;
3958 struct adapter *adapter;
3959
3960 result = -1;
3961 error = sysctl_handle_int(oidp, &result, 0, req);
3962
3963 if (error || !req->newptr)
3964 return (error);
3965
3966 if (result == 1) {
3967 adapter = (struct adapter *) arg1;
3968 ixv_print_debug_info(adapter);
3969 }
3970 return error;
3971}
3972
3973/*
3974** Set flow control using sysctl:
3975** Flow control values:
3976** 0 - off
3977** 1 - rx pause
3978** 2 - tx pause
3979** 3 - full
3980*/
3981static int
3982ixv_set_flowcntl(SYSCTL_HANDLER_ARGS)
3983{
3984 int error;
3985 struct adapter *adapter;
3986
3987 error = sysctl_handle_int(oidp, &ixv_flow_control, 0, req);
3988
3989 if (error)
3990 return (error);
3991
3992 adapter = (struct adapter *) arg1;
3993 switch (ixv_flow_control) {
3994 case ixgbe_fc_rx_pause:
3995 case ixgbe_fc_tx_pause:
3996 case ixgbe_fc_full:
3997 adapter->hw.fc.requested_mode = ixv_flow_control;
3998 break;
3999 case ixgbe_fc_none:
4000 default:
4001 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4002 }
4003
4004 ixgbe_fc_enable(&adapter->hw);
4005 return error;
4006}
4007
4008static void
4009ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
4010 const char *description, int *limit, int value)
4011{
4012 *limit = value;
4013 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4014 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4015 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4016}
4017
307 /* Allocate, clear, and link in our adapter structure */
308 adapter = device_get_softc(dev);
309 adapter->dev = adapter->osdep.dev = dev;
310 hw = &adapter->hw;
311
312 /* Core Lock Init*/
313 IXV_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
314
315 /* SYSCTL APIs */
316 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
317 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
318 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
319 adapter, 0, ixv_sysctl_stats, "I", "Statistics");
320
321 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
322 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
323 OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
324 adapter, 0, ixv_sysctl_debug, "I", "Debug Info");
325
326 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
327 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
328 OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
329 adapter, 0, ixv_set_flowcntl, "I", "Flow Control");
330
331 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
332 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
333 OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
334 &ixv_enable_aim, 1, "Interrupt Moderation");
335
336 /* Set up the timer callout */
337 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
338
339 /* Determine hardware revision */
340 ixv_identify_hardware(adapter);
341
342 /* Do base PCI setup - map BAR0 */
343 if (ixv_allocate_pci_resources(adapter)) {
344 device_printf(dev, "Allocation of PCI resources failed\n");
345 error = ENXIO;
346 goto err_out;
347 }
348
349 /* Do descriptor calc and sanity checks */
350 if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
351 ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
352 device_printf(dev, "TXD config issue, using default!\n");
353 adapter->num_tx_desc = DEFAULT_TXD;
354 } else
355 adapter->num_tx_desc = ixv_txd;
356
357 if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
358 ixv_rxd < MIN_TXD || ixv_rxd > MAX_TXD) {
359 device_printf(dev, "RXD config issue, using default!\n");
360 adapter->num_rx_desc = DEFAULT_RXD;
361 } else
362 adapter->num_rx_desc = ixv_rxd;
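	/* Note: the RXD sanity check above reuses the TXD limits. */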
363
364 /* Allocate our TX/RX Queues */
365 if (ixv_allocate_queues(adapter)) {
366 error = ENOMEM;
367 goto err_out;
368 }
369
370 /*
371	** Initialize the shared code; it is
372	** at this point that the mac type is set.
373 */
374 error = ixgbe_init_shared_code(hw);
375 if (error) {
376 device_printf(dev,"Shared Code Initialization Failure\n");
377 error = EIO;
378 goto err_late;
379 }
380
381 /* Setup the mailbox */
382 ixgbe_init_mbx_params_vf(hw);
383
384 ixgbe_reset_hw(hw);
385
386 /* Get Hardware Flow Control setting */
387 hw->fc.requested_mode = ixgbe_fc_full;
388 hw->fc.pause_time = IXV_FC_PAUSE;
389 hw->fc.low_water[0] = IXV_FC_LO;
390 hw->fc.high_water[0] = IXV_FC_HI;
391 hw->fc.send_xon = TRUE;
392
393 error = ixgbe_init_hw(hw);
394 if (error) {
395 device_printf(dev,"Hardware Initialization Failure\n");
396 error = EIO;
397 goto err_late;
398 }
399
400 error = ixv_allocate_msix(adapter);
401 if (error)
402 goto err_late;
403
404 /* Setup OS specific network interface */
405 ixv_setup_interface(dev, adapter);
406
407 /* Sysctl for limiting the amount of work done in the taskqueue */
408 ixv_add_rx_process_limit(adapter, "rx_processing_limit",
409 "max number of rx packets to process", &adapter->rx_process_limit,
410 ixv_rx_process_limit);
411
412 /* Do the stats setup */
413 ixv_save_stats(adapter);
414 ixv_init_stats(adapter);
415
416 /* Register for VLAN events */
417 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
418 ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
419 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
420 ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
421
422 INIT_DEBUGOUT("ixv_attach: end");
423 return (0);
424
425err_late:
426 ixv_free_transmit_structures(adapter);
427 ixv_free_receive_structures(adapter);
428err_out:
429 ixv_free_pci_resources(adapter);
430 return (error);
431
432}
433
434/*********************************************************************
435 * Device removal routine
436 *
437 * The detach entry point is called when the driver is being removed.
438 * This routine stops the adapter and deallocates all the resources
439 * that were allocated for driver operation.
440 *
441 * return 0 on success, positive on failure
442 *********************************************************************/
443
444static int
445ixv_detach(device_t dev)
446{
447 struct adapter *adapter = device_get_softc(dev);
448 struct ix_queue *que = adapter->queues;
449
450 INIT_DEBUGOUT("ixv_detach: begin");
451
452 /* Make sure VLANS are not using driver */
453 if (adapter->ifp->if_vlantrunk != NULL) {
454 device_printf(dev,"Vlan in use, detach first\n");
455 return (EBUSY);
456 }
457
458 IXV_CORE_LOCK(adapter);
459 ixv_stop(adapter);
460 IXV_CORE_UNLOCK(adapter);
461
462 for (int i = 0; i < adapter->num_queues; i++, que++) {
463 if (que->tq) {
464 taskqueue_drain(que->tq, &que->que_task);
465 taskqueue_free(que->tq);
466 }
467 }
468
469 /* Drain the Link queue */
470 if (adapter->tq) {
471 taskqueue_drain(adapter->tq, &adapter->mbx_task);
472 taskqueue_free(adapter->tq);
473 }
474
475 /* Unregister VLAN events */
476 if (adapter->vlan_attach != NULL)
477 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
478 if (adapter->vlan_detach != NULL)
479 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
480
481 ether_ifdetach(adapter->ifp);
482 callout_drain(&adapter->timer);
483 ixv_free_pci_resources(adapter);
484 bus_generic_detach(dev);
485 if_free(adapter->ifp);
486
487 ixv_free_transmit_structures(adapter);
488 ixv_free_receive_structures(adapter);
489
490 IXV_CORE_LOCK_DESTROY(adapter);
491 return (0);
492}
493
494/*********************************************************************
495 *
496 * Shutdown entry point
497 *
498 **********************************************************************/
499static int
500ixv_shutdown(device_t dev)
501{
502 struct adapter *adapter = device_get_softc(dev);
503 IXV_CORE_LOCK(adapter);
504 ixv_stop(adapter);
505 IXV_CORE_UNLOCK(adapter);
506 return (0);
507}
508
509#if __FreeBSD_version < 800000
510/*********************************************************************
511 * Transmit entry point
512 *
513 * ixv_start is called by the stack to initiate a transmit.
514 * The driver will remain in this routine as long as there are
515 * packets to transmit and transmit resources are available.
516 * In case resources are not available, the stack is notified and
517 * the packet is requeued.
518 **********************************************************************/
519static void
520ixv_start_locked(struct tx_ring *txr, struct ifnet * ifp)
521{
522 struct mbuf *m_head;
523 struct adapter *adapter = txr->adapter;
524
525 IXV_TX_LOCK_ASSERT(txr);
526
527 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
528 IFF_DRV_RUNNING)
529 return;
530 if (!adapter->link_active)
531 return;
532
533 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
534
535 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
536 if (m_head == NULL)
537 break;
538
539 if (ixv_xmit(txr, &m_head)) {
540 if (m_head == NULL)
541 break;
542 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
543 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
544 break;
545 }
546 /* Send a copy of the frame to the BPF listener */
547 ETHER_BPF_MTAP(ifp, m_head);
548
549 /* Set watchdog on */
550 txr->watchdog_check = TRUE;
551 txr->watchdog_time = ticks;
552
553 }
554 return;
555}
556
557/*
558 * Legacy TX start - called by the stack, this
559 * always uses the first tx ring, and should
560 * not be used with multiqueue tx enabled.
561 */
562static void
563ixv_start(struct ifnet *ifp)
564{
565 struct adapter *adapter = ifp->if_softc;
566 struct tx_ring *txr = adapter->tx_rings;
567
568 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
569 IXV_TX_LOCK(txr);
570 ixv_start_locked(txr, ifp);
571 IXV_TX_UNLOCK(txr);
572 }
573 return;
574}
575
576#else
577
578/*
579** Multiqueue Transmit driver
580**
581*/
582static int
583ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
584{
585 struct adapter *adapter = ifp->if_softc;
586 struct ix_queue *que;
587 struct tx_ring *txr;
588 int i = 0, err = 0;
589
590 /* Which queue to use */
591 if ((m->m_flags & M_FLOWID) != 0)
592 i = m->m_pkthdr.flowid % adapter->num_queues;
593
594 txr = &adapter->tx_rings[i];
595 que = &adapter->queues[i];
596
597 if (IXV_TX_TRYLOCK(txr)) {
598 err = ixv_mq_start_locked(ifp, txr, m);
599 IXV_TX_UNLOCK(txr);
600 } else {
601 err = drbr_enqueue(ifp, txr->br, m);
602 taskqueue_enqueue(que->tq, &que->que_task);
603 }
604
605 return (err);
606}
607
608static int
609ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
610{
611 struct adapter *adapter = txr->adapter;
612 struct mbuf *next;
613 int enqueued, err = 0;
614
615 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
616 IFF_DRV_RUNNING || adapter->link_active == 0) {
617 if (m != NULL)
618 err = drbr_enqueue(ifp, txr->br, m);
619 return (err);
620 }
621
622 /* Do a clean if descriptors are low */
623 if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
624 ixv_txeof(txr);
625
626 enqueued = 0;
627 if (m == NULL) {
628 next = drbr_dequeue(ifp, txr->br);
629 } else if (drbr_needs_enqueue(ifp, txr->br)) {
630 if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
631 return (err);
632 next = drbr_dequeue(ifp, txr->br);
633 } else
634 next = m;
635
636 /* Process the queue */
637 while (next != NULL) {
638 if ((err = ixv_xmit(txr, &next)) != 0) {
639 if (next != NULL)
640 err = drbr_enqueue(ifp, txr->br, next);
641 break;
642 }
643 enqueued++;
644 ifp->if_obytes += next->m_pkthdr.len;
645 if (next->m_flags & M_MCAST)
646 ifp->if_omcasts++;
647 /* Send a copy of the frame to the BPF listener */
648 ETHER_BPF_MTAP(ifp, next);
649 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
650 break;
651 if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
652 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
653 break;
654 }
655 next = drbr_dequeue(ifp, txr->br);
656 }
657
658 if (enqueued > 0) {
659 /* Set watchdog on */
660 txr->watchdog_check = TRUE;
661 txr->watchdog_time = ticks;
662 }
663
664 return (err);
665}
666
667/*
668** Flush all ring buffers
669*/
670static void
671ixv_qflush(struct ifnet *ifp)
672{
673 struct adapter *adapter = ifp->if_softc;
674 struct tx_ring *txr = adapter->tx_rings;
675 struct mbuf *m;
676
677 for (int i = 0; i < adapter->num_queues; i++, txr++) {
678 IXV_TX_LOCK(txr);
679 while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
680 m_freem(m);
681 IXV_TX_UNLOCK(txr);
682 }
683 if_qflush(ifp);
684}
685
686#endif
687
688/*********************************************************************
689 * Ioctl entry point
690 *
691 * ixv_ioctl is called when the user wants to configure the
692 * interface.
693 *
694 * return 0 on success, positive on failure
695 **********************************************************************/
696
697static int
698ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
699{
700 struct adapter *adapter = ifp->if_softc;
701 struct ifreq *ifr = (struct ifreq *) data;
702#if defined(INET) || defined(INET6)
703 struct ifaddr *ifa = (struct ifaddr *) data;
704 bool avoid_reset = FALSE;
705#endif
706 int error = 0;
707
708 switch (command) {
709
710 case SIOCSIFADDR:
711#ifdef INET
712 if (ifa->ifa_addr->sa_family == AF_INET)
713 avoid_reset = TRUE;
714#endif
715#ifdef INET6
716 if (ifa->ifa_addr->sa_family == AF_INET6)
717 avoid_reset = TRUE;
718#endif
719#if defined(INET) || defined(INET6)
720 /*
721 ** Calling init results in link renegotiation,
722 ** so we avoid doing it when possible.
723 */
724 if (avoid_reset) {
725 ifp->if_flags |= IFF_UP;
726 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
727 ixv_init(adapter);
728 if (!(ifp->if_flags & IFF_NOARP))
729 arp_ifinit(ifp, ifa);
730 } else
731 error = ether_ioctl(ifp, command, data);
732 break;
733#endif
734 case SIOCSIFMTU:
735 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
736 if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
737 error = EINVAL;
738 } else {
739 IXV_CORE_LOCK(adapter);
740 ifp->if_mtu = ifr->ifr_mtu;
741 adapter->max_frame_size =
742 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
743 ixv_init_locked(adapter);
744 IXV_CORE_UNLOCK(adapter);
745 }
746 break;
747 case SIOCSIFFLAGS:
748 IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
749 IXV_CORE_LOCK(adapter);
750 if (ifp->if_flags & IFF_UP) {
751 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
752 ixv_init_locked(adapter);
753 } else
754 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
755 ixv_stop(adapter);
756 adapter->if_flags = ifp->if_flags;
757 IXV_CORE_UNLOCK(adapter);
758 break;
759 case SIOCADDMULTI:
760 case SIOCDELMULTI:
761 IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
762 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
763 IXV_CORE_LOCK(adapter);
764 ixv_disable_intr(adapter);
765 ixv_set_multi(adapter);
766 ixv_enable_intr(adapter);
767 IXV_CORE_UNLOCK(adapter);
768 }
769 break;
770 case SIOCSIFMEDIA:
771 case SIOCGIFMEDIA:
772 IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
773 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
774 break;
775 case SIOCSIFCAP:
776 {
777 int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
778 IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
779 if (mask & IFCAP_HWCSUM)
780 ifp->if_capenable ^= IFCAP_HWCSUM;
781 if (mask & IFCAP_TSO4)
782 ifp->if_capenable ^= IFCAP_TSO4;
783 if (mask & IFCAP_LRO)
784 ifp->if_capenable ^= IFCAP_LRO;
785 if (mask & IFCAP_VLAN_HWTAGGING)
786 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
787 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
788 IXV_CORE_LOCK(adapter);
789 ixv_init_locked(adapter);
790 IXV_CORE_UNLOCK(adapter);
791 }
792 VLAN_CAPABILITIES(ifp);
793 break;
794 }
795
796 default:
797 IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
798 error = ether_ioctl(ifp, command, data);
799 break;
800 }
801
802 return (error);
803}
804
805/*********************************************************************
806 * Init entry point
807 *
808 * This routine is used in two ways. It is used by the stack as
809 * the init entry point in the network interface structure. It is also used
810 * by the driver as a hw/sw initialization routine to get to a
811 * consistent state.
812 *
813 * return 0 on success, positive on failure
814 **********************************************************************/
815#define IXGBE_MHADD_MFS_SHIFT 16
816
817static void
818ixv_init_locked(struct adapter *adapter)
819{
820 struct ifnet *ifp = adapter->ifp;
821 device_t dev = adapter->dev;
822 struct ixgbe_hw *hw = &adapter->hw;
823 u32 mhadd, gpie;
824
825 INIT_DEBUGOUT("ixv_init: begin");
826 mtx_assert(&adapter->core_mtx, MA_OWNED);
827 hw->adapter_stopped = FALSE;
828 ixgbe_stop_adapter(hw);
829 callout_stop(&adapter->timer);
830
831 /* reprogram the RAR[0] in case user changed it. */
832 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
833
834 /* Get the latest mac address, User can use a LAA */
835 bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
836 IXGBE_ETH_LENGTH_OF_ADDRESS);
837 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
838 hw->addr_ctrl.rar_used_count = 1;
839
840 /* Prepare transmit descriptors and buffers */
841 if (ixv_setup_transmit_structures(adapter)) {
842 device_printf(dev,"Could not setup transmit structures\n");
843 ixv_stop(adapter);
844 return;
845 }
846
847 ixgbe_reset_hw(hw);
848 ixv_initialize_transmit_units(adapter);
849
850 /* Setup Multicast table */
851 ixv_set_multi(adapter);
852
853 /*
854 ** Determine the correct mbuf pool
855 ** for doing jumbo/headersplit
856 */
857 if (ifp->if_mtu > ETHERMTU)
858 adapter->rx_mbuf_sz = MJUMPAGESIZE;
859 else
860 adapter->rx_mbuf_sz = MCLBYTES;
861
862 /* Prepare receive descriptors and buffers */
863 if (ixv_setup_receive_structures(adapter)) {
864 device_printf(dev,"Could not setup receive structures\n");
865 ixv_stop(adapter);
866 return;
867 }
868
869 /* Configure RX settings */
870 ixv_initialize_receive_units(adapter);
871
872 /* Enable Enhanced MSIX mode */
873 gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
874 gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
875 gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
876 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
877
878 /* Set the various hardware offload abilities */
879 ifp->if_hwassist = 0;
880 if (ifp->if_capenable & IFCAP_TSO4)
881 ifp->if_hwassist |= CSUM_TSO;
882 if (ifp->if_capenable & IFCAP_TXCSUM) {
883 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
884#if __FreeBSD_version >= 800000
885 ifp->if_hwassist |= CSUM_SCTP;
886#endif
887 }
888
889 /* Set MTU size */
890 if (ifp->if_mtu > ETHERMTU) {
891 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
892 mhadd &= ~IXGBE_MHADD_MFS_MASK;
893 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
894 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
895 }
896
897 /* Set up VLAN offload and filter */
898 ixv_setup_vlan_support(adapter);
899
900 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
901
902 /* Set up MSI/X routing */
903 ixv_configure_ivars(adapter);
904
905 /* Set up auto-mask */
906 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
907
908 /* Set moderation on the Link interrupt */
909 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);
910
911 /* Stats init */
912 ixv_init_stats(adapter);
913
914 /* Config/Enable Link */
915 ixv_config_link(adapter);
916
917 /* And now turn on interrupts */
918 ixv_enable_intr(adapter);
919
920 /* Now inform the stack we're ready */
921 ifp->if_drv_flags |= IFF_DRV_RUNNING;
922 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
923
924 return;
925}
926
927static void
928ixv_init(void *arg)
929{
930 struct adapter *adapter = arg;
931
932 IXV_CORE_LOCK(adapter);
933 ixv_init_locked(adapter);
934 IXV_CORE_UNLOCK(adapter);
935 return;
936}
937
938
939/*
940**
941** MSIX Interrupt Handlers and Tasklets
942**
943*/
944
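/*
** Helpers to enable or disable the interrupt for a single queue by
** setting (VTEIMS) or clearing (VTEIMC) its bit in the VF interrupt
** mask registers.
*/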
945static inline void
946ixv_enable_queue(struct adapter *adapter, u32 vector)
947{
948 struct ixgbe_hw *hw = &adapter->hw;
949 u32 queue = 1 << vector;
950 u32 mask;
951
952 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
953 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
954}
955
956static inline void
957ixv_disable_queue(struct adapter *adapter, u32 vector)
958{
959 struct ixgbe_hw *hw = &adapter->hw;
960 u64 queue = (u64)(1 << vector);
961 u32 mask;
962
963 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
964 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
965}
966
967static inline void
968ixv_rearm_queues(struct adapter *adapter, u64 queues)
969{
970 u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
971 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
972}
973
974
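/*
** Deferred (taskqueue) handler for a queue: drains RX up to the
** process limit, cleans TX, restarts any stack-queued transmits, and
** either reschedules itself (more RX work) or re-enables the queue
** interrupt.
*/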
975static void
976ixv_handle_que(void *context, int pending)
977{
978 struct ix_queue *que = context;
979 struct adapter *adapter = que->adapter;
980 struct tx_ring *txr = que->txr;
981 struct ifnet *ifp = adapter->ifp;
982 bool more;
983
984 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
985 more = ixv_rxeof(que, adapter->rx_process_limit);
986 IXV_TX_LOCK(txr);
987 ixv_txeof(txr);
988#if __FreeBSD_version >= 800000
989 if (!drbr_empty(ifp, txr->br))
990 ixv_mq_start_locked(ifp, txr, NULL);
991#else
992 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
993 ixv_start_locked(txr, ifp);
994#endif
995 IXV_TX_UNLOCK(txr);
996 if (more) {
997 taskqueue_enqueue(que->tq, &que->que_task);
998 return;
999 }
1000 }
1001
1002 /* Reenable this interrupt */
1003 ixv_enable_queue(adapter, que->msix);
1004 return;
1005}
1006
1007/*********************************************************************
1008 *
1009 * MSI Queue Interrupt Service routine
1010 *
1011 **********************************************************************/
1012void
1013ixv_msix_que(void *arg)
1014{
1015 struct ix_queue *que = arg;
1016 struct adapter *adapter = que->adapter;
1017 struct tx_ring *txr = que->txr;
1018 struct rx_ring *rxr = que->rxr;
1019 bool more_tx, more_rx;
1020 u32 newitr = 0;
1021
1022 ixv_disable_queue(adapter, que->msix);
1023 ++que->irqs;
1024
1025 more_rx = ixv_rxeof(que, adapter->rx_process_limit);
1026
1027 IXV_TX_LOCK(txr);
1028 more_tx = ixv_txeof(txr);
1029 /*
1030 ** Make certain that if the stack
1031 ** has anything queued the task gets
1032 ** scheduled to handle it.
1033 */
1034#if __FreeBSD_version < 800000
1035 if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
1036#else
1037 if (!drbr_empty(adapter->ifp, txr->br))
1038#endif
1039 more_tx = 1;
1040 IXV_TX_UNLOCK(txr);
1041
1042 more_rx = ixv_rxeof(que, adapter->rx_process_limit);
1043
1044 /* Do AIM now? */
1045
1046 if (ixv_enable_aim == FALSE)
1047 goto no_calc;
1048 /*
1049 ** Do Adaptive Interrupt Moderation:
1050 ** - Write out last calculated setting
1051 ** - Calculate based on average size over
1052 ** the last interval.
1053 */
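	/*
	** For example: an average frame of 600 bytes becomes 624 after
	** the 24-byte frame/CRC adjustment below; that falls in the mid
	** range (300-1200) and is divided by 3, giving 208, which
	** (mirrored into the upper 16 bits) becomes the EITR setting
	** written on the next interrupt.
	*/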
1054 if (que->eitr_setting)
1055 IXGBE_WRITE_REG(&adapter->hw,
1056 IXGBE_VTEITR(que->msix),
1057 que->eitr_setting);
1058
1059 que->eitr_setting = 0;
1060
1061 /* Idle, do nothing */
1062 if ((txr->bytes == 0) && (rxr->bytes == 0))
1063 goto no_calc;
1064
1065 if ((txr->bytes) && (txr->packets))
1066 newitr = txr->bytes/txr->packets;
1067 if ((rxr->bytes) && (rxr->packets))
1068 newitr = max(newitr,
1069 (rxr->bytes / rxr->packets));
1070 newitr += 24; /* account for hardware frame, crc */
1071
1072 /* set an upper boundary */
1073 newitr = min(newitr, 3000);
1074
1075 /* Be nice to the mid range */
1076 if ((newitr > 300) && (newitr < 1200))
1077 newitr = (newitr / 3);
1078 else
1079 newitr = (newitr / 2);
1080
1081 newitr |= newitr << 16;
1082
1083 /* save for next interrupt */
1084 que->eitr_setting = newitr;
1085
1086 /* Reset state */
1087 txr->bytes = 0;
1088 txr->packets = 0;
1089 rxr->bytes = 0;
1090 rxr->packets = 0;
1091
1092no_calc:
1093 if (more_tx || more_rx)
1094 taskqueue_enqueue(que->tq, &que->que_task);
1095 else /* Reenable this interrupt */
1096 ixv_enable_queue(adapter, que->msix);
1097 return;
1098}
1099
1100static void
1101ixv_msix_mbx(void *arg)
1102{
1103 struct adapter *adapter = arg;
1104 struct ixgbe_hw *hw = &adapter->hw;
1105 u32 reg;
1106
1107 ++adapter->mbx_irq;
1108
1109 /* First get the cause */
1110 reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
1111 /* Clear interrupt with write */
1112 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
1113
1114 /* Link status change */
1115 if (reg & IXGBE_EICR_LSC)
1116 taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
1117
1118 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
1119 return;
1120}
1121
1122/*********************************************************************
1123 *
1124 * Media Ioctl callback
1125 *
1126 * This routine is called whenever the user queries the status of
1127 * the interface using ifconfig.
1128 *
1129 **********************************************************************/
1130static void
1131ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1132{
1133 struct adapter *adapter = ifp->if_softc;
1134
1135 INIT_DEBUGOUT("ixv_media_status: begin");
1136 IXV_CORE_LOCK(adapter);
1137 ixv_update_link_status(adapter);
1138
1139 ifmr->ifm_status = IFM_AVALID;
1140 ifmr->ifm_active = IFM_ETHER;
1141
1142 if (!adapter->link_active) {
1143 IXV_CORE_UNLOCK(adapter);
1144 return;
1145 }
1146
1147 ifmr->ifm_status |= IFM_ACTIVE;
1148
1149 switch (adapter->link_speed) {
1150 case IXGBE_LINK_SPEED_1GB_FULL:
1151 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1152 break;
1153 case IXGBE_LINK_SPEED_10GB_FULL:
1154 ifmr->ifm_active |= IFM_FDX;
1155 break;
1156 }
1157
1158 IXV_CORE_UNLOCK(adapter);
1159
1160 return;
1161}
1162
1163/*********************************************************************
1164 *
1165 * Media Ioctl callback
1166 *
1167 * This routine is called when the user changes speed/duplex using
1168 * the media/mediaopt option with ifconfig.
1169 *
1170 **********************************************************************/
1171static int
1172ixv_media_change(struct ifnet * ifp)
1173{
1174 struct adapter *adapter = ifp->if_softc;
1175 struct ifmedia *ifm = &adapter->media;
1176
1177 INIT_DEBUGOUT("ixv_media_change: begin");
1178
1179 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1180 return (EINVAL);
1181
1182 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1183 case IFM_AUTO:
1184 break;
1185 default:
1186 device_printf(adapter->dev, "Only auto media type\n");
1187 return (EINVAL);
1188 }
1189
1190 return (0);
1191}
1192
1193/*********************************************************************
1194 *
1195 * This routine maps the mbufs to tx descriptors, allowing the
1196 * TX engine to transmit the packets.
1197 * - return 0 on success, positive on failure
1198 *
1199 **********************************************************************/
1200
1201static int
1202ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
1203{
1204 struct adapter *adapter = txr->adapter;
1205 u32 olinfo_status = 0, cmd_type_len;
1206 u32 paylen = 0;
1207 int i, j, error, nsegs;
1208 int first, last = 0;
1209 struct mbuf *m_head;
1210 bus_dma_segment_t segs[32];
1211 bus_dmamap_t map;
1212 struct ixv_tx_buf *txbuf, *txbuf_mapped;
1213 union ixgbe_adv_tx_desc *txd = NULL;
1214
1215 m_head = *m_headp;
1216
1217 /* Basic descriptor defines */
1218 cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
1219 IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
1220
1221 if (m_head->m_flags & M_VLANTAG)
1222 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1223
1224 /*
1225 * Important to capture the first descriptor
1226 * used because it will contain the index of
1227 * the one we tell the hardware to report back
1228 */
1229 first = txr->next_avail_desc;
1230 txbuf = &txr->tx_buffers[first];
1231 txbuf_mapped = txbuf;
1232 map = txbuf->map;
1233
1234 /*
1235 * Map the packet for DMA.
1236 */
1237 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1238 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1239
1240 if (error == EFBIG) {
1241 struct mbuf *m;
1242
1243 m = m_defrag(*m_headp, M_DONTWAIT);
1244 if (m == NULL) {
1245 adapter->mbuf_defrag_failed++;
1246 m_freem(*m_headp);
1247 *m_headp = NULL;
1248 return (ENOBUFS);
1249 }
1250 *m_headp = m;
1251
1252 /* Try it again */
1253 error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
1254 *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
1255
1256 if (error == ENOMEM) {
1257 adapter->no_tx_dma_setup++;
1258 return (error);
1259 } else if (error != 0) {
1260 adapter->no_tx_dma_setup++;
1261 m_freem(*m_headp);
1262 *m_headp = NULL;
1263 return (error);
1264 }
1265 } else if (error == ENOMEM) {
1266 adapter->no_tx_dma_setup++;
1267 return (error);
1268 } else if (error != 0) {
1269 adapter->no_tx_dma_setup++;
1270 m_freem(*m_headp);
1271 *m_headp = NULL;
1272 return (error);
1273 }
1274
1275 /* Make certain there are enough descriptors */
1276 if (nsegs > txr->tx_avail - 2) {
1277 txr->no_desc_avail++;
1278 error = ENOBUFS;
1279 goto xmit_fail;
1280 }
1281 m_head = *m_headp;
1282
1283 /*
1284 ** Set up the appropriate offload context
1285 ** this becomes the first descriptor of
1286 ** a packet.
1287 */
1288 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
1289 if (ixv_tso_setup(txr, m_head, &paylen)) {
1290 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
1291 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
1292 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1293 olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
1294 ++adapter->tso_tx;
1295 } else
1296 return (ENXIO);
1297 } else if (ixv_tx_ctx_setup(txr, m_head))
1298 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1299
1300 /* Record payload length */
1301 if (paylen == 0)
1302 olinfo_status |= m_head->m_pkthdr.len <<
1303 IXGBE_ADVTXD_PAYLEN_SHIFT;
1304
1305 i = txr->next_avail_desc;
1306 for (j = 0; j < nsegs; j++) {
1307 bus_size_t seglen;
1308 bus_addr_t segaddr;
1309
1310 txbuf = &txr->tx_buffers[i];
1311 txd = &txr->tx_base[i];
1312 seglen = segs[j].ds_len;
1313 segaddr = htole64(segs[j].ds_addr);
1314
1315 txd->read.buffer_addr = segaddr;
1316 txd->read.cmd_type_len = htole32(txr->txd_cmd |
1317 cmd_type_len |seglen);
1318 txd->read.olinfo_status = htole32(olinfo_status);
1319 last = i; /* descriptor that will get completion IRQ */
1320
1321 if (++i == adapter->num_tx_desc)
1322 i = 0;
1323
1324 txbuf->m_head = NULL;
1325 txbuf->eop_index = -1;
1326 }
1327
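	/*
	** The final descriptor of the frame gets EOP and RS; the
	** eop_index saved below records which descriptor will be
	** marked done for the whole chain.
	*/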
1328 txd->read.cmd_type_len |=
1329 htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1330 txr->tx_avail -= nsegs;
1331 txr->next_avail_desc = i;
1332
1333 txbuf->m_head = m_head;
1334 txr->tx_buffers[first].map = txbuf->map;
1335 txbuf->map = map;
1336 bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
1337
1338 /* Set the index of the descriptor that will be marked done */
1339 txbuf = &txr->tx_buffers[first];
1340 txbuf->eop_index = last;
1341
1342 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1343 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1344 /*
1345 * Advance the Transmit Descriptor Tail (Tdt), this tells the
1346 * hardware that this frame is available to transmit.
1347 */
1348 ++txr->total_packets;
1349 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);
1350
1351 return (0);
1352
1353xmit_fail:
1354 bus_dmamap_unload(txr->txtag, txbuf->map);
1355 return (error);
1356
1357}
1358
1359
1360/*********************************************************************
1361 * Multicast Update
1362 *
1363 * This routine is called whenever multicast address list is updated.
1364 *
1365 **********************************************************************/
1366#define IXGBE_RAR_ENTRIES 16
1367
1368static void
1369ixv_set_multi(struct adapter *adapter)
1370{
1371 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1372 u8 *update_ptr;
1373 struct ifmultiaddr *ifma;
1374 int mcnt = 0;
1375 struct ifnet *ifp = adapter->ifp;
1376
1377 IOCTL_DEBUGOUT("ixv_set_multi: begin");
1378
1379#if __FreeBSD_version < 800000
1380 IF_ADDR_LOCK(ifp);
1381#else
1382 if_maddr_rlock(ifp);
1383#endif
1384 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1385 if (ifma->ifma_addr->sa_family != AF_LINK)
1386 continue;
1387 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1388 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1389 IXGBE_ETH_LENGTH_OF_ADDRESS);
1390 mcnt++;
1391 }
1392#if __FreeBSD_version < 800000
1393 IF_ADDR_UNLOCK(ifp);
1394#else
1395 if_maddr_runlock(ifp);
1396#endif
1397
1398 update_ptr = mta;
1399
1400 ixgbe_update_mc_addr_list(&adapter->hw,
1401 update_ptr, mcnt, ixv_mc_array_itr, TRUE);
1402
1403 return;
1404}
1405
1406/*
1407 * This is an iterator function needed by the multicast
1408 * shared code. It simply feeds the shared code routine the
1409 * addresses from the array built in ixv_set_multi(), one by one.
1410 */
1411static u8 *
1412ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1413{
1414 u8 *addr = *update_ptr;
1415 u8 *newptr;
1416 *vmdq = 0;
1417
1418 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1419 *update_ptr = newptr;
1420 return addr;
1421}
1422
1423/*********************************************************************
1424 * Timer routine
1425 *
1426 * This routine checks for link status, updates statistics,
1427 * and runs the watchdog check.
1428 *
1429 **********************************************************************/
1430
1431static void
1432ixv_local_timer(void *arg)
1433{
1434 struct adapter *adapter = arg;
1435 device_t dev = adapter->dev;
1436 struct tx_ring *txr = adapter->tx_rings;
1437 int i;
1438
1439 mtx_assert(&adapter->core_mtx, MA_OWNED);
1440
1441 ixv_update_link_status(adapter);
1442
1443 /* Stats Update */
1444 ixv_update_stats(adapter);
1445
1446 /*
1447 * If the interface has been paused
1448 * then don't do the watchdog check
1449 */
1450 if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
1451 goto out;
1452 /*
1453 ** Check for time since any descriptor was cleaned
1454 */
1455 for (i = 0; i < adapter->num_queues; i++, txr++) {
1456 IXV_TX_LOCK(txr);
1457 if (txr->watchdog_check == FALSE) {
1458 IXV_TX_UNLOCK(txr);
1459 continue;
1460 }
1461 if ((ticks - txr->watchdog_time) > IXV_WATCHDOG)
1462 goto hung;
1463 IXV_TX_UNLOCK(txr);
1464 }
1465out:
1466 ixv_rearm_queues(adapter, adapter->que_mask);
1467 callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
1468 return;
1469
1470hung:
1471 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
1472 device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
1473 IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
1474 IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
1475 device_printf(dev,"TX(%d) desc avail = %d,"
1476 "Next TX to Clean = %d\n",
1477 txr->me, txr->tx_avail, txr->next_to_clean);
1478 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1479 adapter->watchdog_events++;
1480 IXV_TX_UNLOCK(txr);
1481 ixv_init_locked(adapter);
1482}
1483
1484/*
1485** Note: this routine updates the OS on the link state;
1486** the real check of the hardware only happens with
1487** a link interrupt.
1488*/
1489static void
1490ixv_update_link_status(struct adapter *adapter)
1491{
1492 struct ifnet *ifp = adapter->ifp;
1493 struct tx_ring *txr = adapter->tx_rings;
1494 device_t dev = adapter->dev;
1495
1496
1497 if (adapter->link_up){
1498 if (adapter->link_active == FALSE) {
1499 if (bootverbose)
1500 device_printf(dev,"Link is up %d Gbps %s \n",
1501 ((adapter->link_speed == 128)? 10:1),
1502 "Full Duplex");
1503 adapter->link_active = TRUE;
1504 if_link_state_change(ifp, LINK_STATE_UP);
1505 }
1506 } else { /* Link down */
1507 if (adapter->link_active == TRUE) {
1508 if (bootverbose)
1509 device_printf(dev,"Link is Down\n");
1510 if_link_state_change(ifp, LINK_STATE_DOWN);
1511 adapter->link_active = FALSE;
1512 for (int i = 0; i < adapter->num_queues;
1513 i++, txr++)
1514 txr->watchdog_check = FALSE;
1515 }
1516 }
1517
1518 return;
1519}
1520
1521
1522/*********************************************************************
1523 *
1524 * This routine disables all traffic on the adapter by issuing a
1525 * global reset on the MAC and deallocates TX/RX buffers.
1526 *
1527 **********************************************************************/
1528
1529static void
1530ixv_stop(void *arg)
1531{
1532 struct ifnet *ifp;
1533 struct adapter *adapter = arg;
1534 struct ixgbe_hw *hw = &adapter->hw;
1535 ifp = adapter->ifp;
1536
1537 mtx_assert(&adapter->core_mtx, MA_OWNED);
1538
1539 INIT_DEBUGOUT("ixv_stop: begin\n");
1540 ixv_disable_intr(adapter);
1541
1542 /* Tell the stack that the interface is no longer active */
1543 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1544
1545 ixgbe_reset_hw(hw);
1546 adapter->hw.adapter_stopped = FALSE;
1547 ixgbe_stop_adapter(hw);
1548 callout_stop(&adapter->timer);
1549
1550 /* reprogram the RAR[0] in case user changed it. */
1551 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1552
1553 return;
1554}
1555
1556
1557/*********************************************************************
1558 *
1559 * Determine hardware revision.
1560 *
1561 **********************************************************************/
1562static void
1563ixv_identify_hardware(struct adapter *adapter)
1564{
1565 device_t dev = adapter->dev;
1566 u16 pci_cmd_word;
1567
1568 /*
1569	** Make sure BUSMASTER is set; on a VM under
1570	** KVM it may not be, and that will break things.
1571 */
1572 pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1573 if (!((pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1574 (pci_cmd_word & PCIM_CMD_MEMEN))) {
1575 INIT_DEBUGOUT("Memory Access and/or Bus Master "
1576 "bits were not set!\n");
1577 pci_cmd_word |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1578 pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
1579 }
1580
1581 /* Save off the information about this board */
1582 adapter->hw.vendor_id = pci_get_vendor(dev);
1583 adapter->hw.device_id = pci_get_device(dev);
1584 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
1585 adapter->hw.subsystem_vendor_id =
1586 pci_read_config(dev, PCIR_SUBVEND_0, 2);
1587 adapter->hw.subsystem_device_id =
1588 pci_read_config(dev, PCIR_SUBDEV_0, 2);
1589
1590 return;
1591}
1592
1593/*********************************************************************
1594 *
1595 * Setup MSIX Interrupt resources and handlers
1596 *
1597 **********************************************************************/
1598static int
1599ixv_allocate_msix(struct adapter *adapter)
1600{
1601 device_t dev = adapter->dev;
1602 struct ix_queue *que = adapter->queues;
1603 int error, rid, vector = 0;
1604
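	/*
	** Queue interrupts take the first MSIX vectors (one per queue,
	** rid = vector + 1); the mailbox interrupt uses the vector that
	** follows the last queue.
	*/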
1605 for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
1606 rid = vector + 1;
1607 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1608 RF_SHAREABLE | RF_ACTIVE);
1609 if (que->res == NULL) {
1610 device_printf(dev,"Unable to allocate"
1611 " bus resource: que interrupt [%d]\n", vector);
1612 return (ENXIO);
1613 }
1614 /* Set the handler function */
1615 error = bus_setup_intr(dev, que->res,
1616 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1617 ixv_msix_que, que, &que->tag);
1618 if (error) {
1619 que->res = NULL;
1620 device_printf(dev, "Failed to register QUE handler");
1621 return (error);
1622 }
1623#if __FreeBSD_version >= 800504
1624 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
1625#endif
1626 que->msix = vector;
1627 adapter->que_mask |= (u64)(1 << que->msix);
1628 /*
1629 ** Bind the msix vector, and thus the
1630		** ring, to the corresponding cpu.
1631 */
1632 if (adapter->num_queues > 1)
1633 bus_bind_intr(dev, que->res, i);
1634
1635 TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
1636 que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
1637 taskqueue_thread_enqueue, &que->tq);
1638 taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
1639 device_get_nameunit(adapter->dev));
1640 }
1641
1642 /* and Mailbox */
1643 rid = vector + 1;
1644 adapter->res = bus_alloc_resource_any(dev,
1645 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1646 if (!adapter->res) {
1647 device_printf(dev,"Unable to allocate"
1648 " bus resource: MBX interrupt [%d]\n", rid);
1649 return (ENXIO);
1650 }
1651 /* Set the mbx handler function */
1652 error = bus_setup_intr(dev, adapter->res,
1653 INTR_TYPE_NET | INTR_MPSAFE, NULL,
1654 ixv_msix_mbx, adapter, &adapter->tag);
1655 if (error) {
1656 adapter->res = NULL;
1657 device_printf(dev, "Failed to register LINK handler");
1658 return (error);
1659 }
1660#if __FreeBSD_version >= 800504
1661 bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
1662#endif
1663 adapter->mbxvec = vector;
1664 /* Tasklets for Mailbox */
1665 TASK_INIT(&adapter->mbx_task, 0, ixv_handle_mbx, adapter);
1666 adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
1667 taskqueue_thread_enqueue, &adapter->tq);
1668 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
1669 device_get_nameunit(adapter->dev));
1670 /*
1671 ** Due to a broken design QEMU will fail to properly
1672 ** enable the guest for MSIX unless the vectors in
1673 ** the table are all set up, so we must rewrite the
1674 ** ENABLE in the MSIX control register again at this
1675 ** point to cause it to successfully initialize us.
1676 */
1677 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1678 int msix_ctrl;
1679 pci_find_cap(dev, PCIY_MSIX, &rid);
1680 rid += PCIR_MSIX_CTRL;
1681 msix_ctrl = pci_read_config(dev, rid, 2);
1682 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1683 pci_write_config(dev, rid, msix_ctrl, 2);
1684 }
1685
1686 return (0);
1687}
1688
1689/*
1690 * Setup MSIX resources, note that the VF
1691 * device MUST use MSIX, there is no fallback.
1692 */
1693static int
1694ixv_setup_msix(struct adapter *adapter)
1695{
1696 device_t dev = adapter->dev;
1697 int rid, vectors, want = 2;
1698
1699
1700 /* First try MSI/X */
1701 rid = PCIR_BAR(3);
1702 adapter->msix_mem = bus_alloc_resource_any(dev,
1703 SYS_RES_MEMORY, &rid, RF_ACTIVE);
1704 if (!adapter->msix_mem) {
1705 device_printf(adapter->dev,
1706		       "Unable to map MSIX table\n");
1707 goto out;
1708 }
1709
1710 vectors = pci_msix_count(dev);
1711 if (vectors < 2) {
1712 bus_release_resource(dev, SYS_RES_MEMORY,
1713 rid, adapter->msix_mem);
1714 adapter->msix_mem = NULL;
1715 goto out;
1716 }
1717
1718 /*
1719 ** Want two vectors: one for a queue,
1720 ** plus an additional for mailbox.
1721 */
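	/*
	** Note: this matches the layout used by ixv_allocate_msix()
	** above: vector 0 services the single RX/TX queue pair and
	** the last vector services the mailbox, giving IRQ rids 1
	** and 2 respectively.
	*/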
1722 if (pci_alloc_msix(dev, &want) == 0) {
1723 device_printf(adapter->dev,
1724 "Using MSIX interrupts with %d vectors\n", want);
1725 return (want);
1726 }
1727out:
1728 device_printf(adapter->dev,"MSIX config error\n");
1729 return (ENXIO);
1730}
1731
1732
1733static int
1734ixv_allocate_pci_resources(struct adapter *adapter)
1735{
1736 int rid;
1737 device_t dev = adapter->dev;
1738
1739 rid = PCIR_BAR(0);
1740 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1741 &rid, RF_ACTIVE);
1742
1743 if (!(adapter->pci_mem)) {
1744 device_printf(dev,"Unable to allocate bus resource: memory\n");
1745 return (ENXIO);
1746 }
1747
1748 adapter->osdep.mem_bus_space_tag =
1749 rman_get_bustag(adapter->pci_mem);
1750 adapter->osdep.mem_bus_space_handle =
1751 rman_get_bushandle(adapter->pci_mem);
1752 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
1753
1754 adapter->num_queues = 1;
1755 adapter->hw.back = &adapter->osdep;
1756
1757 /*
1758 ** Now setup MSI/X, should
1759 ** return us the number of
1760 ** configured vectors.
1761 */
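	/*
	** ixv_setup_msix() returns either the configured vector count
	** (2) or ENXIO on failure; the comparison below relies on
	** ENXIO never being a valid vector count for this device.
	*/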
1762 adapter->msix = ixv_setup_msix(adapter);
1763 if (adapter->msix == ENXIO)
1764 return (ENXIO);
1765 else
1766 return (0);
1767}
1768
1769static void
1770ixv_free_pci_resources(struct adapter * adapter)
1771{
1772 struct ix_queue *que = adapter->queues;
1773 device_t dev = adapter->dev;
1774 int rid, memrid;
1775
1776 memrid = PCIR_BAR(MSIX_BAR);
1777
1778 /*
1779 ** There is a slight possibility of a failure mode
1780 ** in attach that will result in entering this function
1781 ** before interrupt resources have been initialized, and
1782	** in that case we do not want to execute the loops below.
1783	** We can detect this reliably by the state of the adapter's
1784	** res pointer.
1785 */
1786 if (adapter->res == NULL)
1787 goto mem;
1788
1789 /*
1790 ** Release all msix queue resources:
1791 */
1792 for (int i = 0; i < adapter->num_queues; i++, que++) {
1793 rid = que->msix + 1;
1794 if (que->tag != NULL) {
1795 bus_teardown_intr(dev, que->res, que->tag);
1796 que->tag = NULL;
1797 }
1798 if (que->res != NULL)
1799 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
1800 }
1801
1802
1803 /* Clean the Legacy or Link interrupt last */
1804 if (adapter->mbxvec) /* we are doing MSIX */
1805 rid = adapter->mbxvec + 1;
1806 else
1807		rid = (adapter->msix != 0) ? 1 : 0;
1808
1809 if (adapter->tag != NULL) {
1810 bus_teardown_intr(dev, adapter->res, adapter->tag);
1811 adapter->tag = NULL;
1812 }
1813 if (adapter->res != NULL)
1814 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
1815
1816mem:
1817 if (adapter->msix)
1818 pci_release_msi(dev);
1819
1820 if (adapter->msix_mem != NULL)
1821 bus_release_resource(dev, SYS_RES_MEMORY,
1822 memrid, adapter->msix_mem);
1823
1824 if (adapter->pci_mem != NULL)
1825 bus_release_resource(dev, SYS_RES_MEMORY,
1826 PCIR_BAR(0), adapter->pci_mem);
1827
1828 return;
1829}
1830
1831/*********************************************************************
1832 *
1833 * Setup networking device structure and register an interface.
1834 *
1835 **********************************************************************/
1836static void
1837ixv_setup_interface(device_t dev, struct adapter *adapter)
1838{
1839 struct ifnet *ifp;
1840
1841 INIT_DEBUGOUT("ixv_setup_interface: begin");
1842
1843 ifp = adapter->ifp = if_alloc(IFT_ETHER);
1844 if (ifp == NULL)
1845		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
1846 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1847 ifp->if_baudrate = 1000000000;
1848 ifp->if_init = ixv_init;
1849 ifp->if_softc = adapter;
1850 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1851 ifp->if_ioctl = ixv_ioctl;
1852#if __FreeBSD_version >= 800000
1853 ifp->if_transmit = ixv_mq_start;
1854 ifp->if_qflush = ixv_qflush;
1855#else
1856 ifp->if_start = ixv_start;
1857#endif
1858 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
1859
1860 ether_ifattach(ifp, adapter->hw.mac.addr);
1861
1862 adapter->max_frame_size =
1863 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1864
1865 /*
1866 * Tell the upper layer(s) we support long frames.
1867 */
1868 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1869
1870 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
1871 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
1872 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
1873 | IFCAP_VLAN_HWTSO
1874 | IFCAP_VLAN_MTU;
1875 ifp->if_capenable = ifp->if_capabilities;
1876
1877	/* Advertise LRO, but leave it disabled by default (if_capenable was set above) */
1878 ifp->if_capabilities |= IFCAP_LRO;
1879
1880 /*
1881 * Specify the media types supported by this adapter and register
1882 * callbacks to update media and link information
1883 */
1884 ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
1885 ixv_media_status);
1886 ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
1887 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1888 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1889
1890 return;
1891}
1892
1893static void
1894ixv_config_link(struct adapter *adapter)
1895{
1896 struct ixgbe_hw *hw = &adapter->hw;
1897 u32 autoneg, err = 0;
1898 bool negotiate = TRUE;
1899
1900 if (hw->mac.ops.check_link)
1901 err = hw->mac.ops.check_link(hw, &autoneg,
1902 &adapter->link_up, FALSE);
1903 if (err)
1904 goto out;
1905
1906 if (hw->mac.ops.setup_link)
1907 err = hw->mac.ops.setup_link(hw, autoneg,
1908 negotiate, adapter->link_up);
1909out:
1910 return;
1911}
1912
1913/********************************************************************
1914 * Manage DMA'able memory.
1915 *******************************************************************/
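/*
 * These helpers are used below by ixv_allocate_queues() to carve out the
 * TX/RX descriptor rings: ixv_dma_malloc() creates a single-segment tag,
 * allocates and loads the memory, and ixv_dmamap_cb() captures the bus
 * address into dma_paddr for programming the VFTDBA/VFRDBA registers.
 */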
1916static void
1917ixv_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
1918{
1919 if (error)
1920 return;
1921 *(bus_addr_t *) arg = segs->ds_addr;
1922 return;
1923}
1924
1925static int
1926ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
1927 struct ixv_dma_alloc *dma, int mapflags)
1928{
1929 device_t dev = adapter->dev;
1930 int r;
1931
1932 r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
1933 DBA_ALIGN, 0, /* alignment, bounds */
1934 BUS_SPACE_MAXADDR, /* lowaddr */
1935 BUS_SPACE_MAXADDR, /* highaddr */
1936 NULL, NULL, /* filter, filterarg */
1937 size, /* maxsize */
1938 1, /* nsegments */
1939 size, /* maxsegsize */
1940 BUS_DMA_ALLOCNOW, /* flags */
1941 NULL, /* lockfunc */
1942 NULL, /* lockfuncarg */
1943 &dma->dma_tag);
1944 if (r != 0) {
1945 device_printf(dev,"ixv_dma_malloc: bus_dma_tag_create failed; "
1946 "error %u\n", r);
1947 goto fail_0;
1948 }
1949 r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
1950 BUS_DMA_NOWAIT, &dma->dma_map);
1951 if (r != 0) {
1952 device_printf(dev,"ixv_dma_malloc: bus_dmamem_alloc failed; "
1953 "error %u\n", r);
1954 goto fail_1;
1955 }
1956 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1957 size,
1958 ixv_dmamap_cb,
1959 &dma->dma_paddr,
1960 mapflags | BUS_DMA_NOWAIT);
1961 if (r != 0) {
1962 device_printf(dev,"ixv_dma_malloc: bus_dmamap_load failed; "
1963 "error %u\n", r);
1964 goto fail_2;
1965 }
1966 dma->dma_size = size;
1967 return (0);
1968fail_2:
1969 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1970fail_1:
1971 bus_dma_tag_destroy(dma->dma_tag);
1972fail_0:
1973 dma->dma_map = NULL;
1974 dma->dma_tag = NULL;
1975 return (r);
1976}
1977
1978static void
1979ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
1980{
1981 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
1982 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1983 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1984 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1985 bus_dma_tag_destroy(dma->dma_tag);
1986}
1987
1988
1989/*********************************************************************
1990 *
1991 * Allocate memory for the transmit and receive rings, and then
1992 * the descriptors associated with each; called only once at attach.
1993 *
1994 **********************************************************************/
1995static int
1996ixv_allocate_queues(struct adapter *adapter)
1997{
1998 device_t dev = adapter->dev;
1999 struct ix_queue *que;
2000 struct tx_ring *txr;
2001 struct rx_ring *rxr;
2002 int rsize, tsize, error = 0;
2003 int txconf = 0, rxconf = 0;
2004
2005 /* First allocate the top level queue structs */
2006 if (!(adapter->queues =
2007 (struct ix_queue *) malloc(sizeof(struct ix_queue) *
2008 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2009 device_printf(dev, "Unable to allocate queue memory\n");
2010 error = ENOMEM;
2011 goto fail;
2012 }
2013
2014 /* First allocate the TX ring struct memory */
2015 if (!(adapter->tx_rings =
2016 (struct tx_ring *) malloc(sizeof(struct tx_ring) *
2017 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2018 device_printf(dev, "Unable to allocate TX ring memory\n");
2019 error = ENOMEM;
2020 goto tx_fail;
2021 }
2022
2023 /* Next allocate the RX */
2024 if (!(adapter->rx_rings =
2025 (struct rx_ring *) malloc(sizeof(struct rx_ring) *
2026 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2027 device_printf(dev, "Unable to allocate RX ring memory\n");
2028 error = ENOMEM;
2029 goto rx_fail;
2030 }
2031
2032 /* For the ring itself */
2033 tsize = roundup2(adapter->num_tx_desc *
2034 sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
2035
2036 /*
2037 * Now set up the TX queues, txconf is needed to handle the
2038 * possibility that things fail midcourse and we need to
2039 * undo memory gracefully
2040 */
2041 for (int i = 0; i < adapter->num_queues; i++, txconf++) {
2042 /* Set up some basics */
2043 txr = &adapter->tx_rings[i];
2044 txr->adapter = adapter;
2045 txr->me = i;
2046
2047 /* Initialize the TX side lock */
2048 snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
2049 device_get_nameunit(dev), txr->me);
2050 mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
2051
2052 if (ixv_dma_malloc(adapter, tsize,
2053 &txr->txdma, BUS_DMA_NOWAIT)) {
2054 device_printf(dev,
2055 "Unable to allocate TX Descriptor memory\n");
2056 error = ENOMEM;
2057 goto err_tx_desc;
2058 }
2059 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2060 bzero((void *)txr->tx_base, tsize);
2061
2062 /* Now allocate transmit buffers for the ring */
2063 if (ixv_allocate_transmit_buffers(txr)) {
2064 device_printf(dev,
2065 "Critical Failure setting up transmit buffers\n");
2066 error = ENOMEM;
2067 goto err_tx_desc;
2068 }
2069#if __FreeBSD_version >= 800000
2070 /* Allocate a buf ring */
2071 txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
2072 M_WAITOK, &txr->tx_mtx);
2073 if (txr->br == NULL) {
2074 device_printf(dev,
2075 "Critical Failure setting up buf ring\n");
2076 error = ENOMEM;
2077 goto err_tx_desc;
2078 }
2079#endif
2080 }
2081
2082 /*
2083 * Next the RX queues...
2084 */
2085 rsize = roundup2(adapter->num_rx_desc *
2086 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
2087 for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
2088 rxr = &adapter->rx_rings[i];
2089 /* Set up some basics */
2090 rxr->adapter = adapter;
2091 rxr->me = i;
2092
2093 /* Initialize the RX side lock */
2094 snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
2095 device_get_nameunit(dev), rxr->me);
2096 mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
2097
2098 if (ixv_dma_malloc(adapter, rsize,
2099 &rxr->rxdma, BUS_DMA_NOWAIT)) {
2100 device_printf(dev,
2101			    "Unable to allocate RX Descriptor memory\n");
2102 error = ENOMEM;
2103 goto err_rx_desc;
2104 }
2105 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2106 bzero((void *)rxr->rx_base, rsize);
2107
2108 /* Allocate receive buffers for the ring*/
2109 if (ixv_allocate_receive_buffers(rxr)) {
2110 device_printf(dev,
2111 "Critical Failure setting up receive buffers\n");
2112 error = ENOMEM;
2113 goto err_rx_desc;
2114 }
2115 }
2116
2117 /*
2118 ** Finally set up the queue holding structs
2119 */
2120 for (int i = 0; i < adapter->num_queues; i++) {
2121 que = &adapter->queues[i];
2122 que->adapter = adapter;
2123 que->txr = &adapter->tx_rings[i];
2124 que->rxr = &adapter->rx_rings[i];
2125 }
2126
2127 return (0);
2128
2129err_rx_desc:
2130 for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2131 ixv_dma_free(adapter, &rxr->rxdma);
2132err_tx_desc:
2133 for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2134 ixv_dma_free(adapter, &txr->txdma);
2135 free(adapter->rx_rings, M_DEVBUF);
2136rx_fail:
2137 free(adapter->tx_rings, M_DEVBUF);
2138tx_fail:
2139 free(adapter->queues, M_DEVBUF);
2140fail:
2141 return (error);
2142}
2143
2144
2145/*********************************************************************
2146 *
2147 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2148 * the information needed to transmit a packet on the wire. This is
2149 * called only once at attach; setup is done on every reset.
2150 *
2151 **********************************************************************/
2152static int
2153ixv_allocate_transmit_buffers(struct tx_ring *txr)
2154{
2155 struct adapter *adapter = txr->adapter;
2156 device_t dev = adapter->dev;
2157 struct ixv_tx_buf *txbuf;
2158 int error, i;
2159
2160 /*
2161 * Setup DMA descriptor areas.
2162 */
2163 if ((error = bus_dma_tag_create(
2164 bus_get_dma_tag(adapter->dev), /* parent */
2165 1, 0, /* alignment, bounds */
2166 BUS_SPACE_MAXADDR, /* lowaddr */
2167 BUS_SPACE_MAXADDR, /* highaddr */
2168 NULL, NULL, /* filter, filterarg */
2169 IXV_TSO_SIZE, /* maxsize */
2170 32, /* nsegments */
2171 PAGE_SIZE, /* maxsegsize */
2172 0, /* flags */
2173 NULL, /* lockfunc */
2174 NULL, /* lockfuncarg */
2175 &txr->txtag))) {
2176 device_printf(dev,"Unable to allocate TX DMA tag\n");
2177 goto fail;
2178 }
2179
2180 if (!(txr->tx_buffers =
2181 (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
2182 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2183 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2184 error = ENOMEM;
2185 goto fail;
2186 }
2187
2188 /* Create the descriptor buffer dma maps */
2189 txbuf = txr->tx_buffers;
2190 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2191 error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
2192 if (error != 0) {
2193 device_printf(dev, "Unable to create TX DMA map\n");
2194 goto fail;
2195 }
2196 }
2197
2198 return 0;
2199fail:
2200 /* We free all, it handles case where we are in the middle */
2201 ixv_free_transmit_structures(adapter);
2202 return (error);
2203}
2204
2205/*********************************************************************
2206 *
2207 * Initialize a transmit ring.
2208 *
2209 **********************************************************************/
2210static void
2211ixv_setup_transmit_ring(struct tx_ring *txr)
2212{
2213 struct adapter *adapter = txr->adapter;
2214 struct ixv_tx_buf *txbuf;
2215 int i;
2216
2217 /* Clear the old ring contents */
2218 IXV_TX_LOCK(txr);
2219 bzero((void *)txr->tx_base,
2220 (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
2221 /* Reset indices */
2222 txr->next_avail_desc = 0;
2223 txr->next_to_clean = 0;
2224
2225 /* Free any existing tx buffers. */
2226 txbuf = txr->tx_buffers;
2227 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2228 if (txbuf->m_head != NULL) {
2229 bus_dmamap_sync(txr->txtag, txbuf->map,
2230 BUS_DMASYNC_POSTWRITE);
2231 bus_dmamap_unload(txr->txtag, txbuf->map);
2232 m_freem(txbuf->m_head);
2233 txbuf->m_head = NULL;
2234 }
2235 /* Clear the EOP index */
2236 txbuf->eop_index = -1;
2237 }
2238
2239 /* Set number of descriptors available */
2240 txr->tx_avail = adapter->num_tx_desc;
2241
2242 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2243 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2244 IXV_TX_UNLOCK(txr);
2245}
2246
2247/*********************************************************************
2248 *
2249 * Initialize all transmit rings.
2250 *
2251 **********************************************************************/
2252static int
2253ixv_setup_transmit_structures(struct adapter *adapter)
2254{
2255 struct tx_ring *txr = adapter->tx_rings;
2256
2257 for (int i = 0; i < adapter->num_queues; i++, txr++)
2258 ixv_setup_transmit_ring(txr);
2259
2260 return (0);
2261}
2262
2263/*********************************************************************
2264 *
2265 * Enable transmit unit.
2266 *
2267 **********************************************************************/
2268static void
2269ixv_initialize_transmit_units(struct adapter *adapter)
2270{
2271 struct tx_ring *txr = adapter->tx_rings;
2272 struct ixgbe_hw *hw = &adapter->hw;
2273
2274
2275 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2276 u64 tdba = txr->txdma.dma_paddr;
2277 u32 txctrl, txdctl;
2278
2279 /* Set WTHRESH to 8, burst writeback */
2280 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
2281 txdctl |= (8 << 16);
2282 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
2283 /* Now enable */
2284 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
2285 txdctl |= IXGBE_TXDCTL_ENABLE;
2286 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
2287
2288 /* Set the HW Tx Head and Tail indices */
2289 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0);
2290 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0);
2291
2292 /* Setup Transmit Descriptor Cmd Settings */
2293 txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2294 txr->watchdog_check = FALSE;
2295
2296 /* Set Ring parameters */
2297 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
2298 (tdba & 0x00000000ffffffffULL));
2299 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
2300 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
2301 adapter->num_tx_desc *
2302		    sizeof(union ixgbe_adv_tx_desc));
2303 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
2304 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2305 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
2306		break;	/* NB: the VF uses a single TX queue (num_queues == 1) */
2307 }
2308
2309 return;
2310}
2311
2312/*********************************************************************
2313 *
2314 * Free all transmit rings.
2315 *
2316 **********************************************************************/
2317static void
2318ixv_free_transmit_structures(struct adapter *adapter)
2319{
2320 struct tx_ring *txr = adapter->tx_rings;
2321
2322 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2323 IXV_TX_LOCK(txr);
2324 ixv_free_transmit_buffers(txr);
2325 ixv_dma_free(adapter, &txr->txdma);
2326 IXV_TX_UNLOCK(txr);
2327 IXV_TX_LOCK_DESTROY(txr);
2328 }
2329 free(adapter->tx_rings, M_DEVBUF);
2330}
2331
2332/*********************************************************************
2333 *
2334 * Free transmit ring related data structures.
2335 *
2336 **********************************************************************/
2337static void
2338ixv_free_transmit_buffers(struct tx_ring *txr)
2339{
2340 struct adapter *adapter = txr->adapter;
2341 struct ixv_tx_buf *tx_buffer;
2342 int i;
2343
2344 INIT_DEBUGOUT("free_transmit_ring: begin");
2345
2346 if (txr->tx_buffers == NULL)
2347 return;
2348
2349 tx_buffer = txr->tx_buffers;
2350 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2351 if (tx_buffer->m_head != NULL) {
2352 bus_dmamap_sync(txr->txtag, tx_buffer->map,
2353 BUS_DMASYNC_POSTWRITE);
2354 bus_dmamap_unload(txr->txtag,
2355 tx_buffer->map);
2356 m_freem(tx_buffer->m_head);
2357 tx_buffer->m_head = NULL;
2358 if (tx_buffer->map != NULL) {
2359 bus_dmamap_destroy(txr->txtag,
2360 tx_buffer->map);
2361 tx_buffer->map = NULL;
2362 }
2363 } else if (tx_buffer->map != NULL) {
2364 bus_dmamap_unload(txr->txtag,
2365 tx_buffer->map);
2366 bus_dmamap_destroy(txr->txtag,
2367 tx_buffer->map);
2368 tx_buffer->map = NULL;
2369 }
2370 }
2371#if __FreeBSD_version >= 800000
2372 if (txr->br != NULL)
2373 buf_ring_free(txr->br, M_DEVBUF);
2374#endif
2375 if (txr->tx_buffers != NULL) {
2376 free(txr->tx_buffers, M_DEVBUF);
2377 txr->tx_buffers = NULL;
2378 }
2379 if (txr->txtag != NULL) {
2380 bus_dma_tag_destroy(txr->txtag);
2381 txr->txtag = NULL;
2382 }
2383 return;
2384}
2385
2386/*********************************************************************
2387 *
2388 * Advanced Context Descriptor setup for VLAN or CSUM
2389 *
2390 **********************************************************************/
2391
2392static bool
2393ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
2394{
2395 struct adapter *adapter = txr->adapter;
2396 struct ixgbe_adv_tx_context_desc *TXD;
2397 struct ixv_tx_buf *tx_buffer;
2398 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2399 struct ether_vlan_header *eh;
2400 struct ip *ip;
2401 struct ip6_hdr *ip6;
2402 int ehdrlen, ip_hlen = 0;
2403 u16 etype;
2404 u8 ipproto = 0;
2405 bool offload = TRUE;
2406 int ctxd = txr->next_avail_desc;
2407 u16 vtag = 0;
2408
2409
2410 if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
2411 offload = FALSE;
2412
2413
2414 tx_buffer = &txr->tx_buffers[ctxd];
2415 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2416
2417 /*
2418 ** In advanced descriptors the vlan tag must
2419 ** be placed into the descriptor itself.
2420 */
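	/*
	** (Per the advanced context descriptor layout, vlan_macip_lens
	** packs the IP header length in the low bits, the MAC header
	** length at IXGBE_ADVTXD_MACLEN_SHIFT above it, and the VLAN
	** tag at IXGBE_ADVTXD_VLAN_SHIFT in the upper half.)
	*/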
2421 if (mp->m_flags & M_VLANTAG) {
2422 vtag = htole16(mp->m_pkthdr.ether_vtag);
2423 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2424 } else if (offload == FALSE)
2425 return FALSE;
2426
2427 /*
2428 * Determine where frame payload starts.
2429 * Jump over vlan headers if already present,
2430 * helpful for QinQ too.
2431 */
2432 eh = mtod(mp, struct ether_vlan_header *);
2433 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2434 etype = ntohs(eh->evl_proto);
2435 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2436 } else {
2437 etype = ntohs(eh->evl_encap_proto);
2438 ehdrlen = ETHER_HDR_LEN;
2439 }
2440
2441 /* Set the ether header length */
2442 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2443
2444 switch (etype) {
2445 case ETHERTYPE_IP:
2446 ip = (struct ip *)(mp->m_data + ehdrlen);
2447 ip_hlen = ip->ip_hl << 2;
2448 if (mp->m_len < ehdrlen + ip_hlen)
2449 return (FALSE);
2450 ipproto = ip->ip_p;
2451 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2452 break;
2453 case ETHERTYPE_IPV6:
2454 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2455 ip_hlen = sizeof(struct ip6_hdr);
2456 if (mp->m_len < ehdrlen + ip_hlen)
2457 return (FALSE);
2458 ipproto = ip6->ip6_nxt;
2459 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
2460 break;
2461 default:
2462 offload = FALSE;
2463 break;
2464 }
2465
2466 vlan_macip_lens |= ip_hlen;
2467 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2468
2469 switch (ipproto) {
2470 case IPPROTO_TCP:
2471 if (mp->m_pkthdr.csum_flags & CSUM_TCP)
2472 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2473 break;
2474
2475 case IPPROTO_UDP:
2476 if (mp->m_pkthdr.csum_flags & CSUM_UDP)
2477 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
2478 break;
2479
2480#if __FreeBSD_version >= 800000
2481 case IPPROTO_SCTP:
2482 if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
2483 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2484 break;
2485#endif
2486 default:
2487 offload = FALSE;
2488 break;
2489 }
2490
2491 /* Now copy bits into descriptor */
2492 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2493 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2494 TXD->seqnum_seed = htole32(0);
2495 TXD->mss_l4len_idx = htole32(0);
2496
2497 tx_buffer->m_head = NULL;
2498 tx_buffer->eop_index = -1;
2499
2500 /* We've consumed the first desc, adjust counters */
2501 if (++ctxd == adapter->num_tx_desc)
2502 ctxd = 0;
2503 txr->next_avail_desc = ctxd;
2504 --txr->tx_avail;
2505
2506 return (offload);
2507}
2508
2509/**********************************************************************
2510 *
2511 * Setup work for hardware segmentation offload (TSO) on
2512 * adapters using advanced tx descriptors
2513 *
2514 **********************************************************************/
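/*
 * Note that this path only handles IPv4/TCP: the context descriptor is
 * built with IXGBE_ADVTXD_TUCMD_IPV4 and the routine bails out (returns
 * FALSE) for any non-TCP protocol, leaving the frame to the non-TSO path.
 */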
2515static bool
2516ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
2517{
2518 struct adapter *adapter = txr->adapter;
2519 struct ixgbe_adv_tx_context_desc *TXD;
2520 struct ixv_tx_buf *tx_buffer;
2521 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2522 u32 mss_l4len_idx = 0;
2523 u16 vtag = 0;
2524 int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen;
2525 struct ether_vlan_header *eh;
2526 struct ip *ip;
2527 struct tcphdr *th;
2528
2529
2530 /*
2531 * Determine where frame payload starts.
2532 * Jump over vlan headers if already present
2533 */
2534 eh = mtod(mp, struct ether_vlan_header *);
2535 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
2536 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2537 else
2538 ehdrlen = ETHER_HDR_LEN;
2539
2540 /* Ensure we have at least the IP+TCP header in the first mbuf. */
2541 if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
2542 return FALSE;
2543
2544 ctxd = txr->next_avail_desc;
2545 tx_buffer = &txr->tx_buffers[ctxd];
2546 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2547
2548 ip = (struct ip *)(mp->m_data + ehdrlen);
2549 if (ip->ip_p != IPPROTO_TCP)
2550		return FALSE;	/* only IPv4/TCP frames get a TSO context */
2551 ip->ip_sum = 0;
2552 ip_hlen = ip->ip_hl << 2;
2553 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
2554 th->th_sum = in_pseudo(ip->ip_src.s_addr,
2555 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2556 tcp_hlen = th->th_off << 2;
2557 hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2558
2559 /* This is used in the transmit desc in encap */
2560 *paylen = mp->m_pkthdr.len - hdrlen;
2561
2562 /* VLAN MACLEN IPLEN */
2563 if (mp->m_flags & M_VLANTAG) {
2564 vtag = htole16(mp->m_pkthdr.ether_vtag);
2565 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2566 }
2567
2568 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2569 vlan_macip_lens |= ip_hlen;
2570 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2571
2572 /* ADV DTYPE TUCMD */
2573 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2574 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2575 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2576 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2577
2578
2579 /* MSS L4LEN IDX */
2580 mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
2581 mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
2582 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
2583
2584 TXD->seqnum_seed = htole32(0);
2585 tx_buffer->m_head = NULL;
2586 tx_buffer->eop_index = -1;
2587
2588 if (++ctxd == adapter->num_tx_desc)
2589 ctxd = 0;
2590
2591 txr->tx_avail--;
2592 txr->next_avail_desc = ctxd;
2593 return TRUE;
2594}
2595
2596
2597/**********************************************************************
2598 *
2599 * Examine each tx_buffer in the used queue. If the hardware is done
2600 * processing the packet then free associated resources. The
2601 * tx_buffer is put back on the free queue.
2602 *
2603 **********************************************************************/
2604static bool
2605ixv_txeof(struct tx_ring *txr)
2606{
2607 struct adapter *adapter = txr->adapter;
2608 struct ifnet *ifp = adapter->ifp;
2609 u32 first, last, done;
2610 struct ixv_tx_buf *tx_buffer;
2611 struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
2612
2613 mtx_assert(&txr->tx_mtx, MA_OWNED);
2614
2615 if (txr->tx_avail == adapter->num_tx_desc)
2616 return FALSE;
2617
2618 first = txr->next_to_clean;
2619 tx_buffer = &txr->tx_buffers[first];
2620 /* For cleanup we just use legacy struct */
2621 tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2622 last = tx_buffer->eop_index;
2623 if (last == -1)
2624 return FALSE;
2625 eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2626
2627 /*
2628 ** Get the index of the first descriptor
2629 ** BEYOND the EOP and call that 'done'.
2630 ** I do this so the comparison in the
2631 ** inner while loop below can be simple
2632 */
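	/*
	** For example, with the EOP at the last index of the ring,
	** 'done' wraps to 0 and the inner loop below stops once it
	** has cleaned that final descriptor.
	*/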
2633 if (++last == adapter->num_tx_desc) last = 0;
2634 done = last;
2635
2636 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2637 BUS_DMASYNC_POSTREAD);
2638 /*
2639 ** Only the EOP descriptor of a packet now has the DD
2640 ** bit set, this is what we look for...
2641 */
2642 while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
2643 /* We clean the range of the packet */
2644 while (first != done) {
2645 tx_desc->upper.data = 0;
2646 tx_desc->lower.data = 0;
2647 tx_desc->buffer_addr = 0;
2648 ++txr->tx_avail;
2649
2650 if (tx_buffer->m_head) {
2651 bus_dmamap_sync(txr->txtag,
2652 tx_buffer->map,
2653 BUS_DMASYNC_POSTWRITE);
2654 bus_dmamap_unload(txr->txtag,
2655 tx_buffer->map);
2656 m_freem(tx_buffer->m_head);
2657 tx_buffer->m_head = NULL;
2658 tx_buffer->map = NULL;
2659 }
2660 tx_buffer->eop_index = -1;
2661 txr->watchdog_time = ticks;
2662
2663 if (++first == adapter->num_tx_desc)
2664 first = 0;
2665
2666 tx_buffer = &txr->tx_buffers[first];
2667 tx_desc =
2668 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2669 }
2670 ++ifp->if_opackets;
2671 /* See if there is more work now */
2672 last = tx_buffer->eop_index;
2673 if (last != -1) {
2674 eop_desc =
2675 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2676 /* Get next done point */
2677 if (++last == adapter->num_tx_desc) last = 0;
2678 done = last;
2679 } else
2680 break;
2681 }
2682 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2683 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2684
2685 txr->next_to_clean = first;
2686
2687 /*
2688 * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
2689 * it is OK to send packets. If there are no pending descriptors,
2690 * clear the timeout. Otherwise, if some descriptors have been freed,
2691 * restart the timeout.
2692 */
2693 if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
2694 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2695 if (txr->tx_avail == adapter->num_tx_desc) {
2696 txr->watchdog_check = FALSE;
2697 return FALSE;
2698 }
2699 }
2700
2701 return TRUE;
2702}
2703
2704/*********************************************************************
2705 *
2706 * Refresh mbuf buffers for RX descriptor rings
2707 * - now keeps its own state so discards due to resource
2708 *   exhaustion are unnecessary. If an mbuf cannot be obtained
2709 *   it just returns, keeping its placeholder; thus it can simply
2710 * be recalled to try again.
2711 *
2712 **********************************************************************/
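/*
 * ixv_rxeof() passes the index it will examine next as 'limit'; the loop
 * below always stays at least one descriptor behind that point, so a
 * refresh can never run into descriptors that have not been processed yet.
 */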
2713static void
2714ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
2715{
2716 struct adapter *adapter = rxr->adapter;
2717 bus_dma_segment_t hseg[1];
2718 bus_dma_segment_t pseg[1];
2719 struct ixv_rx_buf *rxbuf;
2720 struct mbuf *mh, *mp;
2721 int i, j, nsegs, error;
2722 bool refreshed = FALSE;
2723
2724 i = j = rxr->next_to_refresh;
2725 /* Get the control variable, one beyond refresh point */
2726 if (++j == adapter->num_rx_desc)
2727 j = 0;
2728 while (j != limit) {
2729 rxbuf = &rxr->rx_buffers[i];
2730 if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
2731 mh = m_gethdr(M_DONTWAIT, MT_DATA);
2732 if (mh == NULL)
2733 goto update;
2734			mh->m_pkthdr.len = mh->m_len = MHLEN;
2736 mh->m_flags |= M_PKTHDR;
2737 m_adj(mh, ETHER_ALIGN);
2738 /* Get the memory mapping */
2739 error = bus_dmamap_load_mbuf_sg(rxr->htag,
2740 rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
2741 if (error != 0) {
2742 printf("GET BUF: dmamap load"
2743 " failure - %d\n", error);
2744 m_free(mh);
2745 goto update;
2746 }
2747 rxbuf->m_head = mh;
2748 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
2749 BUS_DMASYNC_PREREAD);
2750 rxr->rx_base[i].read.hdr_addr =
2751 htole64(hseg[0].ds_addr);
2752 }
2753
2754 if (rxbuf->m_pack == NULL) {
2755 mp = m_getjcl(M_DONTWAIT, MT_DATA,
2756 M_PKTHDR, adapter->rx_mbuf_sz);
2757 if (mp == NULL)
2758 goto update;
2759 } else
2760 mp = rxbuf->m_pack;
2761
2762 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
2763 /* Get the memory mapping */
2764 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
2765 rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
2766 if (error != 0) {
2767 printf("GET BUF: dmamap load"
2768 " failure - %d\n", error);
2769 m_free(mp);
2770 rxbuf->m_pack = NULL;
2771 goto update;
2772 }
2773 rxbuf->m_pack = mp;
2774 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
2775 BUS_DMASYNC_PREREAD);
2776 rxr->rx_base[i].read.pkt_addr =
2777 htole64(pseg[0].ds_addr);
2778
2779 refreshed = TRUE;
2780 rxr->next_to_refresh = i = j;
2781 /* Calculate next index */
2782 if (++j == adapter->num_rx_desc)
2783 j = 0;
2784 }
2785update:
2786 if (refreshed) /* update tail index */
2787 IXGBE_WRITE_REG(&adapter->hw,
2788 IXGBE_VFRDT(rxr->me), rxr->next_to_refresh);
2789 return;
2790}
2791
2792/*********************************************************************
2793 *
2794 * Allocate memory for rx_buffer structures. Since we use one
2795 * rx_buffer per received packet, the maximum number of rx_buffer's
2796 * that we'll need is equal to the number of receive descriptors
2797 * that we've allocated.
2798 *
2799 **********************************************************************/
2800static int
2801ixv_allocate_receive_buffers(struct rx_ring *rxr)
2802{
2803 struct adapter *adapter = rxr->adapter;
2804 device_t dev = adapter->dev;
2805 struct ixv_rx_buf *rxbuf;
2806 int i, bsize, error;
2807
2808 bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
2809 if (!(rxr->rx_buffers =
2810 (struct ixv_rx_buf *) malloc(bsize,
2811 M_DEVBUF, M_NOWAIT | M_ZERO))) {
2812 device_printf(dev, "Unable to allocate rx_buffer memory\n");
2813 error = ENOMEM;
2814 goto fail;
2815 }
2816
2817 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2818 1, 0, /* alignment, bounds */
2819 BUS_SPACE_MAXADDR, /* lowaddr */
2820 BUS_SPACE_MAXADDR, /* highaddr */
2821 NULL, NULL, /* filter, filterarg */
2822 MSIZE, /* maxsize */
2823 1, /* nsegments */
2824 MSIZE, /* maxsegsize */
2825 0, /* flags */
2826 NULL, /* lockfunc */
2827 NULL, /* lockfuncarg */
2828 &rxr->htag))) {
2829 device_printf(dev, "Unable to create RX DMA tag\n");
2830 goto fail;
2831 }
2832
2833 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
2834 1, 0, /* alignment, bounds */
2835 BUS_SPACE_MAXADDR, /* lowaddr */
2836 BUS_SPACE_MAXADDR, /* highaddr */
2837 NULL, NULL, /* filter, filterarg */
2838 MJUMPAGESIZE, /* maxsize */
2839 1, /* nsegments */
2840 MJUMPAGESIZE, /* maxsegsize */
2841 0, /* flags */
2842 NULL, /* lockfunc */
2843 NULL, /* lockfuncarg */
2844 &rxr->ptag))) {
2845 device_printf(dev, "Unable to create RX DMA tag\n");
2846 goto fail;
2847 }
2848
2849	for (i = 0; i < adapter->num_rx_desc; i++) {
2850 rxbuf = &rxr->rx_buffers[i];
2851 error = bus_dmamap_create(rxr->htag,
2852 BUS_DMA_NOWAIT, &rxbuf->hmap);
2853 if (error) {
2854 device_printf(dev, "Unable to create RX head map\n");
2855 goto fail;
2856 }
2857 error = bus_dmamap_create(rxr->ptag,
2858 BUS_DMA_NOWAIT, &rxbuf->pmap);
2859 if (error) {
2860 device_printf(dev, "Unable to create RX pkt map\n");
2861 goto fail;
2862 }
2863 }
2864
2865 return (0);
2866
2867fail:
2868 /* Frees all, but can handle partial completion */
2869 ixv_free_receive_structures(adapter);
2870 return (error);
2871}
2872
2873static void
2874ixv_free_receive_ring(struct rx_ring *rxr)
2875{
2876 struct adapter *adapter;
2877 struct ixv_rx_buf *rxbuf;
2878 int i;
2879
2880 adapter = rxr->adapter;
2881 for (i = 0; i < adapter->num_rx_desc; i++) {
2882 rxbuf = &rxr->rx_buffers[i];
2883 if (rxbuf->m_head != NULL) {
2884 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
2885 BUS_DMASYNC_POSTREAD);
2886 bus_dmamap_unload(rxr->htag, rxbuf->hmap);
2887 rxbuf->m_head->m_flags |= M_PKTHDR;
2888 m_freem(rxbuf->m_head);
2889 }
2890 if (rxbuf->m_pack != NULL) {
2891 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
2892 BUS_DMASYNC_POSTREAD);
2893 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
2894 rxbuf->m_pack->m_flags |= M_PKTHDR;
2895 m_freem(rxbuf->m_pack);
2896 }
2897 rxbuf->m_head = NULL;
2898 rxbuf->m_pack = NULL;
2899 }
2900}
2901
2902
2903/*********************************************************************
2904 *
2905 * Initialize a receive ring and its buffers.
2906 *
2907 **********************************************************************/
2908static int
2909ixv_setup_receive_ring(struct rx_ring *rxr)
2910{
2911 struct adapter *adapter;
2912 struct ifnet *ifp;
2913 device_t dev;
2914 struct ixv_rx_buf *rxbuf;
2915 bus_dma_segment_t pseg[1], hseg[1];
2916 struct lro_ctrl *lro = &rxr->lro;
2917 int rsize, nsegs, error = 0;
2918
2919 adapter = rxr->adapter;
2920 ifp = adapter->ifp;
2921 dev = adapter->dev;
2922
2923 /* Clear the ring contents */
2924 IXV_RX_LOCK(rxr);
2925 rsize = roundup2(adapter->num_rx_desc *
2926 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
2927 bzero((void *)rxr->rx_base, rsize);
2928
2929 /* Free current RX buffer structs and their mbufs */
2930 ixv_free_receive_ring(rxr);
2931
2932 /* Configure header split? */
2933 if (ixv_header_split)
2934 rxr->hdr_split = TRUE;
2935
2936 /* Now replenish the mbufs */
2937 for (int j = 0; j != adapter->num_rx_desc; ++j) {
2938 struct mbuf *mh, *mp;
2939
2940 rxbuf = &rxr->rx_buffers[j];
2941 /*
2942		** Don't allocate a header mbuf if we're not
2943		** doing header split; it's wasteful
2944 */
2945 if (rxr->hdr_split == FALSE)
2946 goto skip_head;
2947
2948 /* First the header */
2949 rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
2950 if (rxbuf->m_head == NULL) {
2951 error = ENOBUFS;
2952 goto fail;
2953 }
2954 m_adj(rxbuf->m_head, ETHER_ALIGN);
2955 mh = rxbuf->m_head;
2956 mh->m_len = mh->m_pkthdr.len = MHLEN;
2957 mh->m_flags |= M_PKTHDR;
2958 /* Get the memory mapping */
2959 error = bus_dmamap_load_mbuf_sg(rxr->htag,
2960 rxbuf->hmap, rxbuf->m_head, hseg,
2961 &nsegs, BUS_DMA_NOWAIT);
2962 if (error != 0) /* Nothing elegant to do here */
2963 goto fail;
2964 bus_dmamap_sync(rxr->htag,
2965 rxbuf->hmap, BUS_DMASYNC_PREREAD);
2966 /* Update descriptor */
2967 rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
2968
2969skip_head:
2970 /* Now the payload cluster */
2971 rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
2972 M_PKTHDR, adapter->rx_mbuf_sz);
2973 if (rxbuf->m_pack == NULL) {
2974 error = ENOBUFS;
2975 goto fail;
2976 }
2977 mp = rxbuf->m_pack;
2978 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
2979 /* Get the memory mapping */
2980 error = bus_dmamap_load_mbuf_sg(rxr->ptag,
2981 rxbuf->pmap, mp, pseg,
2982 &nsegs, BUS_DMA_NOWAIT);
2983 if (error != 0)
2984 goto fail;
2985 bus_dmamap_sync(rxr->ptag,
2986 rxbuf->pmap, BUS_DMASYNC_PREREAD);
2987 /* Update descriptor */
2988 rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
2989 }
2990
2991
2992 /* Setup our descriptor indices */
2993 rxr->next_to_check = 0;
2994 rxr->next_to_refresh = 0;
2995 rxr->lro_enabled = FALSE;
2996 rxr->rx_split_packets = 0;
2997 rxr->rx_bytes = 0;
2998 rxr->discard = FALSE;
2999
3000 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3001 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3002
3003 /*
3004 ** Now set up the LRO interface:
3005 */
3006 if (ifp->if_capenable & IFCAP_LRO) {
3007 int err = tcp_lro_init(lro);
3008 if (err) {
3009 device_printf(dev, "LRO Initialization failed!\n");
3010 goto fail;
3011 }
3012 INIT_DEBUGOUT("RX Soft LRO Initialized\n");
3013 rxr->lro_enabled = TRUE;
3014 lro->ifp = adapter->ifp;
3015 }
3016
3017 IXV_RX_UNLOCK(rxr);
3018 return (0);
3019
3020fail:
3021 ixv_free_receive_ring(rxr);
3022 IXV_RX_UNLOCK(rxr);
3023 return (error);
3024}
3025
3026/*********************************************************************
3027 *
3028 * Initialize all receive rings.
3029 *
3030 **********************************************************************/
3031static int
3032ixv_setup_receive_structures(struct adapter *adapter)
3033{
3034 struct rx_ring *rxr = adapter->rx_rings;
3035 int j;
3036
3037 for (j = 0; j < adapter->num_queues; j++, rxr++)
3038 if (ixv_setup_receive_ring(rxr))
3039 goto fail;
3040
3041 return (0);
3042fail:
3043 /*
3044	 * Free RX buffers allocated so far; we will only handle
3045	 * the rings that completed, as the failing case will have
3046	 * cleaned up for itself. 'j' failed, so it's the terminus.
3047 */
3048 for (int i = 0; i < j; ++i) {
3049 rxr = &adapter->rx_rings[i];
3050 ixv_free_receive_ring(rxr);
3051 }
3052
3053 return (ENOBUFS);
3054}
3055
3056/*********************************************************************
3057 *
3058 * Setup receive registers and features.
3059 *
3060 **********************************************************************/
3061#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3062
3063static void
3064ixv_initialize_receive_units(struct adapter *adapter)
3065{
3066 struct rx_ring *rxr = adapter->rx_rings;
3067 struct ixgbe_hw *hw = &adapter->hw;
3068 struct ifnet *ifp = adapter->ifp;
3069 u32 bufsz, fctrl, rxcsum, hlreg;
3070
3071
3072 /* Enable broadcasts */
3073 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3074 fctrl |= IXGBE_FCTRL_BAM;
3075 fctrl |= IXGBE_FCTRL_DPF;
3076 fctrl |= IXGBE_FCTRL_PMCF;
3077 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3078
3079 /* Set for Jumbo Frames? */
3080 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3081 if (ifp->if_mtu > ETHERMTU) {
3082 hlreg |= IXGBE_HLREG0_JUMBOEN;
3083 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3084 } else {
3085 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
3086 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
3087 }
3088 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
3089
3090 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3091 u64 rdba = rxr->rxdma.dma_paddr;
3092 u32 reg, rxdctl;
3093
3094 /* Do the queue enabling first */
3095 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
3096 rxdctl |= IXGBE_RXDCTL_ENABLE;
3097 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
3098 for (int k = 0; k < 10; k++) {
3099 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
3100 IXGBE_RXDCTL_ENABLE)
3101 break;
3102 else
3103 msec_delay(1);
3104 }
3105 wmb();
3106
3107 /* Setup the Base and Length of the Rx Descriptor Ring */
3108 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
3109 (rdba & 0x00000000ffffffffULL));
3110 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
3111 (rdba >> 32));
3112 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
3113 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
3114
3115 /* Set up the SRRCTL register */
3116 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
3117 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
3118 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
3119 reg |= bufsz;
3120 if (rxr->hdr_split) {
3121 /* Use a standard mbuf for the header */
3122 reg |= ((IXV_RX_HDR <<
3123 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
3124 & IXGBE_SRRCTL_BSIZEHDR_MASK);
3125 reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3126 } else
3127 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
3128 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
3129
3130 /* Setup the HW Rx Head and Tail Descriptor Pointers */
3131 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
3132 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
3133 adapter->num_rx_desc - 1);
3134 }
3135
3136 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3137
3138 if (ifp->if_capenable & IFCAP_RXCSUM)
3139 rxcsum |= IXGBE_RXCSUM_PCSD;
3140
3141 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
3142 rxcsum |= IXGBE_RXCSUM_IPPCSE;
3143
3144 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3145
3146 return;
3147}
3148
3149/*********************************************************************
3150 *
3151 * Free all receive rings.
3152 *
3153 **********************************************************************/
3154static void
3155ixv_free_receive_structures(struct adapter *adapter)
3156{
3157 struct rx_ring *rxr = adapter->rx_rings;
3158
3159 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
3160 struct lro_ctrl *lro = &rxr->lro;
3161 ixv_free_receive_buffers(rxr);
3162 /* Free LRO memory */
3163 tcp_lro_free(lro);
3164 /* Free the ring memory as well */
3165 ixv_dma_free(adapter, &rxr->rxdma);
3166 }
3167
3168 free(adapter->rx_rings, M_DEVBUF);
3169}
3170
3171
3172/*********************************************************************
3173 *
3174 * Free receive ring data structures
3175 *
3176 **********************************************************************/
3177static void
3178ixv_free_receive_buffers(struct rx_ring *rxr)
3179{
3180 struct adapter *adapter = rxr->adapter;
3181 struct ixv_rx_buf *rxbuf;
3182
3183 INIT_DEBUGOUT("free_receive_structures: begin");
3184
3185 /* Cleanup any existing buffers */
3186 if (rxr->rx_buffers != NULL) {
3187 for (int i = 0; i < adapter->num_rx_desc; i++) {
3188 rxbuf = &rxr->rx_buffers[i];
3189 if (rxbuf->m_head != NULL) {
3190 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
3191 BUS_DMASYNC_POSTREAD);
3192 bus_dmamap_unload(rxr->htag, rxbuf->hmap);
3193 rxbuf->m_head->m_flags |= M_PKTHDR;
3194 m_freem(rxbuf->m_head);
3195 }
3196 if (rxbuf->m_pack != NULL) {
3197 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
3198 BUS_DMASYNC_POSTREAD);
3199 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
3200 rxbuf->m_pack->m_flags |= M_PKTHDR;
3201 m_freem(rxbuf->m_pack);
3202 }
3203 rxbuf->m_head = NULL;
3204 rxbuf->m_pack = NULL;
3205 if (rxbuf->hmap != NULL) {
3206 bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
3207 rxbuf->hmap = NULL;
3208 }
3209 if (rxbuf->pmap != NULL) {
3210 bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
3211 rxbuf->pmap = NULL;
3212 }
3213 }
3214 if (rxr->rx_buffers != NULL) {
3215 free(rxr->rx_buffers, M_DEVBUF);
3216 rxr->rx_buffers = NULL;
3217 }
3218 }
3219
3220 if (rxr->htag != NULL) {
3221 bus_dma_tag_destroy(rxr->htag);
3222 rxr->htag = NULL;
3223 }
3224 if (rxr->ptag != NULL) {
3225 bus_dma_tag_destroy(rxr->ptag);
3226 rxr->ptag = NULL;
3227 }
3228
3229 return;
3230}
3231
3232static __inline void
3233ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
3234{
3235
3236 /*
3237	 * At the moment LRO is only done for IPv4/TCP packets whose TCP
3238	 * checksum has been verified by hardware, and the frame must not
3239	 * carry a VLAN tag in its ethernet header (HW tag stripping enabled).
3240 */
3241 if (rxr->lro_enabled &&
3242 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
3243 (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
3244 (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
3245 (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
3246 (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
3247 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
3248 /*
3249 * Send to the stack if:
3250		 *  - LRO not enabled, or
3251		 *  - no LRO resources, or
3252		 *  - lro enqueue fails
3253 */
3254 if (rxr->lro.lro_cnt != 0)
3255 if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
3256 return;
3257 }
3258 IXV_RX_UNLOCK(rxr);
3259 (*ifp->if_input)(ifp, m);
3260 IXV_RX_LOCK(rxr);
3261}
3262
3263static __inline void
3264ixv_rx_discard(struct rx_ring *rxr, int i)
3265{
3266 struct ixv_rx_buf *rbuf;
3267
3268 rbuf = &rxr->rx_buffers[i];
3269
3270 if (rbuf->fmp != NULL) {/* Partial chain ? */
3271 rbuf->fmp->m_flags |= M_PKTHDR;
3272 m_freem(rbuf->fmp);
3273 rbuf->fmp = NULL;
3274 }
3275
3276 /*
3277 ** With advanced descriptors the writeback
3278	** clobbers the buffer addrs, so it's easier
3279 ** to just free the existing mbufs and take
3280 ** the normal refresh path to get new buffers
3281 ** and mapping.
3282 */
3283 if (rbuf->m_head) {
3284 m_free(rbuf->m_head);
3285 rbuf->m_head = NULL;
3286 }
3287
3288 if (rbuf->m_pack) {
3289 m_free(rbuf->m_pack);
3290 rbuf->m_pack = NULL;
3291 }
3292
3293 return;
3294}
3295
3296
3297/*********************************************************************
3298 *
3299 * This routine executes in interrupt context. It replenishes
3300 * the mbufs in the descriptor and sends data which has been
3301 * dma'ed into host memory to upper layer.
3302 *
3303 * We loop at most count times if count is > 0, or until done if
3304 * count < 0.
3305 *
3306 * Return TRUE for more work, FALSE for all clean.
3307 *********************************************************************/
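/*
 * When completed descriptors are still pending at loop exit, this routine
 * re-arms the queue interrupt itself (see ixv_rearm_queues() at the end)
 * and returns TRUE so the caller can schedule further cleanup.
 */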
3308static bool
3309ixv_rxeof(struct ix_queue *que, int count)
3310{
3311 struct adapter *adapter = que->adapter;
3312 struct rx_ring *rxr = que->rxr;
3313 struct ifnet *ifp = adapter->ifp;
3314 struct lro_ctrl *lro = &rxr->lro;
3315 struct lro_entry *queued;
3316 int i, nextp, processed = 0;
3317 u32 staterr = 0;
3318 union ixgbe_adv_rx_desc *cur;
3319 struct ixv_rx_buf *rbuf, *nbuf;
3320
3321 IXV_RX_LOCK(rxr);
3322
3323 for (i = rxr->next_to_check; count != 0;) {
3324 struct mbuf *sendmp, *mh, *mp;
3325 u32 rsc, ptype;
3326 u16 hlen, plen, hdr, vtag;
3327 bool eop;
3328
3329 /* Sync the ring. */
3330 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3331 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3332
3333 cur = &rxr->rx_base[i];
3334 staterr = le32toh(cur->wb.upper.status_error);
3335
3336 if ((staterr & IXGBE_RXD_STAT_DD) == 0)
3337 break;
3338 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
3339 break;
3340
3341 count--;
3342 sendmp = NULL;
3343 nbuf = NULL;
3344 rsc = 0;
3345 cur->wb.upper.status_error = 0;
3346 rbuf = &rxr->rx_buffers[i];
3347 mh = rbuf->m_head;
3348 mp = rbuf->m_pack;
3349
3350 plen = le16toh(cur->wb.upper.length);
3351 ptype = le32toh(cur->wb.lower.lo_dword.data) &
3352 IXGBE_RXDADV_PKTTYPE_MASK;
3353 hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
3354 vtag = le16toh(cur->wb.upper.vlan);
3355 eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
3356
3357 /* Make sure all parts of a bad packet are discarded */
3358 if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
3359 (rxr->discard)) {
3360 ifp->if_ierrors++;
3361 rxr->rx_discarded++;
3362 if (!eop)
3363 rxr->discard = TRUE;
3364 else
3365 rxr->discard = FALSE;
3366 ixv_rx_discard(rxr, i);
3367 goto next_desc;
3368 }
3369
3370 if (!eop) {
3371 nextp = i + 1;
3372 if (nextp == adapter->num_rx_desc)
3373 nextp = 0;
3374 nbuf = &rxr->rx_buffers[nextp];
3375 prefetch(nbuf);
3376 }
3377 /*
3378 ** The header mbuf is ONLY used when header
3379 ** split is enabled, otherwise we get normal
3380 ** behavior, ie, both header and payload
3381 ** are DMA'd into the payload buffer.
3382 **
3383 ** Rather than using the fmp/lmp global pointers
3384 ** we now keep the head of a packet chain in the
3385 ** buffer struct and pass this along from one
3386 ** descriptor to the next, until we get EOP.
3387 */
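		/*
		** For example, a header-split packet that spans two
		** descriptors ends up as mh -> mp -> (next descriptor's
		** m_pack), with the chain head stashed in nbuf->fmp
		** until the EOP descriptor is reached.
		*/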
3388 if (rxr->hdr_split && (rbuf->fmp == NULL)) {
3389 /* This must be an initial descriptor */
3390 hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
3391 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
3392 if (hlen > IXV_RX_HDR)
3393 hlen = IXV_RX_HDR;
3394 mh->m_len = hlen;
3395 mh->m_flags |= M_PKTHDR;
3396 mh->m_next = NULL;
3397 mh->m_pkthdr.len = mh->m_len;
3398 /* Null buf pointer so it is refreshed */
3399 rbuf->m_head = NULL;
3400 /*
3401 ** Check the payload length, this
3402 ** could be zero if its a small
3403 ** packet.
3404 */
3405 if (plen > 0) {
3406 mp->m_len = plen;
3407 mp->m_next = NULL;
3408 mp->m_flags &= ~M_PKTHDR;
3409 mh->m_next = mp;
3410 mh->m_pkthdr.len += mp->m_len;
3411 /* Null buf pointer so it is refreshed */
3412 rbuf->m_pack = NULL;
3413 rxr->rx_split_packets++;
3414 }
3415 /*
3416 ** Now create the forward
3417 ** chain so when complete
3418			** we won't have to.
3419 */
3420 if (eop == 0) {
3421 /* stash the chain head */
3422 nbuf->fmp = mh;
3423 /* Make forward chain */
3424 if (plen)
3425 mp->m_next = nbuf->m_pack;
3426 else
3427 mh->m_next = nbuf->m_pack;
3428 } else {
3429 /* Singlet, prepare to send */
3430 sendmp = mh;
3431 if ((adapter->num_vlans) &&
3432 (staterr & IXGBE_RXD_STAT_VP)) {
3433 sendmp->m_pkthdr.ether_vtag = vtag;
3434 sendmp->m_flags |= M_VLANTAG;
3435 }
3436 }
3437 } else {
3438 /*
3439 ** Either no header split, or a
3440 ** secondary piece of a fragmented
3441 ** split packet.
3442 */
3443 mp->m_len = plen;
3444 /*
3445 ** See if there is a stored head
3446 ** that determines what we are
3447 */
3448 sendmp = rbuf->fmp;
3449 rbuf->m_pack = rbuf->fmp = NULL;
3450
3451 if (sendmp != NULL) /* secondary frag */
3452 sendmp->m_pkthdr.len += mp->m_len;
3453 else {
3454 /* first desc of a non-ps chain */
3455 sendmp = mp;
3456 sendmp->m_flags |= M_PKTHDR;
3457 sendmp->m_pkthdr.len = mp->m_len;
3458 if (staterr & IXGBE_RXD_STAT_VP) {
3459 sendmp->m_pkthdr.ether_vtag = vtag;
3460 sendmp->m_flags |= M_VLANTAG;
3461 }
3462 }
3463 /* Pass the head pointer on */
3464 if (eop == 0) {
3465 nbuf->fmp = sendmp;
3466 sendmp = NULL;
3467 mp->m_next = nbuf->m_pack;
3468 }
3469 }
3470 ++processed;
3471 /* Sending this frame? */
3472 if (eop) {
3473 sendmp->m_pkthdr.rcvif = ifp;
3474 ifp->if_ipackets++;
3475 rxr->rx_packets++;
3476 /* capture data for AIM */
3477 rxr->bytes += sendmp->m_pkthdr.len;
3478 rxr->rx_bytes += sendmp->m_pkthdr.len;
3479 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3480 ixv_rx_checksum(staterr, sendmp, ptype);
3481#if __FreeBSD_version >= 800000
3482 sendmp->m_pkthdr.flowid = que->msix;
3483 sendmp->m_flags |= M_FLOWID;
3484#endif
3485 }
3486next_desc:
3487 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3488 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3489
3490 /* Advance our pointers to the next descriptor. */
3491 if (++i == adapter->num_rx_desc)
3492 i = 0;
3493
3494 /* Now send to the stack or do LRO */
3495 if (sendmp != NULL)
3496 ixv_rx_input(rxr, ifp, sendmp, ptype);
3497
3498 /* Every 8 descriptors we go to refresh mbufs */
3499 if (processed == 8) {
3500 ixv_refresh_mbufs(rxr, i);
3501 processed = 0;
3502 }
3503 }
3504
3505 /* Refresh any remaining buf structs */
3506 if (ixv_rx_unrefreshed(rxr))
3507 ixv_refresh_mbufs(rxr, i);
3508
3509 rxr->next_to_check = i;
3510
3511 /*
3512 * Flush any outstanding LRO work
3513 */
3514 while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
3515 SLIST_REMOVE_HEAD(&lro->lro_active, next);
3516 tcp_lro_flush(lro, queued);
3517 }
3518
3519 IXV_RX_UNLOCK(rxr);
3520
3521 /*
3522 ** We still have cleaning to do?
3523 ** Schedule another interrupt if so.
3524 */
3525 if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
3526		ixv_rearm_queues(adapter, (u64)1 << que->msix);
3527 return (TRUE);
3528 }
3529
3530 return (FALSE);
3531}
3532
3533
3534/*********************************************************************
3535 *
3536 * Verify that the hardware indicated that the checksum is valid.
3537 * Inform the stack about the status of checksum so that stack
3538 * doesn't spend time verifying the checksum.
3539 *
3540 *********************************************************************/
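/*
 * Setting CSUM_DATA_VALID | CSUM_PSEUDO_HDR together with a csum_data of
 * 0xffff is the conventional way to tell the FreeBSD stack that the L4
 * checksum has already been verified, so it skips its own computation.
 */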
3541static void
3542ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
3543{
3544 u16 status = (u16) staterr;
3545 u8 errors = (u8) (staterr >> 24);
3546 bool sctp = FALSE;
3547
3548 if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
3549 (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
3550 sctp = TRUE;
3551
3552 if (status & IXGBE_RXD_STAT_IPCS) {
3553 if (!(errors & IXGBE_RXD_ERR_IPE)) {
3554 /* IP Checksum Good */
3555 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3556 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3557
3558 } else
3559 mp->m_pkthdr.csum_flags = 0;
3560 }
3561 if (status & IXGBE_RXD_STAT_L4CS) {
3562 u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3563#if __FreeBSD_version >= 800000
3564 if (sctp)
3565 type = CSUM_SCTP_VALID;
3566#endif
3567 if (!(errors & IXGBE_RXD_ERR_TCPE)) {
3568 mp->m_pkthdr.csum_flags |= type;
3569 if (!sctp)
3570 mp->m_pkthdr.csum_data = htons(0xffff);
3571 }
3572 }
3573 return;
3574}
3575
3576static void
3577ixv_setup_vlan_support(struct adapter *adapter)
3578{
3579 struct ixgbe_hw *hw = &adapter->hw;
3580 u32 ctrl, vid, vfta, retry;
3581
3582
3583 /*
3584	** We get here through init_locked, meaning
3585	** a soft reset; this has already cleared
3586	** the VFTA and other state, so if no
3587	** vlans have been registered do nothing.
3588 */
3589 if (adapter->num_vlans == 0)
3590 return;
3591
3592 /* Enable the queues */
3593 for (int i = 0; i < adapter->num_queues; i++) {
3594 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
3595 ctrl |= IXGBE_RXDCTL_VME;
3596 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
3597 }
3598
3599 /*
3600	** A soft reset zeroes out the VFTA, so
3601 ** we need to repopulate it now.
3602 */
3603 for (int i = 0; i < VFTA_SIZE; i++) {
3604 if (ixv_shadow_vfta[i] == 0)
3605 continue;
3606 vfta = ixv_shadow_vfta[i];
3607 /*
3608		** Reconstruct the vlan ids
3609 ** based on the bits set in each
3610 ** of the array ints.
3611 */
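		/*
		** For example, bit 5 set in ixv_shadow_vfta[2]
		** corresponds to VLAN ID (2 * 32) + 5 = 69.
		*/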
3612		for (int j = 0; j < 32; j++) {
3613 retry = 0;
3614 if ((vfta & (1 << j)) == 0)
3615 continue;
3616 vid = (i * 32) + j;
3617 /* Call the shared code mailbox routine */
3618 while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
3619 if (++retry > 5)
3620 break;
3621 }
3622 }
3623 }
3624}
3625
3626/*
3627** This routine is run via a vlan config EVENT;
3628** it enables us to use the HW Filter table since
3629** we can get the vlan id. This just creates the
3630** entry in the soft version of the VFTA; init will
3631** repopulate the real table.
3632*/
3633static void
3634ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3635{
3636 struct adapter *adapter = ifp->if_softc;
3637 u16 index, bit;
3638
3639 if (ifp->if_softc != arg) /* Not our event */
3640 return;
3641
3642 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3643 return;
3644
3645 IXV_CORE_LOCK(adapter);
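	/*
	** The shadow VFTA is an array of 32-bit words: the vlan tag
	** selects a word (vtag / 32) and a bit within it (vtag % 32).
	*/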
3646 index = (vtag >> 5) & 0x7F;
3647 bit = vtag & 0x1F;
3648 ixv_shadow_vfta[index] |= (1 << bit);
3649 ++adapter->num_vlans;
3650 /* Re-init to load the changes */
3651 ixv_init_locked(adapter);
3652 IXV_CORE_UNLOCK(adapter);
3653}
3654
3655/*
 3656** This routine is run via a vlan
 3657** unconfig EVENT; it removes our entry
 3658** from the soft VFTA.
3659*/
3660static void
3661ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
3662{
3663 struct adapter *adapter = ifp->if_softc;
3664 u16 index, bit;
3665
3666 if (ifp->if_softc != arg)
3667 return;
3668
3669 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
3670 return;
3671
3672 IXV_CORE_LOCK(adapter);
3673 index = (vtag >> 5) & 0x7F;
3674 bit = vtag & 0x1F;
3675 ixv_shadow_vfta[index] &= ~(1 << bit);
3676 --adapter->num_vlans;
3677 /* Re-init to load the changes */
3678 ixv_init_locked(adapter);
3679 IXV_CORE_UNLOCK(adapter);
3680}
3681
3682static void
3683ixv_enable_intr(struct adapter *adapter)
3684{
3685 struct ixgbe_hw *hw = &adapter->hw;
3686 struct ix_queue *que = adapter->queues;
3687 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3688
3689
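	/*
	** Enable the non-queue causes in EIMS here; the individual
	** queue interrupts are enabled one at a time below.  VTEIAC
	** (the auto-clear mask, by analogy with the PF EIAC register)
	** lets the queue causes clear without an explicit EICR write.
	*/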
3690 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
3691
3692 mask = IXGBE_EIMS_ENABLE_MASK;
3693 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
3694 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
3695
3696 for (int i = 0; i < adapter->num_queues; i++, que++)
3697 ixv_enable_queue(adapter, que->msix);
3698
3699 IXGBE_WRITE_FLUSH(hw);
3700
3701 return;
3702}
3703
3704static void
3705ixv_disable_intr(struct adapter *adapter)
3706{
3707 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
3708 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
3709 IXGBE_WRITE_FLUSH(&adapter->hw);
3710 return;
3711}
3712
3713/*
 3714** Set up the correct IVAR register for a particular MSIX interrupt
 3715** - entry is the register array entry
 3716** - vector is the MSIX vector for this queue
 3717** - type is 0 for RX, 1 for TX and -1 for MISC
3718*/
3719static void
3720ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
3721{
3722 struct ixgbe_hw *hw = &adapter->hw;
3723 u32 ivar, index;
3724
3725 vector |= IXGBE_IVAR_ALLOC_VAL;
3726
3727 if (type == -1) { /* MISC IVAR */
3728 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
3729 ivar &= ~0xFF;
3730 ivar |= vector;
3731 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
3732 } else { /* RX/TX IVARS */
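		/*
		** Each VTIVAR register carries the 8-bit vector entries
		** for two queues: RX in bits 0-7/16-23 and TX in bits
		** 8-15/24-31, hence the index calculation below.
		*/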
3733 index = (16 * (entry & 1)) + (8 * type);
3734 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
3735 ivar &= ~(0xFF << index);
3736 ivar |= (vector << index);
3737 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
3738 }
3739}
3740
3741static void
3742ixv_configure_ivars(struct adapter *adapter)
3743{
3744 struct ix_queue *que = adapter->queues;
3745
3746 for (int i = 0; i < adapter->num_queues; i++, que++) {
3747 /* First the RX queue entry */
3748 ixv_set_ivar(adapter, i, que->msix, 0);
3749 /* ... and the TX */
3750 ixv_set_ivar(adapter, i, que->msix, 1);
3751 /* Set an initial value in EITR */
3752 IXGBE_WRITE_REG(&adapter->hw,
3753 IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
3754 }
3755
3756 /* For the Link interrupt */
3757 ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
3758}
3759
3760
3761/*
3762** Tasklet handler for MSIX MBX interrupts
 3763** - done outside interrupt context since it might sleep
3764*/
3765static void
3766ixv_handle_mbx(void *context, int pending)
3767{
3768 struct adapter *adapter = context;
3769
3770 ixgbe_check_link(&adapter->hw,
3771 &adapter->link_speed, &adapter->link_up, 0);
3772 ixv_update_link_status(adapter);
3773}
3774
3775/*
 3776** The VF stats registers never have a truly virgin
3777** starting point, so this routine tries to make an
3778** artificial one, marking ground zero on attach as
3779** it were.
3780*/
3781static void
3782ixv_save_stats(struct adapter *adapter)
3783{
3784 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
3785 adapter->stats.saved_reset_vfgprc +=
3786 adapter->stats.vfgprc - adapter->stats.base_vfgprc;
3787 adapter->stats.saved_reset_vfgptc +=
3788 adapter->stats.vfgptc - adapter->stats.base_vfgptc;
3789 adapter->stats.saved_reset_vfgorc +=
3790 adapter->stats.vfgorc - adapter->stats.base_vfgorc;
3791 adapter->stats.saved_reset_vfgotc +=
3792 adapter->stats.vfgotc - adapter->stats.base_vfgotc;
3793 adapter->stats.saved_reset_vfmprc +=
3794 adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3795 }
3796}
3797
3798static void
3799ixv_init_stats(struct adapter *adapter)
3800{
3801 struct ixgbe_hw *hw = &adapter->hw;
3802
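	/*
	** Snapshot the current register values.  The octet counters
	** (GORC/GOTC) are wider than 32 bits and split across LSB/MSB
	** register pairs, so the two halves are combined here.
	*/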
3803 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
3804 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
3805 adapter->stats.last_vfgorc |=
3806 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
3807
3808 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
3809 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
3810 adapter->stats.last_vfgotc |=
3811 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
3812
3813 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
3814
3815 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
3816 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
3817 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
3818 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
3819 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
3820}
3821
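/*
** The hardware counters are only 32 (or 36) bits wide and wrap, so
** these macros fold each reading into a 64-bit soft counter: when a
** wrap is detected the upper part of the accumulator is bumped and
** the new register value is merged back in.
*/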
3822#define UPDATE_STAT_32(reg, last, count) \
3823{ \
3824 u32 current = IXGBE_READ_REG(hw, reg); \
3825 if (current < last) \
3826 count += 0x100000000LL; \
3827 last = current; \
3828 count &= 0xFFFFFFFF00000000LL; \
3829 count |= current; \
3830}
3831
3832#define UPDATE_STAT_36(lsb, msb, last, count) \
3833{ \
3834 u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \
3835 u64 cur_msb = IXGBE_READ_REG(hw, msb); \
3836 u64 current = ((cur_msb << 32) | cur_lsb); \
3837 if (current < last) \
3838 count += 0x1000000000LL; \
3839 last = current; \
3840 count &= 0xFFFFFFF000000000LL; \
3841 count |= current; \
3842}
3843
3844/*
3845** ixv_update_stats - Update the board statistics counters.
3846*/
3847void
3848ixv_update_stats(struct adapter *adapter)
3849{
3850 struct ixgbe_hw *hw = &adapter->hw;
3851
3852 UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
3853 adapter->stats.vfgprc);
3854 UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
3855 adapter->stats.vfgptc);
3856 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
3857 adapter->stats.last_vfgorc, adapter->stats.vfgorc);
3858 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
3859 adapter->stats.last_vfgotc, adapter->stats.vfgotc);
3860 UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
3861 adapter->stats.vfmprc);
3862}
3863
3864/**********************************************************************
3865 *
 3866 * This routine is called from the stats sysctl handler (ixv_sysctl_stats).
3867 * This routine provides a way to take a look at important statistics
3868 * maintained by the driver and hardware.
3869 *
3870 **********************************************************************/
3871static void
3872ixv_print_hw_stats(struct adapter * adapter)
3873{
3874 device_t dev = adapter->dev;
3875
3876 device_printf(dev,"Std Mbuf Failed = %lu\n",
3877 adapter->mbuf_defrag_failed);
3878 device_printf(dev,"Driver dropped packets = %lu\n",
3879 adapter->dropped_pkts);
3880 device_printf(dev, "watchdog timeouts = %ld\n",
3881 adapter->watchdog_events);
3882
3883 device_printf(dev,"Good Packets Rcvd = %llu\n",
3884 (long long)adapter->stats.vfgprc);
3885 device_printf(dev,"Good Packets Xmtd = %llu\n",
3886 (long long)adapter->stats.vfgptc);
3887 device_printf(dev,"TSO Transmissions = %lu\n",
3888 adapter->tso_tx);
3889
3890}
3891
3892/**********************************************************************
3893 *
 3894 * This routine is called from the debug sysctl handler (ixv_sysctl_debug).
3895 * This routine provides a way to take a look at important statistics
3896 * maintained by the driver and hardware.
3897 *
3898 **********************************************************************/
3899static void
3900ixv_print_debug_info(struct adapter *adapter)
3901{
3902 device_t dev = adapter->dev;
3903 struct ixgbe_hw *hw = &adapter->hw;
3904 struct ix_queue *que = adapter->queues;
3905 struct rx_ring *rxr;
3906 struct tx_ring *txr;
3907 struct lro_ctrl *lro;
3908
3909 device_printf(dev,"Error Byte Count = %u \n",
3910 IXGBE_READ_REG(hw, IXGBE_ERRBC));
3911
3912 for (int i = 0; i < adapter->num_queues; i++, que++) {
3913 txr = que->txr;
3914 rxr = que->rxr;
3915 lro = &rxr->lro;
3916 device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
3917 que->msix, (long)que->irqs);
3918 device_printf(dev,"RX(%d) Packets Received: %lld\n",
3919 rxr->me, (long long)rxr->rx_packets);
3920 device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
3921 rxr->me, (long long)rxr->rx_split_packets);
3922 device_printf(dev,"RX(%d) Bytes Received: %lu\n",
3923 rxr->me, (long)rxr->rx_bytes);
3924 device_printf(dev,"RX(%d) LRO Queued= %d\n",
3925 rxr->me, lro->lro_queued);
3926 device_printf(dev,"RX(%d) LRO Flushed= %d\n",
3927 rxr->me, lro->lro_flushed);
3928 device_printf(dev,"TX(%d) Packets Sent: %lu\n",
3929 txr->me, (long)txr->total_packets);
3930 device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
3931 txr->me, (long)txr->no_desc_avail);
3932 }
3933
3934 device_printf(dev,"MBX IRQ Handled: %lu\n",
3935 (long)adapter->mbx_irq);
3936 return;
3937}
3938
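/*
** Sysctl handler: writing a 1 to this node dumps the driver and
** hardware statistics to the console via ixv_print_hw_stats().
*/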
3939static int
3940ixv_sysctl_stats(SYSCTL_HANDLER_ARGS)
3941{
3942 int error;
3943 int result;
3944 struct adapter *adapter;
3945
3946 result = -1;
3947 error = sysctl_handle_int(oidp, &result, 0, req);
3948
3949 if (error || !req->newptr)
3950 return (error);
3951
3952 if (result == 1) {
3953 adapter = (struct adapter *) arg1;
3954 ixv_print_hw_stats(adapter);
3955 }
3956 return error;
3957}
3958
3959static int
3960ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
3961{
3962 int error, result;
3963 struct adapter *adapter;
3964
3965 result = -1;
3966 error = sysctl_handle_int(oidp, &result, 0, req);
3967
3968 if (error || !req->newptr)
3969 return (error);
3970
3971 if (result == 1) {
3972 adapter = (struct adapter *) arg1;
3973 ixv_print_debug_info(adapter);
3974 }
3975 return error;
3976}
3977
3978/*
3979** Set flow control using sysctl:
3980** Flow control values:
3981** 0 - off
3982** 1 - rx pause
3983** 2 - tx pause
3984** 3 - full
3985*/
3986static int
3987ixv_set_flowcntl(SYSCTL_HANDLER_ARGS)
3988{
3989 int error;
3990 struct adapter *adapter;
3991
3992 error = sysctl_handle_int(oidp, &ixv_flow_control, 0, req);
3993
3994 if (error)
3995 return (error);
3996
3997 adapter = (struct adapter *) arg1;
3998 switch (ixv_flow_control) {
3999 case ixgbe_fc_rx_pause:
4000 case ixgbe_fc_tx_pause:
4001 case ixgbe_fc_full:
4002 adapter->hw.fc.requested_mode = ixv_flow_control;
4003 break;
4004 case ixgbe_fc_none:
4005 default:
4006 adapter->hw.fc.requested_mode = ixgbe_fc_none;
4007 }
4008
4009 ixgbe_fc_enable(&adapter->hw);
4010 return error;
4011}
4012
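/*
** Helper to expose the RX processing limit as a read/write sysctl
** on the device's sysctl tree and give it its initial value.
*/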
4013static void
4014ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
4015 const char *description, int *limit, int value)
4016{
4017 *limit = value;
4018 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
4019 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
4020 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
4021}
4022