Deleted Added
full compact
ixgbe.h (302408) ixgbe.h (320897)
1/******************************************************************************
2
1/******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
3 Copyright (c) 2001-2017, Intel Corporation
4 All rights reserved.
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
18 this software without specific prior written permission.
19
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD: stable/11/sys/dev/ixgbe/ixgbe.h 294795 2016-01-26 12:30:17Z smh $*/
33/*$FreeBSD: stable/11/sys/dev/ixgbe/ixgbe.h 320897 2017-07-11 21:25:07Z erj $*/
34
35
36#ifndef _IXGBE_H_
37#define _IXGBE_H_
38
39
40#include <sys/param.h>
41#include <sys/systm.h>
34
35
36#ifndef _IXGBE_H_
37#define _IXGBE_H_
38
39
40#include <sys/param.h>
41#include <sys/systm.h>
42#ifndef IXGBE_LEGACY_TX
43#include <sys/buf_ring.h>
42#include <sys/buf_ring.h>
44#endif
45#include <sys/mbuf.h>
46#include <sys/protosw.h>
47#include <sys/socket.h>
48#include <sys/malloc.h>
49#include <sys/kernel.h>
50#include <sys/module.h>
51#include <sys/sockio.h>
52#include <sys/eventhandler.h>

--- 34 unchanged lines hidden (view full) ---

87#include <sys/sysctl.h>
88#include <sys/endian.h>
89#include <sys/taskqueue.h>
90#include <sys/pcpu.h>
91#include <sys/smp.h>
92#include <machine/smp.h>
93#include <sys/sbuf.h>
94
43#include <sys/mbuf.h>
44#include <sys/protosw.h>
45#include <sys/socket.h>
46#include <sys/malloc.h>
47#include <sys/kernel.h>
48#include <sys/module.h>
49#include <sys/sockio.h>
50#include <sys/eventhandler.h>

--- 34 unchanged lines hidden (view full) ---

85#include <sys/sysctl.h>
86#include <sys/endian.h>
87#include <sys/taskqueue.h>
88#include <sys/pcpu.h>
89#include <sys/smp.h>
90#include <machine/smp.h>
91#include <sys/sbuf.h>
92
95#ifdef PCI_IOV
96#include <sys/nv.h>
97#include <sys/iov_schema.h>
98#include <dev/pci/pci_iov.h>
99#endif
100
101#include "ixgbe_api.h"
102#include "ixgbe_common.h"
103#include "ixgbe_phy.h"
104#include "ixgbe_vf.h"
93#include "ixgbe_api.h"
94#include "ixgbe_common.h"
95#include "ixgbe_phy.h"
96#include "ixgbe_vf.h"
97#include "ixgbe_features.h"
105
98
106#ifdef PCI_IOV
107#include "ixgbe_common.h"
108#include "ixgbe_mbx.h"
109#endif
110
111/* Tunables */
112
113/*
114 * TxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the
115 * number of transmit descriptors allocated by the driver. Increasing this
116 * value allows the driver to queue more transmits. Each descriptor is 16
117 * bytes. Performance tests have show the 2K value to be optimal for top
118 * performance.
119 */
99/* Tunables */
100
101/*
102 * TxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the
103 * number of transmit descriptors allocated by the driver. Increasing this
104 * value allows the driver to queue more transmits. Each descriptor is 16
105 * bytes. Performance tests have show the 2K value to be optimal for top
106 * performance.
107 */
120#define DEFAULT_TXD 1024
121#define PERFORM_TXD 2048
122#define MAX_TXD 4096
123#define MIN_TXD 64
108#define DEFAULT_TXD 1024
109#define PERFORM_TXD 2048
110#define MAX_TXD 4096
111#define MIN_TXD 64
124
125/*
126 * RxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the
127 * number of receive descriptors allocated for each RX queue. Increasing this
128 * value allows the driver to buffer more incoming packets. Each descriptor
112
113/*
114 * RxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the
115 * number of receive descriptors allocated for each RX queue. Increasing this
116 * value allows the driver to buffer more incoming packets. Each descriptor
129 * is 16 bytes. A receive buffer is also allocated for each descriptor.
130 *
131 * Note: with 8 rings and a dual port card, it is possible to bump up
132 * against the system mbuf pool limit, you can tune nmbclusters
133 * to adjust for this.
117 * is 16 bytes. A receive buffer is also allocated for each descriptor.
118 *
119 * Note: with 8 rings and a dual port card, it is possible to bump up
120 * against the system mbuf pool limit, you can tune nmbclusters
121 * to adjust for this.
134 */
122 */
135#define DEFAULT_RXD 1024
136#define PERFORM_RXD 2048
137#define MAX_RXD 4096
138#define MIN_RXD 64
123#define DEFAULT_RXD 1024
124#define PERFORM_RXD 2048
125#define MAX_RXD 4096
126#define MIN_RXD 64
139
140/* Alignment for rings */
127
128/* Alignment for rings */
141#define DBA_ALIGN 128
129#define DBA_ALIGN 128
142
143/*
144 * This is the max watchdog interval, ie. the time that can
145 * pass between any two TX clean operations, such only happening
146 * when the TX hardware is functioning.
147 */
130
131/*
132 * This is the max watchdog interval, ie. the time that can
133 * pass between any two TX clean operations, such only happening
134 * when the TX hardware is functioning.
135 */
148#define IXGBE_WATCHDOG (10 * hz)
136#define IXGBE_WATCHDOG (10 * hz)
149
150/*
151 * This parameters control when the driver calls the routine to reclaim
152 * transmit descriptors.
153 */
137
138/*
139 * This parameters control when the driver calls the routine to reclaim
140 * transmit descriptors.
141 */
154#define IXGBE_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 8)
155#define IXGBE_TX_OP_THRESHOLD (adapter->num_tx_desc / 32)
142#define IXGBE_TX_CLEANUP_THRESHOLD(_a) ((_a)->num_tx_desc / 8)
143#define IXGBE_TX_OP_THRESHOLD(_a) ((_a)->num_tx_desc / 32)
156
157/* These defines are used in MTU calculations */
144
145/* These defines are used in MTU calculations */
158#define IXGBE_MAX_FRAME_SIZE 9728
159#define IXGBE_MTU_HDR (ETHER_HDR_LEN + ETHER_CRC_LEN)
160#define IXGBE_MTU_HDR_VLAN (ETHER_HDR_LEN + ETHER_CRC_LEN + \
161 ETHER_VLAN_ENCAP_LEN)
162#define IXGBE_MAX_MTU (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR)
163#define IXGBE_MAX_MTU_VLAN (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR_VLAN)
146#define IXGBE_MAX_FRAME_SIZE 9728
147#define IXGBE_MTU_HDR (ETHER_HDR_LEN + ETHER_CRC_LEN)
148#define IXGBE_MTU_HDR_VLAN (ETHER_HDR_LEN + ETHER_CRC_LEN + \
149 ETHER_VLAN_ENCAP_LEN)
150#define IXGBE_MAX_MTU (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR)
151#define IXGBE_MAX_MTU_VLAN (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR_VLAN)
164
165/* Flow control constants */
152
153/* Flow control constants */
166#define IXGBE_FC_PAUSE 0xFFFF
167#define IXGBE_FC_HI 0x20000
168#define IXGBE_FC_LO 0x10000
154#define IXGBE_FC_PAUSE 0xFFFF
155#define IXGBE_FC_HI 0x20000
156#define IXGBE_FC_LO 0x10000
169
170/*
171 * Used for optimizing small rx mbufs. Effort is made to keep the copy
172 * small and aligned for the CPU L1 cache.
157
158/*
159 * Used for optimizing small rx mbufs. Effort is made to keep the copy
160 * small and aligned for the CPU L1 cache.
173 *
161 *
174 * MHLEN is typically 168 bytes, giving us 8-byte alignment. Getting
175 * 32 byte alignment needed for the fast bcopy results in 8 bytes being
176 * wasted. Getting 64 byte alignment, which _should_ be ideal for
177 * modern Intel CPUs, results in 40 bytes wasted and a significant drop
178 * in observed efficiency of the optimization, 97.9% -> 81.8%.
179 */
180#if __FreeBSD_version < 1002000
162 * MHLEN is typically 168 bytes, giving us 8-byte alignment. Getting
163 * 32 byte alignment needed for the fast bcopy results in 8 bytes being
164 * wasted. Getting 64 byte alignment, which _should_ be ideal for
165 * modern Intel CPUs, results in 40 bytes wasted and a significant drop
166 * in observed efficiency of the optimization, 97.9% -> 81.8%.
167 */
168#if __FreeBSD_version < 1002000
181#define MPKTHSIZE (sizeof(struct m_hdr) + sizeof(struct pkthdr))
169#define MPKTHSIZE (sizeof(struct m_hdr) + sizeof(struct pkthdr))
182#endif
170#endif
183#define IXGBE_RX_COPY_HDR_PADDED ((((MPKTHSIZE - 1) / 32) + 1) * 32)
184#define IXGBE_RX_COPY_LEN (MSIZE - IXGBE_RX_COPY_HDR_PADDED)
185#define IXGBE_RX_COPY_ALIGN (IXGBE_RX_COPY_HDR_PADDED - MPKTHSIZE)
171#define IXGBE_RX_COPY_HDR_PADDED ((((MPKTHSIZE - 1) / 32) + 1) * 32)
172#define IXGBE_RX_COPY_LEN (MSIZE - IXGBE_RX_COPY_HDR_PADDED)
173#define IXGBE_RX_COPY_ALIGN (IXGBE_RX_COPY_HDR_PADDED - MPKTHSIZE)
186
187/* Keep older OS drivers building... */
188#if !defined(SYSCTL_ADD_UQUAD)
189#define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD
190#endif
191
192/* Defines for printing debug information */
193#define DEBUG_INIT 0

--- 6 unchanged lines hidden (view full) ---

200#define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n")
201#define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A)
202#define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B)
203#define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n")
204#define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A)
205#define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B)
206
207#define MAX_NUM_MULTICAST_ADDRESSES 128
174
175/* Keep older OS drivers building... */
176#if !defined(SYSCTL_ADD_UQUAD)
177#define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD
178#endif
179
180/* Defines for printing debug information */
181#define DEBUG_INIT 0

--- 6 unchanged lines hidden (view full) ---

188#define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n")
189#define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A)
190#define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B)
191#define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n")
192#define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A)
193#define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B)
194
195#define MAX_NUM_MULTICAST_ADDRESSES 128
208#define IXGBE_82598_SCATTER 100
209#define IXGBE_82599_SCATTER 32
210#define MSIX_82598_BAR 3
211#define MSIX_82599_BAR 4
212#define IXGBE_TSO_SIZE 262140
213#define IXGBE_RX_HDR 128
214#define IXGBE_VFTA_SIZE 128
215#define IXGBE_BR_SIZE 4096
216#define IXGBE_QUEUE_MIN_FREE 32
217#define IXGBE_MAX_TX_BUSY 10
218#define IXGBE_QUEUE_HUNG 0x80000000
196#define IXGBE_82598_SCATTER 100
197#define IXGBE_82599_SCATTER 32
198#define MSIX_82598_BAR 3
199#define MSIX_82599_BAR 4
200#define IXGBE_TSO_SIZE 262140
201#define IXGBE_RX_HDR 128
202#define IXGBE_VFTA_SIZE 128
203#define IXGBE_BR_SIZE 4096
204#define IXGBE_QUEUE_MIN_FREE 32
205#define IXGBE_MAX_TX_BUSY 10
206#define IXGBE_QUEUE_HUNG 0x80000000
219
207
220#define IXV_EITR_DEFAULT 128
208#define IXGBE_EITR_DEFAULT 128
221
222/* Supported offload bits in mbuf flag */
223#if __FreeBSD_version >= 1000000
209
210/* Supported offload bits in mbuf flag */
211#if __FreeBSD_version >= 1000000
224#define CSUM_OFFLOAD (CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
225 CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
226 CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
212#define CSUM_OFFLOAD (CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
213 CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
214 CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
227#elif __FreeBSD_version >= 800000
215#elif __FreeBSD_version >= 800000
228#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
216#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
229#else
217#else
230#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP)
218#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP)
231#endif
232
233/* Backward compatibility items for very old versions */
234#ifndef pci_find_cap
235#define pci_find_cap pci_find_extcap
236#endif
237
238#ifndef DEVMETHOD_END
239#define DEVMETHOD_END { NULL, NULL }
240#endif
241
242/*
219#endif
220
221/* Backward compatibility items for very old versions */
222#ifndef pci_find_cap
223#define pci_find_cap pci_find_extcap
224#endif
225
226#ifndef DEVMETHOD_END
227#define DEVMETHOD_END { NULL, NULL }
228#endif
229
230/*
243 * Interrupt Moderation parameters
231 * Interrupt Moderation parameters
244 */
232 */
245#define IXGBE_LOW_LATENCY 128
246#define IXGBE_AVE_LATENCY 400
247#define IXGBE_BULK_LATENCY 1200
233#define IXGBE_LOW_LATENCY 128
234#define IXGBE_AVE_LATENCY 400
235#define IXGBE_BULK_LATENCY 1200
248
249/* Using 1FF (the max value), the interval is ~1.05ms */
236
237/* Using 1FF (the max value), the interval is ~1.05ms */
250#define IXGBE_LINK_ITR_QUANTA 0x1FF
251#define IXGBE_LINK_ITR ((IXGBE_LINK_ITR_QUANTA << 3) & \
252 IXGBE_EITR_ITR_INT_MASK)
238#define IXGBE_LINK_ITR_QUANTA 0x1FF
239#define IXGBE_LINK_ITR ((IXGBE_LINK_ITR_QUANTA << 3) & \
240 IXGBE_EITR_ITR_INT_MASK)
253
241
254/* MAC type macros */
255#define IXGBE_IS_X550VF(_adapter) \
256 ((_adapter->hw.mac.type == ixgbe_mac_X550_vf) || \
257 (_adapter->hw.mac.type == ixgbe_mac_X550EM_x_vf))
258
242
259#define IXGBE_IS_VF(_adapter) \
260 (IXGBE_IS_X550VF(_adapter) || \
261 (_adapter->hw.mac.type == ixgbe_mac_X540_vf) || \
262 (_adapter->hw.mac.type == ixgbe_mac_82599_vf))
263
243
264#ifdef PCI_IOV
265#define IXGBE_VF_INDEX(vmdq) ((vmdq) / 32)
266#define IXGBE_VF_BIT(vmdq) (1 << ((vmdq) % 32))
267
268#define IXGBE_VT_MSG_MASK 0xFFFF
269
270#define IXGBE_VT_MSGINFO(msg) \
271 (((msg) & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT)
272
273#define IXGBE_VF_GET_QUEUES_RESP_LEN 5
274
275#define IXGBE_API_VER_1_0 0
276#define IXGBE_API_VER_2_0 1 /* Solaris API. Not supported. */
277#define IXGBE_API_VER_1_1 2
278#define IXGBE_API_VER_UNKNOWN UINT16_MAX
279
280enum ixgbe_iov_mode {
281 IXGBE_64_VM,
282 IXGBE_32_VM,
283 IXGBE_NO_VM
284};
285#endif /* PCI_IOV */
286
287
288/*
289 *****************************************************************************
244/************************************************************************
290 * vendor_info_array
245 * vendor_info_array
291 *
292 * This array contains the list of Subvendor/Subdevice IDs on which the driver
293 * should load.
294 *
295 *****************************************************************************
296 */
246 *
247 * Contains the list of Subvendor/Subdevice IDs on
248 * which the driver should load.
249 ************************************************************************/
297typedef struct _ixgbe_vendor_info_t {
250typedef struct _ixgbe_vendor_info_t {
298 unsigned int vendor_id;
299 unsigned int device_id;
300 unsigned int subvendor_id;
301 unsigned int subdevice_id;
302 unsigned int index;
251 unsigned int vendor_id;
252 unsigned int device_id;
253 unsigned int subvendor_id;
254 unsigned int subdevice_id;
255 unsigned int index;
303} ixgbe_vendor_info_t;
304
256} ixgbe_vendor_info_t;
257
258struct ixgbe_bp_data {
259 u32 low;
260 u32 high;
261 u32 log;
262};
305
306struct ixgbe_tx_buf {
263
264struct ixgbe_tx_buf {
307 union ixgbe_adv_tx_desc *eop;
308 struct mbuf *m_head;
309 bus_dmamap_t map;
265 union ixgbe_adv_tx_desc *eop;
266 struct mbuf *m_head;
267 bus_dmamap_t map;
310};
311
312struct ixgbe_rx_buf {
268};
269
270struct ixgbe_rx_buf {
313 struct mbuf *buf;
314 struct mbuf *fmp;
315 bus_dmamap_t pmap;
316 u_int flags;
317#define IXGBE_RX_COPY 0x01
318 uint64_t addr;
271 struct mbuf *buf;
272 struct mbuf *fmp;
273 bus_dmamap_t pmap;
274 u_int flags;
275#define IXGBE_RX_COPY 0x01
276 uint64_t addr;
319};
320
321/*
277};
278
279/*
322 * Bus dma allocation structure used by ixgbe_dma_malloc and ixgbe_dma_free.
280 * Bus dma allocation structure used by ixgbe_dma_malloc and ixgbe_dma_free
323 */
324struct ixgbe_dma_alloc {
281 */
282struct ixgbe_dma_alloc {
325 bus_addr_t dma_paddr;
326 caddr_t dma_vaddr;
327 bus_dma_tag_t dma_tag;
328 bus_dmamap_t dma_map;
329 bus_dma_segment_t dma_seg;
330 bus_size_t dma_size;
331 int dma_nseg;
283 bus_addr_t dma_paddr;
284 caddr_t dma_vaddr;
285 bus_dma_tag_t dma_tag;
286 bus_dmamap_t dma_map;
287 bus_dma_segment_t dma_seg;
288 bus_size_t dma_size;
289 int dma_nseg;
332};
333
334struct ixgbe_mc_addr {
290};
291
292struct ixgbe_mc_addr {
335 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
293 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
336 u32 vmdq;
337};
338
339/*
294 u32 vmdq;
295};
296
297/*
340** Driver queue struct: this is the interrupt container
341** for the associated tx and rx ring.
342*/
298 * Driver queue struct: this is the interrupt container
299 * for the associated tx and rx ring.
300 */
343struct ix_queue {
301struct ix_queue {
344 struct adapter *adapter;
345 u32 msix; /* This queue's MSIX vector */
346 u32 eims; /* This queue's EIMS bit */
347 u32 eitr_setting;
348 u32 me;
349 struct resource *res;
350 void *tag;
351 int busy;
352 struct tx_ring *txr;
353 struct rx_ring *rxr;
354 struct task que_task;
355 struct taskqueue *tq;
356 u64 irqs;
302 struct adapter *adapter;
303 u32 msix; /* This queue's MSI-X vector */
304 u32 eims; /* This queue's EIMS bit */
305 u32 eitr_setting;
306 u32 me;
307 struct resource *res;
308 void *tag;
309 int busy;
310 struct tx_ring *txr;
311 struct rx_ring *rxr;
312 struct task que_task;
313 struct taskqueue *tq;
314 u64 irqs;
357};
358
359/*
360 * The transmit ring, one per queue
361 */
362struct tx_ring {
315};
316
317/*
318 * The transmit ring, one per queue
319 */
320struct tx_ring {
363 struct adapter *adapter;
364 struct mtx tx_mtx;
365 u32 me;
366 u32 tail;
367 int busy;
368 union ixgbe_adv_tx_desc *tx_base;
369 struct ixgbe_tx_buf *tx_buffers;
370 struct ixgbe_dma_alloc txdma;
371 volatile u16 tx_avail;
372 u16 next_avail_desc;
373 u16 next_to_clean;
374 u16 num_desc;
375 u32 txd_cmd;
376 bus_dma_tag_t txtag;
377 char mtx_name[16];
378#ifndef IXGBE_LEGACY_TX
379 struct buf_ring *br;
380 struct task txq_task;
381#endif
382#ifdef IXGBE_FDIR
383 u16 atr_sample;
384 u16 atr_count;
385#endif
386 u32 bytes; /* used for AIM */
387 u32 packets;
321 struct adapter *adapter;
322 struct mtx tx_mtx;
323 u32 me;
324 u32 tail;
325 int busy;
326 union ixgbe_adv_tx_desc *tx_base;
327 struct ixgbe_tx_buf *tx_buffers;
328 struct ixgbe_dma_alloc txdma;
329 volatile u16 tx_avail;
330 u16 next_avail_desc;
331 u16 next_to_clean;
332 u16 num_desc;
333 u32 txd_cmd;
334 bus_dma_tag_t txtag;
335 char mtx_name[16];
336 struct buf_ring *br;
337 struct task txq_task;
338
339 /* Flow Director */
340 u16 atr_sample;
341 u16 atr_count;
342
343 u32 bytes; /* used for AIM */
344 u32 packets;
388 /* Soft Stats */
345 /* Soft Stats */
389 unsigned long tso_tx;
390 unsigned long no_tx_map_avail;
391 unsigned long no_tx_dma_setup;
392 u64 no_desc_avail;
393 u64 total_packets;
346 u64 tso_tx;
347 u64 no_tx_map_avail;
348 u64 no_tx_dma_setup;
349 u64 no_desc_avail;
350 u64 total_packets;
394};
395
396
397/*
398 * The Receive ring, one per rx queue
399 */
400struct rx_ring {
351};
352
353
354/*
355 * The Receive ring, one per rx queue
356 */
357struct rx_ring {
401 struct adapter *adapter;
402 struct mtx rx_mtx;
403 u32 me;
404 u32 tail;
405 union ixgbe_adv_rx_desc *rx_base;
406 struct ixgbe_dma_alloc rxdma;
407 struct lro_ctrl lro;
408 bool lro_enabled;
409 bool hw_rsc;
410 bool vtag_strip;
411 u16 next_to_refresh;
412 u16 next_to_check;
413 u16 num_desc;
414 u16 mbuf_sz;
415 char mtx_name[16];
416 struct ixgbe_rx_buf *rx_buffers;
417 bus_dma_tag_t ptag;
358 struct adapter *adapter;
359 struct mtx rx_mtx;
360 u32 me;
361 u32 tail;
362 union ixgbe_adv_rx_desc *rx_base;
363 struct ixgbe_dma_alloc rxdma;
364 struct lro_ctrl lro;
365 bool lro_enabled;
366 bool hw_rsc;
367 bool vtag_strip;
368 u16 next_to_refresh;
369 u16 next_to_check;
370 u16 num_desc;
371 u16 mbuf_sz;
372 char mtx_name[16];
373 struct ixgbe_rx_buf *rx_buffers;
374 bus_dma_tag_t ptag;
418
375
419 u32 bytes; /* Used for AIM calc */
420 u32 packets;
376 u32 bytes; /* Used for AIM calc */
377 u32 packets;
421
422 /* Soft stats */
378
379 /* Soft stats */
423 u64 rx_irq;
424 u64 rx_copies;
425 u64 rx_packets;
426 u64 rx_bytes;
427 u64 rx_discarded;
428 u64 rsc_num;
429#ifdef IXGBE_FDIR
430 u64 flm;
431#endif
380 u64 rx_irq;
381 u64 rx_copies;
382 u64 rx_packets;
383 u64 rx_bytes;
384 u64 rx_discarded;
385 u64 rsc_num;
386
387 /* Flow Director */
388 u64 flm;
432};
433
389};
390
434#ifdef PCI_IOV
435#define IXGBE_VF_CTS (1 << 0) /* VF is clear to send. */
436#define IXGBE_VF_CAP_MAC (1 << 1) /* VF is permitted to change MAC. */
437#define IXGBE_VF_CAP_VLAN (1 << 2) /* VF is permitted to join vlans. */
438#define IXGBE_VF_ACTIVE (1 << 3) /* VF is active. */
439
440#define IXGBE_MAX_VF_MC 30 /* Max number of multicast entries */
441
442struct ixgbe_vf {
391#define IXGBE_MAX_VF_MC 30 /* Max number of multicast entries */
392
393struct ixgbe_vf {
443 u_int pool;
444 u_int rar_index;
445 u_int max_frame_size;
446 uint32_t flags;
447 uint8_t ether_addr[ETHER_ADDR_LEN];
448 uint16_t mc_hash[IXGBE_MAX_VF_MC];
449 uint16_t num_mc_hashes;
450 uint16_t default_vlan;
451 uint16_t vlan_tag;
452 uint16_t api_ver;
394 u_int pool;
395 u_int rar_index;
396 u_int max_frame_size;
397 uint32_t flags;
398 uint8_t ether_addr[ETHER_ADDR_LEN];
399 uint16_t mc_hash[IXGBE_MAX_VF_MC];
400 uint16_t num_mc_hashes;
401 uint16_t default_vlan;
402 uint16_t vlan_tag;
403 uint16_t api_ver;
453};
404};
454#endif /* PCI_IOV */
455
456/* Our adapter structure */
457struct adapter {
405
406/* Our adapter structure */
407struct adapter {
458 struct ixgbe_hw hw;
459 struct ixgbe_osdep osdep;
408 struct ixgbe_hw hw;
409 struct ixgbe_osdep osdep;
460
410
461 struct device *dev;
462 struct ifnet *ifp;
411 device_t dev;
412 struct ifnet *ifp;
463
413
464 struct resource *pci_mem;
465 struct resource *msix_mem;
414 struct resource *pci_mem;
415 struct resource *msix_mem;
466
467 /*
468 * Interrupt resources: this set is
469 * either used for legacy, or for Link
416
417 /*
418 * Interrupt resources: this set is
419 * either used for legacy, or for Link
470 * when doing MSIX
420 * when doing MSI-X
471 */
421 */
472 void *tag;
473 struct resource *res;
422 void *tag;
423 struct resource *res;
474
424
475 struct ifmedia media;
476 struct callout timer;
477 int msix;
478 int if_flags;
425 struct ifmedia media;
426 struct callout timer;
427 int link_rid;
428 int if_flags;
479
429
480 struct mtx core_mtx;
430 struct mtx core_mtx;
481
431
482 eventhandler_tag vlan_attach;
483 eventhandler_tag vlan_detach;
432 eventhandler_tag vlan_attach;
433 eventhandler_tag vlan_detach;
484
434
485 u16 num_vlans;
486 u16 num_queues;
435 u16 num_vlans;
436 u16 num_queues;
487
488 /*
437
438 /*
489 ** Shadow VFTA table, this is needed because
490 ** the real vlan filter table gets cleared during
491 ** a soft reset and the driver needs to be able
492 ** to repopulate it.
493 */
494 u32 shadow_vfta[IXGBE_VFTA_SIZE];
439 * Shadow VFTA table, this is needed because
440 * the real vlan filter table gets cleared during
441 * a soft reset and the driver needs to be able
442 * to repopulate it.
443 */
444 u32 shadow_vfta[IXGBE_VFTA_SIZE];
495
496 /* Info about the interface */
445
446 /* Info about the interface */
497 u32 optics;
498 u32 fc; /* local flow ctrl setting */
499 int advertise; /* link speeds */
500 bool enable_aim; /* adaptive interrupt moderation */
501 bool link_active;
502 u16 max_frame_size;
503 u16 num_segs;
504 u32 link_speed;
505 bool link_up;
506 u32 vector;
507 u16 dmac;
508 bool eee_enabled;
509 u32 phy_layer;
447 int advertise; /* link speeds */
448 int enable_aim; /* adaptive interrupt moderation */
449 bool link_active;
450 u16 max_frame_size;
451 u16 num_segs;
452 u32 link_speed;
453 bool link_up;
454 u32 vector;
455 u16 dmac;
456 u32 phy_layer;
510
511 /* Power management-related */
457
458 /* Power management-related */
512 bool wol_support;
513 u32 wufc;
459 bool wol_support;
460 u32 wufc;
514
515 /* Mbuf cluster size */
461
462 /* Mbuf cluster size */
516 u32 rx_mbuf_sz;
463 u32 rx_mbuf_sz;
517
518 /* Support for pluggable optics */
464
465 /* Support for pluggable optics */
519 bool sfp_probe;
520 struct task link_task; /* Link tasklet */
521 struct task mod_task; /* SFP tasklet */
522 struct task msf_task; /* Multispeed Fiber */
523#ifdef PCI_IOV
524 struct task mbx_task; /* VF -> PF mailbox interrupt */
525#endif /* PCI_IOV */
526#ifdef IXGBE_FDIR
527 int fdir_reinit;
528 struct task fdir_task;
529#endif
530 struct task phy_task; /* PHY intr tasklet */
531 struct taskqueue *tq;
466 bool sfp_probe;
467 struct task link_task; /* Link tasklet */
468 struct task mod_task; /* SFP tasklet */
469 struct task msf_task; /* Multispeed Fiber */
470 struct task mbx_task; /* VF -> PF mailbox interrupt */
532
471
472 /* Flow Director */
473 int fdir_reinit;
474 struct task fdir_task;
475
476 struct task phy_task; /* PHY intr tasklet */
477 struct taskqueue *tq;
478
533 /*
479 /*
534 ** Queues:
535 ** This is the irq holder, it has
536 ** and RX/TX pair or rings associated
537 ** with it.
538 */
539 struct ix_queue *queues;
480 * Queues:
481 * This is the irq holder, it has
482 * and RX/TX pair or rings associated
483 * with it.
484 */
485 struct ix_queue *queues;
540
541 /*
486
487 /*
542 * Transmit rings:
543 * Allocated at run time, an array of rings.
488 * Transmit rings
489 * Allocated at run time, an array of rings
544 */
490 */
545 struct tx_ring *tx_rings;
546 u32 num_tx_desc;
547 u32 tx_process_limit;
491 struct tx_ring *tx_rings;
492 u32 num_tx_desc;
493 u32 tx_process_limit;
548
549 /*
494
495 /*
550 * Receive rings:
551 * Allocated at run time, an array of rings.
496 * Receive rings
497 * Allocated at run time, an array of rings
552 */
498 */
553 struct rx_ring *rx_rings;
554 u64 active_queues;
555 u32 num_rx_desc;
556 u32 rx_process_limit;
499 struct rx_ring *rx_rings;
500 u64 active_queues;
501 u32 num_rx_desc;
502 u32 rx_process_limit;
557
558 /* Multicast array memory */
503
504 /* Multicast array memory */
559 struct ixgbe_mc_addr *mta;
560 int num_vfs;
561 int pool;
562#ifdef PCI_IOV
563 struct ixgbe_vf *vfs;
564#endif
565#ifdef DEV_NETMAP
566 void (*init_locked)(struct adapter *);
567 void (*stop_locked)(void *);
568#endif
505 struct ixgbe_mc_addr *mta;
569
506
507 /* SR-IOV */
508 int iov_mode;
509 int num_vfs;
510 int pool;
511 struct ixgbe_vf *vfs;
512
513 /* Bypass */
514 struct ixgbe_bp_data bypass;
515
516 /* Netmap */
517 void (*init_locked)(struct adapter *);
518 void (*stop_locked)(void *);
519
570 /* Misc stats maintained by the driver */
520 /* Misc stats maintained by the driver */
571 unsigned long dropped_pkts;
572 unsigned long mbuf_defrag_failed;
573 unsigned long mbuf_header_failed;
574 unsigned long mbuf_packet_failed;
575 unsigned long watchdog_events;
576 unsigned long link_irq;
521 unsigned long dropped_pkts;
522 unsigned long mbuf_defrag_failed;
523 unsigned long mbuf_header_failed;
524 unsigned long mbuf_packet_failed;
525 unsigned long watchdog_events;
526 unsigned long link_irq;
577 union {
578 struct ixgbe_hw_stats pf;
579 struct ixgbevf_hw_stats vf;
580 } stats;
581#if __FreeBSD_version >= 1100036
582 /* counter(9) stats */
527 union {
528 struct ixgbe_hw_stats pf;
529 struct ixgbevf_hw_stats vf;
530 } stats;
531#if __FreeBSD_version >= 1100036
532 /* counter(9) stats */
583 u64 ipackets;
584 u64 ierrors;
585 u64 opackets;
586 u64 oerrors;
587 u64 ibytes;
588 u64 obytes;
589 u64 imcasts;
590 u64 omcasts;
591 u64 iqdrops;
592 u64 noproto;
533 u64 ipackets;
534 u64 ierrors;
535 u64 opackets;
536 u64 oerrors;
537 u64 ibytes;
538 u64 obytes;
539 u64 imcasts;
540 u64 omcasts;
541 u64 iqdrops;
542 u64 noproto;
593#endif
543#endif
544 /* Feature capable/enabled flags. See ixgbe_features.h */
545 u32 feat_cap;
546 u32 feat_en;
594};
595
596
597/* Precision Time Sync (IEEE 1588) defines */
598#define ETHERTYPE_IEEE1588 0x88F7
599#define PICOSECS_PER_TICK 20833
600#define TSYNC_UDP_PORT 319 /* UDP port for the protocol */
547};
548
549
550/* Precision Time Sync (IEEE 1588) defines */
551#define ETHERTYPE_IEEE1588 0x88F7
552#define PICOSECS_PER_TICK 20833
553#define TSYNC_UDP_PORT 319 /* UDP port for the protocol */
601#define IXGBE_ADVTXD_TSTAMP 0x00080000
554#define IXGBE_ADVTXD_TSTAMP 0x00080000
602
603
604#define IXGBE_CORE_LOCK_INIT(_sc, _name) \
605 mtx_init(&(_sc)->core_mtx, _name, "IXGBE Core Lock", MTX_DEF)
606#define IXGBE_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx)
607#define IXGBE_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx)
608#define IXGBE_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx)
609#define IXGBE_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx)

--- 32 unchanged lines hidden (view full) ---

642#define IXGBE_SET_IBYTES(sc, count) (sc)->ifp->if_ibytes = (count)
643#define IXGBE_SET_OBYTES(sc, count) (sc)->ifp->if_obytes = (count)
644#define IXGBE_SET_IMCASTS(sc, count) (sc)->ifp->if_imcasts = (count)
645#define IXGBE_SET_OMCASTS(sc, count) (sc)->ifp->if_omcasts = (count)
646#define IXGBE_SET_IQDROPS(sc, count) (sc)->ifp->if_iqdrops = (count)
647#endif
648
649/* External PHY register addresses */
555
556
557#define IXGBE_CORE_LOCK_INIT(_sc, _name) \
558 mtx_init(&(_sc)->core_mtx, _name, "IXGBE Core Lock", MTX_DEF)
559#define IXGBE_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx)
560#define IXGBE_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx)
561#define IXGBE_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx)
562#define IXGBE_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx)

--- 32 unchanged lines hidden (view full) ---

595#define IXGBE_SET_IBYTES(sc, count) (sc)->ifp->if_ibytes = (count)
596#define IXGBE_SET_OBYTES(sc, count) (sc)->ifp->if_obytes = (count)
597#define IXGBE_SET_IMCASTS(sc, count) (sc)->ifp->if_imcasts = (count)
598#define IXGBE_SET_OMCASTS(sc, count) (sc)->ifp->if_omcasts = (count)
599#define IXGBE_SET_IQDROPS(sc, count) (sc)->ifp->if_iqdrops = (count)
600#endif
601
602/* External PHY register addresses */
650#define IXGBE_PHY_CURRENT_TEMP 0xC820
651#define IXGBE_PHY_OVERTEMP_STATUS 0xC830
603#define IXGBE_PHY_CURRENT_TEMP 0xC820
604#define IXGBE_PHY_OVERTEMP_STATUS 0xC830
652
653/* Sysctl help messages; displayed with sysctl -d */
654#define IXGBE_SYSCTL_DESC_ADV_SPEED \
605
606/* Sysctl help messages; displayed with sysctl -d */
607#define IXGBE_SYSCTL_DESC_ADV_SPEED \
655 "\nControl advertised link speed using these flags:\n" \
656 "\t0x1 - advertise 100M\n" \
657 "\t0x2 - advertise 1G\n" \
658 "\t0x4 - advertise 10G\n\n" \
659 "\t100M is only supported on certain 10GBaseT adapters.\n"
608 "\nControl advertised link speed using these flags:\n" \
609 "\t0x1 - advertise 100M\n" \
610 "\t0x2 - advertise 1G\n" \
611 "\t0x4 - advertise 10G\n" \
612 "\t0x8 - advertise 10M\n\n" \
613 "\t100M and 10M are only supported on certain adapters.\n"
660
661#define IXGBE_SYSCTL_DESC_SET_FC \
614
615#define IXGBE_SYSCTL_DESC_SET_FC \
662 "\nSet flow control mode using these values:\n" \
663 "\t0 - off\n" \
664 "\t1 - rx pause\n" \
665 "\t2 - tx pause\n" \
666 "\t3 - tx and rx pause"
616 "\nSet flow control mode using these values:\n" \
617 "\t0 - off\n" \
618 "\t1 - rx pause\n" \
619 "\t2 - tx pause\n" \
620 "\t3 - tx and rx pause"
667
621
668static inline bool
669ixgbe_is_sfp(struct ixgbe_hw *hw)
670{
671 switch (hw->phy.type) {
672 case ixgbe_phy_sfp_avago:
673 case ixgbe_phy_sfp_ftl:
674 case ixgbe_phy_sfp_intel:
675 case ixgbe_phy_sfp_unknown:
676 case ixgbe_phy_sfp_passive_tyco:
677 case ixgbe_phy_sfp_passive_unknown:
678 case ixgbe_phy_qsfp_passive_unknown:
679 case ixgbe_phy_qsfp_active_unknown:
680 case ixgbe_phy_qsfp_intel:
681 case ixgbe_phy_qsfp_unknown:
682 return TRUE;
683 default:
684 return FALSE;
685 }
686}
687
/* Workaround to make 8.0 buildable */
#if __FreeBSD_version >= 800000 && __FreeBSD_version < 800504
/*
 * drbr_needs_enqueue - non-zero when a frame cannot bypass the buf_ring:
 * either ALTQ is enabled on the interface or the ring already holds packets.
 */
static __inline int
drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		return (1);
#endif
	return (buf_ring_empty(br) ? 0 : 1);
}
#endif
700
701/*
622/* Workaround to make 8.0 buildable */
623#if __FreeBSD_version >= 800000 && __FreeBSD_version < 800504
/*
 * Non-zero when a frame cannot bypass the buf_ring: either ALTQ is
 * enabled on the interface or the ring already holds packets.
 * NOTE(review): second copy of this helper retained by the diff
 * extraction; identical to the copy above.
 */
624static __inline int
625drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
626{
627#ifdef ALTQ
628 if (ALTQ_IS_ENABLED(&ifp->if_snd))
629 return (1);
630#endif
631 return (!buf_ring_empty(br));
632}
633#endif
634
635/*
702** Find the number of unrefreshed RX descriptors
703*/
636 * Find the number of unrefreshed RX descriptors
637 */
704static inline u16
705ixgbe_rx_unrefreshed(struct rx_ring *rxr)
638static inline u16
639ixgbe_rx_unrefreshed(struct rx_ring *rxr)
706{
640{
707 if (rxr->next_to_check > rxr->next_to_refresh)
708 return (rxr->next_to_check - rxr->next_to_refresh - 1);
709 else
710 return ((rxr->num_desc + rxr->next_to_check) -
711 rxr->next_to_refresh - 1);
641 if (rxr->next_to_check > rxr->next_to_refresh)
642 return (rxr->next_to_check - rxr->next_to_refresh - 1);
643 else
644 return ((rxr->num_desc + rxr->next_to_check) -
645 rxr->next_to_refresh - 1);
712}
646}
713
647
648static inline int
649ixgbe_legacy_ring_empty(struct ifnet *ifp, struct buf_ring *dummy)
650{
651 UNREFERENCED_1PARAMETER(dummy);
652
653 return IFQ_DRV_IS_EMPTY(&ifp->if_snd);
654}
655
714/*
656/*
715** This checks for a zero mac addr, something that will be likely
716** unless the Admin on the Host has created one.
717*/
657 * This checks for a zero mac addr, something that will be likely
658 * unless the Admin on the Host has created one.
659 */
718static inline bool
719ixv_check_ether_addr(u8 *addr)
720{
721 bool status = TRUE;
722
723 if ((addr[0] == 0 && addr[1]== 0 && addr[2] == 0 &&
724 addr[3] == 0 && addr[4]== 0 && addr[5] == 0))
725 status = FALSE;
660static inline bool
661ixv_check_ether_addr(u8 *addr)
662{
663 bool status = TRUE;
664
665 if ((addr[0] == 0 && addr[1]== 0 && addr[2] == 0 &&
666 addr[3] == 0 && addr[4]== 0 && addr[5] == 0))
667 status = FALSE;
668
726 return (status);
727}
728
729/* Shared Prototypes */
669 return (status);
670}
671
672/* Shared Prototypes */
673void ixgbe_legacy_start(struct ifnet *);
674int ixgbe_legacy_start_locked(struct ifnet *, struct tx_ring *);
675int ixgbe_mq_start(struct ifnet *, struct mbuf *);
676int ixgbe_mq_start_locked(struct ifnet *, struct tx_ring *);
677void ixgbe_qflush(struct ifnet *);
678void ixgbe_deferred_mq_start(void *, int);
730
679
731#ifdef IXGBE_LEGACY_TX
732void ixgbe_start(struct ifnet *);
733void ixgbe_start_locked(struct tx_ring *, struct ifnet *);
734#else /* ! IXGBE_LEGACY_TX */
735int ixgbe_mq_start(struct ifnet *, struct mbuf *);
736int ixgbe_mq_start_locked(struct ifnet *, struct tx_ring *);
737void ixgbe_qflush(struct ifnet *);
738void ixgbe_deferred_mq_start(void *, int);
739#endif /* IXGBE_LEGACY_TX */
680int ixgbe_allocate_queues(struct adapter *);
681int ixgbe_setup_transmit_structures(struct adapter *);
682void ixgbe_free_transmit_structures(struct adapter *);
683int ixgbe_setup_receive_structures(struct adapter *);
684void ixgbe_free_receive_structures(struct adapter *);
685void ixgbe_txeof(struct tx_ring *);
686bool ixgbe_rxeof(struct ix_queue *);
740
687
741int ixgbe_allocate_queues(struct adapter *);
742int ixgbe_allocate_transmit_buffers(struct tx_ring *);
743int ixgbe_setup_transmit_structures(struct adapter *);
744void ixgbe_free_transmit_structures(struct adapter *);
745int ixgbe_allocate_receive_buffers(struct rx_ring *);
746int ixgbe_setup_receive_structures(struct adapter *);
747void ixgbe_free_receive_structures(struct adapter *);
748void ixgbe_txeof(struct tx_ring *);
749bool ixgbe_rxeof(struct ix_queue *);
688#include "ixgbe_bypass.h"
689#include "ixgbe_sriov.h"
690#include "ixgbe_fdir.h"
691#include "ixgbe_rss.h"
692#include "ixgbe_netmap.h"
750
693
751int ixgbe_dma_malloc(struct adapter *,
752 bus_size_t, struct ixgbe_dma_alloc *, int);
753void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
754
755#ifdef PCI_IOV
756
757static inline boolean_t
758ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
759{
760 return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
761}
762
763static inline void
764ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
765{
766
767 if (vf->flags & IXGBE_VF_CTS)
768 msg |= IXGBE_VT_MSGTYPE_CTS;
769
770 ixgbe_write_mbx(&adapter->hw, &msg, 1, vf->pool);
771}
772
773static inline void
774ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
775{
776 msg &= IXGBE_VT_MSG_MASK;
777 ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
778}
779
780static inline void
781ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
782{
783 msg &= IXGBE_VT_MSG_MASK;
784 ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
785}
786
787static inline void
788ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
789{
790 if (!(vf->flags & IXGBE_VF_CTS))
791 ixgbe_send_vf_nack(adapter, vf, 0);
792}
793
794static inline enum ixgbe_iov_mode
795ixgbe_get_iov_mode(struct adapter *adapter)
796{
797 if (adapter->num_vfs == 0)
798 return (IXGBE_NO_VM);
799 if (adapter->num_queues <= 2)
800 return (IXGBE_64_VM);
801 else if (adapter->num_queues <= 4)
802 return (IXGBE_32_VM);
803 else
804 return (IXGBE_NO_VM);
805}
806
807static inline u16
808ixgbe_max_vfs(enum ixgbe_iov_mode mode)
809{
810 /*
811 * We return odd numbers below because we
812 * reserve 1 VM's worth of queues for the PF.
813 */
814 switch (mode) {
815 case IXGBE_64_VM:
816 return (63);
817 case IXGBE_32_VM:
818 return (31);
819 case IXGBE_NO_VM:
820 default:
821 return (0);
822 }
823}
824
825static inline int
826ixgbe_vf_queues(enum ixgbe_iov_mode mode)
827{
828 switch (mode) {
829 case IXGBE_64_VM:
830 return (2);
831 case IXGBE_32_VM:
832 return (4);
833 case IXGBE_NO_VM:
834 default:
835 return (0);
836 }
837}
838
839static inline int
840ixgbe_vf_que_index(enum ixgbe_iov_mode mode, u32 vfnum, int num)
841{
842 return ((vfnum * ixgbe_vf_queues(mode)) + num);
843}
844
845static inline int
846ixgbe_pf_que_index(enum ixgbe_iov_mode mode, int num)
847{
848 return (ixgbe_vf_que_index(mode, ixgbe_max_vfs(mode), num));
849}
850
851static inline void
852ixgbe_update_max_frame(struct adapter * adapter, int max_frame)
853{
854 if (adapter->max_frame_size < max_frame)
855 adapter->max_frame_size = max_frame;
856}
857
858static inline u32
859ixgbe_get_mrqc(enum ixgbe_iov_mode mode)
860{
861 u32 mrqc = 0;
862 switch (mode) {
863 case IXGBE_64_VM:
864 mrqc = IXGBE_MRQC_VMDQRSS64EN;
865 break;
866 case IXGBE_32_VM:
867 mrqc = IXGBE_MRQC_VMDQRSS32EN;
868 break;
869 case IXGBE_NO_VM:
870 mrqc = 0;
871 break;
872 default:
873 panic("Unexpected SR-IOV mode %d", mode);
874 }
875 return(mrqc);
876}
877
878
879static inline u32
880ixgbe_get_mtqc(enum ixgbe_iov_mode mode)
881{
882 uint32_t mtqc = 0;
883 switch (mode) {
884 case IXGBE_64_VM:
885 mtqc |= IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
886 break;
887 case IXGBE_32_VM:
888 mtqc |= IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
889 break;
890 case IXGBE_NO_VM:
891 mtqc = IXGBE_MTQC_64Q_1PB;
892 break;
893 default:
894 panic("Unexpected SR-IOV mode %d", mode);
895 }
896 return(mtqc);
897}
898#endif /* PCI_IOV */
899
900#endif /* _IXGBE_H_ */
694#endif /* _IXGBE_H_ */