3c3
< Copyright (c) 2001-2015, Intel Corporation
---
> Copyright (c) 2001-2017, Intel Corporation
5,6c5,6
<
< Redistribution and use in source and binary forms, with or without
---
>
> Redistribution and use in source and binary forms, with or without
8,9c8,9
<
< 1. Redistributions of source code must retain the above copyright notice,
---
>
> 1. Redistributions of source code must retain the above copyright notice,
11,13c11,13
<
< 2. Redistributions in binary form must reproduce the above copyright
< notice, this list of conditions and the following disclaimer in the
---
>
> 2. Redistributions in binary form must reproduce the above copyright
> notice, this list of conditions and the following disclaimer in the
15,17c15,17
<
< 3. Neither the name of the Intel Corporation nor the names of its
< contributors may be used to endorse or promote products derived from
---
>
> 3. Neither the name of the Intel Corporation nor the names of its
> contributors may be used to endorse or promote products derived from
19c19
<
---
>
21,28c21,28
< AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
< IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
< ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
< LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
< CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
< SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
< INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
< CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
---
> AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
> LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33c33
< /*$FreeBSD: stable/11/sys/dev/ixgbe/ixgbe.h 294795 2016-01-26 12:30:17Z smh $*/
---
> /*$FreeBSD: stable/11/sys/dev/ixgbe/ixgbe.h 320897 2017-07-11 21:25:07Z erj $*/
42d41
< #ifndef IXGBE_LEGACY_TX
44d42
< #endif
95,100d92
< #ifdef PCI_IOV
< #include <sys/nv.h>
< #include <sys/iov_schema.h>
< #include <dev/pci/pci_iov.h>
< #endif
<
104a97
> #include "ixgbe_features.h"
106,110d98
< #ifdef PCI_IOV
< #include "ixgbe_common.h"
< #include "ixgbe_mbx.h"
< #endif
<
120,123c108,111
< #define DEFAULT_TXD 1024
< #define PERFORM_TXD 2048
< #define MAX_TXD 4096
< #define MIN_TXD 64
---
> #define DEFAULT_TXD 1024
> #define PERFORM_TXD 2048
> #define MAX_TXD 4096
> #define MIN_TXD 64
129,133c117,121
< * is 16 bytes. A receive buffer is also allocated for each descriptor.
< *
< * Note: with 8 rings and a dual port card, it is possible to bump up
< * against the system mbuf pool limit, you can tune nmbclusters
< * to adjust for this.
---
> * is 16 bytes. A receive buffer is also allocated for each descriptor.
> *
> * Note: with 8 rings and a dual port card, it is possible to bump up
> * against the system mbuf pool limit; you can tune nmbclusters
> * to adjust for this.
135,138c123,126
< #define DEFAULT_RXD 1024
< #define PERFORM_RXD 2048
< #define MAX_RXD 4096
< #define MIN_RXD 64
---
> #define DEFAULT_RXD 1024
> #define PERFORM_RXD 2048
> #define MAX_RXD 4096
> #define MIN_RXD 64
141c129
< #define DBA_ALIGN 128
---
> #define DBA_ALIGN 128
148c136
< #define IXGBE_WATCHDOG (10 * hz)
---
> #define IXGBE_WATCHDOG (10 * hz)
154,155c142,143
< #define IXGBE_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 8)
< #define IXGBE_TX_OP_THRESHOLD (adapter->num_tx_desc / 32)
---
> #define IXGBE_TX_CLEANUP_THRESHOLD(_a) ((_a)->num_tx_desc / 8)
> #define IXGBE_TX_OP_THRESHOLD(_a) ((_a)->num_tx_desc / 32)
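The hunk above parameterizes the TX threshold macros: the old forms expanded to adapter->num_tx_desc, so they compiled only where a local literally named "adapter" was in scope. A minimal sketch of the difference, using a stand-in struct (names here are illustrative, not the driver's):

    /* Stand-in for the driver's per-device state. */
    struct adapter { int num_tx_desc; int tx_avail; };

    /* Old style: hidden dependency on a local named "adapter". */
    #define OLD_TX_CLEANUP_THRESHOLD       (adapter->num_tx_desc / 8)

    /* New style: the adapter is an explicit argument, so any caller
     * variable (sc, que->adapter, ...) works. */
    #define IXGBE_TX_CLEANUP_THRESHOLD(_a) ((_a)->num_tx_desc / 8)

    static int
    tx_needs_cleanup(struct adapter *sc)
    {
            return (sc->tx_avail <= IXGBE_TX_CLEANUP_THRESHOLD(sc));
    }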
158,163c146,151
< #define IXGBE_MAX_FRAME_SIZE 9728
< #define IXGBE_MTU_HDR (ETHER_HDR_LEN + ETHER_CRC_LEN)
< #define IXGBE_MTU_HDR_VLAN (ETHER_HDR_LEN + ETHER_CRC_LEN + \
< ETHER_VLAN_ENCAP_LEN)
< #define IXGBE_MAX_MTU (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR)
< #define IXGBE_MAX_MTU_VLAN (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR_VLAN)
---
> #define IXGBE_MAX_FRAME_SIZE 9728
> #define IXGBE_MTU_HDR (ETHER_HDR_LEN + ETHER_CRC_LEN)
> #define IXGBE_MTU_HDR_VLAN (ETHER_HDR_LEN + ETHER_CRC_LEN + \
> ETHER_VLAN_ENCAP_LEN)
> #define IXGBE_MAX_MTU (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR)
> #define IXGBE_MAX_MTU_VLAN (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR_VLAN)
166,168c154,156
< #define IXGBE_FC_PAUSE 0xFFFF
< #define IXGBE_FC_HI 0x20000
< #define IXGBE_FC_LO 0x10000
---
> #define IXGBE_FC_PAUSE 0xFFFF
> #define IXGBE_FC_HI 0x20000
> #define IXGBE_FC_LO 0x10000
173c161
< *
---
> *
181c169
< #define MPKTHSIZE (sizeof(struct m_hdr) + sizeof(struct pkthdr))
---
> #define MPKTHSIZE (sizeof(struct m_hdr) + sizeof(struct pkthdr))
183,185c171,173
< #define IXGBE_RX_COPY_HDR_PADDED ((((MPKTHSIZE - 1) / 32) + 1) * 32)
< #define IXGBE_RX_COPY_LEN (MSIZE - IXGBE_RX_COPY_HDR_PADDED)
< #define IXGBE_RX_COPY_ALIGN (IXGBE_RX_COPY_HDR_PADDED - MPKTHSIZE)
---
> #define IXGBE_RX_COPY_HDR_PADDED ((((MPKTHSIZE - 1) / 32) + 1) * 32)
> #define IXGBE_RX_COPY_LEN (MSIZE - IXGBE_RX_COPY_HDR_PADDED)
> #define IXGBE_RX_COPY_ALIGN (IXGBE_RX_COPY_HDR_PADDED - MPKTHSIZE)
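IXGBE_RX_COPY_HDR_PADDED rounds the mbuf header size up to the next multiple of 32 so the copied payload starts aligned; IXGBE_RX_COPY_LEN is then whatever remains of the mbuf. A self-contained check of the round-up idiom (the sizes below are illustrative; the real MPKTHSIZE and MSIZE depend on the kernel configuration):

    #include <assert.h>

    /* Same idiom as IXGBE_RX_COPY_HDR_PADDED: round x up to 32. */
    #define ROUND_UP_32(x) (((((x) - 1) / 32) + 1) * 32)

    int
    main(void)
    {
            assert(ROUND_UP_32(1)  == 32);
            assert(ROUND_UP_32(32) == 32);  /* exact multiples unchanged */
            assert(ROUND_UP_32(33) == 64);
            /* e.g., a 56-byte header pads to 64, leaving 256 - 64 = 192
             * bytes of copy room in a hypothetical 256-byte mbuf. */
            assert(ROUND_UP_32(56) == 64);
            return (0);
    }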
208,218c196,206
< #define IXGBE_82598_SCATTER 100
< #define IXGBE_82599_SCATTER 32
< #define MSIX_82598_BAR 3
< #define MSIX_82599_BAR 4
< #define IXGBE_TSO_SIZE 262140
< #define IXGBE_RX_HDR 128
< #define IXGBE_VFTA_SIZE 128
< #define IXGBE_BR_SIZE 4096
< #define IXGBE_QUEUE_MIN_FREE 32
< #define IXGBE_MAX_TX_BUSY 10
< #define IXGBE_QUEUE_HUNG 0x80000000
---
> #define IXGBE_82598_SCATTER 100
> #define IXGBE_82599_SCATTER 32
> #define MSIX_82598_BAR 3
> #define MSIX_82599_BAR 4
> #define IXGBE_TSO_SIZE 262140
> #define IXGBE_RX_HDR 128
> #define IXGBE_VFTA_SIZE 128
> #define IXGBE_BR_SIZE 4096
> #define IXGBE_QUEUE_MIN_FREE 32
> #define IXGBE_MAX_TX_BUSY 10
> #define IXGBE_QUEUE_HUNG 0x80000000
220c208
< #define IXV_EITR_DEFAULT 128
---
> #define IXGBE_EITR_DEFAULT 128
224,226c212,214
< #define CSUM_OFFLOAD (CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
< CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
< CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
---
> #define CSUM_OFFLOAD (CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
> CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
> CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
228c216
< #define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
---
> #define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
230c218
< #define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP)
---
> #define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP)
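CSUM_OFFLOAD collapses the per-__FreeBSD_version checksum flags into one word. A hedged sketch of how such a mask is typically applied at attach time (this is the common ifnet pattern, not necessarily this driver's exact code; assumes the kernel's <net/if.h> and <net/if_var.h>):

    /* Kernel-context sketch: advertise hardware checksum assistance
     * only when the administrator has the capability enabled. */
    static void
    example_set_hwassist(struct ifnet *ifp)
    {
            ifp->if_hwassist = 0;
            if (ifp->if_capenable & IFCAP_TXCSUM)
                    ifp->if_hwassist |= CSUM_OFFLOAD;
    }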
243c231
< * Interrupt Moderation parameters
---
> * Interrupt Moderation parameters
245,247c233,235
< #define IXGBE_LOW_LATENCY 128
< #define IXGBE_AVE_LATENCY 400
< #define IXGBE_BULK_LATENCY 1200
---
> #define IXGBE_LOW_LATENCY 128
> #define IXGBE_AVE_LATENCY 400
> #define IXGBE_BULK_LATENCY 1200
250,252c238,240
< #define IXGBE_LINK_ITR_QUANTA 0x1FF
< #define IXGBE_LINK_ITR ((IXGBE_LINK_ITR_QUANTA << 3) & \
< IXGBE_EITR_ITR_INT_MASK)
---
> #define IXGBE_LINK_ITR_QUANTA 0x1FF
> #define IXGBE_LINK_ITR ((IXGBE_LINK_ITR_QUANTA << 3) & \
> IXGBE_EITR_ITR_INT_MASK)
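The link vector's EITR value is the maximum 0x1FF quanta shifted into the register's interval field, which starts at bit 3. A quick check of the resulting constant (the mask value below is an assumption based on the hardware header's field layout):

    #include <assert.h>
    #include <stdint.h>

    #define IXGBE_LINK_ITR_QUANTA   0x1FF
    #define IXGBE_EITR_ITR_INT_MASK 0x00000FF8  /* assumed: bits 3..11 */

    int
    main(void)
    {
            uint32_t itr = (IXGBE_LINK_ITR_QUANTA << 3) &
                IXGBE_EITR_ITR_INT_MASK;
            assert(itr == 0xFF8);  /* slowest moderation the field holds */
            return (0);
    }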
254,257d241
< /* MAC type macros */
< #define IXGBE_IS_X550VF(_adapter) \
< ((_adapter->hw.mac.type == ixgbe_mac_X550_vf) || \
< (_adapter->hw.mac.type == ixgbe_mac_X550EM_x_vf))
259,262d242
< #define IXGBE_IS_VF(_adapter) \
< (IXGBE_IS_X550VF(_adapter) || \
< (_adapter->hw.mac.type == ixgbe_mac_X540_vf) || \
< (_adapter->hw.mac.type == ixgbe_mac_82599_vf))
264,289c244
< #ifdef PCI_IOV
< #define IXGBE_VF_INDEX(vmdq) ((vmdq) / 32)
< #define IXGBE_VF_BIT(vmdq) (1 << ((vmdq) % 32))
<
< #define IXGBE_VT_MSG_MASK 0xFFFF
<
< #define IXGBE_VT_MSGINFO(msg) \
< (((msg) & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT)
<
< #define IXGBE_VF_GET_QUEUES_RESP_LEN 5
<
< #define IXGBE_API_VER_1_0 0
< #define IXGBE_API_VER_2_0 1 /* Solaris API. Not supported. */
< #define IXGBE_API_VER_1_1 2
< #define IXGBE_API_VER_UNKNOWN UINT16_MAX
<
< enum ixgbe_iov_mode {
< IXGBE_64_VM,
< IXGBE_32_VM,
< IXGBE_NO_VM
< };
< #endif /* PCI_IOV */
<
<
< /*
< *****************************************************************************
---
> /************************************************************************
291,296c246,249
< *
< * This array contains the list of Subvendor/Subdevice IDs on which the driver
< * should load.
< *
< *****************************************************************************
< */
---
> *
> * Contains the list of Subvendor/Subdevice IDs on
> * which the driver should load.
> ************************************************************************/
298,302c251,255
< unsigned int vendor_id;
< unsigned int device_id;
< unsigned int subvendor_id;
< unsigned int subdevice_id;
< unsigned int index;
---
> unsigned int vendor_id;
> unsigned int device_id;
> unsigned int subvendor_id;
> unsigned int subdevice_id;
> unsigned int index;
304a258,262
> struct ixgbe_bp_data {
> u32 low;
> u32 high;
> u32 log;
> };
307,309c265,267
< union ixgbe_adv_tx_desc *eop;
< struct mbuf *m_head;
< bus_dmamap_t map;
---
> union ixgbe_adv_tx_desc *eop;
> struct mbuf *m_head;
> bus_dmamap_t map;
313,318c271,276
< struct mbuf *buf;
< struct mbuf *fmp;
< bus_dmamap_t pmap;
< u_int flags;
< #define IXGBE_RX_COPY 0x01
< uint64_t addr;
---
> struct mbuf *buf;
> struct mbuf *fmp;
> bus_dmamap_t pmap;
> u_int flags;
> #define IXGBE_RX_COPY 0x01
> uint64_t addr;
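The IXGBE_RX_COPY flag marks a receive buffer whose frame was small enough to copy into a fresh mbuf, leaving the DMA-mapped cluster in place for reuse. A hedged sketch of that decision (hypothetical helper; the real logic lives in the driver's rxeof path):

    /* Copy a small frame out of cluster "mp" so it can be recycled;
     * returns NULL when the frame should be handed up as-is. */
    static struct mbuf *
    rx_small_copy(struct ixgbe_rx_buf *rbuf, struct mbuf *mp, int len)
    {
            struct mbuf *sendmp;

            if (len > IXGBE_RX_COPY_LEN)
                    return (NULL);
            sendmp = m_gethdr(M_NOWAIT, MT_DATA);
            if (sendmp == NULL)
                    return (NULL);
            sendmp->m_data += IXGBE_RX_COPY_ALIGN;  /* align the payload */
            bcopy(mtod(mp, caddr_t), mtod(sendmp, caddr_t), len);
            sendmp->m_len = len;
            rbuf->flags |= IXGBE_RX_COPY;           /* cluster stays mapped */
            return (sendmp);
    }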
322c280
< * Bus dma allocation structure used by ixgbe_dma_malloc and ixgbe_dma_free.
---
> * Bus dma allocation structure used by ixgbe_dma_malloc and ixgbe_dma_free
325,331c283,289
< bus_addr_t dma_paddr;
< caddr_t dma_vaddr;
< bus_dma_tag_t dma_tag;
< bus_dmamap_t dma_map;
< bus_dma_segment_t dma_seg;
< bus_size_t dma_size;
< int dma_nseg;
---
> bus_addr_t dma_paddr;
> caddr_t dma_vaddr;
> bus_dma_tag_t dma_tag;
> bus_dmamap_t dma_map;
> bus_dma_segment_t dma_seg;
> bus_size_t dma_size;
> int dma_nseg;
335c293
< u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
---
> u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
340,342c298,300
< ** Driver queue struct: this is the interrupt container
< ** for the associated tx and rx ring.
< */
---
> * Driver queue struct: this is the interrupt container
> * for the associated tx and rx ring.
> */
344,356c302,314
< struct adapter *adapter;
< u32 msix; /* This queue's MSIX vector */
< u32 eims; /* This queue's EIMS bit */
< u32 eitr_setting;
< u32 me;
< struct resource *res;
< void *tag;
< int busy;
< struct tx_ring *txr;
< struct rx_ring *rxr;
< struct task que_task;
< struct taskqueue *tq;
< u64 irqs;
---
> struct adapter *adapter;
> u32 msix; /* This queue's MSI-X vector */
> u32 eims; /* This queue's EIMS bit */
> u32 eitr_setting;
> u32 me;
> struct resource *res;
> void *tag;
> int busy;
> struct tx_ring *txr;
> struct rx_ring *rxr;
> struct task que_task;
> struct taskqueue *tq;
> u64 irqs;
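Each ix_queue bundles everything one MSI-X vector needs: the interrupt resource and cookie, the EIMS bit for masking, and the TX/RX ring pair. A hedged fragment of how such a vector is typically bound with bus_setup_intr(9) (the handler name follows the driver's convention but is an assumption here):

    /* Kernel-context fragment: allocate and wire one queue vector. */
    que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE);
    if (que->res == NULL)
            return (ENXIO);
    error = bus_setup_intr(dev, que->res, INTR_TYPE_NET | INTR_MPSAFE,
        NULL, ixgbe_msix_que, que, &que->tag);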
363,387c321,344
< struct adapter *adapter;
< struct mtx tx_mtx;
< u32 me;
< u32 tail;
< int busy;
< union ixgbe_adv_tx_desc *tx_base;
< struct ixgbe_tx_buf *tx_buffers;
< struct ixgbe_dma_alloc txdma;
< volatile u16 tx_avail;
< u16 next_avail_desc;
< u16 next_to_clean;
< u16 num_desc;
< u32 txd_cmd;
< bus_dma_tag_t txtag;
< char mtx_name[16];
< #ifndef IXGBE_LEGACY_TX
< struct buf_ring *br;
< struct task txq_task;
< #endif
< #ifdef IXGBE_FDIR
< u16 atr_sample;
< u16 atr_count;
< #endif
< u32 bytes; /* used for AIM */
< u32 packets;
---
> struct adapter *adapter;
> struct mtx tx_mtx;
> u32 me;
> u32 tail;
> int busy;
> union ixgbe_adv_tx_desc *tx_base;
> struct ixgbe_tx_buf *tx_buffers;
> struct ixgbe_dma_alloc txdma;
> volatile u16 tx_avail;
> u16 next_avail_desc;
> u16 next_to_clean;
> u16 num_desc;
> u32 txd_cmd;
> bus_dma_tag_t txtag;
> char mtx_name[16];
> struct buf_ring *br;
> struct task txq_task;
>
> /* Flow Director */
> u16 atr_sample;
> u16 atr_count;
>
> u32 bytes; /* used for AIM */
> u32 packets;
389,393c346,350
< unsigned long tso_tx;
< unsigned long no_tx_map_avail;
< unsigned long no_tx_dma_setup;
< u64 no_desc_avail;
< u64 total_packets;
---
> u64 tso_tx;
> u64 no_tx_map_avail;
> u64 no_tx_dma_setup;
> u64 no_desc_avail;
> u64 total_packets;
401,417c358,374
< struct adapter *adapter;
< struct mtx rx_mtx;
< u32 me;
< u32 tail;
< union ixgbe_adv_rx_desc *rx_base;
< struct ixgbe_dma_alloc rxdma;
< struct lro_ctrl lro;
< bool lro_enabled;
< bool hw_rsc;
< bool vtag_strip;
< u16 next_to_refresh;
< u16 next_to_check;
< u16 num_desc;
< u16 mbuf_sz;
< char mtx_name[16];
< struct ixgbe_rx_buf *rx_buffers;
< bus_dma_tag_t ptag;
---
> struct adapter *adapter;
> struct mtx rx_mtx;
> u32 me;
> u32 tail;
> union ixgbe_adv_rx_desc *rx_base;
> struct ixgbe_dma_alloc rxdma;
> struct lro_ctrl lro;
> bool lro_enabled;
> bool hw_rsc;
> bool vtag_strip;
> u16 next_to_refresh;
> u16 next_to_check;
> u16 num_desc;
> u16 mbuf_sz;
> char mtx_name[16];
> struct ixgbe_rx_buf *rx_buffers;
> bus_dma_tag_t ptag;
419,420c376,377
< u32 bytes; /* Used for AIM calc */
< u32 packets;
---
> u32 bytes; /* Used for AIM calc */
> u32 packets;
423,431c380,388
< u64 rx_irq;
< u64 rx_copies;
< u64 rx_packets;
< u64 rx_bytes;
< u64 rx_discarded;
< u64 rsc_num;
< #ifdef IXGBE_FDIR
< u64 flm;
< #endif
---
> u64 rx_irq;
> u64 rx_copies;
> u64 rx_packets;
> u64 rx_bytes;
> u64 rx_discarded;
> u64 rsc_num;
>
> /* Flow Director */
> u64 flm;
434,439d390
< #ifdef PCI_IOV
< #define IXGBE_VF_CTS (1 << 0) /* VF is clear to send. */
< #define IXGBE_VF_CAP_MAC (1 << 1) /* VF is permitted to change MAC. */
< #define IXGBE_VF_CAP_VLAN (1 << 2) /* VF is permitted to join vlans. */
< #define IXGBE_VF_ACTIVE (1 << 3) /* VF is active. */
<
443,452c394,403
< u_int pool;
< u_int rar_index;
< u_int max_frame_size;
< uint32_t flags;
< uint8_t ether_addr[ETHER_ADDR_LEN];
< uint16_t mc_hash[IXGBE_MAX_VF_MC];
< uint16_t num_mc_hashes;
< uint16_t default_vlan;
< uint16_t vlan_tag;
< uint16_t api_ver;
---
> u_int pool;
> u_int rar_index;
> u_int max_frame_size;
> uint32_t flags;
> uint8_t ether_addr[ETHER_ADDR_LEN];
> uint16_t mc_hash[IXGBE_MAX_VF_MC];
> uint16_t num_mc_hashes;
> uint16_t default_vlan;
> uint16_t vlan_tag;
> uint16_t api_ver;
454d404
< #endif /* PCI_IOV */
458,459c408,409
< struct ixgbe_hw hw;
< struct ixgbe_osdep osdep;
---
> struct ixgbe_hw hw;
> struct ixgbe_osdep osdep;
461,462c411,412
< struct device *dev;
< struct ifnet *ifp;
---
> device_t dev;
> struct ifnet *ifp;
464,465c414,415
< struct resource *pci_mem;
< struct resource *msix_mem;
---
> struct resource *pci_mem;
> struct resource *msix_mem;
470c420
< * when doing MSIX
---
> * when doing MSI-X
472,473c422,423
< void *tag;
< struct resource *res;
---
> void *tag;
> struct resource *res;
475,478c425,428
< struct ifmedia media;
< struct callout timer;
< int msix;
< int if_flags;
---
> struct ifmedia media;
> struct callout timer;
> int link_rid;
> int if_flags;
480c430
< struct mtx core_mtx;
---
> struct mtx core_mtx;
482,483c432,433
< eventhandler_tag vlan_attach;
< eventhandler_tag vlan_detach;
---
> eventhandler_tag vlan_attach;
> eventhandler_tag vlan_detach;
485,486c435,436
< u16 num_vlans;
< u16 num_queues;
---
> u16 num_vlans;
> u16 num_queues;
489,494c439,444
< ** Shadow VFTA table, this is needed because
< ** the real vlan filter table gets cleared during
< ** a soft reset and the driver needs to be able
< ** to repopulate it.
< */
< u32 shadow_vfta[IXGBE_VFTA_SIZE];
---
> * Shadow VFTA table; this is needed because
> * the real vlan filter table gets cleared during
> * a soft reset and the driver needs to be able
> * to repopulate it.
> */
> u32 shadow_vfta[IXGBE_VFTA_SIZE];
497,509c447,456
< u32 optics;
< u32 fc; /* local flow ctrl setting */
< int advertise; /* link speeds */
< bool enable_aim; /* adaptive interrupt moderation */
< bool link_active;
< u16 max_frame_size;
< u16 num_segs;
< u32 link_speed;
< bool link_up;
< u32 vector;
< u16 dmac;
< bool eee_enabled;
< u32 phy_layer;
---
> int advertise; /* link speeds */
> int enable_aim; /* adaptive interrupt moderation */
> bool link_active;
> u16 max_frame_size;
> u16 num_segs;
> u32 link_speed;
> bool link_up;
> u32 vector;
> u16 dmac;
> u32 phy_layer;
512,513c459,460
< bool wol_support;
< u32 wufc;
---
> bool wol_support;
> u32 wufc;
516c463
< u32 rx_mbuf_sz;
---
> u32 rx_mbuf_sz;
519,531c466,470
< bool sfp_probe;
< struct task link_task; /* Link tasklet */
< struct task mod_task; /* SFP tasklet */
< struct task msf_task; /* Multispeed Fiber */
< #ifdef PCI_IOV
< struct task mbx_task; /* VF -> PF mailbox interrupt */
< #endif /* PCI_IOV */
< #ifdef IXGBE_FDIR
< int fdir_reinit;
< struct task fdir_task;
< #endif
< struct task phy_task; /* PHY intr tasklet */
< struct taskqueue *tq;
---
> bool sfp_probe;
> struct task link_task; /* Link tasklet */
> struct task mod_task; /* SFP tasklet */
> struct task msf_task; /* Multispeed Fiber */
> struct task mbx_task; /* VF -> PF mailbox interrupt */
532a472,478
> /* Flow Director */
> int fdir_reinit;
> struct task fdir_task;
>
> struct task phy_task; /* PHY intr tasklet */
> struct taskqueue *tq;
>
534,539c480,485
< ** Queues:
< ** This is the irq holder, it has
< ** and RX/TX pair or rings associated
< ** with it.
< */
< struct ix_queue *queues;
---
> * Queues:
> * This is the irq holder; it has
> * an RX/TX pair of rings associated
> * with it.
> */
> struct ix_queue *queues;
542,543c488,489
< * Transmit rings:
< * Allocated at run time, an array of rings.
---
> * Transmit rings
> * Allocated at run time, an array of rings
545,547c491,493
< struct tx_ring *tx_rings;
< u32 num_tx_desc;
< u32 tx_process_limit;
---
> struct tx_ring *tx_rings;
> u32 num_tx_desc;
> u32 tx_process_limit;
550,551c496,497
< * Receive rings:
< * Allocated at run time, an array of rings.
---
> * Receive rings
> * Allocated at run time, an array of rings
553,556c499,502
< struct rx_ring *rx_rings;
< u64 active_queues;
< u32 num_rx_desc;
< u32 rx_process_limit;
---
> struct rx_ring *rx_rings;
> u64 active_queues;
> u32 num_rx_desc;
> u32 rx_process_limit;
559,568c505
< struct ixgbe_mc_addr *mta;
< int num_vfs;
< int pool;
< #ifdef PCI_IOV
< struct ixgbe_vf *vfs;
< #endif
< #ifdef DEV_NETMAP
< void (*init_locked)(struct adapter *);
< void (*stop_locked)(void *);
< #endif
---
> struct ixgbe_mc_addr *mta;
569a507,519
> /* SR-IOV */
> int iov_mode;
> int num_vfs;
> int pool;
> struct ixgbe_vf *vfs;
>
> /* Bypass */
> struct ixgbe_bp_data bypass;
>
> /* Netmap */
> void (*init_locked)(struct adapter *);
> void (*stop_locked)(void *);
>
571,576c521,526
< unsigned long dropped_pkts;
< unsigned long mbuf_defrag_failed;
< unsigned long mbuf_header_failed;
< unsigned long mbuf_packet_failed;
< unsigned long watchdog_events;
< unsigned long link_irq;
---
> unsigned long dropped_pkts;
> unsigned long mbuf_defrag_failed;
> unsigned long mbuf_header_failed;
> unsigned long mbuf_packet_failed;
> unsigned long watchdog_events;
> unsigned long link_irq;
583,592c533,542
< u64 ipackets;
< u64 ierrors;
< u64 opackets;
< u64 oerrors;
< u64 ibytes;
< u64 obytes;
< u64 imcasts;
< u64 omcasts;
< u64 iqdrops;
< u64 noproto;
---
> u64 ipackets;
> u64 ierrors;
> u64 opackets;
> u64 oerrors;
> u64 ibytes;
> u64 obytes;
> u64 imcasts;
> u64 omcasts;
> u64 iqdrops;
> u64 noproto;
593a544,546
> /* Feature capable/enabled flags. See ixgbe_features.h */
> u32 feat_cap;
> u32 feat_en;
601c554
< #define IXGBE_ADVTXD_TSTAMP 0x00080000
---
> #define IXGBE_ADVTXD_TSTAMP 0x00080000
650,651c603,604
< #define IXGBE_PHY_CURRENT_TEMP 0xC820
< #define IXGBE_PHY_OVERTEMP_STATUS 0xC830
---
> #define IXGBE_PHY_CURRENT_TEMP 0xC820
> #define IXGBE_PHY_OVERTEMP_STATUS 0xC830
655,659c608,613
< "\nControl advertised link speed using these flags:\n" \
< "\t0x1 - advertise 100M\n" \
< "\t0x2 - advertise 1G\n" \
< "\t0x4 - advertise 10G\n\n" \
< "\t100M is only supported on certain 10GBaseT adapters.\n"
---
> "\nControl advertised link speed using these flags:\n" \
> "\t0x1 - advertise 100M\n" \
> "\t0x2 - advertise 1G\n" \
> "\t0x4 - advertise 10G\n" \
> "\t0x8 - advertise 10M\n\n" \
> "\t100M and 10M are only supported on certain adapters.\n"
662,666c616,620
< "\nSet flow control mode using these values:\n" \
< "\t0 - off\n" \
< "\t1 - rx pause\n" \
< "\t2 - tx pause\n" \
< "\t3 - tx and rx pause"
---
> "\nSet flow control mode using these values:\n" \
> "\t0 - off\n" \
> "\t1 - rx pause\n" \
> "\t2 - tx pause\n" \
> "\t3 - tx and rx pause"
668,687d621
< static inline bool
< ixgbe_is_sfp(struct ixgbe_hw *hw)
< {
< switch (hw->phy.type) {
< case ixgbe_phy_sfp_avago:
< case ixgbe_phy_sfp_ftl:
< case ixgbe_phy_sfp_intel:
< case ixgbe_phy_sfp_unknown:
< case ixgbe_phy_sfp_passive_tyco:
< case ixgbe_phy_sfp_passive_unknown:
< case ixgbe_phy_qsfp_passive_unknown:
< case ixgbe_phy_qsfp_active_unknown:
< case ixgbe_phy_qsfp_intel:
< case ixgbe_phy_qsfp_unknown:
< return TRUE;
< default:
< return FALSE;
< }
< }
<
702,703c636,637
< ** Find the number of unrefreshed RX descriptors
< */
---
> * Find the number of unrefreshed RX descriptors
> */
706c640
< {
---
> {
712c646
< }
---
> }
713a648,655
> static inline int
> ixgbe_legacy_ring_empty(struct ifnet *ifp, struct buf_ring *dummy)
> {
> UNREFERENCED_1PARAMETER(dummy);
>
> return IFQ_DRV_IS_EMPTY(&ifp->if_snd);
> }
>
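ixgbe_legacy_ring_empty deliberately matches the drbr_empty(9) signature and ignores its buf_ring argument, so the legacy if_snd path and the multiqueue path can sit behind one function pointer now that IXGBE_LEGACY_TX is a runtime choice. A hedged sketch of that dispatch (the member name "ring_empty" is an assumption; the feature flag comes from the new ixgbe_features.h):

    /* Hypothetical adapter member: one pointer serves both TX models. */
    int (*ring_empty)(struct ifnet *, struct buf_ring *);

    if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX)
            adapter->ring_empty = ixgbe_legacy_ring_empty;
    else
            adapter->ring_empty = drbr_empty;

    /* Callers no longer care which model is active: */
    if (!adapter->ring_empty(ifp, txr->br))
            ; /* more frames queued: keep transmitting */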
715,717c657,659
< ** This checks for a zero mac addr, something that will be likely
< ** unless the Admin on the Host has created one.
< */
---
> * This checks for a zero mac addr, something that is likely
> * unless the Admin on the Host has created one.
> */
725a668
>
729a673,678
> void ixgbe_legacy_start(struct ifnet *);
> int ixgbe_legacy_start_locked(struct ifnet *, struct tx_ring *);
> int ixgbe_mq_start(struct ifnet *, struct mbuf *);
> int ixgbe_mq_start_locked(struct ifnet *, struct tx_ring *);
> void ixgbe_qflush(struct ifnet *);
> void ixgbe_deferred_mq_start(void *, int);
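With both start paths always compiled, the legacy/multiqueue choice moves from #ifdef (removed in the next hunk) to attach time. A hedged sketch of the usual ifnet hookup under that scheme (the flag name is assumed from ixgbe_features.h):

    if (adapter->feat_en & IXGBE_FEATURE_LEGACY_TX) {
            ifp->if_start = ixgbe_legacy_start;
            IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
    } else {
            ifp->if_transmit = ixgbe_mq_start;
            ifp->if_qflush = ixgbe_qflush;
    }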
731,739c680,686
< #ifdef IXGBE_LEGACY_TX
< void ixgbe_start(struct ifnet *);
< void ixgbe_start_locked(struct tx_ring *, struct ifnet *);
< #else /* ! IXGBE_LEGACY_TX */
< int ixgbe_mq_start(struct ifnet *, struct mbuf *);
< int ixgbe_mq_start_locked(struct ifnet *, struct tx_ring *);
< void ixgbe_qflush(struct ifnet *);
< void ixgbe_deferred_mq_start(void *, int);
< #endif /* IXGBE_LEGACY_TX */
---
> int ixgbe_allocate_queues(struct adapter *);
> int ixgbe_setup_transmit_structures(struct adapter *);
> void ixgbe_free_transmit_structures(struct adapter *);
> int ixgbe_setup_receive_structures(struct adapter *);
> void ixgbe_free_receive_structures(struct adapter *);
> void ixgbe_txeof(struct tx_ring *);
> bool ixgbe_rxeof(struct ix_queue *);
741,749c688,692
< int ixgbe_allocate_queues(struct adapter *);
< int ixgbe_allocate_transmit_buffers(struct tx_ring *);
< int ixgbe_setup_transmit_structures(struct adapter *);
< void ixgbe_free_transmit_structures(struct adapter *);
< int ixgbe_allocate_receive_buffers(struct rx_ring *);
< int ixgbe_setup_receive_structures(struct adapter *);
< void ixgbe_free_receive_structures(struct adapter *);
< void ixgbe_txeof(struct tx_ring *);
< bool ixgbe_rxeof(struct ix_queue *);
---
> #include "ixgbe_bypass.h"
> #include "ixgbe_sriov.h"
> #include "ixgbe_fdir.h"
> #include "ixgbe_rss.h"
> #include "ixgbe_netmap.h"
751,899d693
< int ixgbe_dma_malloc(struct adapter *,
< bus_size_t, struct ixgbe_dma_alloc *, int);
< void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
<
< #ifdef PCI_IOV
<
< static inline boolean_t
< ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
< {
< return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
< }
<
< static inline void
< ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
< {
<
< if (vf->flags & IXGBE_VF_CTS)
< msg |= IXGBE_VT_MSGTYPE_CTS;
<
< ixgbe_write_mbx(&adapter->hw, &msg, 1, vf->pool);
< }
<
< static inline void
< ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
< {
< msg &= IXGBE_VT_MSG_MASK;
< ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
< }
<
< static inline void
< ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
< {
< msg &= IXGBE_VT_MSG_MASK;
< ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
< }
<
< static inline void
< ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
< {
< if (!(vf->flags & IXGBE_VF_CTS))
< ixgbe_send_vf_nack(adapter, vf, 0);
< }
<
< static inline enum ixgbe_iov_mode
< ixgbe_get_iov_mode(struct adapter *adapter)
< {
< if (adapter->num_vfs == 0)
< return (IXGBE_NO_VM);
< if (adapter->num_queues <= 2)
< return (IXGBE_64_VM);
< else if (adapter->num_queues <= 4)
< return (IXGBE_32_VM);
< else
< return (IXGBE_NO_VM);
< }
<
< static inline u16
< ixgbe_max_vfs(enum ixgbe_iov_mode mode)
< {
< /*
< * We return odd numbers below because we
< * reserve 1 VM's worth of queues for the PF.
< */
< switch (mode) {
< case IXGBE_64_VM:
< return (63);
< case IXGBE_32_VM:
< return (31);
< case IXGBE_NO_VM:
< default:
< return (0);
< }
< }
<
< static inline int
< ixgbe_vf_queues(enum ixgbe_iov_mode mode)
< {
< switch (mode) {
< case IXGBE_64_VM:
< return (2);
< case IXGBE_32_VM:
< return (4);
< case IXGBE_NO_VM:
< default:
< return (0);
< }
< }
<
< static inline int
< ixgbe_vf_que_index(enum ixgbe_iov_mode mode, u32 vfnum, int num)
< {
< return ((vfnum * ixgbe_vf_queues(mode)) + num);
< }
<
< static inline int
< ixgbe_pf_que_index(enum ixgbe_iov_mode mode, int num)
< {
< return (ixgbe_vf_que_index(mode, ixgbe_max_vfs(mode), num));
< }
<
< static inline void
< ixgbe_update_max_frame(struct adapter * adapter, int max_frame)
< {
< if (adapter->max_frame_size < max_frame)
< adapter->max_frame_size = max_frame;
< }
<
< static inline u32
< ixgbe_get_mrqc(enum ixgbe_iov_mode mode)
< {
< u32 mrqc = 0;
< switch (mode) {
< case IXGBE_64_VM:
< mrqc = IXGBE_MRQC_VMDQRSS64EN;
< break;
< case IXGBE_32_VM:
< mrqc = IXGBE_MRQC_VMDQRSS32EN;
< break;
< case IXGBE_NO_VM:
< mrqc = 0;
< break;
< default:
< panic("Unexpected SR-IOV mode %d", mode);
< }
< return(mrqc);
< }
<
<
< static inline u32
< ixgbe_get_mtqc(enum ixgbe_iov_mode mode)
< {
< uint32_t mtqc = 0;
< switch (mode) {
< case IXGBE_64_VM:
< mtqc |= IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
< break;
< case IXGBE_32_VM:
< mtqc |= IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
< break;
< case IXGBE_NO_VM:
< mtqc = IXGBE_MTQC_64Q_1PB;
< break;
< default:
< panic("Unexpected SR-IOV mode %d", mode);
< }
< return(mtqc);
< }
< #endif /* PCI_IOV */
<
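The SR-IOV inlines removed above were not dropped; they moved behind the new ixgbe_sriov.h include. Their arithmetic is worth restating: 64-pool mode gives every pool 2 queues, 32-pool mode gives 4, and one pool's worth of queues stays with the PF (hence the 63/31 VF limits). A self-contained recreation mirroring the deleted helpers, with a worked check:

    #include <assert.h>

    enum iov_mode { IOV_64_VM, IOV_32_VM, IOV_NO_VM };

    /* Queues per pool, as in the deleted ixgbe_vf_queues(). */
    static int
    vf_queues(enum iov_mode m)
    {
            return (m == IOV_64_VM) ? 2 : (m == IOV_32_VM) ? 4 : 0;
    }

    /* Max VFs: one pool is reserved for the PF, as in ixgbe_max_vfs(). */
    static int
    max_vfs(enum iov_mode m)
    {
            return (m == IOV_64_VM) ? 63 : (m == IOV_32_VM) ? 31 : 0;
    }

    /* Absolute index of a VF's n-th queue, as in ixgbe_vf_que_index(). */
    static int
    vf_que_index(enum iov_mode m, int vfnum, int n)
    {
            return ((vfnum * vf_queues(m)) + n);
    }

    int
    main(void)
    {
            /* 32-pool mode: VF 5's queue 2 is absolute queue 22. */
            assert(vf_que_index(IOV_32_VM, 5, 2) == 22);
            /* The PF's first queue sits after the last VF pool. */
            assert(vf_que_index(IOV_32_VM, max_vfs(IOV_32_VM), 0) == 124);
            return (0);
    }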