/* (diff-viewer navigation artifacts removed) */
1/******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32******************************************************************************/
33/*$FreeBSD: stable/11/sys/dev/ixgbe/ixgbe.h 294795 2016-01-26 12:30:17Z smh $*/
34
35
36#ifndef _IXGBE_H_
37#define _IXGBE_H_
38
39
40#include <sys/param.h>
41#include <sys/systm.h>
42#ifndef IXGBE_LEGACY_TX
43#include <sys/buf_ring.h>
44#endif
45#include <sys/mbuf.h>
46#include <sys/protosw.h>
47#include <sys/socket.h>
48#include <sys/malloc.h>
49#include <sys/kernel.h>
50#include <sys/module.h>
51#include <sys/sockio.h>
52#include <sys/eventhandler.h>

/* --- 34 unchanged lines hidden (view full) --- */

87#include <sys/sysctl.h>
88#include <sys/endian.h>
89#include <sys/taskqueue.h>
90#include <sys/pcpu.h>
91#include <sys/smp.h>
92#include <machine/smp.h>
93#include <sys/sbuf.h>
94
95#ifdef PCI_IOV
96#include <sys/nv.h>
97#include <sys/iov_schema.h>
98#include <dev/pci/pci_iov.h>
99#endif
100
101#include "ixgbe_api.h"
102#include "ixgbe_common.h"
103#include "ixgbe_phy.h"
104#include "ixgbe_vf.h"
105
106#ifdef PCI_IOV
107#include "ixgbe_common.h"
108#include "ixgbe_mbx.h"
109#endif
110
111/* Tunables */
112
113/*
114 * TxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the
115 * number of transmit descriptors allocated by the driver. Increasing this
116 * value allows the driver to queue more transmits. Each descriptor is 16
117 * bytes. Performance tests have show the 2K value to be optimal for top
118 * performance.
119 */
120#define DEFAULT_TXD 1024
121#define PERFORM_TXD 2048
122#define MAX_TXD 4096
123#define MIN_TXD 64
124
125/*
126 * RxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the
127 * number of receive descriptors allocated for each RX queue. Increasing this
128 * value allows the driver to buffer more incoming packets. Each descriptor
129 * is 16 bytes. A receive buffer is also allocated for each descriptor.
130 *
131 * Note: with 8 rings and a dual port card, it is possible to bump up
132 * against the system mbuf pool limit, you can tune nmbclusters
133 * to adjust for this.
134 */
135#define DEFAULT_RXD 1024
136#define PERFORM_RXD 2048
137#define MAX_RXD 4096
138#define MIN_RXD 64
139
140/* Alignment for rings */
141#define DBA_ALIGN 128
142
/*
 * This is the max watchdog interval, i.e. the time that can
 * pass between any two TX clean operations; such cleaning only
 * happens when the TX hardware is functioning.
 */
148#define IXGBE_WATCHDOG (10 * hz)
149
/*
 * These parameters control when the driver calls the routine to reclaim
 * transmit descriptors.
 */
154#define IXGBE_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 8)
155#define IXGBE_TX_OP_THRESHOLD (adapter->num_tx_desc / 32)
156
157/* These defines are used in MTU calculations */
158#define IXGBE_MAX_FRAME_SIZE 9728
159#define IXGBE_MTU_HDR (ETHER_HDR_LEN + ETHER_CRC_LEN)
160#define IXGBE_MTU_HDR_VLAN (ETHER_HDR_LEN + ETHER_CRC_LEN + \
161 ETHER_VLAN_ENCAP_LEN)
162#define IXGBE_MAX_MTU (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR)
163#define IXGBE_MAX_MTU_VLAN (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR_VLAN)
164
165/* Flow control constants */
166#define IXGBE_FC_PAUSE 0xFFFF
167#define IXGBE_FC_HI 0x20000
168#define IXGBE_FC_LO 0x10000
169
170/*
171 * Used for optimizing small rx mbufs. Effort is made to keep the copy
172 * small and aligned for the CPU L1 cache.
173 *
174 * MHLEN is typically 168 bytes, giving us 8-byte alignment. Getting
175 * 32 byte alignment needed for the fast bcopy results in 8 bytes being
176 * wasted. Getting 64 byte alignment, which _should_ be ideal for
177 * modern Intel CPUs, results in 40 bytes wasted and a significant drop
178 * in observed efficiency of the optimization, 97.9% -> 81.8%.
179 */
180#if __FreeBSD_version < 1002000
181#define MPKTHSIZE (sizeof(struct m_hdr) + sizeof(struct pkthdr))
182#endif
183#define IXGBE_RX_COPY_HDR_PADDED ((((MPKTHSIZE - 1) / 32) + 1) * 32)
184#define IXGBE_RX_COPY_LEN (MSIZE - IXGBE_RX_COPY_HDR_PADDED)
185#define IXGBE_RX_COPY_ALIGN (IXGBE_RX_COPY_HDR_PADDED - MPKTHSIZE)
186
187/* Keep older OS drivers building... */
188#if !defined(SYSCTL_ADD_UQUAD)
189#define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD
190#endif
191
192/* Defines for printing debug information */
193#define DEBUG_INIT 0

/* --- 6 unchanged lines hidden (view full) --- */

200#define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n")
201#define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A)
202#define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B)
203#define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n")
204#define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A)
205#define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B)
206
207#define MAX_NUM_MULTICAST_ADDRESSES 128
208#define IXGBE_82598_SCATTER 100
209#define IXGBE_82599_SCATTER 32
210#define MSIX_82598_BAR 3
211#define MSIX_82599_BAR 4
212#define IXGBE_TSO_SIZE 262140
213#define IXGBE_RX_HDR 128
214#define IXGBE_VFTA_SIZE 128
215#define IXGBE_BR_SIZE 4096
216#define IXGBE_QUEUE_MIN_FREE 32
217#define IXGBE_MAX_TX_BUSY 10
218#define IXGBE_QUEUE_HUNG 0x80000000
219
220#define IXV_EITR_DEFAULT 128
221
222/* Supported offload bits in mbuf flag */
223#if __FreeBSD_version >= 1000000
224#define CSUM_OFFLOAD (CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
225 CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
226 CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
227#elif __FreeBSD_version >= 800000
228#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
229#else
230#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP)
231#endif
232
233/* Backward compatibility items for very old versions */
234#ifndef pci_find_cap
235#define pci_find_cap pci_find_extcap
236#endif
237
238#ifndef DEVMETHOD_END
239#define DEVMETHOD_END { NULL, NULL }
240#endif
241
242/*
243 * Interrupt Moderation parameters
244 */
245#define IXGBE_LOW_LATENCY 128
246#define IXGBE_AVE_LATENCY 400
247#define IXGBE_BULK_LATENCY 1200
248
249/* Using 1FF (the max value), the interval is ~1.05ms */
250#define IXGBE_LINK_ITR_QUANTA 0x1FF
251#define IXGBE_LINK_ITR ((IXGBE_LINK_ITR_QUANTA << 3) & \
252 IXGBE_EITR_ITR_INT_MASK)
253
254/* MAC type macros */
255#define IXGBE_IS_X550VF(_adapter) \
256 ((_adapter->hw.mac.type == ixgbe_mac_X550_vf) || \
257 (_adapter->hw.mac.type == ixgbe_mac_X550EM_x_vf))
258
259#define IXGBE_IS_VF(_adapter) \
260 (IXGBE_IS_X550VF(_adapter) || \
261 (_adapter->hw.mac.type == ixgbe_mac_X540_vf) || \
262 (_adapter->hw.mac.type == ixgbe_mac_82599_vf))
263
264#ifdef PCI_IOV
265#define IXGBE_VF_INDEX(vmdq) ((vmdq) / 32)
266#define IXGBE_VF_BIT(vmdq) (1 << ((vmdq) % 32))
267
268#define IXGBE_VT_MSG_MASK 0xFFFF
269
270#define IXGBE_VT_MSGINFO(msg) \
271 (((msg) & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT)
272
273#define IXGBE_VF_GET_QUEUES_RESP_LEN 5
274
275#define IXGBE_API_VER_1_0 0
276#define IXGBE_API_VER_2_0 1 /* Solaris API. Not supported. */
277#define IXGBE_API_VER_1_1 2
278#define IXGBE_API_VER_UNKNOWN UINT16_MAX
279
/* SR-IOV pool layout: how the hardware queues are split across VMs. */
enum ixgbe_iov_mode {
	IXGBE_64_VM,	/* 64 pools, see ixgbe_vf_queues() for queues each */
	IXGBE_32_VM,	/* 32 pools */
	IXGBE_NO_VM	/* SR-IOV disabled */
};
285#endif /* PCI_IOV */
286
287
288/*
289 *****************************************************************************
290 * vendor_info_array
291 *
292 * This array contains the list of Subvendor/Subdevice IDs on which the driver
293 * should load.
294 *
295 *****************************************************************************
296 */
typedef struct _ixgbe_vendor_info_t {
	unsigned int vendor_id;		/* PCI vendor ID */
	unsigned int device_id;		/* PCI device ID */
	unsigned int subvendor_id;	/* PCI subsystem vendor ID */
	unsigned int subdevice_id;	/* PCI subsystem device ID */
	unsigned int index;		/* table index */
} ixgbe_vendor_info_t;
304
305
/* Per-descriptor transmit software state. */
struct ixgbe_tx_buf {
	union ixgbe_adv_tx_desc	*eop;	/* end-of-packet descriptor */
	struct mbuf		*m_head; /* mbuf chain for this packet */
	bus_dmamap_t		map;	/* DMA map for m_head */
};
311
/* Per-descriptor receive software state. */
struct ixgbe_rx_buf {
	struct mbuf	*buf;	/* mbuf posted for this descriptor */
	struct mbuf	*fmp;	/* first mbuf of a multi-descriptor frame */
	bus_dmamap_t	pmap;	/* DMA map for buf */
	u_int		flags;
#define IXGBE_RX_COPY	0x01	/* small frame copied (see IXGBE_RX_COPY_LEN) */
	uint64_t	addr;	/* bus address loaded into the descriptor */
};
320
321/*
322 * Bus dma allocation structure used by ixgbe_dma_malloc and ixgbe_dma_free.
323 */
struct ixgbe_dma_alloc {
	bus_addr_t		dma_paddr;	/* bus/physical address */
	caddr_t			dma_vaddr;	/* kernel virtual address */
	bus_dma_tag_t		dma_tag;
	bus_dmamap_t		dma_map;
	bus_dma_segment_t	dma_seg;
	bus_size_t		dma_size;	/* length in bytes */
	int			dma_nseg;	/* number of DMA segments */
};
333
/* One multicast address together with the VMDq pool it belongs to. */
struct ixgbe_mc_addr {
	u8	addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
	u32	vmdq;
};
338
339/*
340** Driver queue struct: this is the interrupt container
341** for the associated tx and rx ring.
342*/
struct ix_queue {
	struct adapter		*adapter;	/* back pointer to owner */
	u32			msix;		/* This queue's MSIX vector */
	u32			eims;		/* This queue's EIMS bit */
	u32			eitr_setting;	/* EITR (moderation) value */
	u32			me;		/* queue index */
	struct resource		*res;		/* interrupt resource */
	void			*tag;		/* interrupt handler tag */
	int			busy;		/* progress state (see IXGBE_QUEUE_HUNG) */
	struct tx_ring		*txr;		/* paired transmit ring */
	struct rx_ring		*rxr;		/* paired receive ring */
	struct task		que_task;	/* deferred interrupt work */
	struct taskqueue	*tq;		/* taskqueue running que_task */
	u64			irqs;		/* interrupt count (soft stat) */
};
358
359/*
360 * The transmit ring, one per queue
361 */
struct tx_ring {
	struct adapter		*adapter;	/* back pointer to owner */
	struct mtx		tx_mtx;		/* per-ring lock */
	u32			me;		/* ring index */
	u32			tail;		/* tail register reference */
	int			busy;		/* watchdog progress state */
	union ixgbe_adv_tx_desc	*tx_base;	/* descriptor ring (host view) */
	struct ixgbe_tx_buf	*tx_buffers;	/* per-descriptor sw state */
	struct ixgbe_dma_alloc	txdma;		/* DMA memory backing tx_base */
	volatile u16		tx_avail;	/* free descriptor count */
	u16			next_avail_desc; /* next descriptor to use */
	u16			next_to_clean;	/* next descriptor to reclaim */
	u16			num_desc;	/* ring size in descriptors */
	u32			txd_cmd;	/* common descriptor cmd bits */
	bus_dma_tag_t		txtag;		/* tag for per-packet maps */
	char			mtx_name[16];	/* storage for lock name */
#ifndef IXGBE_LEGACY_TX
	struct buf_ring		*br;		/* multiqueue staging ring */
	struct task		txq_task;	/* deferred mq start */
#endif
#ifdef IXGBE_FDIR
	u16			atr_sample;	/* Flow Director sampling */
	u16			atr_count;
#endif
	u32			bytes;	/* used for AIM */
	u32			packets;
	/* Soft Stats */
	unsigned long		tso_tx;
	unsigned long		no_tx_map_avail;
	unsigned long		no_tx_dma_setup;
	u64			no_desc_avail;
	u64			total_packets;
};
395
396
397/*
398 * The Receive ring, one per rx queue
399 */
struct rx_ring {
	struct adapter		*adapter;	/* back pointer to owner */
	struct mtx		rx_mtx;		/* per-ring lock */
	u32			me;		/* ring index */
	u32			tail;		/* tail register reference */
	union ixgbe_adv_rx_desc	*rx_base;	/* descriptor ring (host view) */
	struct ixgbe_dma_alloc	rxdma;		/* DMA memory backing rx_base */
	struct lro_ctrl		lro;		/* software LRO state */
	bool			lro_enabled;
	bool			hw_rsc;		/* hardware receive side coalescing */
	bool			vtag_strip;	/* hw strips VLAN tags */
	u16			next_to_refresh; /* next descriptor to re-arm */
	u16			next_to_check;	/* next descriptor to process */
	u16			num_desc;	/* ring size in descriptors */
	u16			mbuf_sz;	/* cluster size for rx mbufs */
	char			mtx_name[16];	/* storage for lock name */
	struct ixgbe_rx_buf	*rx_buffers;	/* per-descriptor sw state */
	bus_dma_tag_t		ptag;		/* tag for packet buffers */

	u32			bytes;	/* Used for AIM calc */
	u32			packets;

	/* Soft stats */
	u64			rx_irq;
	u64			rx_copies;
	u64			rx_packets;
	u64			rx_bytes;
	u64			rx_discarded;
	u64			rsc_num;
#ifdef IXGBE_FDIR
	u64			flm;
#endif
};
433
434#ifdef PCI_IOV
435#define IXGBE_VF_CTS (1 << 0) /* VF is clear to send. */
436#define IXGBE_VF_CAP_MAC (1 << 1) /* VF is permitted to change MAC. */
437#define IXGBE_VF_CAP_VLAN (1 << 2) /* VF is permitted to join vlans. */
438#define IXGBE_VF_ACTIVE (1 << 3) /* VF is active. */
439
440#define IXGBE_MAX_VF_MC 30 /* Max number of multicast entries */
441
/* Per-VF state tracked by the PF, one entry per SR-IOV pool. */
struct ixgbe_vf {
	u_int		pool;		/* VMDq pool / VF number */
	u_int		rar_index;	/* receive address register slot */
	u_int		max_frame_size;
	uint32_t	flags;		/* IXGBE_VF_* bits */
	uint8_t		ether_addr[ETHER_ADDR_LEN];
	uint16_t	mc_hash[IXGBE_MAX_VF_MC]; /* multicast filter hashes */
	uint16_t	num_mc_hashes;	/* valid entries in mc_hash */
	uint16_t	default_vlan;
	uint16_t	vlan_tag;
	uint16_t	api_ver;	/* negotiated mailbox API (IXGBE_API_VER_*) */
};
454#endif /* PCI_IOV */
455
456/* Our adapter structure */
struct adapter {
	struct ixgbe_hw		hw;	/* shared-code hardware state */
	struct ixgbe_osdep	osdep;	/* OS-dependent glue */

	struct device		*dev;
	struct ifnet		*ifp;

	struct resource		*pci_mem;	/* register BAR */
	struct resource		*msix_mem;	/* MSIX table BAR */

	/*
	 * Interrupt resources: this set is
	 * either used for legacy, or for Link
	 * when doing MSIX
	 */
	void			*tag;
	struct resource		*res;

	struct ifmedia		media;
	struct callout		timer;	/* periodic watchdog/stats timer */
	int			msix;
	int			if_flags;

	struct mtx		core_mtx;	/* adapter-wide lock */

	eventhandler_tag	vlan_attach;
	eventhandler_tag	vlan_detach;

	u16			num_vlans;
	u16			num_queues;

	/*
	** Shadow VFTA table, this is needed because
	** the real vlan filter table gets cleared during
	** a soft reset and the driver needs to be able
	** to repopulate it.
	*/
	u32			shadow_vfta[IXGBE_VFTA_SIZE];

	/* Info about the interface */
	u32			optics;
	u32			fc;	/* local flow ctrl setting */
	int			advertise;  /* link speeds */
	bool			enable_aim; /* adaptive interrupt moderation */
	bool			link_active;
	u16			max_frame_size;
	u16			num_segs;
	u32			link_speed;
	bool			link_up;
	u32			vector;
	u16			dmac;	/* DMA coalescing setting */
	bool			eee_enabled;
	u32			phy_layer;

	/* Power management-related */
	bool			wol_support;
	u32			wufc;	/* wake-up filter control */

	/* Mbuf cluster size */
	u32			rx_mbuf_sz;

	/* Support for pluggable optics */
	bool			sfp_probe;
	struct task		link_task;	/* Link tasklet */
	struct task		mod_task;	/* SFP tasklet */
	struct task		msf_task;	/* Multispeed Fiber */
#ifdef PCI_IOV
	struct task		mbx_task;	/* VF -> PF mailbox interrupt */
#endif /* PCI_IOV */
#ifdef IXGBE_FDIR
	int			fdir_reinit;
	struct task		fdir_task;
#endif
	struct task		phy_task;	/* PHY intr tasklet */
	struct taskqueue	*tq;

	/*
	** Queues:
	** This is the irq holder, it has
	** an RX/TX pair of rings associated
	** with it.
	*/
	struct ix_queue		*queues;

	/*
	 * Transmit rings:
	 *	Allocated at run time, an array of rings.
	 */
	struct tx_ring		*tx_rings;
	u32			num_tx_desc;
	u32			tx_process_limit;

	/*
	 * Receive rings:
	 *	Allocated at run time, an array of rings.
	 */
	struct rx_ring		*rx_rings;
	u64			active_queues;	/* bitmask of live queues */
	u32			num_rx_desc;
	u32			rx_process_limit;

	/* Multicast array memory */
	struct ixgbe_mc_addr	*mta;
	int			num_vfs;
	int			pool;
#ifdef PCI_IOV
	struct ixgbe_vf		*vfs;	/* per-VF state, num_vfs entries */
#endif
#ifdef DEV_NETMAP
	void			(*init_locked)(struct adapter *);
	void			(*stop_locked)(void *);
#endif

	/* Misc stats maintained by the driver */
	unsigned long		dropped_pkts;
	unsigned long		mbuf_defrag_failed;
	unsigned long		mbuf_header_failed;
	unsigned long		mbuf_packet_failed;
	unsigned long		watchdog_events;
	unsigned long		link_irq;
	union {
		struct ixgbe_hw_stats pf;
		struct ixgbevf_hw_stats vf;
	} stats;
#if __FreeBSD_version >= 1100036
	/* counter(9) stats */
	u64			ipackets;
	u64			ierrors;
	u64			opackets;
	u64			oerrors;
	u64			ibytes;
	u64			obytes;
	u64			imcasts;
	u64			omcasts;
	u64			iqdrops;
	u64			noproto;
#endif
};
595
596
597/* Precision Time Sync (IEEE 1588) defines */
598#define ETHERTYPE_IEEE1588 0x88F7
599#define PICOSECS_PER_TICK 20833
600#define TSYNC_UDP_PORT 319 /* UDP port for the protocol */
601#define IXGBE_ADVTXD_TSTAMP 0x00080000
602
603
604#define IXGBE_CORE_LOCK_INIT(_sc, _name) \
605 mtx_init(&(_sc)->core_mtx, _name, "IXGBE Core Lock", MTX_DEF)
606#define IXGBE_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx)
607#define IXGBE_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx)
608#define IXGBE_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx)
609#define IXGBE_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx)

/* --- 32 unchanged lines hidden (view full) --- */

642#define IXGBE_SET_IBYTES(sc, count) (sc)->ifp->if_ibytes = (count)
643#define IXGBE_SET_OBYTES(sc, count) (sc)->ifp->if_obytes = (count)
644#define IXGBE_SET_IMCASTS(sc, count) (sc)->ifp->if_imcasts = (count)
645#define IXGBE_SET_OMCASTS(sc, count) (sc)->ifp->if_omcasts = (count)
646#define IXGBE_SET_IQDROPS(sc, count) (sc)->ifp->if_iqdrops = (count)
647#endif
648
649/* External PHY register addresses */
650#define IXGBE_PHY_CURRENT_TEMP 0xC820
651#define IXGBE_PHY_OVERTEMP_STATUS 0xC830
652
653/* Sysctl help messages; displayed with sysctl -d */
654#define IXGBE_SYSCTL_DESC_ADV_SPEED \
655 "\nControl advertised link speed using these flags:\n" \
656 "\t0x1 - advertise 100M\n" \
657 "\t0x2 - advertise 1G\n" \
658 "\t0x4 - advertise 10G\n\n" \
659 "\t100M is only supported on certain 10GBaseT adapters.\n"
660
661#define IXGBE_SYSCTL_DESC_SET_FC \
662 "\nSet flow control mode using these values:\n" \
663 "\t0 - off\n" \
664 "\t1 - rx pause\n" \
665 "\t2 - tx pause\n" \
666 "\t3 - tx and rx pause"
667
668static inline bool
669ixgbe_is_sfp(struct ixgbe_hw *hw)
670{
671 switch (hw->phy.type) {
672 case ixgbe_phy_sfp_avago:
673 case ixgbe_phy_sfp_ftl:
674 case ixgbe_phy_sfp_intel:
675 case ixgbe_phy_sfp_unknown:
676 case ixgbe_phy_sfp_passive_tyco:
677 case ixgbe_phy_sfp_passive_unknown:
678 case ixgbe_phy_qsfp_passive_unknown:
679 case ixgbe_phy_qsfp_active_unknown:
680 case ixgbe_phy_qsfp_intel:
681 case ixgbe_phy_qsfp_unknown:
682 return TRUE;
683 default:
684 return FALSE;
685 }
686}
687
688/* Workaround to make 8.0 buildable */
689#if __FreeBSD_version >= 800000 && __FreeBSD_version < 800504
/*
 * Report whether a packet handed to the mq start path must go through
 * the buf_ring: the ring already holds packets, or (with ALTQ) queueing
 * is engaged on the interface send queue.
 */
static __inline int
drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		return (1);
#endif
	return (buf_ring_empty(br) ? 0 : 1);
}
699#endif
700
701/*
702** Find the number of unrefreshed RX descriptors
703*/
704static inline u16
705ixgbe_rx_unrefreshed(struct rx_ring *rxr)
706{
707 if (rxr->next_to_check > rxr->next_to_refresh)
708 return (rxr->next_to_check - rxr->next_to_refresh - 1);
709 else
710 return ((rxr->num_desc + rxr->next_to_check) -
711 rxr->next_to_refresh - 1);
712}
713
714/*
715** This checks for a zero mac addr, something that will be likely
716** unless the Admin on the Host has created one.
717*/
718static inline bool
719ixv_check_ether_addr(u8 *addr)
720{
721 bool status = TRUE;
722
723 if ((addr[0] == 0 && addr[1]== 0 && addr[2] == 0 &&
724 addr[3] == 0 && addr[4]== 0 && addr[5] == 0))
725 status = FALSE;
726 return (status);
727}
728
729/* Shared Prototypes */
730
731#ifdef IXGBE_LEGACY_TX
732void ixgbe_start(struct ifnet *);
733void ixgbe_start_locked(struct tx_ring *, struct ifnet *);
734#else /* ! IXGBE_LEGACY_TX */
735int ixgbe_mq_start(struct ifnet *, struct mbuf *);
736int ixgbe_mq_start_locked(struct ifnet *, struct tx_ring *);
737void ixgbe_qflush(struct ifnet *);
738void ixgbe_deferred_mq_start(void *, int);
739#endif /* IXGBE_LEGACY_TX */
740
741int ixgbe_allocate_queues(struct adapter *);
742int ixgbe_allocate_transmit_buffers(struct tx_ring *);
743int ixgbe_setup_transmit_structures(struct adapter *);
744void ixgbe_free_transmit_structures(struct adapter *);
745int ixgbe_allocate_receive_buffers(struct rx_ring *);
746int ixgbe_setup_receive_structures(struct adapter *);
747void ixgbe_free_receive_structures(struct adapter *);
748void ixgbe_txeof(struct tx_ring *);
749bool ixgbe_rxeof(struct ix_queue *);
750
751int ixgbe_dma_malloc(struct adapter *,
752 bus_size_t, struct ixgbe_dma_alloc *, int);
753void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
754
755#ifdef PCI_IOV
756
757static inline boolean_t
758ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
759{
760 return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
761}
762
763static inline void
764ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
765{
766
767 if (vf->flags & IXGBE_VF_CTS)
768 msg |= IXGBE_VT_MSGTYPE_CTS;
769
770 ixgbe_write_mbx(&adapter->hw, &msg, 1, vf->pool);
771}
772
773static inline void
774ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
775{
776 msg &= IXGBE_VT_MSG_MASK;
777 ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
778}
779
780static inline void
781ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
782{
783 msg &= IXGBE_VT_MSG_MASK;
784 ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
785}
786
787static inline void
788ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
789{
790 if (!(vf->flags & IXGBE_VF_CTS))
791 ixgbe_send_vf_nack(adapter, vf, 0);
792}
793
794static inline enum ixgbe_iov_mode
795ixgbe_get_iov_mode(struct adapter *adapter)
796{
797 if (adapter->num_vfs == 0)
798 return (IXGBE_NO_VM);
799 if (adapter->num_queues <= 2)
800 return (IXGBE_64_VM);
801 else if (adapter->num_queues <= 4)
802 return (IXGBE_32_VM);
803 else
804 return (IXGBE_NO_VM);
805}
806
807static inline u16
808ixgbe_max_vfs(enum ixgbe_iov_mode mode)
809{
810 /*
811 * We return odd numbers below because we
812 * reserve 1 VM's worth of queues for the PF.
813 */
814 switch (mode) {
815 case IXGBE_64_VM:
816 return (63);
817 case IXGBE_32_VM:
818 return (31);
819 case IXGBE_NO_VM:
820 default:
821 return (0);
822 }
823}
824
825static inline int
826ixgbe_vf_queues(enum ixgbe_iov_mode mode)
827{
828 switch (mode) {
829 case IXGBE_64_VM:
830 return (2);
831 case IXGBE_32_VM:
832 return (4);
833 case IXGBE_NO_VM:
834 default:
835 return (0);
836 }
837}
838
839static inline int
840ixgbe_vf_que_index(enum ixgbe_iov_mode mode, u32 vfnum, int num)
841{
842 return ((vfnum * ixgbe_vf_queues(mode)) + num);
843}
844
845static inline int
846ixgbe_pf_que_index(enum ixgbe_iov_mode mode, int num)
847{
848 return (ixgbe_vf_que_index(mode, ixgbe_max_vfs(mode), num));
849}
850
851static inline void
852ixgbe_update_max_frame(struct adapter * adapter, int max_frame)
853{
854 if (adapter->max_frame_size < max_frame)
855 adapter->max_frame_size = max_frame;
856}
857
858static inline u32
859ixgbe_get_mrqc(enum ixgbe_iov_mode mode)
860{
861 u32 mrqc = 0;
862 switch (mode) {
863 case IXGBE_64_VM:
864 mrqc = IXGBE_MRQC_VMDQRSS64EN;
865 break;
866 case IXGBE_32_VM:
867 mrqc = IXGBE_MRQC_VMDQRSS32EN;
868 break;
869 case IXGBE_NO_VM:
870 mrqc = 0;
871 break;
872 default:
873 panic("Unexpected SR-IOV mode %d", mode);
874 }
875 return(mrqc);
876}
877
878
879static inline u32
880ixgbe_get_mtqc(enum ixgbe_iov_mode mode)
881{
882 uint32_t mtqc = 0;
883 switch (mode) {
884 case IXGBE_64_VM:
885 mtqc |= IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
886 break;
887 case IXGBE_32_VM:
888 mtqc |= IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
889 break;
890 case IXGBE_NO_VM:
891 mtqc = IXGBE_MTQC_64Q_1PB;
892 break;
893 default:
894 panic("Unexpected SR-IOV mode %d", mode);
895 }
896 return(mtqc);
897}
898#endif /* PCI_IOV */
899
900#endif /* _IXGBE_H_ */