/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25227652Sgrehan * 26227652Sgrehan * $FreeBSD$ 27227652Sgrehan */ 28227652Sgrehan 29227652Sgrehan#ifndef _IF_VTNETVAR_H 30227652Sgrehan#define _IF_VTNETVAR_H 31227652Sgrehan 32255112Sbryanvstruct vtnet_softc; 33255112Sbryanv 34227652Sgrehanstruct vtnet_statistics { 35255112Sbryanv uint64_t mbuf_alloc_failed; 36227652Sgrehan 37255112Sbryanv uint64_t rx_frame_too_large; 38255112Sbryanv uint64_t rx_enq_replacement_failed; 39255112Sbryanv uint64_t rx_mergeable_failed; 40255112Sbryanv uint64_t rx_csum_bad_ethtype; 41255112Sbryanv uint64_t rx_csum_bad_ipproto; 42255112Sbryanv uint64_t rx_csum_bad_offset; 43255112Sbryanv uint64_t rx_csum_bad_proto; 44255112Sbryanv uint64_t tx_csum_bad_ethtype; 45255112Sbryanv uint64_t tx_tso_bad_ethtype; 46255112Sbryanv uint64_t tx_tso_not_tcp; 47265286Sbryanv uint64_t tx_defragged; 48265286Sbryanv uint64_t tx_defrag_failed; 49227652Sgrehan 50255112Sbryanv /* 51255112Sbryanv * These are accumulated from each Rx/Tx queue. 52255112Sbryanv */ 53255112Sbryanv uint64_t rx_csum_failed; 54255112Sbryanv uint64_t rx_csum_offloaded; 55255112Sbryanv uint64_t rx_task_rescheduled; 56255112Sbryanv uint64_t tx_csum_offloaded; 57255112Sbryanv uint64_t tx_tso_offloaded; 58255112Sbryanv uint64_t tx_task_rescheduled; 59227652Sgrehan}; 60227652Sgrehan 61255112Sbryanvstruct vtnet_rxq_stats { 62255112Sbryanv uint64_t vrxs_ipackets; /* if_ipackets */ 63255112Sbryanv uint64_t vrxs_ibytes; /* if_ibytes */ 64255112Sbryanv uint64_t vrxs_iqdrops; /* if_iqdrops */ 65255112Sbryanv uint64_t vrxs_ierrors; /* if_ierrors */ 66255112Sbryanv uint64_t vrxs_csum; 67255112Sbryanv uint64_t vrxs_csum_failed; 68255112Sbryanv uint64_t vrxs_rescheduled; 69255112Sbryanv}; 70255112Sbryanv 71255112Sbryanvstruct vtnet_rxq { 72255112Sbryanv struct mtx vtnrx_mtx; 73255112Sbryanv struct vtnet_softc *vtnrx_sc; 74255112Sbryanv struct virtqueue *vtnrx_vq; 75265286Sbryanv struct sglist *vtnrx_sg; 76255112Sbryanv int vtnrx_id; 77255112Sbryanv struct vtnet_rxq_stats vtnrx_stats; 78255112Sbryanv 
struct taskqueue *vtnrx_tq; 79255112Sbryanv struct task vtnrx_intrtask; 80255112Sbryanv char vtnrx_name[16]; 81255112Sbryanv} __aligned(CACHE_LINE_SIZE); 82255112Sbryanv 83255112Sbryanv#define VTNET_RXQ_LOCK(_rxq) mtx_lock(&(_rxq)->vtnrx_mtx) 84255112Sbryanv#define VTNET_RXQ_UNLOCK(_rxq) mtx_unlock(&(_rxq)->vtnrx_mtx) 85255112Sbryanv#define VTNET_RXQ_LOCK_ASSERT(_rxq) \ 86255112Sbryanv mtx_assert(&(_rxq)->vtnrx_mtx, MA_OWNED) 87255112Sbryanv#define VTNET_RXQ_LOCK_ASSERT_NOTOWNED(_rxq) \ 88255112Sbryanv mtx_assert(&(_rxq)->vtnrx_mtx, MA_NOTOWNED) 89255112Sbryanv 90255112Sbryanvstruct vtnet_txq_stats { 91255112Sbryanv uint64_t vtxs_opackets; /* if_opackets */ 92255112Sbryanv uint64_t vtxs_obytes; /* if_obytes */ 93255112Sbryanv uint64_t vtxs_omcasts; /* if_omcasts */ 94255112Sbryanv uint64_t vtxs_csum; 95255112Sbryanv uint64_t vtxs_tso; 96255112Sbryanv uint64_t vtxs_rescheduled; 97255112Sbryanv}; 98255112Sbryanv 99255112Sbryanvstruct vtnet_txq { 100255112Sbryanv struct mtx vtntx_mtx; 101255112Sbryanv struct vtnet_softc *vtntx_sc; 102255112Sbryanv struct virtqueue *vtntx_vq; 103265286Sbryanv struct sglist *vtntx_sg; 104255112Sbryanv#ifndef VTNET_LEGACY_TX 105255112Sbryanv struct buf_ring *vtntx_br; 106255112Sbryanv#endif 107255112Sbryanv int vtntx_id; 108255112Sbryanv int vtntx_watchdog; 109255112Sbryanv struct vtnet_txq_stats vtntx_stats; 110255112Sbryanv struct taskqueue *vtntx_tq; 111255112Sbryanv struct task vtntx_intrtask; 112255112Sbryanv#ifndef VTNET_LEGACY_TX 113255112Sbryanv struct task vtntx_defrtask; 114255112Sbryanv#endif 115255112Sbryanv char vtntx_name[16]; 116255112Sbryanv} __aligned(CACHE_LINE_SIZE); 117255112Sbryanv 118255112Sbryanv#define VTNET_TXQ_LOCK(_txq) mtx_lock(&(_txq)->vtntx_mtx) 119255112Sbryanv#define VTNET_TXQ_TRYLOCK(_txq) mtx_trylock(&(_txq)->vtntx_mtx) 120255112Sbryanv#define VTNET_TXQ_UNLOCK(_txq) mtx_unlock(&(_txq)->vtntx_mtx) 121255112Sbryanv#define VTNET_TXQ_LOCK_ASSERT(_txq) \ 122255112Sbryanv mtx_assert(&(_txq)->vtntx_mtx, 
MA_OWNED) 123255112Sbryanv#define VTNET_TXQ_LOCK_ASSERT_NOTOWNED(_txq) \ 124255112Sbryanv mtx_assert(&(_txq)->vtntx_mtx, MA_NOTOWNED) 125255112Sbryanv 126227652Sgrehanstruct vtnet_softc { 127227652Sgrehan device_t vtnet_dev; 128227652Sgrehan struct ifnet *vtnet_ifp; 129255112Sbryanv struct vtnet_rxq *vtnet_rxqs; 130255112Sbryanv struct vtnet_txq *vtnet_txqs; 131227652Sgrehan 132227652Sgrehan uint32_t vtnet_flags; 133255112Sbryanv#define VTNET_FLAG_SUSPENDED 0x0001 134255112Sbryanv#define VTNET_FLAG_MAC 0x0002 135227652Sgrehan#define VTNET_FLAG_CTRL_VQ 0x0004 136227652Sgrehan#define VTNET_FLAG_CTRL_RX 0x0008 137255112Sbryanv#define VTNET_FLAG_CTRL_MAC 0x0010 138255112Sbryanv#define VTNET_FLAG_VLAN_FILTER 0x0020 139255112Sbryanv#define VTNET_FLAG_TSO_ECN 0x0040 140255112Sbryanv#define VTNET_FLAG_MRG_RXBUFS 0x0080 141255112Sbryanv#define VTNET_FLAG_LRO_NOMRG 0x0100 142255112Sbryanv#define VTNET_FLAG_MULTIQ 0x0200 143277389Sbryanv#define VTNET_FLAG_INDIRECT 0x0400 144277389Sbryanv#define VTNET_FLAG_EVENT_IDX 0x0800 145227652Sgrehan 146255112Sbryanv int vtnet_link_active; 147227652Sgrehan int vtnet_hdr_size; 148227652Sgrehan int vtnet_rx_process_limit; 149265286Sbryanv int vtnet_rx_nsegs; 150255112Sbryanv int vtnet_rx_nmbufs; 151255112Sbryanv int vtnet_rx_clsize; 152255112Sbryanv int vtnet_rx_new_clsize; 153270334Sbryanv int vtnet_tx_intr_thresh; 154265286Sbryanv int vtnet_tx_nsegs; 155227652Sgrehan int vtnet_if_flags; 156255112Sbryanv int vtnet_act_vq_pairs; 157255112Sbryanv int vtnet_max_vq_pairs; 158304081Ssmh int vtnet_requested_vq_pairs; 159255112Sbryanv 160255112Sbryanv struct virtqueue *vtnet_ctrl_vq; 161255112Sbryanv struct vtnet_mac_filter *vtnet_mac_filter; 162255112Sbryanv uint32_t *vtnet_vlan_filter; 163255112Sbryanv 164227652Sgrehan uint64_t vtnet_features; 165227652Sgrehan struct vtnet_statistics vtnet_stats; 166227652Sgrehan struct callout vtnet_tick_ch; 167255112Sbryanv struct ifmedia vtnet_media; 168227652Sgrehan eventhandler_tag vtnet_vlan_attach; 
169227652Sgrehan eventhandler_tag vtnet_vlan_detach; 170227652Sgrehan 171255112Sbryanv struct mtx vtnet_mtx; 172255112Sbryanv char vtnet_mtx_name[16]; 173227652Sgrehan char vtnet_hwaddr[ETHER_ADDR_LEN]; 174255112Sbryanv}; 175227652Sgrehan 176255112Sbryanv/* 177255112Sbryanv * Maximum number of queue pairs we will autoconfigure to. 178255112Sbryanv */ 179255112Sbryanv#define VTNET_MAX_QUEUE_PAIRS 8 180227652Sgrehan 181255112Sbryanv/* 182255112Sbryanv * Additional completed entries can appear in a virtqueue before we can 183255112Sbryanv * reenable interrupts. Number of times to retry before scheduling the 184255112Sbryanv * taskqueue to process the completed entries. 185255112Sbryanv */ 186255112Sbryanv#define VTNET_INTR_DISABLE_RETRIES 4 187227652Sgrehan 188227652Sgrehan/* 189270334Sbryanv * Similarly, additional completed entries can appear in a virtqueue 190270334Sbryanv * between when lasted checked and before notifying the host. Number 191270334Sbryanv * of times to retry before scheduling the taskqueue to process the 192270334Sbryanv * queue. 193270334Sbryanv */ 194270334Sbryanv#define VTNET_NOTIFY_RETRIES 4 195270334Sbryanv 196270334Sbryanv/* 197255112Sbryanv * Fake the media type. The host does not provide us with any real media 198255112Sbryanv * information. 199255112Sbryanv */ 200255112Sbryanv#define VTNET_MEDIATYPE (IFM_ETHER | IFM_10G_T | IFM_FDX) 201255112Sbryanv 202255112Sbryanv/* 203255112Sbryanv * Number of words to allocate for the VLAN shadow table. There is one 204255112Sbryanv * bit for each VLAN. 205255112Sbryanv */ 206255112Sbryanv#define VTNET_VLAN_FILTER_NWORDS (4096 / 32) 207255112Sbryanv 208255112Sbryanv/* 209227652Sgrehan * When mergeable buffers are not negotiated, the vtnet_rx_header structure 210227652Sgrehan * below is placed at the beginning of the mbuf data. Use 4 bytes of pad to 211227652Sgrehan * both keep the VirtIO header and the data non-contiguous and to keep the 212227652Sgrehan * frame's payload 4 byte aligned. 
213227652Sgrehan * 214227652Sgrehan * When mergeable buffers are negotiated, the host puts the VirtIO header in 215227652Sgrehan * the beginning of the first mbuf's data. 216227652Sgrehan */ 217227652Sgrehan#define VTNET_RX_HEADER_PAD 4 218227652Sgrehanstruct vtnet_rx_header { 219227652Sgrehan struct virtio_net_hdr vrh_hdr; 220227652Sgrehan char vrh_pad[VTNET_RX_HEADER_PAD]; 221227652Sgrehan} __packed; 222227652Sgrehan 223227652Sgrehan/* 224227652Sgrehan * For each outgoing frame, the vtnet_tx_header below is allocated from 225227652Sgrehan * the vtnet_tx_header_zone. 226227652Sgrehan */ 227227652Sgrehanstruct vtnet_tx_header { 228227652Sgrehan union { 229227652Sgrehan struct virtio_net_hdr hdr; 230227652Sgrehan struct virtio_net_hdr_mrg_rxbuf mhdr; 231227652Sgrehan } vth_uhdr; 232227652Sgrehan 233227652Sgrehan struct mbuf *vth_mbuf; 234227652Sgrehan}; 235227652Sgrehan 236227652Sgrehan/* 237227652Sgrehan * The VirtIO specification does not place a limit on the number of MAC 238227652Sgrehan * addresses the guest driver may request to be filtered. In practice, 239227652Sgrehan * the host is constrained by available resources. To simplify this driver, 240227652Sgrehan * impose a reasonably high limit of MAC addresses we will filter before 241227652Sgrehan * falling back to promiscuous or all-multicast modes. 242227652Sgrehan */ 243227652Sgrehan#define VTNET_MAX_MAC_ENTRIES 128 244227652Sgrehan 245227652Sgrehanstruct vtnet_mac_table { 246227652Sgrehan uint32_t nentries; 247227652Sgrehan uint8_t macs[VTNET_MAX_MAC_ENTRIES][ETHER_ADDR_LEN]; 248227652Sgrehan} __packed; 249227652Sgrehan 250227652Sgrehanstruct vtnet_mac_filter { 251227652Sgrehan struct vtnet_mac_table vmf_unicast; 252227652Sgrehan uint32_t vmf_pad; /* Make tables non-contiguous. */ 253227652Sgrehan struct vtnet_mac_table vmf_multicast; 254227652Sgrehan}; 255227652Sgrehan 256227652Sgrehan/* 257227652Sgrehan * The MAC filter table is malloc(9)'d when needed. 
Ensure it will 258227652Sgrehan * always fit in one segment. 259227652Sgrehan */ 260227652SgrehanCTASSERT(sizeof(struct vtnet_mac_filter) <= PAGE_SIZE); 261227652Sgrehan 262255112Sbryanv#define VTNET_TX_TIMEOUT 5 263227652Sgrehan#define VTNET_CSUM_OFFLOAD (CSUM_TCP | CSUM_UDP | CSUM_SCTP) 264255112Sbryanv#define VTNET_CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_SCTP_IPV6) 265227652Sgrehan 266255112Sbryanv#define VTNET_CSUM_ALL_OFFLOAD \ 267255112Sbryanv (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6 | CSUM_TSO) 268255112Sbryanv 269227652Sgrehan/* Features desired/implemented by this driver. */ 270227652Sgrehan#define VTNET_FEATURES \ 271227652Sgrehan (VIRTIO_NET_F_MAC | \ 272227652Sgrehan VIRTIO_NET_F_STATUS | \ 273227652Sgrehan VIRTIO_NET_F_CTRL_VQ | \ 274227652Sgrehan VIRTIO_NET_F_CTRL_RX | \ 275255112Sbryanv VIRTIO_NET_F_CTRL_MAC_ADDR | \ 276227652Sgrehan VIRTIO_NET_F_CTRL_VLAN | \ 277227652Sgrehan VIRTIO_NET_F_CSUM | \ 278255112Sbryanv VIRTIO_NET_F_GSO | \ 279227652Sgrehan VIRTIO_NET_F_HOST_TSO4 | \ 280227652Sgrehan VIRTIO_NET_F_HOST_TSO6 | \ 281227652Sgrehan VIRTIO_NET_F_HOST_ECN | \ 282227652Sgrehan VIRTIO_NET_F_GUEST_CSUM | \ 283227652Sgrehan VIRTIO_NET_F_GUEST_TSO4 | \ 284227652Sgrehan VIRTIO_NET_F_GUEST_TSO6 | \ 285227652Sgrehan VIRTIO_NET_F_GUEST_ECN | \ 286227652Sgrehan VIRTIO_NET_F_MRG_RXBUF | \ 287255112Sbryanv VIRTIO_NET_F_MQ | \ 288255112Sbryanv VIRTIO_RING_F_EVENT_IDX | \ 289227652Sgrehan VIRTIO_RING_F_INDIRECT_DESC) 290227652Sgrehan 291227652Sgrehan/* 292255112Sbryanv * The VIRTIO_NET_F_HOST_TSO[46] features permit us to send the host 293255112Sbryanv * frames larger than 1514 bytes. 294255112Sbryanv */ 295255112Sbryanv#define VTNET_TSO_FEATURES (VIRTIO_NET_F_GSO | VIRTIO_NET_F_HOST_TSO4 | \ 296255112Sbryanv VIRTIO_NET_F_HOST_TSO6 | VIRTIO_NET_F_HOST_ECN) 297255112Sbryanv 298255112Sbryanv/* 299227652Sgrehan * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us 300227652Sgrehan * frames larger than 1514 bytes. 
We do not yet support software LRO 301227652Sgrehan * via tcp_lro_rx(). 302227652Sgrehan */ 303227652Sgrehan#define VTNET_LRO_FEATURES (VIRTIO_NET_F_GUEST_TSO4 | \ 304227652Sgrehan VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN) 305227652Sgrehan 306227652Sgrehan#define VTNET_MAX_MTU 65536 307227652Sgrehan#define VTNET_MAX_RX_SIZE 65550 308227652Sgrehan 309227652Sgrehan/* 310227652Sgrehan * Used to preallocate the Vq indirect descriptors. The first segment 311265286Sbryanv * is reserved for the header, except for mergeable buffers since the 312265286Sbryanv * header is placed inline with the data. 313227652Sgrehan */ 314265286Sbryanv#define VTNET_MRG_RX_SEGS 1 315227652Sgrehan#define VTNET_MIN_RX_SEGS 2 316227652Sgrehan#define VTNET_MAX_RX_SEGS 34 317265286Sbryanv#define VTNET_MIN_TX_SEGS 4 318265286Sbryanv#define VTNET_MAX_TX_SEGS 64 319227652Sgrehan 320227652Sgrehan/* 321227652Sgrehan * Assert we can receive and transmit the maximum with regular 322227652Sgrehan * size clusters. 323227652Sgrehan */ 324227652SgrehanCTASSERT(((VTNET_MAX_RX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_RX_SIZE); 325227652SgrehanCTASSERT(((VTNET_MAX_TX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_MTU); 326227652Sgrehan 327227652Sgrehan/* 328255112Sbryanv * Number of slots in the Tx bufrings. This value matches most other 329255112Sbryanv * multiqueue drivers. 330255112Sbryanv */ 331255112Sbryanv#define VTNET_DEFAULT_BUFRING_SIZE 4096 332255112Sbryanv 333255112Sbryanv/* 334227652Sgrehan * Determine how many mbufs are in each receive buffer. For LRO without 335265286Sbryanv * mergeable buffers, we must allocate an mbuf chain large enough to 336227652Sgrehan * hold both the vtnet_rx_header and the maximum receivable data. 337227652Sgrehan */ 338255112Sbryanv#define VTNET_NEEDED_RX_MBUFS(_sc, _clsize) \ 339227652Sgrehan ((_sc)->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0 ? 
1 : \ 340227652Sgrehan howmany(sizeof(struct vtnet_rx_header) + VTNET_MAX_RX_SIZE, \ 341255112Sbryanv (_clsize)) 342227652Sgrehan 343255112Sbryanv#define VTNET_CORE_MTX(_sc) &(_sc)->vtnet_mtx 344255112Sbryanv#define VTNET_CORE_LOCK(_sc) mtx_lock(VTNET_CORE_MTX((_sc))) 345255112Sbryanv#define VTNET_CORE_UNLOCK(_sc) mtx_unlock(VTNET_CORE_MTX((_sc))) 346255112Sbryanv#define VTNET_CORE_LOCK_DESTROY(_sc) mtx_destroy(VTNET_CORE_MTX((_sc))) 347255112Sbryanv#define VTNET_CORE_LOCK_ASSERT(_sc) \ 348255112Sbryanv mtx_assert(VTNET_CORE_MTX((_sc)), MA_OWNED) 349255112Sbryanv#define VTNET_CORE_LOCK_ASSERT_NOTOWNED(_sc) \ 350255112Sbryanv mtx_assert(VTNET_CORE_MTX((_sc)), MA_NOTOWNED) 351227652Sgrehan 352255112Sbryanv#define VTNET_CORE_LOCK_INIT(_sc) do { \ 353227652Sgrehan snprintf((_sc)->vtnet_mtx_name, sizeof((_sc)->vtnet_mtx_name), \ 354227652Sgrehan "%s", device_get_nameunit((_sc)->vtnet_dev)); \ 355255112Sbryanv mtx_init(VTNET_CORE_MTX((_sc)), (_sc)->vtnet_mtx_name, \ 356227652Sgrehan "VTNET Core Lock", MTX_DEF); \ 357227652Sgrehan} while (0) 358227652Sgrehan 359227652Sgrehan#endif /* _IF_VTNETVAR_H */ 360