nicvf_queues.h revision 289550
/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/vnic/nicvf_queues.h 289550 2015-10-18 21:39:15Z zbb $
 *
 */

#ifndef NICVF_QUEUES_H
#define NICVF_QUEUES_H

#include <linux/netdevice.h>
#include "q_struct.h"

#define MAX_QUEUE_SET                   128
#define MAX_RCV_QUEUES_PER_QS           8
#define MAX_RCV_BUF_DESC_RINGS_PER_QS   2
#define MAX_SND_QUEUES_PER_QS           8
#define MAX_CMP_QUEUES_PER_QS           8

/* VF's queue interrupt ranges */
#define NICVF_INTR_ID_CQ        0
#define NICVF_INTR_ID_SQ        8
#define NICVF_INTR_ID_RBDR      16
#define NICVF_INTR_ID_MISC      18
#define NICVF_INTR_ID_QS_ERR    19

#define for_each_cq_irq(irq)    \
        for (irq = NICVF_INTR_ID_CQ; irq < NICVF_INTR_ID_SQ; irq++)
#define for_each_sq_irq(irq)    \
        for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_RBDR; irq++)
#define for_each_rbdr_irq(irq)  \
        for (irq = NICVF_INTR_ID_RBDR; irq < NICVF_INTR_ID_MISC; irq++)

#define RBDR_SIZE0      0ULL /* 8K entries */
#define RBDR_SIZE1      1ULL /* 16K entries */
#define RBDR_SIZE2      2ULL /* 32K entries */
#define RBDR_SIZE3      3ULL /* 64K entries */
#define RBDR_SIZE4      4ULL /* 128K entries */
#define RBDR_SIZE5      5ULL /* 256K entries */
#define RBDR_SIZE6      6ULL /* 512K entries */

#define SND_QUEUE_SIZE0 0ULL /* 1K entries */
#define SND_QUEUE_SIZE1 1ULL /* 2K entries */
#define SND_QUEUE_SIZE2 2ULL /* 4K entries */
#define SND_QUEUE_SIZE3 3ULL /* 8K entries */
#define SND_QUEUE_SIZE4 4ULL /* 16K entries */
#define SND_QUEUE_SIZE5 5ULL /* 32K entries */
#define SND_QUEUE_SIZE6 6ULL /* 64K entries */

#define CMP_QUEUE_SIZE0 0ULL /* 1K entries */
#define CMP_QUEUE_SIZE1 1ULL /* 2K entries */
#define CMP_QUEUE_SIZE2 2ULL /* 4K entries */
#define CMP_QUEUE_SIZE3 3ULL /* 8K entries */
#define CMP_QUEUE_SIZE4 4ULL /* 16K entries */
#define CMP_QUEUE_SIZE5 5ULL /* 32K entries */
#define CMP_QUEUE_SIZE6 6ULL /* 64K entries */
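/*
 * Worked example of the size encoding above: each *_SIZEn value is an
 * exponent, not an entry count.  A send or completion queue holds
 * 1ULL << (n + 10) entries (see SND_QUEUE_LEN/CMP_QUEUE_LEN below), so
 * SND_QUEUE_SIZE2 means 1ULL << 12 = 4096 descriptors ("4K entries").
 * An RBDR holds 1ULL << (n + 13) entries (see RCV_BUF_COUNT below), so
 * RBDR_SIZE0 means 1ULL << 13 = 8192 buffer pointers ("8K entries").
 */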
/* Default queue count per QS, its lengths and threshold values */
#define RBDR_CNT        1
#define RCV_QUEUE_CNT   8
#define SND_QUEUE_CNT   8
#define CMP_QUEUE_CNT   8 /* Max of RCV and SND qcount */

#define SND_QSIZE               SND_QUEUE_SIZE2
#define SND_QUEUE_LEN           (1ULL << (SND_QSIZE + 10))
#define MAX_SND_QUEUE_LEN       (1ULL << (SND_QUEUE_SIZE6 + 10))
#define SND_QUEUE_THRESH        2ULL
#define MIN_SQ_DESC_PER_PKT_XMIT        2
/* Timestamping is not enabled, hence 1; otherwise it would be 2 */
#define MAX_CQE_PER_PKT_XMIT            1

/*
 * Keep the CQ and SQ sizes the same; if timestamping
 * is enabled this equation will change.
 */
#define CMP_QSIZE               CMP_QUEUE_SIZE2
#define CMP_QUEUE_LEN           (1ULL << (CMP_QSIZE + 10))
#define CMP_QUEUE_CQE_THRESH    0
#define CMP_QUEUE_TIMER_THRESH  220 /* 10 usec */

#define RBDR_SIZE               RBDR_SIZE0
#define RCV_BUF_COUNT           (1ULL << (RBDR_SIZE + 13))
#define MAX_RCV_BUF_COUNT       (1ULL << (RBDR_SIZE6 + 13))
#define RBDR_THRESH             (RCV_BUF_COUNT / 2)
#define DMA_BUFFER_LEN          2048 /* In multiples of 128 bytes */
#define RCV_FRAG_LEN    (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
                         (NICVF_RCV_BUF_ALIGN_BYTES * 2))
#define RCV_DATA_OFFSET         NICVF_RCV_BUF_ALIGN_BYTES

#define MAX_CQES_FOR_TX         ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
                                 MAX_CQE_PER_PKT_XMIT)
/*
 * RQ drop level that reserves CQEs for all SQEs, expressed in
 * 1/256th units of the CQ size.
 * '+ 1' to account for pipelining.
 */
#define RQ_CQ_DROP              ((256 / (CMP_QUEUE_LEN / \
                                 (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)
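/*
 * Worked example with the defaults above: SND_QUEUE_LEN and
 * CMP_QUEUE_LEN are both 4096, so MAX_CQES_FOR_TX = (4096 / 2) * 1 =
 * 2048 and, in integer arithmetic, RQ_CQ_DROP =
 * (256 / (4096 / (4096 - 2048))) + 1 = (256 / 2) + 1 = 129, i.e. the
 * RX drop level sits just past the half-full point, keeping roughly
 * half of the CQ free for TX completions.
 */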
/* Descriptor size in bytes */
#define SND_QUEUE_DESC_SIZE     16
#define CMP_QUEUE_DESC_SIZE     512

/* Buffer / descriptor alignments */
#define NICVF_RCV_BUF_ALIGN             7
#define NICVF_RCV_BUF_ALIGN_BYTES       (1ULL << NICVF_RCV_BUF_ALIGN)
#define NICVF_CQ_BASE_ALIGN_BYTES       512 /* 9 bits */
#define NICVF_SQ_BASE_ALIGN_BYTES       128 /* 7 bits */

#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES)   ALIGN(ADDR, ALIGN_BYTES)
#define NICVF_ADDR_ALIGN_LEN(ADDR, BYTES)       \
        (NICVF_ALIGNED_ADDR(ADDR, BYTES) - BYTES)
#define NICVF_RCV_BUF_ALIGN_LEN(X)      \
        (NICVF_ALIGNED_ADDR(X, NICVF_RCV_BUF_ALIGN_BYTES) - X)

/* Queue enable/disable */
#define NICVF_SQ_EN             BIT_ULL(19)

/* Queue reset */
#define NICVF_CQ_RESET          BIT_ULL(41)
#define NICVF_SQ_RESET          BIT_ULL(17)
#define NICVF_RBDR_RESET        BIT_ULL(43)

enum CQ_RX_ERRLVL_E {
        CQ_ERRLVL_MAC,
        CQ_ERRLVL_L2,
        CQ_ERRLVL_L3,
        CQ_ERRLVL_L4,
};

enum CQ_RX_ERROP_E {
        CQ_RX_ERROP_RE_NONE = 0x0,
        CQ_RX_ERROP_RE_PARTIAL = 0x1,
        CQ_RX_ERROP_RE_JABBER = 0x2,
        CQ_RX_ERROP_RE_FCS = 0x7,
        CQ_RX_ERROP_RE_TERMINATE = 0x9,
        CQ_RX_ERROP_RE_RX_CTL = 0xb,
        CQ_RX_ERROP_PREL2_ERR = 0x1f,
        CQ_RX_ERROP_L2_FRAGMENT = 0x20,
        CQ_RX_ERROP_L2_OVERRUN = 0x21,
        CQ_RX_ERROP_L2_PFCS = 0x22,
        CQ_RX_ERROP_L2_PUNY = 0x23,
        CQ_RX_ERROP_L2_MAL = 0x24,
        CQ_RX_ERROP_L2_OVERSIZE = 0x25,
        CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
        CQ_RX_ERROP_L2_LENMISM = 0x27,
        CQ_RX_ERROP_L2_PCLP = 0x28,
        CQ_RX_ERROP_IP_NOT = 0x41,
        CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
        CQ_RX_ERROP_IP_MAL = 0x43,
        CQ_RX_ERROP_IP_MALD = 0x44,
        CQ_RX_ERROP_IP_HOP = 0x45,
        CQ_RX_ERROP_L3_ICRC = 0x46,
        CQ_RX_ERROP_L3_PCLP = 0x47,
        CQ_RX_ERROP_L4_MAL = 0x61,
        CQ_RX_ERROP_L4_CHK = 0x62,
        CQ_RX_ERROP_UDP_LEN = 0x63,
        CQ_RX_ERROP_L4_PORT = 0x64,
        CQ_RX_ERROP_TCP_FLAG = 0x65,
        CQ_RX_ERROP_TCP_OFFSET = 0x66,
        CQ_RX_ERROP_L4_PCLP = 0x67,
        CQ_RX_ERROP_RBDR_TRUNC = 0x70,
};

enum CQ_TX_ERROP_E {
        CQ_TX_ERROP_GOOD = 0x0,
        CQ_TX_ERROP_DESC_FAULT = 0x10,
        CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
        CQ_TX_ERROP_SUBDC_ERR = 0x12,
        CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
        CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
        CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
        CQ_TX_ERROP_LOCK_VIOL = 0x83,
        CQ_TX_ERROP_DATA_FAULT = 0x84,
        CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
        CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
        CQ_TX_ERROP_MEM_FAULT = 0x87,
        CQ_TX_ERROP_CK_OVERLAP = 0x88,
        CQ_TX_ERROP_CK_OFLOW = 0x89,
        CQ_TX_ERROP_ENUM_LAST = 0x8a,
};

struct cmp_queue_stats {
        struct tx_stats {
                u64 good;
                u64 desc_fault;
                u64 hdr_cons_err;
                u64 subdesc_err;
                u64 imm_size_oflow;
                u64 data_seq_err;
                u64 mem_seq_err;
                u64 lock_viol;
                u64 data_fault;
                u64 tstmp_conflict;
                u64 tstmp_timeout;
                u64 mem_fault;
                u64 csum_overlap;
                u64 csum_overflow;
        } tx;
} ____cacheline_aligned_in_smp;

enum RQ_SQ_STATS {
        RQ_SQ_STATS_OCTS,
        RQ_SQ_STATS_PKTS,
};

struct rx_tx_queue_stats {
        u64 bytes;
        u64 pkts;
} ____cacheline_aligned_in_smp;

struct q_desc_mem {
        dma_addr_t dma;
        u64 size;
        u16 q_len;
        dma_addr_t phys_base;
        void *base;
        void *unalign_base;
};

struct rbdr {
        bool enable;
        u32 dma_size;
        u32 frag_len;
        u32 thresh;             /* Threshold level for interrupt */
        void *desc;
        u32 head;
        u32 tail;
        struct q_desc_mem dmem;
} ____cacheline_aligned_in_smp;

struct rcv_queue {
        bool enable;
        struct rbdr *rbdr_start;
        struct rbdr *rbdr_cont;
        bool en_tcp_reassembly;
        u8 cq_qs;               /* CQ's QS to which this RQ is assigned */
        u8 cq_idx;              /* CQ index (0 to 7) in the QS */
        u8 cont_rbdr_qs;        /* Continue buffer ptrs - QS num */
        u8 cont_qs_rbdr_idx;    /* RBDR idx in the cont QS */
        u8 start_rbdr_qs;       /* First buffer ptrs - QS num */
        u8 start_qs_rbdr_idx;   /* RBDR idx in the above QS */
        u8 caching;
        struct rx_tx_queue_stats stats;
} ____cacheline_aligned_in_smp;

struct cmp_queue {
        bool enable;
        u16 thresh;
        spinlock_t lock;        /* lock to serialize processing CQEs */
        void *desc;
        struct q_desc_mem dmem;
        struct cmp_queue_stats stats;
        int irq;
} ____cacheline_aligned_in_smp;

struct snd_queue {
        bool enable;
        u8 cq_qs;               /* CQ's QS to which this SQ is pointing */
        u8 cq_idx;              /* CQ index (0 to 7) in the above QS */
        u16 thresh;
        atomic_t free_cnt;
        u32 head;
        u32 tail;
        u64 *skbuff;
        void *desc;

        struct q_desc_mem dmem;
        struct rx_tx_queue_stats stats;
} ____cacheline_aligned_in_smp;

struct queue_set {
        bool enable;
        bool be_en;
        u8 vnic_id;
        u8 rq_cnt;
        u8 cq_cnt;
        u64 cq_len;
        u8 sq_cnt;
        u64 sq_len;
        u8 rbdr_cnt;
        u64 rbdr_len;
        struct rcv_queue rq[MAX_RCV_QUEUES_PER_QS];
        struct cmp_queue cq[MAX_CMP_QUEUES_PER_QS];
        struct snd_queue sq[MAX_SND_QUEUES_PER_QS];
        struct rbdr rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
} ____cacheline_aligned_in_smp;

#define GET_RBDR_DESC(RING, idx)        \
        (&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
#define GET_SQ_DESC(RING, idx)          \
        (&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
#define GET_CQ_DESC(RING, idx)          \
        (&(((union cq_desc_t *)((RING)->desc))[idx]))
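/*
 * Usage sketch for the accessor macros above (the local variable names
 * are illustrative assumptions, not part of this header): each macro
 * casts a ring's raw 'desc' base to the per-ring descriptor type and
 * indexes into it, e.g.
 *
 *      struct snd_queue *sq = &qs->sq[qidx];
 *      struct sq_hdr_subdesc *hdr = GET_SQ_DESC(sq, sq->tail);
 */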
/* CQ status bits */
#define CQ_WR_FULL      BIT(26)
#define CQ_WR_DISABLE   BIT(25)
#define CQ_WR_FAULT     BIT(24)
#define CQ_CQE_COUNT    (0xFFFF << 0)

#define CQ_ERR_MASK     (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)

void nicvf_config_vlan_stripping(struct nicvf *nic,
                                 netdev_features_t features);
int nicvf_set_qset_resources(struct nicvf *nic);
int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
void nicvf_qset_config(struct nicvf *nic, bool enable);
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
                            int qidx, bool enable);

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
void nicvf_sq_disable(struct nicvf *nic, int qidx);
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
void nicvf_sq_free_used_descs(struct net_device *netdev,
                              struct snd_queue *sq, int qidx);
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb);

struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
void nicvf_rbdr_task(unsigned long data);
void nicvf_rbdr_work(struct work_struct *work);

void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);

/* Register access APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_reg_read(struct nicvf *nic, u64 offset);
void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
                           u64 qidx, u64 val);
u64 nicvf_queue_reg_read(struct nicvf *nic,
                         u64 offset, u64 qidx);

/* Stats */
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
int nicvf_check_cqe_rx_errs(struct nicvf *nic,
                            struct cmp_queue *cq, struct cqe_rx_t *cqe_rx);
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
                            struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
#endif /* NICVF_QUEUES_H */