1289550Szbb/*
2289550Szbb * Copyright (C) 2015 Cavium Inc.
3289550Szbb * All rights reserved.
4289550Szbb *
5289550Szbb * Redistribution and use in source and binary forms, with or without
6289550Szbb * modification, are permitted provided that the following conditions
7289550Szbb * are met:
8289550Szbb * 1. Redistributions of source code must retain the above copyright
9289550Szbb *    notice, this list of conditions and the following disclaimer.
10289550Szbb * 2. Redistributions in binary form must reproduce the above copyright
11289550Szbb *    notice, this list of conditions and the following disclaimer in the
12289550Szbb *    documentation and/or other materials provided with the distribution.
13289550Szbb *
14289550Szbb * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15289550Szbb * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16289550Szbb * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17289550Szbb * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18289550Szbb * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19289550Szbb * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20289550Szbb * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21289550Szbb * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22289550Szbb * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23289550Szbb * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24289550Szbb * SUCH DAMAGE.
25289550Szbb *
26289550Szbb * $FreeBSD$
27289550Szbb *
28289550Szbb */
29289550Szbb
30289550Szbb#ifndef NICVF_QUEUES_H
31289551Szbb#define	NICVF_QUEUES_H
32289550Szbb
33289550Szbb#include "q_struct.h"
34289550Szbb
/* Hardware limits: queue sets and per-QS queue counts */
#define	MAX_QUEUE_SET			128
#define	MAX_RCV_QUEUES_PER_QS		8
#define	MAX_RCV_BUF_DESC_RINGS_PER_QS	2
#define	MAX_SND_QUEUES_PER_QS		8
#define	MAX_CMP_QUEUES_PER_QS		8

/* VF's queue interrupt ranges */
#define	NICVF_INTR_ID_CQ		0	/* CQ vectors: 0..7 */
#define	NICVF_INTR_ID_SQ		8	/* SQ vectors: 8..15 */
#define	NICVF_INTR_ID_RBDR		16	/* RBDR vectors: 16..17 */
#define	NICVF_INTR_ID_MISC		18	/* Miscellaneous vector */
#define	NICVF_INTR_ID_QS_ERR		19	/* Queue set error vector */
47289550Szbb
/* Iterate (irq) over every interrupt vector of the given queue type */
#define	for_each_cq_irq(irq)	\
	for ((irq) = NICVF_INTR_ID_CQ; (irq) < NICVF_INTR_ID_SQ; (irq)++)
#define	for_each_sq_irq(irq)	\
	for ((irq) = NICVF_INTR_ID_SQ; (irq) < NICVF_INTR_ID_RBDR; (irq)++)
#define	for_each_rbdr_irq(irq)	\
	for ((irq) = NICVF_INTR_ID_RBDR; (irq) < NICVF_INTR_ID_MISC; (irq)++)
54289550Szbb
/*
 * RBDR size encodings: a ring of (8K << encoding) entries
 * (see RCV_BUF_COUNT below).
 */
#define	RBDR_SIZE0		0UL /* 8K entries */
#define	RBDR_SIZE1		1UL /* 16K entries */
#define	RBDR_SIZE2		2UL /* 32K entries */
#define	RBDR_SIZE3		3UL /* 64K entries */
#define	RBDR_SIZE4		4UL /* 128K entries */
#define	RBDR_SIZE5		5UL /* 256K entries */
#define	RBDR_SIZE6		6UL /* 512K entries */

/* Send queue size encodings: (1K << encoding) entries */
#define	SND_QUEUE_SIZE0		0UL /* 1K entries */
#define	SND_QUEUE_SIZE1		1UL /* 2K entries */
#define	SND_QUEUE_SIZE2		2UL /* 4K entries */
#define	SND_QUEUE_SIZE3		3UL /* 8K entries */
#define	SND_QUEUE_SIZE4		4UL /* 16K entries */
#define	SND_QUEUE_SIZE5		5UL /* 32K entries */
#define	SND_QUEUE_SIZE6		6UL /* 64K entries */

/* Completion queue size encodings: (1K << encoding) entries */
#define	CMP_QUEUE_SIZE0		0UL /* 1K entries */
#define	CMP_QUEUE_SIZE1		1UL /* 2K entries */
#define	CMP_QUEUE_SIZE2		2UL /* 4K entries */
#define	CMP_QUEUE_SIZE3		3UL /* 8K entries */
#define	CMP_QUEUE_SIZE4		4UL /* 16K entries */
#define	CMP_QUEUE_SIZE5		5UL /* 32K entries */
#define	CMP_QUEUE_SIZE6		6UL /* 64K entries */
78289550Szbb
/* Default queue count per QS, its lengths and threshold values */
#define	RBDR_CNT		1
#define	RCV_QUEUE_CNT		8
#define	SND_QUEUE_CNT		8
#define	CMP_QUEUE_CNT		8 /* Max of RCV and SND qcount */

#define	SND_QSIZE		SND_QUEUE_SIZE2
#define	SND_QUEUE_LEN		(1UL << (SND_QSIZE + 10))	/* 4K entries */
#define	MAX_SND_QUEUE_LEN	(1UL << (SND_QUEUE_SIZE6 + 10))	/* 64K entries */
#define	SND_QUEUE_THRESH	2UL
#define	MIN_SQ_DESC_PER_PKT_XMIT	2
/* Since timestamp not enabled, otherwise 2 */
#define	MAX_CQE_PER_PKT_XMIT		1
92289550Szbb
/*
 * Keep CQ and SQ sizes same, if timestamping
 * is enabled this equation will change.
 */
#define	CMP_QSIZE		CMP_QUEUE_SIZE2
#define	CMP_QUEUE_LEN		(1UL << (CMP_QSIZE + 10))	/* 4K entries */
#define	CMP_QUEUE_CQE_THRESH	32
#define	CMP_QUEUE_TIMER_THRESH	220 /* 10usec */

#define	RBDR_SIZE		RBDR_SIZE0
#define	RCV_BUF_COUNT		(1UL << (RBDR_SIZE + 13))	/* 8K buffers */
#define	MAX_RCV_BUF_COUNT	(1UL << (RBDR_SIZE6 + 13))	/* 512K buffers */
#define	RBDR_THRESH		(RCV_BUF_COUNT / 2)
#define	DMA_BUFFER_LEN		2048 /* In multiples of 128bytes */

/* Worst-case number of CQEs consumed by in-flight transmit packets */
#define	MAX_CQES_FOR_TX		\
    ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * MAX_CQE_PER_PKT_XMIT)
/*
 * Calculate number of CQEs to reserve for all SQEs.
 * It's 1/256th level of CQ size.
 * '+ 1' to account for pipelining.
 */
#define	RQ_CQ_DROP		\
    ((256 / (CMP_QUEUE_LEN / (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)
116289550Szbb
/* Descriptor size in bytes */
#define	SND_QUEUE_DESC_SIZE	16
#define	CMP_QUEUE_DESC_SIZE	512

/* Buffer / descriptor alignments */
#define	NICVF_RCV_BUF_ALIGN		7	/* log2 of the alignment */
#define	NICVF_RCV_BUF_ALIGN_BYTES	(1UL << NICVF_RCV_BUF_ALIGN)
#define	NICVF_CQ_BASE_ALIGN_BYTES	512  /* 9 bits */
#define	NICVF_SQ_BASE_ALIGN_BYTES	128  /* 7 bits */

/* Round (addr) up to a multiple of (align_bytes) — must be a power of 2 */
#define	NICVF_ALIGNED_ADDR(addr, align_bytes)	\
    roundup2((addr), (align_bytes))
#define	NICVF_ADDR_ALIGN_LEN(addr, bytes)	\
    (NICVF_ALIGNED_ADDR((addr), (bytes)) - (bytes))
/* Bytes of padding needed to bring (addr) to receive-buffer alignment */
#define	NICVF_RCV_BUF_ALIGN_LEN(addr)		\
    (NICVF_ALIGNED_ADDR((addr), NICVF_RCV_BUF_ALIGN_BYTES) - (addr))

#define	NICVF_TXBUF_MAXSIZE	NIC_HW_MAX_FRS	/* Total max payload without TSO */
#define	NICVF_TXBUF_NSEGS	256	/* Single command is at most 256 buffers
					   (hdr + 255 subcmds) */
/* TSO-related definitions */
#define	NICVF_TSO_MAXSIZE	IP_MAXPACKET
#define	NICVF_TSO_NSEGS		NICVF_TXBUF_NSEGS
#define	NICVF_TSO_HEADER_SIZE	128

/* Queue enable/disable */
#define	NICVF_SQ_EN		(1UL << 19)

/* Queue reset */
#define	NICVF_CQ_RESET		(1UL << 41)
#define	NICVF_SQ_RESET		(1UL << 17)
#define	NICVF_RBDR_RESET	(1UL << 43)
149289550Szbb
/* Protocol layer at which a receive error was detected */
enum CQ_RX_ERRLVL_E {
	CQ_ERRLVL_MAC,
	CQ_ERRLVL_L2,
	CQ_ERRLVL_L3,
	CQ_ERRLVL_L4,
};
156289550Szbb
/*
 * Receive-completion error opcodes, grouped by layer:
 * 0x0x receive engine, 0x2x L2, 0x4x L3, 0x6x L4, 0x70 RBDR.
 */
enum CQ_RX_ERROP_E {
	CQ_RX_ERROP_RE_NONE = 0x0,
	CQ_RX_ERROP_RE_PARTIAL = 0x1,
	CQ_RX_ERROP_RE_JABBER = 0x2,
	CQ_RX_ERROP_RE_FCS = 0x7,
	CQ_RX_ERROP_RE_TERMINATE = 0x9,
	CQ_RX_ERROP_RE_RX_CTL = 0xb,
	CQ_RX_ERROP_PREL2_ERR = 0x1f,
	CQ_RX_ERROP_L2_FRAGMENT = 0x20,
	CQ_RX_ERROP_L2_OVERRUN = 0x21,
	CQ_RX_ERROP_L2_PFCS = 0x22,
	CQ_RX_ERROP_L2_PUNY = 0x23,
	CQ_RX_ERROP_L2_MAL = 0x24,
	CQ_RX_ERROP_L2_OVERSIZE = 0x25,
	CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
	CQ_RX_ERROP_L2_LENMISM = 0x27,
	CQ_RX_ERROP_L2_PCLP = 0x28,
	CQ_RX_ERROP_IP_NOT = 0x41,
	CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
	CQ_RX_ERROP_IP_MAL = 0x43,
	CQ_RX_ERROP_IP_MALD = 0x44,
	CQ_RX_ERROP_IP_HOP = 0x45,
	CQ_RX_ERROP_L3_ICRC = 0x46,
	CQ_RX_ERROP_L3_PCLP = 0x47,
	CQ_RX_ERROP_L4_MAL = 0x61,
	CQ_RX_ERROP_L4_CHK = 0x62,
	CQ_RX_ERROP_UDP_LEN = 0x63,
	CQ_RX_ERROP_L4_PORT = 0x64,
	CQ_RX_ERROP_TCP_FLAG = 0x65,
	CQ_RX_ERROP_TCP_OFFSET = 0x66,
	CQ_RX_ERROP_L4_PCLP = 0x67,
	CQ_RX_ERROP_RBDR_TRUNC = 0x70,
};
190289550Szbb
/* Transmit-completion error opcodes */
enum CQ_TX_ERROP_E {
	CQ_TX_ERROP_GOOD = 0x0,	/* Successful transmission */
	CQ_TX_ERROP_DESC_FAULT = 0x10,
	CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
	CQ_TX_ERROP_SUBDC_ERR = 0x12,
	CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
	CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
	CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
	CQ_TX_ERROP_LOCK_VIOL = 0x83,
	CQ_TX_ERROP_DATA_FAULT = 0x84,
	CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
	CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
	CQ_TX_ERROP_MEM_FAULT = 0x87,
	CQ_TX_ERROP_CK_OVERLAP = 0x88,
	CQ_TX_ERROP_CK_OFLOW = 0x89,
	CQ_TX_ERROP_ENUM_LAST = 0x8a,	/* Upper bound, not a real opcode */
};
208289550Szbb
/*
 * Per-CQ completion statistics.  The tx counters mirror the
 * CQ_TX_ERROP_E opcodes (one counter per error class).
 */
struct cmp_queue_stats {
	struct tx_stats {
		uint64_t good;
		uint64_t desc_fault;
		uint64_t hdr_cons_err;
		uint64_t subdesc_err;
		uint64_t imm_size_oflow;
		uint64_t data_seq_err;
		uint64_t mem_seq_err;
		uint64_t lock_viol;
		uint64_t data_fault;
		uint64_t tstmp_conflict;
		uint64_t tstmp_timeout;
		uint64_t mem_fault;
		uint64_t csum_overlap;
		uint64_t csum_overflow;
	} tx;
} __aligned(CACHE_LINE_SIZE);
227289550Szbb
/* RQ/SQ statistics selectors: octet and packet counters */
enum RQ_SQ_STATS {
	RQ_SQ_STATS_OCTS,
	RQ_SQ_STATS_PKTS,
};
232289550Szbb
/* Per-queue byte/packet counters (used for both RQs and SQs) */
struct rx_tx_queue_stats {
	uint64_t	bytes;
	uint64_t	pkts;
} __aligned(CACHE_LINE_SIZE);
237289550Szbb
/* DMA-able memory backing one descriptor ring */
struct q_desc_mem {
	bus_dma_tag_t	dmat;		/* DMA tag for the ring memory */
	bus_dmamap_t	dmap;		/* DMA map for the ring memory */
	void		*base;		/* Kernel virtual address of the ring */
	bus_addr_t	phys_base;	/* Bus/physical address of the ring */
	uint64_t	size;		/* Ring memory size */
	uint16_t	q_len;		/* Number of ring entries */
};
246289550Szbb
/*
 * Receive buffer descriptor ring: hands free receive buffers
 * to the hardware.
 */
struct rbdr {
	boolean_t		enable;
	uint32_t		dma_size;
	uint32_t		frag_len;
	uint32_t		thresh;		/* Threshold level for interrupt */
	void			*desc;		/* Descriptor ring base (KVA) */
	uint32_t		head;
	uint32_t		tail;
	struct q_desc_mem	dmem;		/* Backing DMA memory */

	struct nicvf		*nic;		/* Owning device softc */
	int			idx;		/* This ring's index in the QS */

	/* Tasks and taskqueue servicing this ring (buffer replenishment) */
	struct task		rbdr_task;
	struct task		rbdr_task_nowait;
	struct taskqueue	*rbdr_taskq;

	/* DMA tag and per-buffer maps for buffers posted to this ring */
	bus_dma_tag_t		rbdr_buff_dmat;
	bus_dmamap_t		*rbdr_buff_dmaps;
} __aligned(CACHE_LINE_SIZE);
267289551Szbb
/* Receive queue state */
struct rcv_queue {
	boolean_t	enable;
	struct	rbdr	*rbdr_start;	/* RBDR supplying first buffers */
	struct	rbdr	*rbdr_cont;	/* RBDR supplying continuation buffers */
	boolean_t	en_tcp_reassembly;
	uint8_t		cq_qs;  /* CQ's QS to which this RQ is assigned */
	uint8_t		cq_idx; /* CQ index (0 to 7) in the QS */
	uint8_t		cont_rbdr_qs;      /* Continue buffer ptrs - QS num */
	uint8_t		cont_qs_rbdr_idx;  /* RBDR idx in the cont QS */
	uint8_t		start_rbdr_qs;     /* First buffer ptrs - QS num */
	uint8_t		start_qs_rbdr_idx; /* RBDR idx in the above QS */
	uint8_t		caching;
	struct		rx_tx_queue_stats stats;	/* RX byte/packet counters */

	boolean_t	lro_enabled;	/* Software LRO active on this RQ */
	struct lro_ctrl	lro;		/* Software LRO state */
} __aligned(CACHE_LINE_SIZE);
285289550Szbb
/* Completion queue state */
struct cmp_queue {
	boolean_t		enable;
	uint16_t		thresh;

	struct nicvf		*nic;	/* Owning device softc */
	int			idx;	/* This queue index */

	struct buf_ring		*rx_br;	/* Reception buf ring */
	struct mtx		mtx;	/* lock to serialize processing CQEs */
	char			mtx_name[32];

	/* Deferred CQE-processing task and its taskqueue */
	struct task		cmp_task;
	struct taskqueue	*cmp_taskq;
	u_int			cmp_cpuid; /* CPU to which bind the CQ task */

	void			*desc;	/* Descriptor ring base (KVA) */
	struct q_desc_mem	dmem;	/* Backing DMA memory */
	struct cmp_queue_stats	stats;	/* TX completion error counters */
	int			irq;	/* Interrupt vector for this CQ */
} __aligned(CACHE_LINE_SIZE);
306289550Szbb
/* Per-descriptor transmit buffer bookkeeping */
struct snd_buff {
	bus_dmamap_t	dmap;	/* DMA map for the mbuf data */
	struct mbuf	*mbuf;	/* mbuf associated with this SQ slot */
};
311289551Szbb
/* Send (transmit) queue state */
struct snd_queue {
	boolean_t		enable;
	uint8_t			cq_qs;  /* CQ's QS to which this SQ is pointing */
	uint8_t			cq_idx; /* CQ index (0 to 7) in the above QS */
	uint16_t		thresh;
	volatile int		free_cnt;	/* Free descriptor slots left */
	uint32_t		head;
	uint32_t		tail;
	uint64_t		*skbuff;
	void			*desc;	/* Descriptor ring base (KVA) */

	struct nicvf		*nic;	/* Owning device softc */
	int			idx;	/* This queue index */

	/* DMA tag and per-slot buffer state for transmit mbufs */
	bus_dma_tag_t		snd_buff_dmat;
	struct snd_buff		*snd_buff;

	struct buf_ring		*br;	/* Transmission buf ring */
	struct mtx		mtx;	/* Serializes transmit on this SQ */
	char			mtx_name[32];

	/* Deferred transmit task and its taskqueue */
	struct task		snd_task;
	struct taskqueue	*snd_taskq;

	struct q_desc_mem	dmem;	/* Backing DMA memory */
	struct rx_tx_queue_stats stats;	/* TX byte/packet counters */
} __aligned(CACHE_LINE_SIZE);
339289550Szbb
/* A complete queue set (QS): all queues belonging to one VF */
struct queue_set {
	boolean_t	enable;
	boolean_t	be_en;		/* Big-endian mode enabled */
	uint8_t		vnic_id;
	uint8_t		rq_cnt;		/* Active receive queues */
	uint8_t		cq_cnt;		/* Active completion queues */
	uint64_t	cq_len;		/* Completion queue length (entries) */
	uint8_t		sq_cnt;		/* Active send queues */
	uint64_t	sq_len;		/* Send queue length (entries) */
	uint8_t		rbdr_cnt;	/* Active buffer descriptor rings */
	uint64_t	rbdr_len;	/* RBDR length (entries) */
	struct	rcv_queue	rq[MAX_RCV_QUEUES_PER_QS];
	struct	cmp_queue	cq[MAX_CMP_QUEUES_PER_QS];
	struct	snd_queue	sq[MAX_SND_QUEUES_PER_QS];
	struct	rbdr		rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];

	/* Task handling QS error interrupts */
	struct task		qs_err_task;
	struct taskqueue	*qs_err_taskq;
} __aligned(CACHE_LINE_SIZE);
359289550Szbb
/* Typed accessors for the idx'th entry of a ring's 'desc' array */
#define	GET_RBDR_DESC(RING, idx)				\
    (&(((struct rbdr_entry_t *)((RING)->desc))[(idx)]))
#define	GET_SQ_DESC(RING, idx)					\
    (&(((struct sq_hdr_subdesc *)((RING)->desc))[(idx)]))
#define	GET_CQ_DESC(RING, idx)					\
    (&(((union cq_desc_t *)((RING)->desc))[(idx)]))

/* CQ status bits */
#define	CQ_WR_FUL	(1UL << 26)	/* Write-full condition */
#define	CQ_WR_DISABLE	(1UL << 25)	/* Write-disable condition */
#define	CQ_WR_FAULT	(1UL << 24)	/* Write-fault condition */
#define	CQ_CQE_COUNT	(0xFFFF << 0)	/* Pending CQE count field */

/* Any of the CQ write-error conditions above */
#define	CQ_ERR_MASK	(CQ_WR_FUL | CQ_WR_DISABLE | CQ_WR_FAULT)
374289550Szbb
/* Send queue lock: serializes transmission on a single SQ */
#define	NICVF_TX_LOCK(sq)		mtx_lock(&(sq)->mtx)
#define	NICVF_TX_TRYLOCK(sq)		mtx_trylock(&(sq)->mtx)
#define	NICVF_TX_UNLOCK(sq)		mtx_unlock(&(sq)->mtx)
#define	NICVF_TX_LOCK_ASSERT(sq)	mtx_assert(&(sq)->mtx, MA_OWNED)

/* Completion queue lock: serializes CQE processing on a single CQ */
#define	NICVF_CMP_LOCK(cq)		mtx_lock(&(cq)->mtx)
#define	NICVF_CMP_UNLOCK(cq)		mtx_unlock(&(cq)->mtx)
382289550Szbb
383289551Szbbint nicvf_set_qset_resources(struct nicvf *);
384289551Szbbint nicvf_config_data_transfer(struct nicvf *, boolean_t);
385289551Szbbvoid nicvf_qset_config(struct nicvf *, boolean_t);
386289550Szbb
387289551Szbbvoid nicvf_enable_intr(struct nicvf *, int, int);
388289551Szbbvoid nicvf_disable_intr(struct nicvf *, int, int);
389289551Szbbvoid nicvf_clear_intr(struct nicvf *, int, int);
390289551Szbbint nicvf_is_intr_enabled(struct nicvf *, int, int);
391289550Szbb
392297450Szbbint nicvf_xmit_locked(struct snd_queue *sq);
393296035Szbb
394289550Szbb/* Register access APIs */
395289551Szbbvoid nicvf_reg_write(struct nicvf *, uint64_t, uint64_t);
396289551Szbbuint64_t nicvf_reg_read(struct nicvf *, uint64_t);
397289551Szbbvoid nicvf_qset_reg_write(struct nicvf *, uint64_t, uint64_t);
398289551Szbbuint64_t nicvf_qset_reg_read(struct nicvf *, uint64_t);
399289551Szbbvoid nicvf_queue_reg_write(struct nicvf *, uint64_t, uint64_t, uint64_t);
400289551Szbbuint64_t nicvf_queue_reg_read(struct nicvf *, uint64_t, uint64_t);
401289550Szbb
402289550Szbb/* Stats */
403289551Szbbvoid nicvf_update_rq_stats(struct nicvf *, int);
404289551Szbbvoid nicvf_update_sq_stats(struct nicvf *, int);
405289551Szbbint nicvf_check_cqe_rx_errs(struct nicvf *, struct cmp_queue *,
406289551Szbb    struct cqe_rx_t *);
407289551Szbbint nicvf_check_cqe_tx_errs(struct nicvf *,struct cmp_queue *,
408289551Szbb    struct cqe_send_t *);
409289550Szbb#endif /* NICVF_QUEUES_H */
410