1289550Szbb/*
2289550Szbb * Copyright (C) 2015 Cavium Inc.
3289550Szbb * All rights reserved.
4289550Szbb *
5289550Szbb * Redistribution and use in source and binary forms, with or without
6289550Szbb * modification, are permitted provided that the following conditions
7289550Szbb * are met:
8289550Szbb * 1. Redistributions of source code must retain the above copyright
9289550Szbb *    notice, this list of conditions and the following disclaimer.
10289550Szbb * 2. Redistributions in binary form must reproduce the above copyright
11289550Szbb *    notice, this list of conditions and the following disclaimer in the
12289550Szbb *    documentation and/or other materials provided with the distribution.
13289550Szbb *
14289550Szbb * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15289550Szbb * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16289550Szbb * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17289550Szbb * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18289550Szbb * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19289550Szbb * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20289550Szbb * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21289550Szbb * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22289550Szbb * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23289550Szbb * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24289550Szbb * SUCH DAMAGE.
25289550Szbb *
26289550Szbb * $FreeBSD: stable/11/sys/dev/vnic/nicvf_queues.c 326788 2017-12-12 01:20:45Z emaste $
27289550Szbb *
28289550Szbb */
29289551Szbb#include <sys/cdefs.h>
30289551Szbb__FBSDID("$FreeBSD: stable/11/sys/dev/vnic/nicvf_queues.c 326788 2017-12-12 01:20:45Z emaste $");
31289550Szbb
32296030Szbb#include "opt_inet.h"
33296030Szbb#include "opt_inet6.h"
34296030Szbb
35289551Szbb#include <sys/param.h>
36289551Szbb#include <sys/systm.h>
37289551Szbb#include <sys/bitset.h>
38289551Szbb#include <sys/bitstring.h>
39289551Szbb#include <sys/buf_ring.h>
40289551Szbb#include <sys/bus.h>
41289551Szbb#include <sys/endian.h>
42289551Szbb#include <sys/kernel.h>
43289551Szbb#include <sys/malloc.h>
44289551Szbb#include <sys/module.h>
45289551Szbb#include <sys/rman.h>
46289551Szbb#include <sys/pciio.h>
47289551Szbb#include <sys/pcpu.h>
48289551Szbb#include <sys/proc.h>
49289551Szbb#include <sys/sockio.h>
50289551Szbb#include <sys/socket.h>
51299446Szbb#include <sys/stdatomic.h>
52289551Szbb#include <sys/cpuset.h>
53289551Szbb#include <sys/lock.h>
54289551Szbb#include <sys/mutex.h>
55289551Szbb#include <sys/smp.h>
56289551Szbb#include <sys/taskqueue.h>
57289550Szbb
58289551Szbb#include <vm/vm.h>
59289551Szbb#include <vm/pmap.h>
60289551Szbb
61289551Szbb#include <machine/bus.h>
62289551Szbb#include <machine/vmparam.h>
63289551Szbb
64289551Szbb#include <net/if.h>
65289551Szbb#include <net/if_var.h>
66289551Szbb#include <net/if_media.h>
67289551Szbb#include <net/ifq.h>
68297450Szbb#include <net/bpf.h>
69297450Szbb#include <net/ethernet.h>
70289551Szbb
71296030Szbb#include <netinet/in_systm.h>
72296030Szbb#include <netinet/in.h>
73296030Szbb#include <netinet/if_ether.h>
74296030Szbb#include <netinet/ip.h>
75296030Szbb#include <netinet/ip6.h>
76296030Szbb#include <netinet/sctp.h>
77296030Szbb#include <netinet/tcp.h>
78296030Szbb#include <netinet/tcp_lro.h>
79296030Szbb#include <netinet/udp.h>
80296030Szbb
81326062Semaste#include <netinet6/ip6_var.h>
82326062Semaste
83289551Szbb#include <dev/pci/pcireg.h>
84289551Szbb#include <dev/pci/pcivar.h>
85289551Szbb
86289551Szbb#include "thunder_bgx.h"
87289550Szbb#include "nic_reg.h"
88289550Szbb#include "nic.h"
89289550Szbb#include "q_struct.h"
90289550Szbb#include "nicvf_queues.h"
91289550Szbb
92289551Szbb#define	DEBUG
93289551Szbb#undef DEBUG
94289551Szbb
95289551Szbb#ifdef DEBUG
96289551Szbb#define	dprintf(dev, fmt, ...)	device_printf(dev, fmt, ##__VA_ARGS__)
97289551Szbb#else
98289551Szbb#define	dprintf(dev, fmt, ...)
99289551Szbb#endif
100289551Szbb
101289551SzbbMALLOC_DECLARE(M_NICVF);
102289551Szbb
103289551Szbbstatic void nicvf_free_snd_queue(struct nicvf *, struct snd_queue *);
104289551Szbbstatic struct mbuf * nicvf_get_rcv_mbuf(struct nicvf *, struct cqe_rx_t *);
105289551Szbbstatic void nicvf_sq_disable(struct nicvf *, int);
106289551Szbbstatic void nicvf_sq_enable(struct nicvf *, struct snd_queue *, int);
107289551Szbbstatic void nicvf_put_sq_desc(struct snd_queue *, int);
108289551Szbbstatic void nicvf_cmp_queue_config(struct nicvf *, struct queue_set *, int,
109289551Szbb    boolean_t);
110289551Szbbstatic void nicvf_sq_free_used_descs(struct nicvf *, struct snd_queue *, int);
111289551Szbb
112297450Szbbstatic int nicvf_tx_mbuf_locked(struct snd_queue *, struct mbuf **);
113297450Szbb
114289551Szbbstatic void nicvf_rbdr_task(void *, int);
115289551Szbbstatic void nicvf_rbdr_task_nowait(void *, int);
116289551Szbb
/*
 * Per-receive-buffer metadata stored in the first cache line of the
 * buffer itself (the area skipped by m_adj() in nicvf_alloc_rcv_buffer()).
 * Lets the receive path map a bus address reported by HW back to the
 * owning mbuf and its DMA resources.
 */
struct rbuf_info {
	bus_dma_tag_t	dmat;	/* tag the buffer was mapped with */
	bus_dmamap_t	dmap;	/* DMA map of this buffer */
	struct mbuf *	mbuf;	/* mbuf backing this receive buffer */
};

/* Recover the rbuf_info header stored one alignment line before the buffer */
#define GET_RBUF_INFO(x) ((struct rbuf_info *)((x) - NICVF_RCV_BUF_ALIGN_BYTES))
124289550Szbb
125289550Szbb/* Poll a register for a specific value */
126289550Szbbstatic int nicvf_poll_reg(struct nicvf *nic, int qidx,
127289551Szbb			  uint64_t reg, int bit_pos, int bits, int val)
128289550Szbb{
129289551Szbb	uint64_t bit_mask;
130289551Szbb	uint64_t reg_val;
131289550Szbb	int timeout = 10;
132289550Szbb
133289551Szbb	bit_mask = (1UL << bits) - 1;
134289550Szbb	bit_mask = (bit_mask << bit_pos);
135289550Szbb
136289550Szbb	while (timeout) {
137289550Szbb		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
138289550Szbb		if (((reg_val & bit_mask) >> bit_pos) == val)
139289551Szbb			return (0);
140289551Szbb
141289551Szbb		DELAY(1000);
142289550Szbb		timeout--;
143289550Szbb	}
144289551Szbb	device_printf(nic->dev, "Poll on reg 0x%lx failed\n", reg);
145289551Szbb	return (ETIMEDOUT);
146289550Szbb}
147289550Szbb
148289551Szbb/* Callback for bus_dmamap_load() */
149289551Szbbstatic void
150289551Szbbnicvf_dmamap_q_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
151289551Szbb{
152289551Szbb	bus_addr_t *paddr;
153289551Szbb
154289551Szbb	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
155289551Szbb	paddr = arg;
156289551Szbb	*paddr = segs->ds_addr;
157289551Szbb}
158289551Szbb
/*
 * Allocate memory for a queue's descriptors.
 *
 * Creates a dedicated busdma tag, allocates one physically contiguous,
 * zeroed segment of (q_len * desc_size) bytes and loads the map to
 * learn the ring's bus address.  On success dmem->base, dmem->phys_base,
 * dmem->q_len and dmem->size are valid; on failure all busdma resources
 * created so far are rolled back and the busdma errno is returned.
 */
static int
nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
    int q_len, int desc_size, int align_bytes)
{
	int err, err_dmat;

	/* Create DMA tag first */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(nic->dev),		/* parent tag */
	    align_bytes,			/* alignment */
	    0,					/* boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    (q_len * desc_size),		/* maxsize */
	    1,					/* nsegments */
	    (q_len * desc_size),		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &dmem->dmat);			/* dmat */

	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to create busdma tag for descriptors ring\n");
		return (err);
	}

	/* Allocate segment of continuous DMA safe memory */
	err = bus_dmamem_alloc(
	    dmem->dmat,				/* DMA tag */
	    &dmem->base,			/* virtual address */
	    (BUS_DMA_NOWAIT | BUS_DMA_ZERO),	/* flags */
	    &dmem->dmap);			/* DMA map */
	if (err != 0) {
		device_printf(nic->dev, "Failed to allocate DMA safe memory for"
		    "descriptors ring\n");
		goto dmamem_fail;
	}

	/* Load the map to resolve the ring's physical (bus) address. */
	err = bus_dmamap_load(
	    dmem->dmat,
	    dmem->dmap,
	    dmem->base,
	    (q_len * desc_size),		/* allocation size */
	    nicvf_dmamap_q_cb,			/* map to DMA address cb. */
	    &dmem->phys_base,			/* physical address */
	    BUS_DMA_NOWAIT);
	if (err != 0) {
		device_printf(nic->dev,
		    "Cannot load DMA map of descriptors ring\n");
		goto dmamap_fail;
	}

	dmem->q_len = q_len;
	dmem->size = (desc_size * q_len);

	return (0);

	/* Error unwind: release in reverse order of acquisition. */
dmamap_fail:
	bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
	dmem->phys_base = 0;
dmamem_fail:
	err_dmat = bus_dma_tag_destroy(dmem->dmat);
	dmem->base = NULL;
	KASSERT(err_dmat == 0,
	    ("%s: Trying to destroy BUSY DMA tag", __func__));

	return (err);
}
229289550Szbb
/*
 * Free queue's descriptor memory previously set up by
 * nicvf_alloc_q_desc_mem().  Safe to call with a NULL dmem or one that
 * was never successfully initialized (dmem->base == NULL).
 */
static void
nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
	int err;

	if ((dmem == NULL) || (dmem->base == NULL))
		return;

	/* Unload a map */
	bus_dmamap_sync(dmem->dmat, dmem->dmap, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(dmem->dmat, dmem->dmap);
	/* Free DMA memory */
	bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
	/* Destroy DMA tag */
	err = bus_dma_tag_destroy(dmem->dmat);

	KASSERT(err == 0,
	    ("%s: Trying to destroy BUSY DMA tag", __func__));

	/* Mark the structure so a repeated free is a no-op. */
	dmem->phys_base = 0;
	dmem->base = NULL;
}
253289550Szbb
/*
 * Allocate buffer for packet reception
 * HW returns memory address where packet is DMA'ed but not a pointer
 * into RBDR ring, so save buffer address at the start of fragment and
 * align the start address to a cache aligned address
 *
 * 'mflags' (M_WAITOK/M_NOWAIT) controls whether the mbuf allocation may
 * sleep.  On success *rbuf receives the bus address of the usable part
 * of the buffer (just past the metadata line).
 */
static __inline int
nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
    bus_dmamap_t dmap, int mflags, uint32_t buf_len, bus_addr_t *rbuf)
{
	struct mbuf *mbuf;
	struct rbuf_info *rinfo;
	bus_dma_segment_t segs[1];
	int nsegs;
	int err;

	mbuf = m_getjcl(mflags, MT_DATA, M_PKTHDR, MCLBYTES);
	if (mbuf == NULL)
		return (ENOMEM);

	/*
	 * The length is equal to the actual length + one 128b line
	 * used as a room for rbuf_info structure.
	 */
	mbuf->m_len = mbuf->m_pkthdr.len = buf_len;

	err = bus_dmamap_load_mbuf_sg(rbdr->rbdr_buff_dmat, dmap, mbuf, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to map mbuf into DMA visible memory, err: %d\n",
		    err);
		m_freem(mbuf);
		/*
		 * NOTE(review): this destroys a map the caller keeps cached
		 * in rbdr->rbdr_buff_dmaps[]; a later refill of the same
		 * slot would then reuse a destroyed map - confirm against
		 * the refill path.
		 */
		bus_dmamap_destroy(rbdr->rbdr_buff_dmat, dmap);
		return (err);
	}
	if (nsegs != 1)
		panic("Unexpected number of DMA segments for RB: %d", nsegs);
	/*
	 * Now use the room for rbuf_info structure
	 * and adjust mbuf data and length.
	 */
	rinfo = (struct rbuf_info *)mbuf->m_data;
	m_adj(mbuf, NICVF_RCV_BUF_ALIGN_BYTES);

	rinfo->dmat = rbdr->rbdr_buff_dmat;
	rinfo->dmap = dmap;
	rinfo->mbuf = mbuf;

	/* Hand the post-metadata bus address to the caller. */
	*rbuf = segs[0].ds_addr + NICVF_RCV_BUF_ALIGN_BYTES;

	return (0);
}
307289550Szbb
/*
 * Retrieve mbuf for received packet.
 * 'rb_ptr' is the bus address reported by HW; the rbuf_info header kept
 * just in front of it (see nicvf_alloc_rcv_buffer()) links the buffer
 * back to its mbuf and DMA map.  Panics on a corrupted (NULL-mbuf) slot.
 */
static struct mbuf *
nicvf_rb_ptr_to_mbuf(struct nicvf *nic, bus_addr_t rb_ptr)
{
	struct mbuf *mbuf;
	struct rbuf_info *rinfo;

	/* Get buffer start address and alignment offset */
	rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(rb_ptr));

	/* Now retrieve mbuf to give to stack */
	mbuf = rinfo->mbuf;
	if (__predict_false(mbuf == NULL)) {
		panic("%s: Received packet fragment with NULL mbuf",
		    device_get_nameunit(nic->dev));
	}
	/*
	 * Clear the mbuf in the descriptor to indicate
	 * that this slot is processed and free to use.
	 */
	rinfo->mbuf = NULL;

	/* Sync CPU view of the DMA'd data before handing it upward. */
	bus_dmamap_sync(rinfo->dmat, rinfo->dmap, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(rinfo->dmat, rinfo->dmap);

	return (mbuf);
}
335289550Szbb
/*
 * Allocate RBDR ring and populate receive buffers.
 *
 * Sets up the descriptor ring memory, a busdma tag plus one DMA map per
 * ring slot, fills every slot with a freshly allocated receive buffer
 * and creates the taskqueue used to refill the ring at runtime.  May
 * sleep (M_WAITOK allocations throughout).
 *
 * NOTE(review): on mid-function failure the resources created so far
 * (dmem, tag, maps) are not released here - presumably the caller tears
 * everything down via nicvf_free_rbdr(); verify against the caller.
 */
static int
nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, int ring_len,
    int buf_size, int qidx)
{
	bus_dmamap_t dmap;
	bus_addr_t rbuf;
	struct rbdr_entry_t *desc;
	int idx;
	int err;

	/* Allocate rbdr descriptors ring */
	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
	    sizeof(struct rbdr_entry_t), NICVF_RCV_BUF_ALIGN_BYTES);
	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to create RBDR descriptors ring\n");
		return (err);
	}

	rbdr->desc = rbdr->dmem.base;
	/*
	 * Buffer size has to be in multiples of 128 bytes.
	 * Make room for metadata of size of one line (128 bytes).
	 */
	rbdr->dma_size = buf_size - NICVF_RCV_BUF_ALIGN_BYTES;
	rbdr->enable = TRUE;
	rbdr->thresh = RBDR_THRESH;
	rbdr->nic = nic;
	rbdr->idx = qidx;

	/*
	 * Create DMA tag for Rx buffers.
	 * Each map created using this tag is intended to store Rx payload for
	 * one fragment and one header structure containing rbuf_info (thus
	 * additional 128 byte line since RB must be a multiple of 128 byte
	 * cache line).
	 */
	if (buf_size > MCLBYTES) {
		device_printf(nic->dev,
		    "Buffer size to large for mbuf cluster\n");
		return (EINVAL);
	}
	err = bus_dma_tag_create(
	    bus_get_dma_tag(nic->dev),		/* parent tag */
	    NICVF_RCV_BUF_ALIGN_BYTES,		/* alignment */
	    0,					/* boundary */
	    DMAP_MAX_PHYSADDR,			/* lowaddr */
	    DMAP_MIN_PHYSADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    roundup2(buf_size, MCLBYTES),	/* maxsize */
	    1,					/* nsegments */
	    roundup2(buf_size, MCLBYTES),	/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &rbdr->rbdr_buff_dmat);		/* dmat */

	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to create busdma tag for RBDR buffers\n");
		return (err);
	}

	/* One DMA map per ring slot; the maps are reused on refill. */
	rbdr->rbdr_buff_dmaps = malloc(sizeof(*rbdr->rbdr_buff_dmaps) *
	    ring_len, M_NICVF, (M_WAITOK | M_ZERO));

	for (idx = 0; idx < ring_len; idx++) {
		err = bus_dmamap_create(rbdr->rbdr_buff_dmat, 0, &dmap);
		if (err != 0) {
			device_printf(nic->dev,
			    "Failed to create DMA map for RB\n");
			return (err);
		}
		rbdr->rbdr_buff_dmaps[idx] = dmap;

		err = nicvf_alloc_rcv_buffer(nic, rbdr, dmap, M_WAITOK,
		    DMA_BUFFER_LEN, &rbuf);
		if (err != 0)
			return (err);

		desc = GET_RBDR_DESC(rbdr, idx);
		/* HW expects buffer addresses shifted by the alignment. */
		desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
	}

	/* Allocate taskqueue */
	TASK_INIT(&rbdr->rbdr_task, 0, nicvf_rbdr_task, rbdr);
	TASK_INIT(&rbdr->rbdr_task_nowait, 0, nicvf_rbdr_task_nowait, rbdr);
	rbdr->rbdr_taskq = taskqueue_create_fast("nicvf_rbdr_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &rbdr->rbdr_taskq);
	taskqueue_start_threads(&rbdr->rbdr_taskq, 1, PI_NET, "%s: rbdr_taskq",
	    device_get_nameunit(nic->dev));

	return (0);
}
430289550Szbb
/*
 * Free RBDR ring and its receive buffers.
 *
 * Cancels/drains and destroys the refill tasks first so no refill can
 * race with the teardown, then walks the ring from head to tail
 * releasing every still-posted buffer, destroys all per-slot DMA maps
 * and the buffer tag, and finally frees the descriptor ring memory.
 */
static void
nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
	struct mbuf *mbuf;
	struct queue_set *qs;
	struct rbdr_entry_t *desc;
	struct rbuf_info *rinfo;
	bus_addr_t buf_addr;
	int head, tail, idx;
	int err;

	qs = nic->qs;

	if ((qs == NULL) || (rbdr == NULL))
		return;

	rbdr->enable = FALSE;
	if (rbdr->rbdr_taskq != NULL) {
		/* Remove tasks */
		while (taskqueue_cancel(rbdr->rbdr_taskq,
		    &rbdr->rbdr_task_nowait, NULL) != 0) {
			/* Finish the nowait task first */
			taskqueue_drain(rbdr->rbdr_taskq,
			    &rbdr->rbdr_task_nowait);
		}
		taskqueue_free(rbdr->rbdr_taskq);
		rbdr->rbdr_taskq = NULL;

		while (taskqueue_cancel(taskqueue_thread,
		    &rbdr->rbdr_task, NULL) != 0) {
			/* Now finish the sleepable task */
			taskqueue_drain(taskqueue_thread, &rbdr->rbdr_task);
		}
	}

	/*
	 * Free all of the memory under the RB descriptors.
	 * There are assumptions here:
	 * 1. Corresponding RBDR is disabled
	 *    - it is safe to operate using head and tail indexes
	 * 2. All bffers that were received are properly freed by
	 *    the receive handler
	 *    - there is no need to unload DMA map and free MBUF for other
	 *      descriptors than unused ones
	 */
	if (rbdr->rbdr_buff_dmat != NULL) {
		head = rbdr->head;
		tail = rbdr->tail;
		while (head != tail) {
			desc = GET_RBDR_DESC(rbdr, head);
			buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
			rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
			bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
			mbuf = rinfo->mbuf;
			/* This will destroy everything including rinfo! */
			m_freem(mbuf);
			head++;
			/* Wrap around; ring size is a power of two. */
			head &= (rbdr->dmem.q_len - 1);
		}
		/* Free tail descriptor */
		desc = GET_RBDR_DESC(rbdr, tail);
		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
		rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
		bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
		mbuf = rinfo->mbuf;
		/* This will destroy everything including rinfo! */
		m_freem(mbuf);

		/* Destroy DMA maps */
		for (idx = 0; idx < qs->rbdr_len; idx++) {
			if (rbdr->rbdr_buff_dmaps[idx] == NULL)
				continue;
			err = bus_dmamap_destroy(rbdr->rbdr_buff_dmat,
			    rbdr->rbdr_buff_dmaps[idx]);
			KASSERT(err == 0,
			    ("%s: Could not destroy DMA map for RB, desc: %d",
			    __func__, idx));
			rbdr->rbdr_buff_dmaps[idx] = NULL;
		}

		/* Now destroy the tag */
		err = bus_dma_tag_destroy(rbdr->rbdr_buff_dmat);
		KASSERT(err == 0,
		    ("%s: Trying to destroy BUSY DMA tag", __func__));

		rbdr->head = 0;
		rbdr->tail = 0;
	}

	/* Free RBDR ring */
	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}
524289550Szbb
/*
 * Refill receive buffer descriptors with new buffers.
 *
 * Reads the HW count of available descriptors, allocates buffers for
 * the missing slots starting from the HW tail and rings the doorbell
 * with the number of newly posted descriptors.  'mflags'
 * (M_WAITOK/M_NOWAIT) controls whether buffer allocation may sleep.
 *
 * Returns 0 when the ring is fully replenished (RBDR interrupts are
 * re-enabled in that case) or ENOMEM when some allocation failed.
 */
static int
nicvf_refill_rbdr(struct rbdr *rbdr, int mflags)
{
	struct nicvf *nic;
	struct queue_set *qs;
	int rbdr_idx;
	int tail, qcount;
	int refill_rb_cnt;
	struct rbdr_entry_t *desc;
	bus_dmamap_t dmap;
	bus_addr_t rbuf;
	boolean_t rb_alloc_fail;
	int new_rb;

	rb_alloc_fail = TRUE;
	new_rb = 0;
	nic = rbdr->nic;
	qs = nic->qs;
	rbdr_idx = rbdr->idx;

	/* Check if it's enabled */
	if (!rbdr->enable)
		return (0);

	/* Get no of desc's to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
	qcount &= 0x7FFFF;
	/* Doorbell can be ringed with a max of ring size minus 1 */
	if (qcount >= (qs->rbdr_len - 1)) {
		/* Ring already full - nothing to refill. */
		rb_alloc_fail = FALSE;
		goto out;
	} else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;

	/* Start filling descs from tail */
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
	while (refill_rb_cnt) {
		tail++;
		tail &= (rbdr->dmem.q_len - 1);

		/* Reuse the DMA map preallocated for this slot. */
		dmap = rbdr->rbdr_buff_dmaps[tail];
		if (nicvf_alloc_rcv_buffer(nic, rbdr, dmap, mflags,
		    DMA_BUFFER_LEN, &rbuf)) {
			/* Something went wrong. Resign */
			break;
		}
		desc = GET_RBDR_DESC(rbdr, tail);
		desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
		refill_rb_cnt--;
		new_rb++;
	}

	/* make sure all memory stores are done before ringing doorbell */
	wmb();

	/* Check if buffer allocation failed */
	if (refill_rb_cnt == 0)
		rb_alloc_fail = FALSE;

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      rbdr_idx, new_rb);
out:
	if (!rb_alloc_fail) {
		/*
		 * Re-enable RBDR interrupts only
		 * if buffer allocation is success.
		 */
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

		return (0);
	}

	return (ENOMEM);
}
603289550Szbb
604289551Szbb/* Refill RBs even if sleep is needed to reclaim memory */
605289551Szbbstatic void
606289551Szbbnicvf_rbdr_task(void *arg, int pending)
607289550Szbb{
608289551Szbb	struct rbdr *rbdr;
609289551Szbb	int err;
610289550Szbb
611289551Szbb	rbdr = (struct rbdr *)arg;
612289551Szbb
613289551Szbb	err = nicvf_refill_rbdr(rbdr, M_WAITOK);
614289551Szbb	if (__predict_false(err != 0)) {
615289551Szbb		panic("%s: Failed to refill RBs even when sleep enabled",
616289551Szbb		    __func__);
617289551Szbb	}
618289550Szbb}
619289550Szbb
620289551Szbb/* Refill RBs as soon as possible without waiting */
621289551Szbbstatic void
622289551Szbbnicvf_rbdr_task_nowait(void *arg, int pending)
623289550Szbb{
624289551Szbb	struct rbdr *rbdr;
625289551Szbb	int err;
626289550Szbb
627289551Szbb	rbdr = (struct rbdr *)arg;
628289551Szbb
629289551Szbb	err = nicvf_refill_rbdr(rbdr, M_NOWAIT);
630289551Szbb	if (err != 0) {
631289551Szbb		/*
632289551Szbb		 * Schedule another, sleepable kernel thread
633289551Szbb		 * that will for sure refill the buffers.
634289551Szbb		 */
635289551Szbb		taskqueue_enqueue(taskqueue_thread, &rbdr->rbdr_task);
636289550Szbb	}
637289550Szbb}
638289550Szbb
/*
 * Handle a single CQE_TYPE_RX completion entry.
 *
 * Retrieves the packet's mbuf, performs error accounting, optionally
 * feeds IPv4/TCP packets with valid checksums to LRO, and otherwise
 * queues the mbuf on the completion queue's rx buf_ring for a later
 * hand-off to the stack (done outside the CQ lock by the caller).
 *
 * Returns 0 on success (including consumed/dropped packets) or the
 * buf_ring_enqueue() error when the ring is full, telling the caller
 * to retry later.
 */
static int
nicvf_rcv_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
    struct cqe_rx_t *cqe_rx, int cqe_type)
{
	struct mbuf *mbuf;
	struct rcv_queue *rq;
	int rq_idx;
	int err = 0;

	rq_idx = cqe_rx->rq_idx;
	rq = &nic->qs->rq[rq_idx];

	/* Check for errors */
	err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
	if (err && !cqe_rx->rb_cnt)
		return (0);

	mbuf = nicvf_get_rcv_mbuf(nic, cqe_rx);
	if (mbuf == NULL) {
		dprintf(nic->dev, "Packet not received\n");
		return (0);
	}

	/* If error packet */
	if (err != 0) {
		m_freem(mbuf);
		return (0);
	}

	if (rq->lro_enabled &&
	    ((cqe_rx->l3_type == L3TYPE_IPV4) && (cqe_rx->l4_type == L4TYPE_TCP)) &&
	    (mbuf->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
            (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
		/*
		 * At this point it is known that there are no errors in the
		 * packet. Attempt to LRO enqueue. Send to stack if no resources
		 * or enqueue error.
		 */
		if ((rq->lro.lro_cnt != 0) &&
		    (tcp_lro_rx(&rq->lro, mbuf, 0) == 0))
			return (0);
	}
	/*
	 * Push this packet to the stack later to avoid
	 * unlocking completion task in the middle of work.
	 */
	err = buf_ring_enqueue(cq->rx_br, mbuf);
	if (err != 0) {
		/*
		 * Failed to enqueue this mbuf.
		 * We don't drop it, just schedule another task.
		 */
		return (err);
	}

	return (0);
}
696289551Szbb
/*
 * Handle a single CQE_TYPE_SEND completion entry.
 *
 * Looks up the SQ header subdescriptor the completion points at,
 * unloads the DMA map of the transmitted buffer, frees the mbuf and
 * returns the used SQ descriptors to the send queue, then performs TX
 * error accounting.
 */
static void
nicvf_snd_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
    struct cqe_send_t *cqe_tx, int cqe_type)
{
	bus_dmamap_t dmap;
	struct mbuf *mbuf;
	struct snd_queue *sq;
	struct sq_hdr_subdesc *hdr;

	mbuf = NULL;
	sq = &nic->qs->sq[cqe_tx->sq_idx];

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
	/* Ignore completions not pointing at a header subdescriptor. */
	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
		return;

	dprintf(nic->dev,
	    "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
	    __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
	    cqe_tx->sqe_ptr, hdr->subdesc_cnt);

	dmap = (bus_dmamap_t)sq->snd_buff[cqe_tx->sqe_ptr].dmap;
	bus_dmamap_unload(sq->snd_buff_dmat, dmap);

	mbuf = (struct mbuf *)sq->snd_buff[cqe_tx->sqe_ptr].mbuf;
	if (mbuf != NULL) {
		m_freem(mbuf);
		sq->snd_buff[cqe_tx->sqe_ptr].mbuf = NULL;
		/* Release the header subdescriptor plus its payload descs. */
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}

	nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
}
730289551Szbb
/*
 * Process pending entries of completion queue 'cq_idx'.
 *
 * Under the CQ lock: dispatches RX and TX completions, rings the CQ
 * doorbell for the processed entries, restarts the send taskqueue when
 * TX work completed, and flushes LRO.  After dropping the lock, pushes
 * all mbufs queued on the rx buf_ring by the RX handler up to the
 * network stack.
 *
 * Returns 0 on success, or a non-zero error when an RX completion could
 * not be fully handled and processing should be retried later.
 */
static int
nicvf_cq_intr_handler(struct nicvf *nic, uint8_t cq_idx)
{
	struct mbuf *mbuf;
	struct ifnet *ifp;
	int processed_cqe, work_done = 0, tx_done = 0;
	int cqe_count, cqe_head;
	struct queue_set *qs = nic->qs;
	struct cmp_queue *cq = &qs->cq[cq_idx];
	struct snd_queue *sq = &qs->sq[cq_idx];
	struct rcv_queue *rq;
	struct cqe_rx_t *cq_desc;
	struct lro_ctrl	*lro;
	int rq_idx;
	int cmp_err;

	NICVF_CMP_LOCK(cq);
	cmp_err = 0;
	processed_cqe = 0;
	/* Get no of valid CQ entries to process */
	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
	cqe_count &= CQ_CQE_COUNT;
	if (cqe_count == 0)
		goto out;

	/* Get head of the valid CQ entries */
	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
	cqe_head &= 0xFFFF;

	dprintf(nic->dev, "%s CQ%d cqe_count %d cqe_head %d\n",
	    __func__, cq_idx, cqe_count, cqe_head);
	while (processed_cqe < cqe_count) {
		/* Get the CQ descriptor */
		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
		cqe_head++;
		cqe_head &= (cq->dmem.q_len - 1);
		/* Prefetch next CQ descriptor */
		__builtin_prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));

		dprintf(nic->dev, "CQ%d cq_desc->cqe_type %d\n", cq_idx,
		    cq_desc->cqe_type);
		switch (cq_desc->cqe_type) {
		case CQE_TYPE_RX:
			cmp_err = nicvf_rcv_pkt_handler(nic, cq, cq_desc,
			    CQE_TYPE_RX);
			if (__predict_false(cmp_err != 0)) {
				/*
				 * Ups. Cannot finish now.
				 * Let's try again later.
				 */
				goto done;
			}
			work_done++;
			break;
		case CQE_TYPE_SEND:
			nicvf_snd_pkt_handler(nic, cq, (void *)cq_desc,
			    CQE_TYPE_SEND);
			tx_done++;
			break;
		case CQE_TYPE_INVALID:
		case CQE_TYPE_RX_SPLIT:
		case CQE_TYPE_RX_TCP:
		case CQE_TYPE_SEND_PTP:
			/* Ignore for now */
			break;
		}
		processed_cqe++;
	}
done:
	dprintf(nic->dev,
	    "%s CQ%d processed_cqe %d work_done %d\n",
	    __func__, cq_idx, processed_cqe, work_done);

	/* Ring doorbell to inform H/W to reuse processed CQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, cq_idx, processed_cqe);

	if ((tx_done > 0) &&
	    ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0)) {
		/* Reenable TXQ if its stopped earlier due to SQ full */
		if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
		taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
	}
out:
	/*
	 * Flush any outstanding LRO work
	 */
	rq_idx = cq_idx;
	rq = &nic->qs->rq[rq_idx];
	lro = &rq->lro;
	tcp_lro_flush_all(lro);

	NICVF_CMP_UNLOCK(cq);

	ifp = nic->ifp;
	/* Push received MBUFs to the stack */
	while (!buf_ring_empty(cq->rx_br)) {
		mbuf = buf_ring_dequeue_mc(cq->rx_br);
		if (__predict_true(mbuf != NULL))
			(*ifp->if_input)(ifp, mbuf);
	}

	return (cmp_err);
}
834289551Szbb
835289551Szbb/*
836289551Szbb * Qset error interrupt handler
837289551Szbb *
838289551Szbb * As of now only CQ errors are handled
839289551Szbb */
840289551Szbbstatic void
841289551Szbbnicvf_qs_err_task(void *arg, int pending)
842289551Szbb{
843289551Szbb	struct nicvf *nic;
844289551Szbb	struct queue_set *qs;
845289551Szbb	int qidx;
846289551Szbb	uint64_t status;
847289551Szbb	boolean_t enable = TRUE;
848289551Szbb
849289551Szbb	nic = (struct nicvf *)arg;
850289551Szbb	qs = nic->qs;
851289551Szbb
852289551Szbb	/* Deactivate network interface */
853289551Szbb	if_setdrvflagbits(nic->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
854289551Szbb
855289551Szbb	/* Check if it is CQ err */
856289551Szbb	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
857289551Szbb		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
858289551Szbb		    qidx);
859289551Szbb		if ((status & CQ_ERR_MASK) == 0)
860289551Szbb			continue;
861289551Szbb		/* Process already queued CQEs and reconfig CQ */
862289551Szbb		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
863289551Szbb		nicvf_sq_disable(nic, qidx);
864289551Szbb		(void)nicvf_cq_intr_handler(nic, qidx);
865289551Szbb		nicvf_cmp_queue_config(nic, qs, qidx, enable);
866289551Szbb		nicvf_sq_free_used_descs(nic, &qs->sq[qidx], qidx);
867289551Szbb		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
868289551Szbb		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
869289551Szbb	}
870289551Szbb
871289551Szbb	if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
872289551Szbb	/* Re-enable Qset error interrupt */
873289551Szbb	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
874289551Szbb}
875289551Szbb
876289551Szbbstatic void
877289551Szbbnicvf_cmp_task(void *arg, int pending)
878289551Szbb{
879289551Szbb	struct cmp_queue *cq;
880289551Szbb	struct nicvf *nic;
881289551Szbb	int cmp_err;
882289551Szbb
883289551Szbb	cq = (struct cmp_queue *)arg;
884289551Szbb	nic = cq->nic;
885289551Szbb
886289551Szbb	/* Handle CQ descriptors */
887289551Szbb	cmp_err = nicvf_cq_intr_handler(nic, cq->idx);
888289551Szbb	if (__predict_false(cmp_err != 0)) {
889289551Szbb		/*
890289551Szbb		 * Schedule another thread here since we did not
891289551Szbb		 * process the entire CQ due to Tx or Rx CQ parse error.
892289551Szbb		 */
893289551Szbb		taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task);
894289551Szbb
895289551Szbb	}
896289551Szbb
897296601Szbb	nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
898289551Szbb	/* Reenable interrupt (previously disabled in nicvf_intr_handler() */
899289551Szbb	nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->idx);
900289551Szbb
901289551Szbb}
902289551Szbb
903289550Szbb/* Initialize completion queue */
904289551Szbbstatic int
905289551Szbbnicvf_init_cmp_queue(struct nicvf *nic, struct cmp_queue *cq, int q_len,
906289551Szbb    int qidx)
907289550Szbb{
908289550Szbb	int err;
909289550Szbb
910289551Szbb	/* Initizalize lock */
911289551Szbb	snprintf(cq->mtx_name, sizeof(cq->mtx_name), "%s: CQ(%d) lock",
912289551Szbb	    device_get_nameunit(nic->dev), qidx);
913289551Szbb	mtx_init(&cq->mtx, cq->mtx_name, NULL, MTX_DEF);
914289551Szbb
915289550Szbb	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
916289550Szbb				     NICVF_CQ_BASE_ALIGN_BYTES);
917289550Szbb
918289551Szbb	if (err != 0) {
919289551Szbb		device_printf(nic->dev,
920289551Szbb		    "Could not allocate DMA memory for CQ\n");
921289551Szbb		return (err);
922289551Szbb	}
923289551Szbb
924289550Szbb	cq->desc = cq->dmem.base;
925296038Szbb	cq->thresh = pass1_silicon(nic->dev) ? 0 : CMP_QUEUE_CQE_THRESH;
926289551Szbb	cq->nic = nic;
927289551Szbb	cq->idx = qidx;
928289550Szbb	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
929289550Szbb
930289551Szbb	cq->rx_br = buf_ring_alloc(CMP_QUEUE_LEN * 8, M_DEVBUF, M_WAITOK,
931289551Szbb	    &cq->mtx);
932289551Szbb
933289551Szbb	/* Allocate taskqueue */
934289551Szbb	TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq);
935289551Szbb	cq->cmp_taskq = taskqueue_create_fast("nicvf_cmp_taskq", M_WAITOK,
936289551Szbb	    taskqueue_thread_enqueue, &cq->cmp_taskq);
937289551Szbb	taskqueue_start_threads(&cq->cmp_taskq, 1, PI_NET, "%s: cmp_taskq(%d)",
938289551Szbb	    device_get_nameunit(nic->dev), qidx);
939289551Szbb
940289551Szbb	return (0);
941289550Szbb}
942289550Szbb
943289551Szbbstatic void
944289551Szbbnicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
945289550Szbb{
946289551Szbb
947289551Szbb	if (cq == NULL)
948289550Szbb		return;
949289551Szbb	/*
950289551Szbb	 * The completion queue itself should be disabled by now
951289551Szbb	 * (ref. nicvf_snd_queue_config()).
952289551Szbb	 * Ensure that it is safe to disable it or panic.
953289551Szbb	 */
954289551Szbb	if (cq->enable)
955289551Szbb		panic("%s: Trying to free working CQ(%d)", __func__, cq->idx);
956289550Szbb
957289551Szbb	if (cq->cmp_taskq != NULL) {
958289551Szbb		/* Remove task */
959289551Szbb		while (taskqueue_cancel(cq->cmp_taskq, &cq->cmp_task, NULL) != 0)
960289551Szbb			taskqueue_drain(cq->cmp_taskq, &cq->cmp_task);
961289551Szbb
962289551Szbb		taskqueue_free(cq->cmp_taskq);
963289551Szbb		cq->cmp_taskq = NULL;
964289551Szbb	}
965289551Szbb	/*
966289551Szbb	 * Completion interrupt will possibly enable interrupts again
967289551Szbb	 * so disable interrupting now after we finished processing
968289551Szbb	 * completion task. It is safe to do so since the corresponding CQ
969289551Szbb	 * was already disabled.
970289551Szbb	 */
971289551Szbb	nicvf_disable_intr(nic, NICVF_INTR_CQ, cq->idx);
972289551Szbb	nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
973289551Szbb
974289551Szbb	NICVF_CMP_LOCK(cq);
975289550Szbb	nicvf_free_q_desc_mem(nic, &cq->dmem);
976289551Szbb	drbr_free(cq->rx_br, M_DEVBUF);
977289551Szbb	NICVF_CMP_UNLOCK(cq);
978289551Szbb	mtx_destroy(&cq->mtx);
979289551Szbb	memset(cq->mtx_name, 0, sizeof(cq->mtx_name));
980289550Szbb}
981289550Szbb
982297450Szbbint
983297450Szbbnicvf_xmit_locked(struct snd_queue *sq)
984297450Szbb{
985297450Szbb	struct nicvf *nic;
986297450Szbb	struct ifnet *ifp;
987297450Szbb	struct mbuf *next;
988297450Szbb	int err;
989297450Szbb
990297450Szbb	NICVF_TX_LOCK_ASSERT(sq);
991297450Szbb
992297450Szbb	nic = sq->nic;
993297450Szbb	ifp = nic->ifp;
994297450Szbb	err = 0;
995297450Szbb
996297450Szbb	while ((next = drbr_peek(ifp, sq->br)) != NULL) {
997325916Semaste		/* Send a copy of the frame to the BPF listener */
998325916Semaste		ETHER_BPF_MTAP(ifp, next);
999325916Semaste
1000297450Szbb		err = nicvf_tx_mbuf_locked(sq, &next);
1001297450Szbb		if (err != 0) {
1002297450Szbb			if (next == NULL)
1003297450Szbb				drbr_advance(ifp, sq->br);
1004297450Szbb			else
1005297450Szbb				drbr_putback(ifp, sq->br, next);
1006297450Szbb
1007297450Szbb			break;
1008297450Szbb		}
1009297450Szbb		drbr_advance(ifp, sq->br);
1010297450Szbb	}
1011297450Szbb	return (err);
1012297450Szbb}
1013297450Szbb
1014289551Szbbstatic void
1015289551Szbbnicvf_snd_task(void *arg, int pending)
1016289551Szbb{
1017289551Szbb	struct snd_queue *sq = (struct snd_queue *)arg;
1018297450Szbb	struct nicvf *nic;
1019297450Szbb	struct ifnet *ifp;
1020297450Szbb	int err;
1021289551Szbb
1022297450Szbb	nic = sq->nic;
1023297450Szbb	ifp = nic->ifp;
1024297450Szbb
1025297450Szbb	/*
1026297450Szbb	 * Skip sending anything if the driver is not running,
1027297450Szbb	 * SQ full or link is down.
1028297450Szbb	 */
1029297450Szbb	if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1030297450Szbb	    IFF_DRV_RUNNING) || !nic->link_up)
1031297450Szbb		return;
1032297450Szbb
1033289551Szbb	NICVF_TX_LOCK(sq);
1034297450Szbb	err = nicvf_xmit_locked(sq);
1035289551Szbb	NICVF_TX_UNLOCK(sq);
1036297450Szbb	/* Try again */
1037297450Szbb	if (err != 0)
1038297450Szbb		taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
1039289551Szbb}
1040289551Szbb
1041289550Szbb/* Initialize transmit queue */
1042289551Szbbstatic int
1043289551Szbbnicvf_init_snd_queue(struct nicvf *nic, struct snd_queue *sq, int q_len,
1044289551Szbb    int qidx)
1045289550Szbb{
1046289551Szbb	size_t i;
1047289550Szbb	int err;
1048289550Szbb
1049289551Szbb	/* Initizalize TX lock for this queue */
1050289551Szbb	snprintf(sq->mtx_name, sizeof(sq->mtx_name), "%s: SQ(%d) lock",
1051289551Szbb	    device_get_nameunit(nic->dev), qidx);
1052289551Szbb	mtx_init(&sq->mtx, sq->mtx_name, NULL, MTX_DEF);
1053289551Szbb
1054289551Szbb	NICVF_TX_LOCK(sq);
1055289551Szbb	/* Allocate buffer ring */
1056289551Szbb	sq->br = buf_ring_alloc(q_len / MIN_SQ_DESC_PER_PKT_XMIT, M_DEVBUF,
1057289551Szbb	    M_NOWAIT, &sq->mtx);
1058289551Szbb	if (sq->br == NULL) {
1059289551Szbb		device_printf(nic->dev,
1060289551Szbb		    "ERROR: Could not set up buf ring for SQ(%d)\n", qidx);
1061289551Szbb		err = ENOMEM;
1062289551Szbb		goto error;
1063289551Szbb	}
1064289551Szbb
1065289551Szbb	/* Allocate DMA memory for Tx descriptors */
1066289550Szbb	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
1067289550Szbb				     NICVF_SQ_BASE_ALIGN_BYTES);
1068289551Szbb	if (err != 0) {
1069289551Szbb		device_printf(nic->dev,
1070289551Szbb		    "Could not allocate DMA memory for SQ\n");
1071289551Szbb		goto error;
1072289551Szbb	}
1073289550Szbb
1074289550Szbb	sq->desc = sq->dmem.base;
1075289551Szbb	sq->head = sq->tail = 0;
1076299446Szbb	atomic_store_rel_int(&sq->free_cnt, q_len - 1);
1077289550Szbb	sq->thresh = SND_QUEUE_THRESH;
1078289551Szbb	sq->idx = qidx;
1079289551Szbb	sq->nic = nic;
1080289550Szbb
1081289551Szbb	/*
1082289551Szbb	 * Allocate DMA maps for Tx buffers
1083289551Szbb	 */
1084289550Szbb
1085289551Szbb	/* Create DMA tag first */
1086289551Szbb	err = bus_dma_tag_create(
1087289551Szbb	    bus_get_dma_tag(nic->dev),		/* parent tag */
1088289551Szbb	    1,					/* alignment */
1089289551Szbb	    0,					/* boundary */
1090289551Szbb	    BUS_SPACE_MAXADDR,			/* lowaddr */
1091289551Szbb	    BUS_SPACE_MAXADDR,			/* highaddr */
1092289551Szbb	    NULL, NULL,				/* filtfunc, filtfuncarg */
1093296039Szbb	    NICVF_TSO_MAXSIZE,			/* maxsize */
1094296039Szbb	    NICVF_TSO_NSEGS,			/* nsegments */
1095289551Szbb	    MCLBYTES,				/* maxsegsize */
1096289551Szbb	    0,					/* flags */
1097289551Szbb	    NULL, NULL,				/* lockfunc, lockfuncarg */
1098289551Szbb	    &sq->snd_buff_dmat);		/* dmat */
1099289551Szbb
1100289551Szbb	if (err != 0) {
1101289551Szbb		device_printf(nic->dev,
1102289551Szbb		    "Failed to create busdma tag for Tx buffers\n");
1103289551Szbb		goto error;
1104289551Szbb	}
1105289551Szbb
1106289551Szbb	/* Allocate send buffers array */
1107289551Szbb	sq->snd_buff = malloc(sizeof(*sq->snd_buff) * q_len, M_NICVF,
1108289551Szbb	    (M_NOWAIT | M_ZERO));
1109289551Szbb	if (sq->snd_buff == NULL) {
1110289551Szbb		device_printf(nic->dev,
1111289551Szbb		    "Could not allocate memory for Tx buffers array\n");
1112289551Szbb		err = ENOMEM;
1113289551Szbb		goto error;
1114289551Szbb	}
1115289551Szbb
1116289551Szbb	/* Now populate maps */
1117289551Szbb	for (i = 0; i < q_len; i++) {
1118289551Szbb		err = bus_dmamap_create(sq->snd_buff_dmat, 0,
1119289551Szbb		    &sq->snd_buff[i].dmap);
1120289551Szbb		if (err != 0) {
1121289551Szbb			device_printf(nic->dev,
1122289551Szbb			    "Failed to create DMA maps for Tx buffers\n");
1123289551Szbb			goto error;
1124289551Szbb		}
1125289551Szbb	}
1126289551Szbb	NICVF_TX_UNLOCK(sq);
1127289551Szbb
1128289551Szbb	/* Allocate taskqueue */
1129289551Szbb	TASK_INIT(&sq->snd_task, 0, nicvf_snd_task, sq);
1130289551Szbb	sq->snd_taskq = taskqueue_create_fast("nicvf_snd_taskq", M_WAITOK,
1131289551Szbb	    taskqueue_thread_enqueue, &sq->snd_taskq);
1132289551Szbb	taskqueue_start_threads(&sq->snd_taskq, 1, PI_NET, "%s: snd_taskq(%d)",
1133289551Szbb	    device_get_nameunit(nic->dev), qidx);
1134289551Szbb
1135289551Szbb	return (0);
1136289551Szbberror:
1137289551Szbb	NICVF_TX_UNLOCK(sq);
1138289551Szbb	return (err);
1139289550Szbb}
1140289550Szbb
1141289551Szbbstatic void
1142289551Szbbnicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
1143289550Szbb{
1144289551Szbb	struct queue_set *qs = nic->qs;
1145289551Szbb	size_t i;
1146289551Szbb	int err;
1147289551Szbb
1148289551Szbb	if (sq == NULL)
1149289550Szbb		return;
1150289550Szbb
1151289551Szbb	if (sq->snd_taskq != NULL) {
1152289551Szbb		/* Remove task */
1153289551Szbb		while (taskqueue_cancel(sq->snd_taskq, &sq->snd_task, NULL) != 0)
1154289551Szbb			taskqueue_drain(sq->snd_taskq, &sq->snd_task);
1155289550Szbb
1156289551Szbb		taskqueue_free(sq->snd_taskq);
1157289551Szbb		sq->snd_taskq = NULL;
1158289551Szbb	}
1159289551Szbb
1160289551Szbb	NICVF_TX_LOCK(sq);
1161289551Szbb	if (sq->snd_buff_dmat != NULL) {
1162289551Szbb		if (sq->snd_buff != NULL) {
1163289551Szbb			for (i = 0; i < qs->sq_len; i++) {
1164289551Szbb				m_freem(sq->snd_buff[i].mbuf);
1165289551Szbb				sq->snd_buff[i].mbuf = NULL;
1166289551Szbb
1167289551Szbb				bus_dmamap_unload(sq->snd_buff_dmat,
1168289551Szbb				    sq->snd_buff[i].dmap);
1169289551Szbb				err = bus_dmamap_destroy(sq->snd_buff_dmat,
1170289551Szbb				    sq->snd_buff[i].dmap);
1171289551Szbb				/*
1172289551Szbb				 * If bus_dmamap_destroy fails it can cause
1173289551Szbb				 * random panic later if the tag is also
1174289551Szbb				 * destroyed in the process.
1175289551Szbb				 */
1176289551Szbb				KASSERT(err == 0,
1177289551Szbb				    ("%s: Could not destroy DMA map for SQ",
1178289551Szbb				    __func__));
1179289551Szbb			}
1180289551Szbb		}
1181289551Szbb
1182289551Szbb		free(sq->snd_buff, M_NICVF);
1183289551Szbb
1184289551Szbb		err = bus_dma_tag_destroy(sq->snd_buff_dmat);
1185289551Szbb		KASSERT(err == 0,
1186289551Szbb		    ("%s: Trying to destroy BUSY DMA tag", __func__));
1187289551Szbb	}
1188289551Szbb
1189289551Szbb	/* Free private driver ring for this send queue */
1190289551Szbb	if (sq->br != NULL)
1191289551Szbb		drbr_free(sq->br, M_DEVBUF);
1192289551Szbb
1193289551Szbb	if (sq->dmem.base != NULL)
1194289551Szbb		nicvf_free_q_desc_mem(nic, &sq->dmem);
1195289551Szbb
1196289551Szbb	NICVF_TX_UNLOCK(sq);
1197289551Szbb	/* Destroy Tx lock */
1198289551Szbb	mtx_destroy(&sq->mtx);
1199289551Szbb	memset(sq->mtx_name, 0, sizeof(sq->mtx_name));
1200289550Szbb}
1201289550Szbb
1202289551Szbbstatic void
1203289551Szbbnicvf_reclaim_snd_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
1204289550Szbb{
1205289551Szbb
1206289550Szbb	/* Disable send queue */
1207289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
1208289550Szbb	/* Check if SQ is stopped */
1209289550Szbb	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
1210289550Szbb		return;
1211289550Szbb	/* Reset send queue */
1212289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
1213289550Szbb}
1214289550Szbb
1215289551Szbbstatic void
1216289551Szbbnicvf_reclaim_rcv_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
1217289550Szbb{
1218289550Szbb	union nic_mbx mbx = {};
1219289550Szbb
1220289550Szbb	/* Make sure all packets in the pipeline are written back into mem */
1221289550Szbb	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
1222289550Szbb	nicvf_send_msg_to_pf(nic, &mbx);
1223289550Szbb}
1224289550Szbb
1225289551Szbbstatic void
1226289551Szbbnicvf_reclaim_cmp_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
1227289550Szbb{
1228289551Szbb
1229289550Szbb	/* Disable timer threshold (doesn't get reset upon CQ reset */
1230289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
1231289550Szbb	/* Disable completion queue */
1232289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
1233289550Szbb	/* Reset completion queue */
1234289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
1235289550Szbb}
1236289550Szbb
1237289551Szbbstatic void
1238289551Szbbnicvf_reclaim_rbdr(struct nicvf *nic, struct rbdr *rbdr, int qidx)
1239289550Szbb{
1240289551Szbb	uint64_t tmp, fifo_state;
1241289550Szbb	int timeout = 10;
1242289550Szbb
1243289550Szbb	/* Save head and tail pointers for feeing up buffers */
1244289551Szbb	rbdr->head =
1245289551Szbb	    nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
1246289551Szbb	rbdr->tail =
1247289551Szbb	    nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
1248289550Szbb
1249289551Szbb	/*
1250289551Szbb	 * If RBDR FIFO is in 'FAIL' state then do a reset first
1251289550Szbb	 * before relaiming.
1252289550Szbb	 */
1253289550Szbb	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
1254289551Szbb	if (((fifo_state >> 62) & 0x03) == 0x3) {
1255289550Szbb		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
1256289551Szbb		    qidx, NICVF_RBDR_RESET);
1257289551Szbb	}
1258289550Szbb
1259289550Szbb	/* Disable RBDR */
1260289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
1261289550Szbb	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
1262289550Szbb		return;
1263289550Szbb	while (1) {
1264289550Szbb		tmp = nicvf_queue_reg_read(nic,
1265289551Szbb		    NIC_QSET_RBDR_0_1_PREFETCH_STATUS, qidx);
1266289550Szbb		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
1267289550Szbb			break;
1268289551Szbb
1269289551Szbb		DELAY(1000);
1270289550Szbb		timeout--;
1271289550Szbb		if (!timeout) {
1272289551Szbb			device_printf(nic->dev,
1273289551Szbb			    "Failed polling on prefetch status\n");
1274289550Szbb			return;
1275289550Szbb		}
1276289550Szbb	}
1277289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
1278289551Szbb	    NICVF_RBDR_RESET);
1279289550Szbb
1280289550Szbb	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
1281289550Szbb		return;
1282289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
1283289550Szbb	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
1284289550Szbb		return;
1285289550Szbb}
1286289550Szbb
1287289550Szbb/* Configures receive queue */
1288289551Szbbstatic void
1289289551Szbbnicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
1290289551Szbb    int qidx, bool enable)
1291289550Szbb{
1292289550Szbb	union nic_mbx mbx = {};
1293289550Szbb	struct rcv_queue *rq;
1294289550Szbb	struct rq_cfg rq_cfg;
1295296031Szbb	struct ifnet *ifp;
1296296031Szbb	struct lro_ctrl	*lro;
1297289550Szbb
1298296031Szbb	ifp = nic->ifp;
1299296031Szbb
1300289550Szbb	rq = &qs->rq[qidx];
1301289550Szbb	rq->enable = enable;
1302289550Szbb
1303296031Szbb	lro = &rq->lro;
1304296031Szbb
1305289550Szbb	/* Disable receive queue */
1306289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
1307289550Szbb
1308289550Szbb	if (!rq->enable) {
1309289550Szbb		nicvf_reclaim_rcv_queue(nic, qs, qidx);
1310296031Szbb		/* Free LRO memory */
1311296031Szbb		tcp_lro_free(lro);
1312296031Szbb		rq->lro_enabled = FALSE;
1313289550Szbb		return;
1314289550Szbb	}
1315289550Szbb
1316296031Szbb	/* Configure LRO if enabled */
1317296031Szbb	rq->lro_enabled = FALSE;
1318296031Szbb	if ((if_getcapenable(ifp) & IFCAP_LRO) != 0) {
1319296031Szbb		if (tcp_lro_init(lro) != 0) {
1320296031Szbb			device_printf(nic->dev,
1321296031Szbb			    "Failed to initialize LRO for RXQ%d\n", qidx);
1322296031Szbb		} else {
1323296031Szbb			rq->lro_enabled = TRUE;
1324296031Szbb			lro->ifp = nic->ifp;
1325296031Szbb		}
1326296031Szbb	}
1327296031Szbb
1328289550Szbb	rq->cq_qs = qs->vnic_id;
1329289550Szbb	rq->cq_idx = qidx;
1330289550Szbb	rq->start_rbdr_qs = qs->vnic_id;
1331289550Szbb	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
1332289550Szbb	rq->cont_rbdr_qs = qs->vnic_id;
1333289550Szbb	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
1334289550Szbb	/* all writes of RBDR data to be loaded into L2 Cache as well*/
1335289550Szbb	rq->caching = 1;
1336289550Szbb
1337289550Szbb	/* Send a mailbox msg to PF to config RQ */
1338289550Szbb	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
1339289550Szbb	mbx.rq.qs_num = qs->vnic_id;
1340289550Szbb	mbx.rq.rq_num = qidx;
1341289550Szbb	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
1342289551Szbb	    (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
1343289551Szbb	    (rq->cont_qs_rbdr_idx << 8) | (rq->start_rbdr_qs << 1) |
1344289551Szbb	    (rq->start_qs_rbdr_idx);
1345289550Szbb	nicvf_send_msg_to_pf(nic, &mbx);
1346289550Szbb
1347289550Szbb	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
1348289551Szbb	mbx.rq.cfg = (1UL << 63) | (1UL << 62) | (qs->vnic_id << 0);
1349289550Szbb	nicvf_send_msg_to_pf(nic, &mbx);
1350289550Szbb
1351289551Szbb	/*
1352289551Szbb	 * RQ drop config
1353289550Szbb	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
1354289550Szbb	 */
1355289550Szbb	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
1356289551Szbb	mbx.rq.cfg = (1UL << 62) | (RQ_CQ_DROP << 8);
1357289550Szbb	nicvf_send_msg_to_pf(nic, &mbx);
1358289550Szbb
1359289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
1360289550Szbb
1361289550Szbb	/* Enable Receive queue */
1362289550Szbb	rq_cfg.ena = 1;
1363289550Szbb	rq_cfg.tcp_ena = 0;
1364289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx,
1365289551Szbb	    *(uint64_t *)&rq_cfg);
1366289550Szbb}
1367289550Szbb
1368289550Szbb/* Configures completion queue */
1369289551Szbbstatic void
1370289551Szbbnicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
1371289551Szbb    int qidx, boolean_t enable)
1372289550Szbb{
1373289550Szbb	struct cmp_queue *cq;
1374289550Szbb	struct cq_cfg cq_cfg;
1375289550Szbb
1376289550Szbb	cq = &qs->cq[qidx];
1377289550Szbb	cq->enable = enable;
1378289550Szbb
1379289550Szbb	if (!cq->enable) {
1380289550Szbb		nicvf_reclaim_cmp_queue(nic, qs, qidx);
1381289550Szbb		return;
1382289550Szbb	}
1383289550Szbb
1384289550Szbb	/* Reset completion queue */
1385289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
1386289550Szbb
1387289550Szbb	/* Set completion queue base address */
1388289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx,
1389289551Szbb	    (uint64_t)(cq->dmem.phys_base));
1390289550Szbb
1391289550Szbb	/* Enable Completion queue */
1392289550Szbb	cq_cfg.ena = 1;
1393289550Szbb	cq_cfg.reset = 0;
1394289550Szbb	cq_cfg.caching = 0;
1395289550Szbb	cq_cfg.qsize = CMP_QSIZE;
1396289550Szbb	cq_cfg.avg_con = 0;
1397289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(uint64_t *)&cq_cfg);
1398289550Szbb
1399289550Szbb	/* Set threshold value for interrupt generation */
1400289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
1401289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx,
1402289551Szbb	    nic->cq_coalesce_usecs);
1403289550Szbb}
1404289550Szbb
1405289550Szbb/* Configures transmit queue */
1406289551Szbbstatic void
1407289551Szbbnicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx,
1408289551Szbb    boolean_t enable)
1409289550Szbb{
1410289550Szbb	union nic_mbx mbx = {};
1411289550Szbb	struct snd_queue *sq;
1412289550Szbb	struct sq_cfg sq_cfg;
1413289550Szbb
1414289550Szbb	sq = &qs->sq[qidx];
1415289550Szbb	sq->enable = enable;
1416289550Szbb
1417289550Szbb	if (!sq->enable) {
1418289550Szbb		nicvf_reclaim_snd_queue(nic, qs, qidx);
1419289550Szbb		return;
1420289550Szbb	}
1421289550Szbb
1422289550Szbb	/* Reset send queue */
1423289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
1424289550Szbb
1425289550Szbb	sq->cq_qs = qs->vnic_id;
1426289550Szbb	sq->cq_idx = qidx;
1427289550Szbb
1428289550Szbb	/* Send a mailbox msg to PF to config SQ */
1429289550Szbb	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
1430289550Szbb	mbx.sq.qs_num = qs->vnic_id;
1431289550Szbb	mbx.sq.sq_num = qidx;
1432289550Szbb	mbx.sq.sqs_mode = nic->sqs_mode;
1433289550Szbb	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
1434289550Szbb	nicvf_send_msg_to_pf(nic, &mbx);
1435289550Szbb
1436289550Szbb	/* Set queue base address */
1437289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx,
1438289551Szbb	    (uint64_t)(sq->dmem.phys_base));
1439289550Szbb
1440289550Szbb	/* Enable send queue  & set queue size */
1441289550Szbb	sq_cfg.ena = 1;
1442289550Szbb	sq_cfg.reset = 0;
1443289550Szbb	sq_cfg.ldwb = 0;
1444289550Szbb	sq_cfg.qsize = SND_QSIZE;
1445289550Szbb	sq_cfg.tstmp_bgx_intf = 0;
1446289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(uint64_t *)&sq_cfg);
1447289550Szbb
1448289550Szbb	/* Set threshold value for interrupt generation */
1449289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
1450289550Szbb}
1451289550Szbb
1452289550Szbb/* Configures receive buffer descriptor ring */
1453289551Szbbstatic void
1454289551Szbbnicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, int qidx,
1455289551Szbb    boolean_t enable)
1456289550Szbb{
1457289550Szbb	struct rbdr *rbdr;
1458289550Szbb	struct rbdr_cfg rbdr_cfg;
1459289550Szbb
1460289550Szbb	rbdr = &qs->rbdr[qidx];
1461289550Szbb	nicvf_reclaim_rbdr(nic, rbdr, qidx);
1462289550Szbb	if (!enable)
1463289550Szbb		return;
1464289550Szbb
1465289550Szbb	/* Set descriptor base address */
1466289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx,
1467289551Szbb	    (uint64_t)(rbdr->dmem.phys_base));
1468289550Szbb
1469289550Szbb	/* Enable RBDR  & set queue size */
1470289550Szbb	/* Buffer size should be in multiples of 128 bytes */
1471289550Szbb	rbdr_cfg.ena = 1;
1472289550Szbb	rbdr_cfg.reset = 0;
1473289550Szbb	rbdr_cfg.ldwb = 0;
1474289550Szbb	rbdr_cfg.qsize = RBDR_SIZE;
1475289550Szbb	rbdr_cfg.avg_con = 0;
1476289550Szbb	rbdr_cfg.lines = rbdr->dma_size / 128;
1477289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
1478289551Szbb	    *(uint64_t *)&rbdr_cfg);
1479289550Szbb
1480289550Szbb	/* Notify HW */
1481289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, qidx,
1482289551Szbb	    qs->rbdr_len - 1);
1483289550Szbb
1484289550Szbb	/* Set threshold value for interrupt generation */
1485289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, qidx,
1486289551Szbb	    rbdr->thresh - 1);
1487289550Szbb}
1488289550Szbb
1489289550Szbb/* Requests PF to assign and enable Qset */
1490289551Szbbvoid
1491289551Szbbnicvf_qset_config(struct nicvf *nic, boolean_t enable)
1492289550Szbb{
1493289550Szbb	union nic_mbx mbx = {};
1494289551Szbb	struct queue_set *qs;
1495289550Szbb	struct qs_cfg *qs_cfg;
1496289550Szbb
1497289551Szbb	qs = nic->qs;
1498289551Szbb	if (qs == NULL) {
1499289551Szbb		device_printf(nic->dev,
1500289551Szbb		    "Qset is still not allocated, don't init queues\n");
1501289550Szbb		return;
1502289550Szbb	}
1503289550Szbb
1504289550Szbb	qs->enable = enable;
1505289550Szbb	qs->vnic_id = nic->vf_id;
1506289550Szbb
1507289550Szbb	/* Send a mailbox msg to PF to config Qset */
1508289550Szbb	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
1509289550Szbb	mbx.qs.num = qs->vnic_id;
1510289550Szbb
1511289550Szbb	mbx.qs.cfg = 0;
1512289550Szbb	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
1513289550Szbb	if (qs->enable) {
1514289550Szbb		qs_cfg->ena = 1;
1515289550Szbb		qs_cfg->vnic = qs->vnic_id;
1516289550Szbb	}
1517289550Szbb	nicvf_send_msg_to_pf(nic, &mbx);
1518289550Szbb}
1519289550Szbb
1520289551Szbbstatic void
1521289551Szbbnicvf_free_resources(struct nicvf *nic)
1522289550Szbb{
1523289550Szbb	int qidx;
1524289551Szbb	struct queue_set *qs;
1525289550Szbb
1526289551Szbb	qs = nic->qs;
1527289551Szbb	/*
1528289551Szbb	 * Remove QS error task first since it has to be dead
1529289551Szbb	 * to safely free completion queue tasks.
1530289551Szbb	 */
1531289551Szbb	if (qs->qs_err_taskq != NULL) {
1532289551Szbb		/* Shut down QS error tasks */
1533289551Szbb		while (taskqueue_cancel(qs->qs_err_taskq,
1534289551Szbb		    &qs->qs_err_task,  NULL) != 0) {
1535289551Szbb			taskqueue_drain(qs->qs_err_taskq, &qs->qs_err_task);
1536289551Szbb
1537289551Szbb		}
1538289551Szbb		taskqueue_free(qs->qs_err_taskq);
1539289551Szbb		qs->qs_err_taskq = NULL;
1540289551Szbb	}
1541289550Szbb	/* Free receive buffer descriptor ring */
1542289550Szbb	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1543289550Szbb		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
1544289550Szbb
1545289550Szbb	/* Free completion queue */
1546289550Szbb	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1547289550Szbb		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
1548289550Szbb
1549289550Szbb	/* Free send queue */
1550289550Szbb	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1551289550Szbb		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
1552289550Szbb}
1553289550Szbb
1554289551Szbbstatic int
1555289551Szbbnicvf_alloc_resources(struct nicvf *nic)
1556289550Szbb{
1557289551Szbb	struct queue_set *qs = nic->qs;
1558289550Szbb	int qidx;
1559289550Szbb
1560289550Szbb	/* Alloc receive buffer descriptor ring */
1561289550Szbb	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
1562289550Szbb		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
1563289551Szbb				    DMA_BUFFER_LEN, qidx))
1564289550Szbb			goto alloc_fail;
1565289550Szbb	}
1566289550Szbb
1567289550Szbb	/* Alloc send queue */
1568289550Szbb	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
1569289551Szbb		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
1570289550Szbb			goto alloc_fail;
1571289550Szbb	}
1572289550Szbb
1573289550Szbb	/* Alloc completion queue */
1574289550Szbb	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
1575289551Szbb		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len, qidx))
1576289550Szbb			goto alloc_fail;
1577289550Szbb	}
1578289550Szbb
1579289551Szbb	/* Allocate QS error taskqueue */
1580289551Szbb	TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic);
1581289551Szbb	qs->qs_err_taskq = taskqueue_create_fast("nicvf_qs_err_taskq", M_WAITOK,
1582289551Szbb	    taskqueue_thread_enqueue, &qs->qs_err_taskq);
1583289551Szbb	taskqueue_start_threads(&qs->qs_err_taskq, 1, PI_NET, "%s: qs_taskq",
1584289551Szbb	    device_get_nameunit(nic->dev));
1585289551Szbb
1586289551Szbb	return (0);
1587289550Szbballoc_fail:
1588289550Szbb	nicvf_free_resources(nic);
1589289551Szbb	return (ENOMEM);
1590289550Szbb}
1591289550Szbb
1592289551Szbbint
1593289551Szbbnicvf_set_qset_resources(struct nicvf *nic)
1594289550Szbb{
1595289550Szbb	struct queue_set *qs;
1596289550Szbb
1597289551Szbb	qs = malloc(sizeof(*qs), M_NICVF, (M_ZERO | M_WAITOK));
1598289550Szbb	nic->qs = qs;
1599289550Szbb
1600289550Szbb	/* Set count of each queue */
1601289550Szbb	qs->rbdr_cnt = RBDR_CNT;
1602299444Szbb	qs->rq_cnt = RCV_QUEUE_CNT;
1603289551Szbb
1604289550Szbb	qs->sq_cnt = SND_QUEUE_CNT;
1605289550Szbb	qs->cq_cnt = CMP_QUEUE_CNT;
1606289550Szbb
1607289550Szbb	/* Set queue lengths */
1608289550Szbb	qs->rbdr_len = RCV_BUF_COUNT;
1609289550Szbb	qs->sq_len = SND_QUEUE_LEN;
1610289550Szbb	qs->cq_len = CMP_QUEUE_LEN;
1611289550Szbb
1612289550Szbb	nic->rx_queues = qs->rq_cnt;
1613289550Szbb	nic->tx_queues = qs->sq_cnt;
1614289550Szbb
1615289551Szbb	return (0);
1616289550Szbb}
1617289550Szbb
1618289551Szbbint
1619289551Szbbnicvf_config_data_transfer(struct nicvf *nic, boolean_t enable)
1620289550Szbb{
1621289551Szbb	boolean_t disable = FALSE;
1622289551Szbb	struct queue_set *qs;
1623289550Szbb	int qidx;
1624289550Szbb
1625289551Szbb	qs = nic->qs;
1626289551Szbb	if (qs == NULL)
1627289551Szbb		return (0);
1628289550Szbb
1629289550Szbb	if (enable) {
1630289551Szbb		if (nicvf_alloc_resources(nic) != 0)
1631289551Szbb			return (ENOMEM);
1632289550Szbb
1633289550Szbb		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1634289550Szbb			nicvf_snd_queue_config(nic, qs, qidx, enable);
1635289550Szbb		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1636289550Szbb			nicvf_cmp_queue_config(nic, qs, qidx, enable);
1637289550Szbb		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1638289550Szbb			nicvf_rbdr_config(nic, qs, qidx, enable);
1639289550Szbb		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1640289550Szbb			nicvf_rcv_queue_config(nic, qs, qidx, enable);
1641289550Szbb	} else {
1642289550Szbb		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1643289550Szbb			nicvf_rcv_queue_config(nic, qs, qidx, disable);
1644289550Szbb		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1645289550Szbb			nicvf_rbdr_config(nic, qs, qidx, disable);
1646289550Szbb		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1647289550Szbb			nicvf_snd_queue_config(nic, qs, qidx, disable);
1648289550Szbb		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1649289550Szbb			nicvf_cmp_queue_config(nic, qs, qidx, disable);
1650289550Szbb
1651289550Szbb		nicvf_free_resources(nic);
1652289550Szbb	}
1653289550Szbb
1654289551Szbb	return (0);
1655289550Szbb}
1656289550Szbb
1657289551Szbb/*
1658289551Szbb * Get a free desc from SQ
1659289550Szbb * returns descriptor ponter & descriptor number
1660289550Szbb */
1661289551Szbbstatic __inline int
1662289551Szbbnicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
1663289550Szbb{
1664289550Szbb	int qentry;
1665289550Szbb
1666289550Szbb	qentry = sq->tail;
1667299446Szbb	atomic_subtract_int(&sq->free_cnt, desc_cnt);
1668289550Szbb	sq->tail += desc_cnt;
1669289550Szbb	sq->tail &= (sq->dmem.q_len - 1);
1670289550Szbb
1671289551Szbb	return (qentry);
1672289550Szbb}
1673289550Szbb
1674289550Szbb/* Free descriptor back to SQ for future use */
1675289551Szbbstatic void
1676289551Szbbnicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
1677289550Szbb{
1678289551Szbb
1679299446Szbb	atomic_add_int(&sq->free_cnt, desc_cnt);
1680289550Szbb	sq->head += desc_cnt;
1681289550Szbb	sq->head &= (sq->dmem.q_len - 1);
1682289550Szbb}
1683289550Szbb
1684289551Szbbstatic __inline int
1685289551Szbbnicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
1686289550Szbb{
1687289550Szbb	qentry++;
1688289550Szbb	qentry &= (sq->dmem.q_len - 1);
1689289551Szbb	return (qentry);
1690289550Szbb}
1691289550Szbb
1692289551Szbbstatic void
1693289551Szbbnicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
1694289550Szbb{
1695289551Szbb	uint64_t sq_cfg;
1696289550Szbb
1697289550Szbb	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
1698289550Szbb	sq_cfg |= NICVF_SQ_EN;
1699289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
1700289550Szbb	/* Ring doorbell so that H/W restarts processing SQEs */
1701289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
1702289550Szbb}
1703289550Szbb
1704289551Szbbstatic void
1705289551Szbbnicvf_sq_disable(struct nicvf *nic, int qidx)
1706289550Szbb{
1707289551Szbb	uint64_t sq_cfg;
1708289550Szbb
1709289550Szbb	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
1710289550Szbb	sq_cfg &= ~NICVF_SQ_EN;
1711289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
1712289550Szbb}
1713289550Szbb
1714289551Szbbstatic void
1715289551Szbbnicvf_sq_free_used_descs(struct nicvf *nic, struct snd_queue *sq, int qidx)
1716289550Szbb{
1717289551Szbb	uint64_t head, tail;
1718289551Szbb	struct snd_buff *snd_buff;
1719289550Szbb	struct sq_hdr_subdesc *hdr;
1720289550Szbb
1721289551Szbb	NICVF_TX_LOCK(sq);
1722289550Szbb	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
1723289550Szbb	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
1724289550Szbb	while (sq->head != head) {
1725289550Szbb		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
1726289550Szbb		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
1727289550Szbb			nicvf_put_sq_desc(sq, 1);
1728289550Szbb			continue;
1729289550Szbb		}
1730289551Szbb		snd_buff = &sq->snd_buff[sq->head];
1731289551Szbb		if (snd_buff->mbuf != NULL) {
1732289551Szbb			bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
1733289551Szbb			m_freem(snd_buff->mbuf);
1734289551Szbb			sq->snd_buff[sq->head].mbuf = NULL;
1735289551Szbb		}
1736289550Szbb		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
1737289550Szbb	}
1738289551Szbb	NICVF_TX_UNLOCK(sq);
1739289550Szbb}
1740289550Szbb
1741289551Szbb/*
1742289551Szbb * Add SQ HEADER subdescriptor.
1743289550Szbb * First subdescriptor for every send descriptor.
1744289550Szbb */
1745296030Szbbstatic __inline int
1746289550Szbbnicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
1747289551Szbb			 int subdesc_cnt, struct mbuf *mbuf, int len)
1748289550Szbb{
1749296039Szbb	struct nicvf *nic;
1750289550Szbb	struct sq_hdr_subdesc *hdr;
1751296030Szbb	struct ether_vlan_header *eh;
1752296030Szbb#ifdef INET
1753296030Szbb	struct ip *ip;
1754296039Szbb	struct tcphdr *th;
1755296030Szbb#endif
1756296030Szbb	uint16_t etype;
1757326062Semaste	int ehdrlen, iphlen, poff, proto;
1758289550Szbb
1759296039Szbb	nic = sq->nic;
1760296039Szbb
1761289550Szbb	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
1762289551Szbb	sq->snd_buff[qentry].mbuf = mbuf;
1763289550Szbb
1764289550Szbb	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
1765289550Szbb	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
1766289550Szbb	/* Enable notification via CQE after processing SQE */
1767289550Szbb	hdr->post_cqe = 1;
1768289550Szbb	/* No of subdescriptors following this */
1769289550Szbb	hdr->subdesc_cnt = subdesc_cnt;
1770289550Szbb	hdr->tot_len = len;
1771289550Szbb
1772296039Szbb	eh = mtod(mbuf, struct ether_vlan_header *);
1773296039Szbb	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1774296039Szbb		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1775296039Szbb		etype = ntohs(eh->evl_proto);
1776296039Szbb	} else {
1777296039Szbb		ehdrlen = ETHER_HDR_LEN;
1778296039Szbb		etype = ntohs(eh->evl_encap_proto);
1779296039Szbb	}
1780296030Szbb
1781326062Semaste	poff = proto = -1;
1782296039Szbb	switch (etype) {
1783296039Szbb#ifdef INET6
1784296039Szbb	case ETHERTYPE_IPV6:
1785326062Semaste		if (mbuf->m_len < ehdrlen + sizeof(struct ip6_hdr)) {
1786326062Semaste			mbuf = m_pullup(mbuf, ehdrlen +sizeof(struct ip6_hdr));
1787326062Semaste			sq->snd_buff[qentry].mbuf = NULL;
1788326062Semaste			if (mbuf == NULL)
1789326062Semaste				return (ENOBUFS);
1790326062Semaste		}
1791326062Semaste		poff = ip6_lasthdr(mbuf, ehdrlen, IPPROTO_IPV6, &proto);
1792326062Semaste		if (poff < 0)
1793326062Semaste			return (ENOBUFS);
1794326062Semaste		poff += ehdrlen;
1795326062Semaste		break;
1796296039Szbb#endif
1797296039Szbb#ifdef INET
1798296039Szbb	case ETHERTYPE_IP:
1799296030Szbb		if (mbuf->m_len < ehdrlen + sizeof(struct ip)) {
1800296030Szbb			mbuf = m_pullup(mbuf, ehdrlen + sizeof(struct ip));
1801296030Szbb			sq->snd_buff[qentry].mbuf = mbuf;
1802296030Szbb			if (mbuf == NULL)
1803296030Szbb				return (ENOBUFS);
1804296030Szbb		}
1805326788Semaste		if (mbuf->m_pkthdr.csum_flags & CSUM_IP)
1806326788Semaste			hdr->csum_l3 = 1; /* Enable IP csum calculation */
1807296030Szbb
1808296039Szbb		ip = (struct ip *)(mbuf->m_data + ehdrlen);
1809296039Szbb		iphlen = ip->ip_hl << 2;
1810296039Szbb		poff = ehdrlen + iphlen;
1811326062Semaste		proto = ip->ip_p;
1812326062Semaste		break;
1813326062Semaste#endif
1814326062Semaste	}
1815296030Szbb
1816326062Semaste#if defined(INET6) || defined(INET)
1817326062Semaste	if (poff > 0 && mbuf->m_pkthdr.csum_flags != 0) {
1818326062Semaste		switch (proto) {
1819326062Semaste		case IPPROTO_TCP:
1820326062Semaste			if ((mbuf->m_pkthdr.csum_flags & CSUM_TCP) == 0)
1821326062Semaste				break;
1822296030Szbb
1823326062Semaste			if (mbuf->m_len < (poff + sizeof(struct tcphdr))) {
1824326062Semaste				mbuf = m_pullup(mbuf, poff + sizeof(struct tcphdr));
1825326062Semaste				sq->snd_buff[qentry].mbuf = mbuf;
1826326062Semaste				if (mbuf == NULL)
1827326062Semaste					return (ENOBUFS);
1828326062Semaste			}
1829326062Semaste			hdr->csum_l4 = SEND_L4_CSUM_TCP;
1830326062Semaste			break;
1831326062Semaste		case IPPROTO_UDP:
1832326062Semaste			if ((mbuf->m_pkthdr.csum_flags & CSUM_UDP) == 0)
1833296030Szbb				break;
1834296030Szbb
1835326062Semaste			if (mbuf->m_len < (poff + sizeof(struct udphdr))) {
1836326062Semaste				mbuf = m_pullup(mbuf, poff + sizeof(struct udphdr));
1837326062Semaste				sq->snd_buff[qentry].mbuf = mbuf;
1838326062Semaste				if (mbuf == NULL)
1839326062Semaste					return (ENOBUFS);
1840326062Semaste			}
1841326062Semaste			hdr->csum_l4 = SEND_L4_CSUM_UDP;
1842326062Semaste			break;
1843326062Semaste		case IPPROTO_SCTP:
1844326062Semaste			if ((mbuf->m_pkthdr.csum_flags & CSUM_SCTP) == 0)
1845296030Szbb				break;
1846296030Szbb
1847326062Semaste			if (mbuf->m_len < (poff + sizeof(struct sctphdr))) {
1848326062Semaste				mbuf = m_pullup(mbuf, poff + sizeof(struct sctphdr));
1849326062Semaste				sq->snd_buff[qentry].mbuf = mbuf;
1850326062Semaste				if (mbuf == NULL)
1851326062Semaste					return (ENOBUFS);
1852296030Szbb			}
1853326062Semaste			hdr->csum_l4 = SEND_L4_CSUM_SCTP;
1854326062Semaste			break;
1855326062Semaste		default:
1856326062Semaste			break;
1857296030Szbb		}
1858326062Semaste		hdr->l3_offset = ehdrlen;
1859326062Semaste		hdr->l4_offset = poff;
1860326062Semaste	}
1861296030Szbb
1862326062Semaste	if ((mbuf->m_pkthdr.tso_segsz != 0) && nic->hw_tso) {
1863326062Semaste		th = (struct tcphdr *)((caddr_t)(mbuf->m_data + poff));
1864296039Szbb
1865326062Semaste		hdr->tso = 1;
1866326062Semaste		hdr->tso_start = poff + (th->th_off * 4);
1867326062Semaste		hdr->tso_max_paysize = mbuf->m_pkthdr.tso_segsz;
1868326062Semaste		hdr->inner_l3_offset = ehdrlen - 2;
1869326062Semaste		nic->drv_stats.tx_tso++;
1870326062Semaste	}
1871296039Szbb#endif
1872296030Szbb
1873296030Szbb	return (0);
1874289550Szbb}
1875289550Szbb
1876289551Szbb/*
1877289551Szbb * SQ GATHER subdescriptor
1878289550Szbb * Must follow HDR descriptor
1879289550Szbb */
1880289550Szbbstatic inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
1881289551Szbb					       int size, uint64_t data)
1882289550Szbb{
1883289550Szbb	struct sq_gather_subdesc *gather;
1884289550Szbb
1885289550Szbb	qentry &= (sq->dmem.q_len - 1);
1886289550Szbb	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
1887289550Szbb
1888289550Szbb	memset(gather, 0, SND_QUEUE_DESC_SIZE);
1889289550Szbb	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
1890289550Szbb	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
1891289550Szbb	gather->size = size;
1892289550Szbb	gather->addr = data;
1893289550Szbb}
1894289550Szbb
1895289551Szbb/* Put an mbuf to a SQ for packet transfer. */
1896297450Szbbstatic int
1897297450Szbbnicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf **mbufp)
1898289550Szbb{
1899289551Szbb	bus_dma_segment_t segs[256];
1900289551Szbb	struct snd_buff *snd_buff;
1901289551Szbb	size_t seg;
1902289551Szbb	int nsegs, qentry;
1903296039Szbb	int subdesc_cnt;
1904289551Szbb	int err;
1905289550Szbb
1906289551Szbb	NICVF_TX_LOCK_ASSERT(sq);
1907289551Szbb
1908289551Szbb	if (sq->free_cnt == 0)
1909289551Szbb		return (ENOBUFS);
1910289551Szbb
1911289551Szbb	snd_buff = &sq->snd_buff[sq->tail];
1912289551Szbb
1913289551Szbb	err = bus_dmamap_load_mbuf_sg(sq->snd_buff_dmat, snd_buff->dmap,
1914297450Szbb	    *mbufp, segs, &nsegs, BUS_DMA_NOWAIT);
1915297450Szbb	if (__predict_false(err != 0)) {
1916289551Szbb		/* ARM64TODO: Add mbuf defragmenting if we lack maps */
1917297450Szbb		m_freem(*mbufp);
1918297450Szbb		*mbufp = NULL;
1919289551Szbb		return (err);
1920289550Szbb	}
1921289550Szbb
1922289551Szbb	/* Set how many subdescriptors is required */
1923297721Szbb	subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT + nsegs - 1;
1924289551Szbb	if (subdesc_cnt > sq->free_cnt) {
1925289551Szbb		/* ARM64TODO: Add mbuf defragmentation if we lack descriptors */
1926289551Szbb		bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
1927289551Szbb		return (ENOBUFS);
1928289551Szbb	}
1929289550Szbb
1930289550Szbb	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
1931289550Szbb
1932289550Szbb	/* Add SQ header subdesc */
1933297450Szbb	err = nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, *mbufp,
1934297450Szbb	    (*mbufp)->m_pkthdr.len);
1935296030Szbb	if (err != 0) {
1936297450Szbb		nicvf_put_sq_desc(sq, subdesc_cnt);
1937296030Szbb		bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
1938297450Szbb		if (err == ENOBUFS) {
1939297450Szbb			m_freem(*mbufp);
1940297450Szbb			*mbufp = NULL;
1941297450Szbb		}
1942296030Szbb		return (err);
1943296030Szbb	}
1944289550Szbb
1945289550Szbb	/* Add SQ gather subdescs */
1946289551Szbb	for (seg = 0; seg < nsegs; seg++) {
1947289550Szbb		qentry = nicvf_get_nxt_sqentry(sq, qentry);
1948289551Szbb		nicvf_sq_add_gather_subdesc(sq, qentry, segs[seg].ds_len,
1949289551Szbb		    segs[seg].ds_addr);
1950289550Szbb	}
1951289550Szbb
1952289550Szbb	/* make sure all memory stores are done before ringing doorbell */
1953289551Szbb	bus_dmamap_sync(sq->dmem.dmat, sq->dmem.dmap, BUS_DMASYNC_PREWRITE);
1954289550Szbb
1955289551Szbb	dprintf(sq->nic->dev, "%s: sq->idx: %d, subdesc_cnt: %d\n",
1956289551Szbb	    __func__, sq->idx, subdesc_cnt);
1957289550Szbb	/* Inform HW to xmit new packet */
1958289551Szbb	nicvf_queue_reg_write(sq->nic, NIC_QSET_SQ_0_7_DOOR,
1959289551Szbb	    sq->idx, subdesc_cnt);
1960289551Szbb	return (0);
1961289550Szbb}
1962289550Szbb
1963289551Szbbstatic __inline u_int
1964289551Szbbfrag_num(u_int i)
1965289550Szbb{
1966289551Szbb#if BYTE_ORDER == BIG_ENDIAN
1967289551Szbb	return ((i & ~3) + 3 - (i & 3));
1968289550Szbb#else
1969289551Szbb	return (i);
1970289550Szbb#endif
1971289550Szbb}
1972289550Szbb
1973289551Szbb/* Returns MBUF for a received packet */
1974289551Szbbstruct mbuf *
1975289551Szbbnicvf_get_rcv_mbuf(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1976289550Szbb{
1977289550Szbb	int frag;
1978289550Szbb	int payload_len = 0;
1979289551Szbb	struct mbuf *mbuf;
1980289551Szbb	struct mbuf *mbuf_frag;
1981289551Szbb	uint16_t *rb_lens = NULL;
1982289551Szbb	uint64_t *rb_ptrs = NULL;
1983289550Szbb
1984289551Szbb	mbuf = NULL;
1985289551Szbb	rb_lens = (uint16_t *)((uint8_t *)cqe_rx + (3 * sizeof(uint64_t)));
1986289551Szbb	rb_ptrs = (uint64_t *)((uint8_t *)cqe_rx + (6 * sizeof(uint64_t)));
1987289550Szbb
1988289551Szbb	dprintf(nic->dev, "%s rb_cnt %d rb0_ptr %lx rb0_sz %d\n",
1989289551Szbb	    __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
1990289550Szbb
1991289550Szbb	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
1992289550Szbb		payload_len = rb_lens[frag_num(frag)];
1993289551Szbb		if (frag == 0) {
1994289550Szbb			/* First fragment */
1995289551Szbb			mbuf = nicvf_rb_ptr_to_mbuf(nic,
1996289551Szbb			    (*rb_ptrs - cqe_rx->align_pad));
1997289551Szbb			mbuf->m_len = payload_len;
1998289551Szbb			mbuf->m_data += cqe_rx->align_pad;
1999289551Szbb			if_setrcvif(mbuf, nic->ifp);
2000289550Szbb		} else {
2001289550Szbb			/* Add fragments */
2002289551Szbb			mbuf_frag = nicvf_rb_ptr_to_mbuf(nic, *rb_ptrs);
2003289551Szbb			m_append(mbuf, payload_len, mbuf_frag->m_data);
2004289551Szbb			m_freem(mbuf_frag);
2005289550Szbb		}
2006289550Szbb		/* Next buffer pointer */
2007289550Szbb		rb_ptrs++;
2008289550Szbb	}
2009289551Szbb
2010289551Szbb	if (__predict_true(mbuf != NULL)) {
2011289551Szbb		m_fixhdr(mbuf);
2012289551Szbb		mbuf->m_pkthdr.flowid = cqe_rx->rq_idx;
2013289551Szbb		M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
2014296030Szbb		if (__predict_true((if_getcapenable(nic->ifp) & IFCAP_RXCSUM) != 0)) {
2015296030Szbb			/*
2016296030Szbb			 * HW by default verifies IP & TCP/UDP/SCTP checksums
2017296030Szbb			 */
2018297389Szbb			if (__predict_true(cqe_rx->l3_type == L3TYPE_IPV4)) {
2019296030Szbb				mbuf->m_pkthdr.csum_flags =
2020296030Szbb				    (CSUM_IP_CHECKED | CSUM_IP_VALID);
2021296030Szbb			}
2022297389Szbb
2023297389Szbb			switch (cqe_rx->l4_type) {
2024297389Szbb			case L4TYPE_UDP:
2025297389Szbb			case L4TYPE_TCP: /* fall through */
2026296030Szbb				mbuf->m_pkthdr.csum_flags |=
2027296030Szbb				    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2028297389Szbb				mbuf->m_pkthdr.csum_data = 0xffff;
2029297389Szbb				break;
2030297389Szbb			case L4TYPE_SCTP:
2031297389Szbb				mbuf->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
2032297389Szbb				break;
2033297389Szbb			default:
2034297389Szbb				break;
2035296030Szbb			}
2036296030Szbb		}
2037289551Szbb	}
2038289551Szbb
2039289551Szbb	return (mbuf);
2040289550Szbb}
2041289550Szbb
2042289550Szbb/* Enable interrupt */
2043289551Szbbvoid
2044289551Szbbnicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
2045289550Szbb{
2046289551Szbb	uint64_t reg_val;
2047289550Szbb
2048289550Szbb	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
2049289550Szbb
2050289550Szbb	switch (int_type) {
2051289550Szbb	case NICVF_INTR_CQ:
2052289551Szbb		reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2053289550Szbb		break;
2054289550Szbb	case NICVF_INTR_SQ:
2055289551Szbb		reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2056289550Szbb		break;
2057289550Szbb	case NICVF_INTR_RBDR:
2058289551Szbb		reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2059289550Szbb		break;
2060289550Szbb	case NICVF_INTR_PKT_DROP:
2061289551Szbb		reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
2062289550Szbb		break;
2063289550Szbb	case NICVF_INTR_TCP_TIMER:
2064289551Szbb		reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
2065289550Szbb		break;
2066289550Szbb	case NICVF_INTR_MBOX:
2067289551Szbb		reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
2068289550Szbb		break;
2069289550Szbb	case NICVF_INTR_QS_ERR:
2070289551Szbb		reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
2071289550Szbb		break;
2072289550Szbb	default:
2073289551Szbb		device_printf(nic->dev,
2074289550Szbb			   "Failed to enable interrupt: unknown type\n");
2075289550Szbb		break;
2076289550Szbb	}
2077289550Szbb
2078289550Szbb	nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
2079289550Szbb}
2080289550Szbb
2081289550Szbb/* Disable interrupt */
2082289551Szbbvoid
2083289551Szbbnicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
2084289550Szbb{
2085289551Szbb	uint64_t reg_val = 0;
2086289550Szbb
2087289550Szbb	switch (int_type) {
2088289550Szbb	case NICVF_INTR_CQ:
2089289551Szbb		reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2090289550Szbb		break;
2091289550Szbb	case NICVF_INTR_SQ:
2092289551Szbb		reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2093289550Szbb		break;
2094289550Szbb	case NICVF_INTR_RBDR:
2095289551Szbb		reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2096289550Szbb		break;
2097289550Szbb	case NICVF_INTR_PKT_DROP:
2098289551Szbb		reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
2099289550Szbb		break;
2100289550Szbb	case NICVF_INTR_TCP_TIMER:
2101289551Szbb		reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
2102289550Szbb		break;
2103289550Szbb	case NICVF_INTR_MBOX:
2104289551Szbb		reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
2105289550Szbb		break;
2106289550Szbb	case NICVF_INTR_QS_ERR:
2107289551Szbb		reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
2108289550Szbb		break;
2109289550Szbb	default:
2110289551Szbb		device_printf(nic->dev,
2111289550Szbb			   "Failed to disable interrupt: unknown type\n");
2112289550Szbb		break;
2113289550Szbb	}
2114289550Szbb
2115289550Szbb	nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
2116289550Szbb}
2117289550Szbb
2118289550Szbb/* Clear interrupt */
2119289551Szbbvoid
2120289551Szbbnicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
2121289550Szbb{
2122289551Szbb	uint64_t reg_val = 0;
2123289550Szbb
2124289550Szbb	switch (int_type) {
2125289550Szbb	case NICVF_INTR_CQ:
2126289551Szbb		reg_val = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2127289550Szbb		break;
2128289550Szbb	case NICVF_INTR_SQ:
2129289551Szbb		reg_val = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2130289550Szbb		break;
2131289550Szbb	case NICVF_INTR_RBDR:
2132289551Szbb		reg_val = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2133289550Szbb		break;
2134289550Szbb	case NICVF_INTR_PKT_DROP:
2135289551Szbb		reg_val = (1UL << NICVF_INTR_PKT_DROP_SHIFT);
2136289550Szbb		break;
2137289550Szbb	case NICVF_INTR_TCP_TIMER:
2138289551Szbb		reg_val = (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
2139289550Szbb		break;
2140289550Szbb	case NICVF_INTR_MBOX:
2141289551Szbb		reg_val = (1UL << NICVF_INTR_MBOX_SHIFT);
2142289550Szbb		break;
2143289550Szbb	case NICVF_INTR_QS_ERR:
2144289551Szbb		reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
2145289550Szbb		break;
2146289550Szbb	default:
2147289551Szbb		device_printf(nic->dev,
2148289550Szbb			   "Failed to clear interrupt: unknown type\n");
2149289550Szbb		break;
2150289550Szbb	}
2151289550Szbb
2152289550Szbb	nicvf_reg_write(nic, NIC_VF_INT, reg_val);
2153289550Szbb}
2154289550Szbb
2155289550Szbb/* Check if interrupt is enabled */
2156289551Szbbint
2157289551Szbbnicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
2158289550Szbb{
2159289551Szbb	uint64_t reg_val;
2160289551Szbb	uint64_t mask = 0xff;
2161289550Szbb
2162289550Szbb	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
2163289550Szbb
2164289550Szbb	switch (int_type) {
2165289550Szbb	case NICVF_INTR_CQ:
2166289551Szbb		mask = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2167289550Szbb		break;
2168289550Szbb	case NICVF_INTR_SQ:
2169289551Szbb		mask = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2170289550Szbb		break;
2171289550Szbb	case NICVF_INTR_RBDR:
2172289551Szbb		mask = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2173289550Szbb		break;
2174289550Szbb	case NICVF_INTR_PKT_DROP:
2175289550Szbb		mask = NICVF_INTR_PKT_DROP_MASK;
2176289550Szbb		break;
2177289550Szbb	case NICVF_INTR_TCP_TIMER:
2178289550Szbb		mask = NICVF_INTR_TCP_TIMER_MASK;
2179289550Szbb		break;
2180289550Szbb	case NICVF_INTR_MBOX:
2181289550Szbb		mask = NICVF_INTR_MBOX_MASK;
2182289550Szbb		break;
2183289550Szbb	case NICVF_INTR_QS_ERR:
2184289550Szbb		mask = NICVF_INTR_QS_ERR_MASK;
2185289550Szbb		break;
2186289550Szbb	default:
2187289551Szbb		device_printf(nic->dev,
2188289550Szbb			   "Failed to check interrupt enable: unknown type\n");
2189289550Szbb		break;
2190289550Szbb	}
2191289550Szbb
2192289550Szbb	return (reg_val & mask);
2193289550Szbb}
2194289550Szbb
2195289551Szbbvoid
2196289551Szbbnicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
2197289550Szbb{
2198289550Szbb	struct rcv_queue *rq;
2199289550Szbb
2200289550Szbb#define GET_RQ_STATS(reg) \
2201289550Szbb	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
2202289550Szbb			    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
2203289550Szbb
2204289550Szbb	rq = &nic->qs->rq[rq_idx];
2205289550Szbb	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
2206289550Szbb	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
2207289550Szbb}
2208289550Szbb
2209289551Szbbvoid
2210289551Szbbnicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
2211289550Szbb{
2212289550Szbb	struct snd_queue *sq;
2213289550Szbb
2214289550Szbb#define GET_SQ_STATS(reg) \
2215289550Szbb	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
2216289550Szbb			    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
2217289550Szbb
2218289550Szbb	sq = &nic->qs->sq[sq_idx];
2219289550Szbb	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
2220289550Szbb	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
2221289550Szbb}
2222289550Szbb
2223289550Szbb/* Check for errors in the receive cmp.queue entry */
2224289551Szbbint
2225289551Szbbnicvf_check_cqe_rx_errs(struct nicvf *nic, struct cmp_queue *cq,
2226289551Szbb    struct cqe_rx_t *cqe_rx)
2227289550Szbb{
2228289550Szbb	struct nicvf_hw_stats *stats = &nic->hw_stats;
2229289550Szbb	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
2230289550Szbb
2231289550Szbb	if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
2232289550Szbb		drv_stats->rx_frames_ok++;
2233289551Szbb		return (0);
2234289550Szbb	}
2235289550Szbb
2236289550Szbb	switch (cqe_rx->err_opcode) {
2237289550Szbb	case CQ_RX_ERROP_RE_PARTIAL:
2238289550Szbb		stats->rx_bgx_truncated_pkts++;
2239289550Szbb		break;
2240289550Szbb	case CQ_RX_ERROP_RE_JABBER:
2241289550Szbb		stats->rx_jabber_errs++;
2242289550Szbb		break;
2243289550Szbb	case CQ_RX_ERROP_RE_FCS:
2244289550Szbb		stats->rx_fcs_errs++;
2245289550Szbb		break;
2246289550Szbb	case CQ_RX_ERROP_RE_RX_CTL:
2247289550Szbb		stats->rx_bgx_errs++;
2248289550Szbb		break;
2249289550Szbb	case CQ_RX_ERROP_PREL2_ERR:
2250289550Szbb		stats->rx_prel2_errs++;
2251289550Szbb		break;
2252289550Szbb	case CQ_RX_ERROP_L2_MAL:
2253289550Szbb		stats->rx_l2_hdr_malformed++;
2254289550Szbb		break;
2255289550Szbb	case CQ_RX_ERROP_L2_OVERSIZE:
2256289550Szbb		stats->rx_oversize++;
2257289550Szbb		break;
2258289550Szbb	case CQ_RX_ERROP_L2_UNDERSIZE:
2259289550Szbb		stats->rx_undersize++;
2260289550Szbb		break;
2261289550Szbb	case CQ_RX_ERROP_L2_LENMISM:
2262289550Szbb		stats->rx_l2_len_mismatch++;
2263289550Szbb		break;
2264289550Szbb	case CQ_RX_ERROP_L2_PCLP:
2265289550Szbb		stats->rx_l2_pclp++;
2266289550Szbb		break;
2267289550Szbb	case CQ_RX_ERROP_IP_NOT:
2268289550Szbb		stats->rx_ip_ver_errs++;
2269289550Szbb		break;
2270289550Szbb	case CQ_RX_ERROP_IP_CSUM_ERR:
2271289550Szbb		stats->rx_ip_csum_errs++;
2272289550Szbb		break;
2273289550Szbb	case CQ_RX_ERROP_IP_MAL:
2274289550Szbb		stats->rx_ip_hdr_malformed++;
2275289550Szbb		break;
2276289550Szbb	case CQ_RX_ERROP_IP_MALD:
2277289550Szbb		stats->rx_ip_payload_malformed++;
2278289550Szbb		break;
2279289550Szbb	case CQ_RX_ERROP_IP_HOP:
2280289550Szbb		stats->rx_ip_ttl_errs++;
2281289550Szbb		break;
2282289550Szbb	case CQ_RX_ERROP_L3_PCLP:
2283289550Szbb		stats->rx_l3_pclp++;
2284289550Szbb		break;
2285289550Szbb	case CQ_RX_ERROP_L4_MAL:
2286289550Szbb		stats->rx_l4_malformed++;
2287289550Szbb		break;
2288289550Szbb	case CQ_RX_ERROP_L4_CHK:
2289289550Szbb		stats->rx_l4_csum_errs++;
2290289550Szbb		break;
2291289550Szbb	case CQ_RX_ERROP_UDP_LEN:
2292289550Szbb		stats->rx_udp_len_errs++;
2293289550Szbb		break;
2294289550Szbb	case CQ_RX_ERROP_L4_PORT:
2295289550Szbb		stats->rx_l4_port_errs++;
2296289550Szbb		break;
2297289550Szbb	case CQ_RX_ERROP_TCP_FLAG:
2298289550Szbb		stats->rx_tcp_flag_errs++;
2299289550Szbb		break;
2300289550Szbb	case CQ_RX_ERROP_TCP_OFFSET:
2301289550Szbb		stats->rx_tcp_offset_errs++;
2302289550Szbb		break;
2303289550Szbb	case CQ_RX_ERROP_L4_PCLP:
2304289550Szbb		stats->rx_l4_pclp++;
2305289550Szbb		break;
2306289550Szbb	case CQ_RX_ERROP_RBDR_TRUNC:
2307289550Szbb		stats->rx_truncated_pkts++;
2308289550Szbb		break;
2309289550Szbb	}
2310289550Szbb
2311289551Szbb	return (1);
2312289550Szbb}
2313289550Szbb
2314289550Szbb/* Check for errors in the send cmp.queue entry */
2315289551Szbbint
2316289551Szbbnicvf_check_cqe_tx_errs(struct nicvf *nic, struct cmp_queue *cq,
2317289551Szbb    struct cqe_send_t *cqe_tx)
2318289550Szbb{
2319289550Szbb	struct cmp_queue_stats *stats = &cq->stats;
2320289550Szbb
2321289550Szbb	switch (cqe_tx->send_status) {
2322289550Szbb	case CQ_TX_ERROP_GOOD:
2323289550Szbb		stats->tx.good++;
2324289551Szbb		return (0);
2325289550Szbb	case CQ_TX_ERROP_DESC_FAULT:
2326289550Szbb		stats->tx.desc_fault++;
2327289550Szbb		break;
2328289550Szbb	case CQ_TX_ERROP_HDR_CONS_ERR:
2329289550Szbb		stats->tx.hdr_cons_err++;
2330289550Szbb		break;
2331289550Szbb	case CQ_TX_ERROP_SUBDC_ERR:
2332289550Szbb		stats->tx.subdesc_err++;
2333289550Szbb		break;
2334289550Szbb	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
2335289550Szbb		stats->tx.imm_size_oflow++;
2336289550Szbb		break;
2337289550Szbb	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
2338289550Szbb		stats->tx.data_seq_err++;
2339289550Szbb		break;
2340289550Szbb	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
2341289550Szbb		stats->tx.mem_seq_err++;
2342289550Szbb		break;
2343289550Szbb	case CQ_TX_ERROP_LOCK_VIOL:
2344289550Szbb		stats->tx.lock_viol++;
2345289550Szbb		break;
2346289550Szbb	case CQ_TX_ERROP_DATA_FAULT:
2347289550Szbb		stats->tx.data_fault++;
2348289550Szbb		break;
2349289550Szbb	case CQ_TX_ERROP_TSTMP_CONFLICT:
2350289550Szbb		stats->tx.tstmp_conflict++;
2351289550Szbb		break;
2352289550Szbb	case CQ_TX_ERROP_TSTMP_TIMEOUT:
2353289550Szbb		stats->tx.tstmp_timeout++;
2354289550Szbb		break;
2355289550Szbb	case CQ_TX_ERROP_MEM_FAULT:
2356289550Szbb		stats->tx.mem_fault++;
2357289550Szbb		break;
2358289550Szbb	case CQ_TX_ERROP_CK_OVERLAP:
2359289550Szbb		stats->tx.csum_overlap++;
2360289550Szbb		break;
2361289550Szbb	case CQ_TX_ERROP_CK_OFLOW:
2362289550Szbb		stats->tx.csum_overflow++;
2363289550Szbb		break;
2364289550Szbb	}
2365289550Szbb
2366289551Szbb	return (1);
2367289550Szbb}
2368