nicvf_queues.c revision 289551
1289550Szbb/*
2289550Szbb * Copyright (C) 2015 Cavium Inc.
3289550Szbb * All rights reserved.
4289550Szbb *
5289550Szbb * Redistribution and use in source and binary forms, with or without
6289550Szbb * modification, are permitted provided that the following conditions
7289550Szbb * are met:
8289550Szbb * 1. Redistributions of source code must retain the above copyright
9289550Szbb *    notice, this list of conditions and the following disclaimer.
10289550Szbb * 2. Redistributions in binary form must reproduce the above copyright
11289550Szbb *    notice, this list of conditions and the following disclaimer in the
12289550Szbb *    documentation and/or other materials provided with the distribution.
13289550Szbb *
14289550Szbb * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15289550Szbb * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16289550Szbb * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17289550Szbb * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18289550Szbb * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19289550Szbb * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20289550Szbb * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21289550Szbb * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22289550Szbb * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23289550Szbb * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24289550Szbb * SUCH DAMAGE.
25289550Szbb *
26289550Szbb * $FreeBSD: head/sys/dev/vnic/nicvf_queues.c 289551 2015-10-18 22:02:58Z zbb $
27289550Szbb *
28289550Szbb */
29289551Szbb#include <sys/cdefs.h>
30289551Szbb__FBSDID("$FreeBSD: head/sys/dev/vnic/nicvf_queues.c 289551 2015-10-18 22:02:58Z zbb $");
31289550Szbb
32289551Szbb#include <sys/param.h>
33289551Szbb#include <sys/systm.h>
34289551Szbb#include <sys/bitset.h>
35289551Szbb#include <sys/bitstring.h>
36289551Szbb#include <sys/buf_ring.h>
37289551Szbb#include <sys/bus.h>
38289551Szbb#include <sys/endian.h>
39289551Szbb#include <sys/kernel.h>
40289551Szbb#include <sys/malloc.h>
41289551Szbb#include <sys/module.h>
42289551Szbb#include <sys/rman.h>
43289551Szbb#include <sys/pciio.h>
44289551Szbb#include <sys/pcpu.h>
45289551Szbb#include <sys/proc.h>
46289551Szbb#include <sys/sockio.h>
47289551Szbb#include <sys/socket.h>
48289551Szbb#include <sys/stdatomic.h>
49289551Szbb#include <sys/cpuset.h>
50289551Szbb#include <sys/lock.h>
51289551Szbb#include <sys/mutex.h>
52289551Szbb#include <sys/smp.h>
53289551Szbb#include <sys/taskqueue.h>
54289550Szbb
55289551Szbb#include <vm/vm.h>
56289551Szbb#include <vm/pmap.h>
57289551Szbb
58289551Szbb#include <machine/bus.h>
59289551Szbb#include <machine/vmparam.h>
60289551Szbb
61289551Szbb#include <net/ethernet.h>
62289551Szbb#include <net/if.h>
63289551Szbb#include <net/if_var.h>
64289551Szbb#include <net/if_media.h>
65289551Szbb#include <net/ifq.h>
66289551Szbb
67289551Szbb#include <dev/pci/pcireg.h>
68289551Szbb#include <dev/pci/pcivar.h>
69289551Szbb
70289551Szbb#include "thunder_bgx.h"
71289550Szbb#include "nic_reg.h"
72289550Szbb#include "nic.h"
73289550Szbb#include "q_struct.h"
74289550Szbb#include "nicvf_queues.h"
75289550Szbb
76289551Szbb#define	DEBUG
77289551Szbb#undef DEBUG
78289551Szbb
79289551Szbb#ifdef DEBUG
80289551Szbb#define	dprintf(dev, fmt, ...)	device_printf(dev, fmt, ##__VA_ARGS__)
81289551Szbb#else
82289551Szbb#define	dprintf(dev, fmt, ...)
83289551Szbb#endif
84289551Szbb
85289551SzbbMALLOC_DECLARE(M_NICVF);
86289551Szbb
87289551Szbbstatic void nicvf_free_snd_queue(struct nicvf *, struct snd_queue *);
88289551Szbbstatic int nicvf_tx_mbuf_locked(struct snd_queue *, struct mbuf *);
89289551Szbbstatic struct mbuf * nicvf_get_rcv_mbuf(struct nicvf *, struct cqe_rx_t *);
90289551Szbbstatic void nicvf_sq_disable(struct nicvf *, int);
91289551Szbbstatic void nicvf_sq_enable(struct nicvf *, struct snd_queue *, int);
92289551Szbbstatic void nicvf_put_sq_desc(struct snd_queue *, int);
93289551Szbbstatic void nicvf_cmp_queue_config(struct nicvf *, struct queue_set *, int,
94289551Szbb    boolean_t);
95289551Szbbstatic void nicvf_sq_free_used_descs(struct nicvf *, struct snd_queue *, int);
96289551Szbb
97289551Szbbstatic void nicvf_rbdr_task(void *, int);
98289551Szbbstatic void nicvf_rbdr_task_nowait(void *, int);
99289551Szbb
100289550Szbbstruct rbuf_info {
101289551Szbb	bus_dma_tag_t	dmat;
102289551Szbb	bus_dmamap_t	dmap;
103289551Szbb	struct mbuf *	mbuf;
104289550Szbb};
105289550Szbb
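/*
 * Layout of a single receive buffer as set up by nicvf_alloc_rcv_buffer():
 *
 *   |<-- NICVF_RCV_BUF_ALIGN_BYTES -->|<--------- packet data --------->|
 *   |  struct rbuf_info (+ padding)   |  address programmed into RBDR   |
 *
 * GET_RBUF_INFO() steps back over that leading line from the data address
 * to recover the bookkeeping structure.
 */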
106289551Szbb#define GET_RBUF_INFO(x) ((struct rbuf_info *)((x) - NICVF_RCV_BUF_ALIGN_BYTES))
107289550Szbb
108289550Szbb/* Poll a register for a specific value */
109289550Szbbstatic int nicvf_poll_reg(struct nicvf *nic, int qidx,
110289551Szbb			  uint64_t reg, int bit_pos, int bits, int val)
111289550Szbb{
112289551Szbb	uint64_t bit_mask;
113289551Szbb	uint64_t reg_val;
114289550Szbb	int timeout = 10;
115289550Szbb
116289551Szbb	bit_mask = (1UL << bits) - 1;
117289550Szbb	bit_mask = (bit_mask << bit_pos);
118289550Szbb
119289550Szbb	while (timeout) {
120289550Szbb		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
121289550Szbb		if (((reg_val & bit_mask) >> bit_pos) == val)
122289551Szbb			return (0);
123289551Szbb
124289551Szbb		DELAY(1000);
125289550Szbb		timeout--;
126289550Szbb	}
127289551Szbb	device_printf(nic->dev, "Poll on reg 0x%lx failed\n", reg);
128289551Szbb	return (ETIMEDOUT);
129289550Szbb}
130289550Szbb
131289551Szbb/* Callback for bus_dmamap_load() */
132289551Szbbstatic void
133289551Szbbnicvf_dmamap_q_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
134289551Szbb{
135289551Szbb	bus_addr_t *paddr;
136289551Szbb
137289551Szbb	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
138289551Szbb	paddr = arg;
139289551Szbb	*paddr = segs->ds_addr;
140289551Szbb}
141289551Szbb
142289550Szbb/* Allocate memory for a queue's descriptors */
143289551Szbbstatic int
144289551Szbbnicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
145289551Szbb    int q_len, int desc_size, int align_bytes)
146289550Szbb{
147289551Szbb	int err, err_dmat;
148289551Szbb
149289551Szbb	/* Create DMA tag first */
150289551Szbb	err = bus_dma_tag_create(
151289551Szbb	    bus_get_dma_tag(nic->dev),		/* parent tag */
152289551Szbb	    align_bytes,			/* alignment */
153289551Szbb	    0,					/* boundary */
154289551Szbb	    BUS_SPACE_MAXADDR,			/* lowaddr */
155289551Szbb	    BUS_SPACE_MAXADDR,			/* highaddr */
156289551Szbb	    NULL, NULL,				/* filtfunc, filtfuncarg */
157289551Szbb	    (q_len * desc_size),		/* maxsize */
158289551Szbb	    1,					/* nsegments */
159289551Szbb	    (q_len * desc_size),		/* maxsegsize */
160289551Szbb	    0,					/* flags */
161289551Szbb	    NULL, NULL,				/* lockfunc, lockfuncarg */
162289551Szbb	    &dmem->dmat);			/* dmat */
163289551Szbb
164289551Szbb	if (err != 0) {
165289551Szbb		device_printf(nic->dev,
166289551Szbb		    "Failed to create busdma tag for descriptors ring\n");
167289551Szbb		return (err);
168289551Szbb	}
169289551Szbb
170289551Szbb	/* Allocate a segment of contiguous DMA-safe memory */
171289551Szbb	err = bus_dmamem_alloc(
172289551Szbb	    dmem->dmat,				/* DMA tag */
173289551Szbb	    &dmem->base,			/* virtual address */
174289551Szbb	    (BUS_DMA_NOWAIT | BUS_DMA_ZERO),	/* flags */
175289551Szbb	    &dmem->dmap);			/* DMA map */
176289551Szbb	if (err != 0) {
177289551Szbb		device_printf(nic->dev, "Failed to allocate DMA safe memory for "
178289551Szbb		    "descriptors ring\n");
179289551Szbb		goto dmamem_fail;
180289551Szbb	}
181289551Szbb
182289551Szbb	err = bus_dmamap_load(
183289551Szbb	    dmem->dmat,
184289551Szbb	    dmem->dmap,
185289551Szbb	    dmem->base,
186289551Szbb	    (q_len * desc_size),		/* allocation size */
187289551Szbb	    nicvf_dmamap_q_cb,			/* map to DMA address cb. */
188289551Szbb	    &dmem->phys_base,			/* physical address */
189289551Szbb	    BUS_DMA_NOWAIT);
190289551Szbb	if (err != 0) {
191289551Szbb		device_printf(nic->dev,
192289551Szbb		    "Cannot load DMA map of descriptors ring\n");
193289551Szbb		goto dmamap_fail;
194289551Szbb	}
195289551Szbb
196289550Szbb	dmem->q_len = q_len;
197289551Szbb	dmem->size = (desc_size * q_len);
198289550Szbb
199289551Szbb	return (0);
200289551Szbb
201289551Szbbdmamap_fail:
202289551Szbb	bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
203289551Szbb	dmem->phys_base = 0;
204289551Szbbdmamem_fail:
205289551Szbb	err_dmat = bus_dma_tag_destroy(dmem->dmat);
206289551Szbb	dmem->base = NULL;
207289551Szbb	KASSERT(err_dmat == 0,
208289551Szbb	    ("%s: Trying to destroy BUSY DMA tag", __func__));
209289551Szbb
210289551Szbb	return (err);
211289550Szbb}
212289550Szbb
213289550Szbb/* Free queue's descriptor memory */
214289551Szbbstatic void
215289551Szbbnicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
216289550Szbb{
217289551Szbb	int err;
218289551Szbb
219289551Szbb	if ((dmem == NULL) || (dmem->base == NULL))
220289550Szbb		return;
221289550Szbb
222289551Szbb	/* Unload a map */
223289551Szbb	bus_dmamap_sync(dmem->dmat, dmem->dmap, BUS_DMASYNC_POSTREAD);
224289551Szbb	bus_dmamap_unload(dmem->dmat, dmem->dmap);
225289551Szbb	/* Free DMA memory */
226289551Szbb	bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
227289551Szbb	/* Destroy DMA tag */
228289551Szbb	err = bus_dma_tag_destroy(dmem->dmat);
229289551Szbb
230289551Szbb	KASSERT(err == 0,
231289551Szbb	    ("%s: Trying to destroy BUSY DMA tag", __func__));
232289551Szbb
233289551Szbb	dmem->phys_base = 0;
234289550Szbb	dmem->base = NULL;
235289550Szbb}
236289550Szbb
237289551Szbb/*
238289551Szbb * Allocate buffer for packet reception
239289550Szbb * HW returns the memory address where the packet was DMA'ed, not a pointer
240289550Szbb * into the RBDR ring, so save the buffer bookkeeping at the start of the
241289550Szbb * fragment and align the start address to a cache-line boundary.
242289550Szbb */
243289551Szbbstatic __inline int
244289551Szbbnicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
245289551Szbb    bus_dmamap_t dmap, int mflags, uint32_t buf_len, bus_addr_t *rbuf)
246289550Szbb{
247289551Szbb	struct mbuf *mbuf;
248289550Szbb	struct rbuf_info *rinfo;
249289551Szbb	bus_dma_segment_t segs[1];
250289551Szbb	int nsegs;
251289551Szbb	int err;
252289550Szbb
253289551Szbb	mbuf = m_getjcl(mflags, MT_DATA, M_PKTHDR, MCLBYTES);
254289551Szbb	if (mbuf == NULL)
255289551Szbb		return (ENOMEM);
256289550Szbb
257289551Szbb	/*
258289551Szbb	 * The length is the requested buffer length plus one 128-byte
259289551Szbb	 * line used as room for the rbuf_info structure.
260289551Szbb	 */
261289551Szbb	mbuf->m_len = mbuf->m_pkthdr.len = buf_len;
262289551Szbb
263289551Szbb	err = bus_dmamap_load_mbuf_sg(rbdr->rbdr_buff_dmat, dmap, mbuf, segs,
264289551Szbb	    &nsegs, BUS_DMA_NOWAIT);
265289551Szbb	if (err != 0) {
266289551Szbb		device_printf(nic->dev,
267289551Szbb		    "Failed to map mbuf into DMA visible memory, err: %d\n",
268289551Szbb		    err);
269289551Szbb		m_freem(mbuf);
270289551Szbb		bus_dmamap_destroy(rbdr->rbdr_buff_dmat, dmap);
271289551Szbb		return (err);
272289550Szbb	}
273289551Szbb	if (nsegs != 1)
274289551Szbb		panic("Unexpected number of DMA segments for RB: %d", nsegs);
275289551Szbb	/*
276289551Szbb	 * Now use the room for rbuf_info structure
277289551Szbb	 * and adjust mbuf data and length.
278289551Szbb	 */
279289551Szbb	rinfo = (struct rbuf_info *)mbuf->m_data;
280289551Szbb	m_adj(mbuf, NICVF_RCV_BUF_ALIGN_BYTES);
281289550Szbb
282289551Szbb	rinfo->dmat = rbdr->rbdr_buff_dmat;
283289551Szbb	rinfo->dmap = dmap;
284289551Szbb	rinfo->mbuf = mbuf;
285289550Szbb
286289551Szbb	*rbuf = segs[0].ds_addr + NICVF_RCV_BUF_ALIGN_BYTES;
287289550Szbb
288289551Szbb	return (0);
289289550Szbb}
290289550Szbb
291289551Szbb/* Retrieve mbuf for received packet */
292289551Szbbstatic struct mbuf *
293289551Szbbnicvf_rb_ptr_to_mbuf(struct nicvf *nic, bus_addr_t rb_ptr)
294289550Szbb{
295289551Szbb	struct mbuf *mbuf;
296289550Szbb	struct rbuf_info *rinfo;
297289550Szbb
298289550Szbb	/* Get buffer start address and alignment offset */
299289551Szbb	rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(rb_ptr));
300289550Szbb
301289551Szbb	/* Now retrieve mbuf to give to stack */
302289551Szbb	mbuf = rinfo->mbuf;
303289551Szbb	if (__predict_false(mbuf == NULL)) {
304289551Szbb		panic("%s: Received packet fragment with NULL mbuf",
305289551Szbb		    device_get_nameunit(nic->dev));
306289550Szbb	}
307289551Szbb	/*
308289551Szbb	 * Clear the mbuf in the descriptor to indicate
309289551Szbb	 * that this slot is processed and free to use.
310289551Szbb	 */
311289551Szbb	rinfo->mbuf = NULL;
312289550Szbb
313289551Szbb	bus_dmamap_sync(rinfo->dmat, rinfo->dmap, BUS_DMASYNC_POSTREAD);
314289551Szbb	bus_dmamap_unload(rinfo->dmat, rinfo->dmap);
315289550Szbb
316289551Szbb	return (mbuf);
317289550Szbb}
318289550Szbb
319289550Szbb/* Allocate RBDR ring and populate receive buffers */
320289551Szbbstatic int
321289551Szbbnicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, int ring_len,
322289551Szbb    int buf_size, int qidx)
323289550Szbb{
324289551Szbb	bus_dmamap_t dmap;
325289551Szbb	bus_addr_t rbuf;
326289551Szbb	struct rbdr_entry_t *desc;
327289550Szbb	int idx;
328289550Szbb	int err;
329289550Szbb
330289551Szbb	/* Allocate rbdr descriptors ring */
331289550Szbb	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
332289551Szbb	    sizeof(struct rbdr_entry_t), NICVF_RCV_BUF_ALIGN_BYTES);
333289551Szbb	if (err != 0) {
334289551Szbb		device_printf(nic->dev,
335289551Szbb		    "Failed to create RBDR descriptors ring\n");
336289551Szbb		return (err);
337289551Szbb	}
338289550Szbb
339289550Szbb	rbdr->desc = rbdr->dmem.base;
340289551Szbb	/*
341289551Szbb	 * Buffer size has to be in multiples of 128 bytes.
342289551Szbb	 * Make room for metadata the size of one cache line (128 bytes).
343289551Szbb	 */
344289551Szbb	rbdr->dma_size = buf_size - NICVF_RCV_BUF_ALIGN_BYTES;
345289551Szbb	rbdr->enable = TRUE;
346289550Szbb	rbdr->thresh = RBDR_THRESH;
347289551Szbb	rbdr->nic = nic;
348289551Szbb	rbdr->idx = qidx;
349289550Szbb
350289551Szbb	/*
351289551Szbb	 * Create DMA tag for Rx buffers.
352289551Szbb	 * Each map created using this tag is intended to store Rx payload for
353289551Szbb	 * one fragment plus a header holding the rbuf_info structure (hence
354289551Szbb	 * the additional 128-byte line, since an RB must be a multiple of the
355289551Szbb	 * 128-byte cache line).
356289551Szbb	 */
357289551Szbb	if (buf_size > MCLBYTES) {
358289551Szbb		device_printf(nic->dev,
359289551Szbb		    "Buffer size too large for mbuf cluster\n");
360289551Szbb		return (EINVAL);
361289551Szbb	}
362289551Szbb	err = bus_dma_tag_create(
363289551Szbb	    bus_get_dma_tag(nic->dev),		/* parent tag */
364289551Szbb	    NICVF_RCV_BUF_ALIGN_BYTES,		/* alignment */
365289551Szbb	    0,					/* boundary */
366289551Szbb	    DMAP_MAX_PHYSADDR,			/* lowaddr */
367289551Szbb	    DMAP_MIN_PHYSADDR,			/* highaddr */
368289551Szbb	    NULL, NULL,				/* filtfunc, filtfuncarg */
369289551Szbb	    roundup2(buf_size, MCLBYTES),	/* maxsize */
370289551Szbb	    1,					/* nsegments */
371289551Szbb	    roundup2(buf_size, MCLBYTES),	/* maxsegsize */
372289551Szbb	    0,					/* flags */
373289551Szbb	    NULL, NULL,				/* lockfunc, lockfuncarg */
374289551Szbb	    &rbdr->rbdr_buff_dmat);		/* dmat */
375289551Szbb
376289551Szbb	if (err != 0) {
377289551Szbb		device_printf(nic->dev,
378289551Szbb		    "Failed to create busdma tag for RBDR buffers\n");
379289551Szbb		return (err);
380289551Szbb	}
381289551Szbb
382289551Szbb	rbdr->rbdr_buff_dmaps = malloc(sizeof(*rbdr->rbdr_buff_dmaps) *
383289551Szbb	    ring_len, M_NICVF, (M_WAITOK | M_ZERO));
384289551Szbb
385289550Szbb	for (idx = 0; idx < ring_len; idx++) {
386289551Szbb		err = bus_dmamap_create(rbdr->rbdr_buff_dmat, 0, &dmap);
387289551Szbb		if (err != 0) {
388289551Szbb			device_printf(nic->dev,
389289551Szbb			    "Failed to create DMA map for RB\n");
390289551Szbb			return (err);
391289551Szbb		}
392289551Szbb		rbdr->rbdr_buff_dmaps[idx] = dmap;
393289550Szbb
394289551Szbb		err = nicvf_alloc_rcv_buffer(nic, rbdr, dmap, M_WAITOK,
395289551Szbb		    DMA_BUFFER_LEN, &rbuf);
396289551Szbb		if (err != 0)
397289551Szbb			return (err);
398289551Szbb
399289550Szbb		desc = GET_RBDR_DESC(rbdr, idx);
400289551Szbb		desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
401289550Szbb	}
402289551Szbb
403289551Szbb	/* Allocate taskqueue */
404289551Szbb	TASK_INIT(&rbdr->rbdr_task, 0, nicvf_rbdr_task, rbdr);
405289551Szbb	TASK_INIT(&rbdr->rbdr_task_nowait, 0, nicvf_rbdr_task_nowait, rbdr);
406289551Szbb	rbdr->rbdr_taskq = taskqueue_create_fast("nicvf_rbdr_taskq", M_WAITOK,
407289551Szbb	    taskqueue_thread_enqueue, &rbdr->rbdr_taskq);
408289551Szbb	taskqueue_start_threads(&rbdr->rbdr_taskq, 1, PI_NET, "%s: rbdr_taskq",
409289551Szbb	    device_get_nameunit(nic->dev));
410289551Szbb
411289551Szbb	return (0);
412289550Szbb}
413289550Szbb
414289550Szbb/* Free RBDR ring and its receive buffers */
415289551Szbbstatic void
416289551Szbbnicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
417289550Szbb{
418289551Szbb	struct mbuf *mbuf;
419289551Szbb	struct queue_set *qs;
420289550Szbb	struct rbdr_entry_t *desc;
421289550Szbb	struct rbuf_info *rinfo;
422289551Szbb	bus_addr_t buf_addr;
423289551Szbb	int head, tail, idx;
424289551Szbb	int err;
425289550Szbb
426289551Szbb	qs = nic->qs;
427289550Szbb
428289551Szbb	if ((qs == NULL) || (rbdr == NULL))
429289550Szbb		return;
430289550Szbb
431289551Szbb	rbdr->enable = FALSE;
432289551Szbb	if (rbdr->rbdr_taskq != NULL) {
433289551Szbb		/* Remove tasks */
434289551Szbb		while (taskqueue_cancel(rbdr->rbdr_taskq,
435289551Szbb		    &rbdr->rbdr_task_nowait, NULL) != 0) {
436289551Szbb			/* Finish the nowait task first */
437289551Szbb			taskqueue_drain(rbdr->rbdr_taskq,
438289551Szbb			    &rbdr->rbdr_task_nowait);
439289551Szbb		}
440289551Szbb		taskqueue_free(rbdr->rbdr_taskq);
441289551Szbb		rbdr->rbdr_taskq = NULL;
442289550Szbb
443289551Szbb		while (taskqueue_cancel(taskqueue_thread,
444289551Szbb		    &rbdr->rbdr_task, NULL) != 0) {
445289551Szbb			/* Now finish the sleepable task */
446289551Szbb			taskqueue_drain(taskqueue_thread, &rbdr->rbdr_task);
447289551Szbb		}
448289551Szbb	}
449289551Szbb
450289551Szbb	/*
451289551Szbb	 * Free all of the memory under the RB descriptors.
452289551Szbb	 * There are assumptions here:
453289551Szbb	 * 1. Corresponding RBDR is disabled
454289551Szbb	 *    - it is safe to operate using head and tail indexes
455289551Szbb	 * 2. All buffers that were received are properly freed by
456289551Szbb	 *    the receive handler
457289551Szbb	 *    - there is no need to unload the DMA map and free the mbuf for
458289551Szbb	 *      any descriptors other than the unused ones
459289551Szbb	 */
460289551Szbb	if (rbdr->rbdr_buff_dmat != NULL) {
461289551Szbb		head = rbdr->head;
462289551Szbb		tail = rbdr->tail;
463289551Szbb		while (head != tail) {
464289551Szbb			desc = GET_RBDR_DESC(rbdr, head);
465289551Szbb			buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
466289551Szbb			rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
467289551Szbb			bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
468289551Szbb			mbuf = rinfo->mbuf;
469289551Szbb			/* This will destroy everything including rinfo! */
470289551Szbb			m_freem(mbuf);
471289551Szbb			head++;
472289551Szbb			head &= (rbdr->dmem.q_len - 1);
473289551Szbb		}
474289551Szbb		/* Free tail descriptor */
475289551Szbb		desc = GET_RBDR_DESC(rbdr, tail);
476289550Szbb		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
477289551Szbb		rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
478289551Szbb		bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
479289551Szbb		mbuf = rinfo->mbuf;
480289551Szbb		/* This will destroy everything including rinfo! */
481289551Szbb		m_freem(mbuf);
482289551Szbb
483289551Szbb		/* Destroy DMA maps */
484289551Szbb		for (idx = 0; idx < qs->rbdr_len; idx++) {
485289551Szbb			if (rbdr->rbdr_buff_dmaps[idx] == NULL)
486289551Szbb				continue;
487289551Szbb			err = bus_dmamap_destroy(rbdr->rbdr_buff_dmat,
488289551Szbb			    rbdr->rbdr_buff_dmaps[idx]);
489289551Szbb			KASSERT(err == 0,
490289551Szbb			    ("%s: Could not destroy DMA map for RB, desc: %d",
491289551Szbb			    __func__, idx));
492289551Szbb			rbdr->rbdr_buff_dmaps[idx] = NULL;
493289551Szbb		}
494289551Szbb
495289551Szbb		/* Now destroy the tag */
496289551Szbb		err = bus_dma_tag_destroy(rbdr->rbdr_buff_dmat);
497289551Szbb		KASSERT(err == 0,
498289551Szbb		    ("%s: Trying to destroy BUSY DMA tag", __func__));
499289551Szbb
500289551Szbb		rbdr->head = 0;
501289551Szbb		rbdr->tail = 0;
502289550Szbb	}
503289550Szbb
504289550Szbb	/* Free RBDR ring */
505289550Szbb	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
506289550Szbb}
507289550Szbb
508289551Szbb/*
509289551Szbb * Refill receive buffer descriptors with new buffers.
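 * The number of descriptors to refill is the ring length minus the count
 * the HW still owns (STATUS0) minus one, since the doorbell may be rung
 * with at most (ring size - 1) entries.  New buffers are installed
 * starting one slot past the current tail pointer and the doorbell is
 * then rung with the number of freshly installed buffers.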
510289550Szbb */
511289551Szbbstatic int
512289551Szbbnicvf_refill_rbdr(struct rbdr *rbdr, int mflags)
513289550Szbb{
514289551Szbb	struct nicvf *nic;
515289551Szbb	struct queue_set *qs;
516289551Szbb	int rbdr_idx;
517289550Szbb	int tail, qcount;
518289550Szbb	int refill_rb_cnt;
519289550Szbb	struct rbdr_entry_t *desc;
520289551Szbb	bus_dmamap_t dmap;
521289551Szbb	bus_addr_t rbuf;
522289551Szbb	boolean_t rb_alloc_fail;
523289551Szbb	int new_rb;
524289550Szbb
525289551Szbb	rb_alloc_fail = TRUE;
526289551Szbb	new_rb = 0;
527289551Szbb	nic = rbdr->nic;
528289551Szbb	qs = nic->qs;
529289551Szbb	rbdr_idx = rbdr->idx;
530289551Szbb
531289550Szbb	/* Check if it's enabled */
532289550Szbb	if (!rbdr->enable)
533289551Szbb		return (0);
534289550Szbb
535289550Szbb	/* Get the number of descriptors to be refilled */
536289550Szbb	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
537289550Szbb	qcount &= 0x7FFFF;
538289550Szbb	/* Doorbell can be rung with at most the ring size minus 1 */
539289551Szbb	if (qcount >= (qs->rbdr_len - 1)) {
540289551Szbb		rb_alloc_fail = FALSE;
541289551Szbb		goto out;
542289551Szbb	} else
543289550Szbb		refill_rb_cnt = qs->rbdr_len - qcount - 1;
544289550Szbb
545289550Szbb	/* Start filling descs from tail */
546289550Szbb	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
547289550Szbb	while (refill_rb_cnt) {
548289550Szbb		tail++;
549289550Szbb		tail &= (rbdr->dmem.q_len - 1);
550289550Szbb
551289551Szbb		dmap = rbdr->rbdr_buff_dmaps[tail];
552289551Szbb		if (nicvf_alloc_rcv_buffer(nic, rbdr, dmap, mflags,
553289551Szbb		    DMA_BUFFER_LEN, &rbuf)) {
554289551Szbb			/* Something went wrong. Give up. */
555289550Szbb			break;
556289551Szbb		}
557289550Szbb		desc = GET_RBDR_DESC(rbdr, tail);
558289551Szbb		desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
559289550Szbb		refill_rb_cnt--;
560289550Szbb		new_rb++;
561289550Szbb	}
562289550Szbb
563289550Szbb	/* make sure all memory stores are done before ringing doorbell */
564289551Szbb	wmb();
565289550Szbb
566289550Szbb	/* Check if buffer allocation failed */
567289551Szbb	if (refill_rb_cnt == 0)
568289551Szbb		rb_alloc_fail = FALSE;
569289550Szbb
570289550Szbb	/* Notify HW */
571289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
572289550Szbb			      rbdr_idx, new_rb);
573289551Szbbout:
574289551Szbb	if (!rb_alloc_fail) {
575289551Szbb		/*
576289551Szbb		 * Re-enable RBDR interrupts only
577289551Szbb		 * if buffer allocation is success.
578289551Szbb		 */
579289550Szbb		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
580289550Szbb
581289551Szbb		return (0);
582289551Szbb	}
583289551Szbb
584289551Szbb	return (ENOMEM);
585289550Szbb}
586289550Szbb
587289551Szbb/* Refill RBs even if sleep is needed to reclaim memory */
588289551Szbbstatic void
589289551Szbbnicvf_rbdr_task(void *arg, int pending)
590289550Szbb{
591289551Szbb	struct rbdr *rbdr;
592289551Szbb	int err;
593289550Szbb
594289551Szbb	rbdr = (struct rbdr *)arg;
595289551Szbb
596289551Szbb	err = nicvf_refill_rbdr(rbdr, M_WAITOK);
597289551Szbb	if (__predict_false(err != 0)) {
598289551Szbb		panic("%s: Failed to refill RBs even when sleep enabled",
599289551Szbb		    __func__);
600289551Szbb	}
601289550Szbb}
602289550Szbb
603289551Szbb/* Refill RBs as soon as possible without waiting */
604289551Szbbstatic void
605289551Szbbnicvf_rbdr_task_nowait(void *arg, int pending)
606289550Szbb{
607289551Szbb	struct rbdr *rbdr;
608289551Szbb	int err;
609289550Szbb
610289551Szbb	rbdr = (struct rbdr *)arg;
611289551Szbb
612289551Szbb	err = nicvf_refill_rbdr(rbdr, M_NOWAIT);
613289551Szbb	if (err != 0) {
614289551Szbb		/*
615289551Szbb		 * Schedule another, sleepable kernel thread
616289551Szbb		 * that is guaranteed to refill the buffers.
617289551Szbb		 */
618289551Szbb		taskqueue_enqueue(taskqueue_thread, &rbdr->rbdr_task);
619289550Szbb	}
620289550Szbb}
621289550Szbb
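/*
 * Handle a CQE_TYPE_RX completion: validate the CQE, retrieve the mbuf
 * carrying the received data and enqueue it on the per-CQ buf_ring.
 * The mbufs are pushed to the network stack later, after the completion
 * lock has been dropped.
 */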
622289551Szbbstatic int
623289551Szbbnicvf_rcv_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
624289551Szbb    struct cqe_rx_t *cqe_rx, int cqe_type)
625289551Szbb{
626289551Szbb	struct mbuf *mbuf;
627289551Szbb	int rq_idx;
628289551Szbb	int err = 0;
629289551Szbb
630289551Szbb	rq_idx = cqe_rx->rq_idx;
631289551Szbb
632289551Szbb	/* Check for errors */
633289551Szbb	err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
634289551Szbb	if (err && !cqe_rx->rb_cnt)
635289551Szbb		return (0);
636289551Szbb
637289551Szbb	mbuf = nicvf_get_rcv_mbuf(nic, cqe_rx);
638289551Szbb	if (mbuf == NULL) {
639289551Szbb		dprintf(nic->dev, "Packet not received\n");
640289551Szbb		return (0);
641289551Szbb	}
642289551Szbb
643289551Szbb	/* If error packet */
644289551Szbb	if (err != 0) {
645289551Szbb		m_freem(mbuf);
646289551Szbb		return (0);
647289551Szbb	}
648289551Szbb
649289551Szbb	/*
650289551Szbb	 * Push this packet to the stack later to avoid
651289551Szbb	 * unlocking the completion task in the middle of its work.
652289551Szbb	 */
653289551Szbb	err = buf_ring_enqueue(cq->rx_br, mbuf);
654289551Szbb	if (err != 0) {
655289551Szbb		/*
656289551Szbb		 * Failed to enqueue this mbuf.
657289551Szbb		 * We don't drop it, just schedule another task.
658289551Szbb		 */
659289551Szbb		return (err);
660289551Szbb	}
661289551Szbb
662289551Szbb	return (0);
663289551Szbb}
664289551Szbb
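/*
 * Handle a CQE_TYPE_SEND completion: unload the DMA map of the transmitted
 * buffer, free the mbuf and return the header and payload subdescriptors
 * to the send queue.  A trylock is used so that this non-sleepable
 * completion context never blocks on the TX lock.
 */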
665289551Szbbstatic int
666289551Szbbnicvf_snd_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
667289551Szbb    struct cqe_send_t *cqe_tx, int cqe_type)
668289551Szbb{
669289551Szbb	bus_dmamap_t dmap;
670289551Szbb	struct mbuf *mbuf;
671289551Szbb	struct snd_queue *sq;
672289551Szbb	struct sq_hdr_subdesc *hdr;
673289551Szbb
674289551Szbb	mbuf = NULL;
675289551Szbb	sq = &nic->qs->sq[cqe_tx->sq_idx];
676289551Szbb	/* Avoid blocking here since we hold a non-sleepable NICVF_CMP_LOCK */
677289551Szbb	if (NICVF_TX_TRYLOCK(sq) == 0)
678289551Szbb		return (EAGAIN);
679289551Szbb
680289551Szbb	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
681289551Szbb	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
682289551Szbb		NICVF_TX_UNLOCK(sq);
683289551Szbb		return (0);
684289551Szbb	}
685289551Szbb
686289551Szbb	dprintf(nic->dev,
687289551Szbb	    "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
688289551Szbb	    __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
689289551Szbb	    cqe_tx->sqe_ptr, hdr->subdesc_cnt);
690289551Szbb
691289551Szbb	dmap = (bus_dmamap_t)sq->snd_buff[cqe_tx->sqe_ptr].dmap;
692289551Szbb	bus_dmamap_unload(sq->snd_buff_dmat, dmap);
693289551Szbb
694289551Szbb	mbuf = (struct mbuf *)sq->snd_buff[cqe_tx->sqe_ptr].mbuf;
695289551Szbb	if (mbuf != NULL) {
696289551Szbb		m_freem(mbuf);
697289551Szbb		sq->snd_buff[cqe_tx->sqe_ptr].mbuf = NULL;
698289551Szbb	}
699289551Szbb
700289551Szbb	nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
701289551Szbb	nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
702289551Szbb
703289551Szbb	NICVF_TX_UNLOCK(sq);
704289551Szbb	return (0);
705289551Szbb}
706289551Szbb
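/*
 * Drain a completion queue: walk the valid CQEs starting at the HW head
 * pointer, dispatch them to the Rx/Tx handlers, acknowledge the processed
 * entries through the doorbell and finally push the queued Rx mbufs to the
 * stack outside of the completion lock.  A non-zero return value means the
 * queue could not be fully processed and the task should be rescheduled.
 */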
707289551Szbbstatic int
708289551Szbbnicvf_cq_intr_handler(struct nicvf *nic, uint8_t cq_idx)
709289551Szbb{
710289551Szbb	struct mbuf *mbuf;
711289551Szbb	struct ifnet *ifp;
712289551Szbb	int processed_cqe, work_done = 0, tx_done = 0;
713289551Szbb	int cqe_count, cqe_head;
714289551Szbb	struct queue_set *qs = nic->qs;
715289551Szbb	struct cmp_queue *cq = &qs->cq[cq_idx];
716289551Szbb	struct cqe_rx_t *cq_desc;
717289551Szbb	int cmp_err;
718289551Szbb
719289551Szbb	NICVF_CMP_LOCK(cq);
720289551Szbb	cmp_err = 0;
721289551Szbb	processed_cqe = 0;
722289551Szbb	/* Get the number of valid CQ entries to process */
723289551Szbb	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
724289551Szbb	cqe_count &= CQ_CQE_COUNT;
725289551Szbb	if (cqe_count == 0)
726289551Szbb		goto out;
727289551Szbb
728289551Szbb	/* Get head of the valid CQ entries */
729289551Szbb	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
730289551Szbb	cqe_head &= 0xFFFF;
731289551Szbb
732289551Szbb	dprintf(nic->dev, "%s CQ%d cqe_count %d cqe_head %d\n",
733289551Szbb	    __func__, cq_idx, cqe_count, cqe_head);
734289551Szbb	while (processed_cqe < cqe_count) {
735289551Szbb		/* Get the CQ descriptor */
736289551Szbb		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
737289551Szbb		cqe_head++;
738289551Szbb		cqe_head &= (cq->dmem.q_len - 1);
739289551Szbb
740289551Szbb		dprintf(nic->dev, "CQ%d cq_desc->cqe_type %d\n", cq_idx,
741289551Szbb		    cq_desc->cqe_type);
742289551Szbb		switch (cq_desc->cqe_type) {
743289551Szbb		case CQE_TYPE_RX:
744289551Szbb			cmp_err = nicvf_rcv_pkt_handler(nic, cq, cq_desc,
745289551Szbb			    CQE_TYPE_RX);
746289551Szbb			if (__predict_false(cmp_err != 0)) {
747289551Szbb				/*
748289551Szbb				 * Oops. Cannot finish now.
749289551Szbb				 * Let's try again later.
750289551Szbb				 */
751289551Szbb				goto done;
752289551Szbb			}
753289551Szbb			work_done++;
754289551Szbb			break;
755289551Szbb		case CQE_TYPE_SEND:
756289551Szbb			cmp_err = nicvf_snd_pkt_handler(nic, cq,
757289551Szbb			    (void *)cq_desc, CQE_TYPE_SEND);
758289551Szbb			if (__predict_false(cmp_err != 0)) {
759289551Szbb				/*
760289551Szbb				 * Oops. Cannot finish now.
761289551Szbb				 * Let's try again later.
762289551Szbb				 */
763289551Szbb				goto done;
764289551Szbb			}
765289551Szbb
766289551Szbb			tx_done++;
767289551Szbb			break;
768289551Szbb		case CQE_TYPE_INVALID:
769289551Szbb		case CQE_TYPE_RX_SPLIT:
770289551Szbb		case CQE_TYPE_RX_TCP:
771289551Szbb		case CQE_TYPE_SEND_PTP:
772289551Szbb			/* Ignore for now */
773289551Szbb			break;
774289551Szbb		}
775289551Szbb		processed_cqe++;
776289551Szbb	}
777289551Szbbdone:
778289551Szbb	dprintf(nic->dev,
779289551Szbb	    "%s CQ%d processed_cqe %d work_done %d\n",
780289551Szbb	    __func__, cq_idx, processed_cqe, work_done);
781289551Szbb
782289551Szbb	/* Ring doorbell to inform H/W to reuse processed CQEs */
783289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, cq_idx, processed_cqe);
784289551Szbb
785289551Szbb	if ((tx_done > 0) &&
786289551Szbb	    ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0)) {
787289551Szbb		/* Reenable TXQ if it was stopped earlier due to SQ full */
788289551Szbb		if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
789289551Szbb	}
790289551Szbbout:
791289551Szbb	NICVF_CMP_UNLOCK(cq);
792289551Szbb
793289551Szbb	ifp = nic->ifp;
794289551Szbb	/* Push received MBUFs to the stack */
795289551Szbb	while (!buf_ring_empty(cq->rx_br)) {
796289551Szbb		mbuf = buf_ring_dequeue_mc(cq->rx_br);
797289551Szbb		if (__predict_true(mbuf != NULL))
798289551Szbb			(*ifp->if_input)(ifp, mbuf);
799289551Szbb	}
800289551Szbb
801289551Szbb	return (cmp_err);
802289551Szbb}
803289551Szbb
804289551Szbb/*
805289551Szbb * Qset error interrupt handler
806289551Szbb *
807289551Szbb * As of now only CQ errors are handled
808289551Szbb */
809289551Szbbstatic void
810289551Szbbnicvf_qs_err_task(void *arg, int pending)
811289551Szbb{
812289551Szbb	struct nicvf *nic;
813289551Szbb	struct queue_set *qs;
814289551Szbb	int qidx;
815289551Szbb	uint64_t status;
816289551Szbb	boolean_t enable = TRUE;
817289551Szbb
818289551Szbb	nic = (struct nicvf *)arg;
819289551Szbb	qs = nic->qs;
820289551Szbb
821289551Szbb	/* Deactivate network interface */
822289551Szbb	if_setdrvflagbits(nic->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
823289551Szbb
824289551Szbb	/* Check if it is CQ err */
825289551Szbb	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
826289551Szbb		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
827289551Szbb		    qidx);
828289551Szbb		if ((status & CQ_ERR_MASK) == 0)
829289551Szbb			continue;
830289551Szbb		/* Process already queued CQEs and reconfig CQ */
831289551Szbb		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
832289551Szbb		nicvf_sq_disable(nic, qidx);
833289551Szbb		(void)nicvf_cq_intr_handler(nic, qidx);
834289551Szbb		nicvf_cmp_queue_config(nic, qs, qidx, enable);
835289551Szbb		nicvf_sq_free_used_descs(nic, &qs->sq[qidx], qidx);
836289551Szbb		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
837289551Szbb		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
838289551Szbb	}
839289551Szbb
840289551Szbb	if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
841289551Szbb	/* Re-enable Qset error interrupt */
842289551Szbb	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
843289551Szbb}
844289551Szbb
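/*
 * Completion queue deferred task: process pending CQEs, re-arm the CQ
 * head pointer and interrupt, and reschedule itself if the handler could
 * not drain the queue completely.
 */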
845289551Szbbstatic void
846289551Szbbnicvf_cmp_task(void *arg, int pending)
847289551Szbb{
848289551Szbb	uint64_t cq_head;
849289551Szbb	struct cmp_queue *cq;
850289551Szbb	struct nicvf *nic;
851289551Szbb	int cmp_err;
852289551Szbb
853289551Szbb	cq = (struct cmp_queue *)arg;
854289551Szbb	nic = cq->nic;
855289551Szbb
856289551Szbb	/* Handle CQ descriptors */
857289551Szbb	cmp_err = nicvf_cq_intr_handler(nic, cq->idx);
858289551Szbb	/* Re-enable interrupts */
859289551Szbb	cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq->idx);
860289551Szbb	nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
861289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD, cq->idx, cq_head);
862289551Szbb
863289551Szbb	if (__predict_false(cmp_err != 0)) {
864289551Szbb		/*
865289551Szbb		 * Reschedule the task here since we did not
866289551Szbb		 * process the entire CQ due to a Tx or Rx CQ parse error.
867289551Szbb		 */
868289551Szbb		taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task);
869289551Szbb
870289551Szbb	}
871289551Szbb
872289551Szbb	/* Reenable interrupt (previously disabled in nicvf_intr_handler()) */
873289551Szbb	nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->idx);
874289551Szbb
875289551Szbb}
876289551Szbb
877289550Szbb/* Initialize completion queue */
878289551Szbbstatic int
879289551Szbbnicvf_init_cmp_queue(struct nicvf *nic, struct cmp_queue *cq, int q_len,
880289551Szbb    int qidx)
881289550Szbb{
882289550Szbb	int err;
883289550Szbb
884289551Szbb	/* Initialize lock */
885289551Szbb	snprintf(cq->mtx_name, sizeof(cq->mtx_name), "%s: CQ(%d) lock",
886289551Szbb	    device_get_nameunit(nic->dev), qidx);
887289551Szbb	mtx_init(&cq->mtx, cq->mtx_name, NULL, MTX_DEF);
888289551Szbb
889289550Szbb	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
890289550Szbb				     NICVF_CQ_BASE_ALIGN_BYTES);
891289550Szbb
892289551Szbb	if (err != 0) {
893289551Szbb		device_printf(nic->dev,
894289551Szbb		    "Could not allocate DMA memory for CQ\n");
895289551Szbb		return (err);
896289551Szbb	}
897289551Szbb
898289550Szbb	cq->desc = cq->dmem.base;
899289550Szbb	cq->thresh = CMP_QUEUE_CQE_THRESH;
900289551Szbb	cq->nic = nic;
901289551Szbb	cq->idx = qidx;
902289550Szbb	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
903289550Szbb
904289551Szbb	cq->rx_br = buf_ring_alloc(CMP_QUEUE_LEN * 8, M_DEVBUF, M_WAITOK,
905289551Szbb	    &cq->mtx);
906289551Szbb
907289551Szbb	/* Allocate taskqueue */
908289551Szbb	TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq);
909289551Szbb	cq->cmp_taskq = taskqueue_create_fast("nicvf_cmp_taskq", M_WAITOK,
910289551Szbb	    taskqueue_thread_enqueue, &cq->cmp_taskq);
911289551Szbb	taskqueue_start_threads(&cq->cmp_taskq, 1, PI_NET, "%s: cmp_taskq(%d)",
912289551Szbb	    device_get_nameunit(nic->dev), qidx);
913289551Szbb
914289551Szbb	return (0);
915289550Szbb}
916289550Szbb
917289551Szbbstatic void
918289551Szbbnicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
919289550Szbb{
920289551Szbb
921289551Szbb	if (cq == NULL)
922289550Szbb		return;
923289551Szbb	/*
924289551Szbb	 * The completion queue itself should be disabled by now
925289551Szbb	 * (ref. nicvf_snd_queue_config()).
926289551Szbb	 * Ensure that it is safe to free it, or panic otherwise.
927289551Szbb	 */
928289551Szbb	if (cq->enable)
929289551Szbb		panic("%s: Trying to free working CQ(%d)", __func__, cq->idx);
930289550Szbb
931289551Szbb	if (cq->cmp_taskq != NULL) {
932289551Szbb		/* Remove task */
933289551Szbb		while (taskqueue_cancel(cq->cmp_taskq, &cq->cmp_task, NULL) != 0)
934289551Szbb			taskqueue_drain(cq->cmp_taskq, &cq->cmp_task);
935289551Szbb
936289551Szbb		taskqueue_free(cq->cmp_taskq);
937289551Szbb		cq->cmp_taskq = NULL;
938289551Szbb	}
939289551Szbb	/*
940289551Szbb	 * The completion task may have re-enabled the CQ interrupt,
941289551Szbb	 * so disable it now that the task has finished processing.
942289551Szbb	 * It is safe to do so since the corresponding CQ
943289551Szbb	 * was already disabled.
944289551Szbb	 */
945289551Szbb	nicvf_disable_intr(nic, NICVF_INTR_CQ, cq->idx);
946289551Szbb	nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
947289551Szbb
948289551Szbb	NICVF_CMP_LOCK(cq);
949289550Szbb	nicvf_free_q_desc_mem(nic, &cq->dmem);
950289551Szbb	drbr_free(cq->rx_br, M_DEVBUF);
951289551Szbb	NICVF_CMP_UNLOCK(cq);
952289551Szbb	mtx_destroy(&cq->mtx);
953289551Szbb	memset(cq->mtx_name, 0, sizeof(cq->mtx_name));
954289550Szbb}
955289550Szbb
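/*
 * Transmit deferred task: drain mbufs queued on the per-SQ buf_ring and
 * hand them to the hardware via nicvf_tx_mbuf_locked() while holding the
 * TX lock.
 */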
956289551Szbbstatic void
957289551Szbbnicvf_snd_task(void *arg, int pending)
958289551Szbb{
959289551Szbb	struct snd_queue *sq = (struct snd_queue *)arg;
960289551Szbb	struct mbuf *mbuf;
961289551Szbb
962289551Szbb	NICVF_TX_LOCK(sq);
963289551Szbb	while (1) {
964289551Szbb		mbuf = drbr_dequeue(NULL, sq->br);
965289551Szbb		if (mbuf == NULL)
966289551Szbb			break;
967289551Szbb
968289551Szbb		if (nicvf_tx_mbuf_locked(sq, mbuf) != 0) {
969289551Szbb			/* XXX ARM64TODO: Increase Tx drop counter */
970289551Szbb			m_freem(mbuf);
971289551Szbb			break;
972289551Szbb		}
973289551Szbb	}
974289551Szbb	NICVF_TX_UNLOCK(sq);
975289551Szbb}
976289551Szbb
977289550Szbb/* Initialize transmit queue */
978289551Szbbstatic int
979289551Szbbnicvf_init_snd_queue(struct nicvf *nic, struct snd_queue *sq, int q_len,
980289551Szbb    int qidx)
981289550Szbb{
982289551Szbb	size_t i;
983289550Szbb	int err;
984289550Szbb
985289551Szbb	/* Initialize TX lock for this queue */
986289551Szbb	snprintf(sq->mtx_name, sizeof(sq->mtx_name), "%s: SQ(%d) lock",
987289551Szbb	    device_get_nameunit(nic->dev), qidx);
988289551Szbb	mtx_init(&sq->mtx, sq->mtx_name, NULL, MTX_DEF);
989289551Szbb
990289551Szbb	NICVF_TX_LOCK(sq);
991289551Szbb	/* Allocate buffer ring */
992289551Szbb	sq->br = buf_ring_alloc(q_len / MIN_SQ_DESC_PER_PKT_XMIT, M_DEVBUF,
993289551Szbb	    M_NOWAIT, &sq->mtx);
994289551Szbb	if (sq->br == NULL) {
995289551Szbb		device_printf(nic->dev,
996289551Szbb		    "ERROR: Could not set up buf ring for SQ(%d)\n", qidx);
997289551Szbb		err = ENOMEM;
998289551Szbb		goto error;
999289551Szbb	}
1000289551Szbb
1001289551Szbb	/* Allocate DMA memory for Tx descriptors */
1002289550Szbb	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
1003289550Szbb				     NICVF_SQ_BASE_ALIGN_BYTES);
1004289551Szbb	if (err != 0) {
1005289551Szbb		device_printf(nic->dev,
1006289551Szbb		    "Could not allocate DMA memory for SQ\n");
1007289551Szbb		goto error;
1008289551Szbb	}
1009289550Szbb
1010289550Szbb	sq->desc = sq->dmem.base;
1011289551Szbb	sq->head = sq->tail = 0;
1012289551Szbb	atomic_store_rel_int(&sq->free_cnt, q_len - 1);
1013289550Szbb	sq->thresh = SND_QUEUE_THRESH;
1014289551Szbb	sq->idx = qidx;
1015289551Szbb	sq->nic = nic;
1016289550Szbb
1017289551Szbb	/*
1018289551Szbb	 * Allocate DMA maps for Tx buffers
1019289551Szbb	 */
1020289550Szbb
1021289551Szbb	/* Create DMA tag first */
1022289551Szbb	err = bus_dma_tag_create(
1023289551Szbb	    bus_get_dma_tag(nic->dev),		/* parent tag */
1024289551Szbb	    1,					/* alignment */
1025289551Szbb	    0,					/* boundary */
1026289551Szbb	    BUS_SPACE_MAXADDR,			/* lowaddr */
1027289551Szbb	    BUS_SPACE_MAXADDR,			/* highaddr */
1028289551Szbb	    NULL, NULL,				/* filtfunc, filtfuncarg */
1029289551Szbb	    NICVF_TXBUF_MAXSIZE,		/* maxsize */
1030289551Szbb	    NICVF_TXBUF_NSEGS,			/* nsegments */
1031289551Szbb	    MCLBYTES,				/* maxsegsize */
1032289551Szbb	    0,					/* flags */
1033289551Szbb	    NULL, NULL,				/* lockfunc, lockfuncarg */
1034289551Szbb	    &sq->snd_buff_dmat);		/* dmat */
1035289551Szbb
1036289551Szbb	if (err != 0) {
1037289551Szbb		device_printf(nic->dev,
1038289551Szbb		    "Failed to create busdma tag for Tx buffers\n");
1039289551Szbb		goto error;
1040289551Szbb	}
1041289551Szbb
1042289551Szbb	/* Allocate send buffers array */
1043289551Szbb	sq->snd_buff = malloc(sizeof(*sq->snd_buff) * q_len, M_NICVF,
1044289551Szbb	    (M_NOWAIT | M_ZERO));
1045289551Szbb	if (sq->snd_buff == NULL) {
1046289551Szbb		device_printf(nic->dev,
1047289551Szbb		    "Could not allocate memory for Tx buffers array\n");
1048289551Szbb		err = ENOMEM;
1049289551Szbb		goto error;
1050289551Szbb	}
1051289551Szbb
1052289551Szbb	/* Now populate maps */
1053289551Szbb	for (i = 0; i < q_len; i++) {
1054289551Szbb		err = bus_dmamap_create(sq->snd_buff_dmat, 0,
1055289551Szbb		    &sq->snd_buff[i].dmap);
1056289551Szbb		if (err != 0) {
1057289551Szbb			device_printf(nic->dev,
1058289551Szbb			    "Failed to create DMA maps for Tx buffers\n");
1059289551Szbb			goto error;
1060289551Szbb		}
1061289551Szbb	}
1062289551Szbb	NICVF_TX_UNLOCK(sq);
1063289551Szbb
1064289551Szbb	/* Allocate taskqueue */
1065289551Szbb	TASK_INIT(&sq->snd_task, 0, nicvf_snd_task, sq);
1066289551Szbb	sq->snd_taskq = taskqueue_create_fast("nicvf_snd_taskq", M_WAITOK,
1067289551Szbb	    taskqueue_thread_enqueue, &sq->snd_taskq);
1068289551Szbb	taskqueue_start_threads(&sq->snd_taskq, 1, PI_NET, "%s: snd_taskq(%d)",
1069289551Szbb	    device_get_nameunit(nic->dev), qidx);
1070289551Szbb
1071289551Szbb	return (0);
1072289551Szbberror:
1073289551Szbb	NICVF_TX_UNLOCK(sq);
1074289551Szbb	return (err);
1075289550Szbb}
1076289550Szbb
1077289551Szbbstatic void
1078289551Szbbnicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
1079289550Szbb{
1080289551Szbb	struct queue_set *qs = nic->qs;
1081289551Szbb	size_t i;
1082289551Szbb	int err;
1083289551Szbb
1084289551Szbb	if (sq == NULL)
1085289550Szbb		return;
1086289550Szbb
1087289551Szbb	if (sq->snd_taskq != NULL) {
1088289551Szbb		/* Remove task */
1089289551Szbb		while (taskqueue_cancel(sq->snd_taskq, &sq->snd_task, NULL) != 0)
1090289551Szbb			taskqueue_drain(sq->snd_taskq, &sq->snd_task);
1091289550Szbb
1092289551Szbb		taskqueue_free(sq->snd_taskq);
1093289551Szbb		sq->snd_taskq = NULL;
1094289551Szbb	}
1095289551Szbb
1096289551Szbb	NICVF_TX_LOCK(sq);
1097289551Szbb	if (sq->snd_buff_dmat != NULL) {
1098289551Szbb		if (sq->snd_buff != NULL) {
1099289551Szbb			for (i = 0; i < qs->sq_len; i++) {
1100289551Szbb				m_freem(sq->snd_buff[i].mbuf);
1101289551Szbb				sq->snd_buff[i].mbuf = NULL;
1102289551Szbb
1103289551Szbb				bus_dmamap_unload(sq->snd_buff_dmat,
1104289551Szbb				    sq->snd_buff[i].dmap);
1105289551Szbb				err = bus_dmamap_destroy(sq->snd_buff_dmat,
1106289551Szbb				    sq->snd_buff[i].dmap);
1107289551Szbb				/*
1108289551Szbb				 * If bus_dmamap_destroy fails it can cause
1109289551Szbb				 * a random panic later if the tag is also
1110289551Szbb				 * destroyed in the process.
1111289551Szbb				 */
1112289551Szbb				KASSERT(err == 0,
1113289551Szbb				    ("%s: Could not destroy DMA map for SQ",
1114289551Szbb				    __func__));
1115289551Szbb			}
1116289551Szbb		}
1117289551Szbb
1118289551Szbb		free(sq->snd_buff, M_NICVF);
1119289551Szbb
1120289551Szbb		err = bus_dma_tag_destroy(sq->snd_buff_dmat);
1121289551Szbb		KASSERT(err == 0,
1122289551Szbb		    ("%s: Trying to destroy BUSY DMA tag", __func__));
1123289551Szbb	}
1124289551Szbb
1125289551Szbb	/* Free private driver ring for this send queue */
1126289551Szbb	if (sq->br != NULL)
1127289551Szbb		drbr_free(sq->br, M_DEVBUF);
1128289551Szbb
1129289551Szbb	if (sq->dmem.base != NULL)
1130289551Szbb		nicvf_free_q_desc_mem(nic, &sq->dmem);
1131289551Szbb
1132289551Szbb	NICVF_TX_UNLOCK(sq);
1133289551Szbb	/* Destroy Tx lock */
1134289551Szbb	mtx_destroy(&sq->mtx);
1135289551Szbb	memset(sq->mtx_name, 0, sizeof(sq->mtx_name));
1136289550Szbb}
1137289550Szbb
1138289551Szbbstatic void
1139289551Szbbnicvf_reclaim_snd_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
1140289550Szbb{
1141289551Szbb
1142289550Szbb	/* Disable send queue */
1143289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
1144289550Szbb	/* Check if SQ is stopped */
1145289550Szbb	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
1146289550Szbb		return;
1147289550Szbb	/* Reset send queue */
1148289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
1149289550Szbb}
1150289550Szbb
1151289551Szbbstatic void
1152289551Szbbnicvf_reclaim_rcv_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
1153289550Szbb{
1154289550Szbb	union nic_mbx mbx = {};
1155289550Szbb
1156289550Szbb	/* Make sure all packets in the pipeline are written back into mem */
1157289550Szbb	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
1158289550Szbb	nicvf_send_msg_to_pf(nic, &mbx);
1159289550Szbb}
1160289550Szbb
1161289551Szbbstatic void
1162289551Szbbnicvf_reclaim_cmp_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
1163289550Szbb{
1164289551Szbb
1165289550Szbb	/* Disable timer threshold (doesn't get reset upon CQ reset) */
1166289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
1167289550Szbb	/* Disable completion queue */
1168289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
1169289550Szbb	/* Reset completion queue */
1170289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
1171289550Szbb}
1172289550Szbb
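/*
 * Quiesce and reset an RBDR: record the head and tail pointers so that the
 * attached buffers can be freed later, reset the FIFO if it is in the FAIL
 * state, then disable the ring and wait for the prefetch machinery to go
 * idle before issuing the final reset.
 */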
1173289551Szbbstatic void
1174289551Szbbnicvf_reclaim_rbdr(struct nicvf *nic, struct rbdr *rbdr, int qidx)
1175289550Szbb{
1176289551Szbb	uint64_t tmp, fifo_state;
1177289550Szbb	int timeout = 10;
1178289550Szbb
1179289550Szbb	/* Save head and tail pointers for freeing up buffers */
1180289551Szbb	rbdr->head =
1181289551Szbb	    nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
1182289551Szbb	rbdr->tail =
1183289551Szbb	    nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
1184289550Szbb
1185289551Szbb	/*
1186289551Szbb	 * If RBDR FIFO is in 'FAIL' state then do a reset first
1187289550Szbb	 * before reclaiming.
1188289550Szbb	 */
1189289550Szbb	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
1190289551Szbb	if (((fifo_state >> 62) & 0x03) == 0x3) {
1191289550Szbb		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
1192289551Szbb		    qidx, NICVF_RBDR_RESET);
1193289551Szbb	}
1194289550Szbb
1195289550Szbb	/* Disable RBDR */
1196289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
1197289550Szbb	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
1198289550Szbb		return;
1199289550Szbb	while (1) {
1200289550Szbb		tmp = nicvf_queue_reg_read(nic,
1201289551Szbb		    NIC_QSET_RBDR_0_1_PREFETCH_STATUS, qidx);
1202289550Szbb		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
1203289550Szbb			break;
1204289551Szbb
1205289551Szbb		DELAY(1000);
1206289550Szbb		timeout--;
1207289550Szbb		if (!timeout) {
1208289551Szbb			device_printf(nic->dev,
1209289551Szbb			    "Failed polling on prefetch status\n");
1210289550Szbb			return;
1211289550Szbb		}
1212289550Szbb	}
1213289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
1214289551Szbb	    NICVF_RBDR_RESET);
1215289550Szbb
1216289550Szbb	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
1217289550Szbb		return;
1218289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
1219289550Szbb	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
1220289550Szbb		return;
1221289550Szbb}
1222289550Szbb
1223289550Szbb/* Configures receive queue */
1224289551Szbbstatic void
1225289551Szbbnicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
1226289551Szbb    int qidx, bool enable)
1227289550Szbb{
1228289550Szbb	union nic_mbx mbx = {};
1229289550Szbb	struct rcv_queue *rq;
1230289550Szbb	struct rq_cfg rq_cfg;
1231289550Szbb
1232289550Szbb	rq = &qs->rq[qidx];
1233289550Szbb	rq->enable = enable;
1234289550Szbb
1235289550Szbb	/* Disable receive queue */
1236289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
1237289550Szbb
1238289550Szbb	if (!rq->enable) {
1239289550Szbb		nicvf_reclaim_rcv_queue(nic, qs, qidx);
1240289550Szbb		return;
1241289550Szbb	}
1242289550Szbb
1243289550Szbb	rq->cq_qs = qs->vnic_id;
1244289550Szbb	rq->cq_idx = qidx;
1245289550Szbb	rq->start_rbdr_qs = qs->vnic_id;
1246289550Szbb	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
1247289550Szbb	rq->cont_rbdr_qs = qs->vnic_id;
1248289550Szbb	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
1249289550Szbb	/* All writes of RBDR data to be loaded into L2 cache as well */
1250289550Szbb	rq->caching = 1;
1251289550Szbb
1252289550Szbb	/* Send a mailbox msg to PF to config RQ */
1253289550Szbb	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
1254289550Szbb	mbx.rq.qs_num = qs->vnic_id;
1255289550Szbb	mbx.rq.rq_num = qidx;
1256289550Szbb	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
1257289551Szbb	    (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
1258289551Szbb	    (rq->cont_qs_rbdr_idx << 8) | (rq->start_rbdr_qs << 1) |
1259289551Szbb	    (rq->start_qs_rbdr_idx);
1260289550Szbb	nicvf_send_msg_to_pf(nic, &mbx);
1261289550Szbb
1262289550Szbb	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
1263289551Szbb	mbx.rq.cfg = (1UL << 63) | (1UL << 62) | (qs->vnic_id << 0);
1264289550Szbb	nicvf_send_msg_to_pf(nic, &mbx);
1265289550Szbb
1266289551Szbb	/*
1267289551Szbb	 * RQ drop config
1268289550Szbb	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
1269289550Szbb	 */
1270289550Szbb	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
1271289551Szbb	mbx.rq.cfg = (1UL << 62) | (RQ_CQ_DROP << 8);
1272289550Szbb	nicvf_send_msg_to_pf(nic, &mbx);
1273289550Szbb
1274289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
1275289550Szbb
1276289550Szbb	/* Enable Receive queue */
1277289550Szbb	rq_cfg.ena = 1;
1278289550Szbb	rq_cfg.tcp_ena = 0;
1279289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx,
1280289551Szbb	    *(uint64_t *)&rq_cfg);
1281289550Szbb}
1282289550Szbb
1283289550Szbb/* Configures completion queue */
1284289551Szbbstatic void
1285289551Szbbnicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
1286289551Szbb    int qidx, boolean_t enable)
1287289550Szbb{
1288289550Szbb	struct cmp_queue *cq;
1289289550Szbb	struct cq_cfg cq_cfg;
1290289550Szbb
1291289550Szbb	cq = &qs->cq[qidx];
1292289550Szbb	cq->enable = enable;
1293289550Szbb
1294289550Szbb	if (!cq->enable) {
1295289550Szbb		nicvf_reclaim_cmp_queue(nic, qs, qidx);
1296289550Szbb		return;
1297289550Szbb	}
1298289550Szbb
1299289550Szbb	/* Reset completion queue */
1300289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
1301289550Szbb
1302289550Szbb	/* Set completion queue base address */
1303289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx,
1304289551Szbb	    (uint64_t)(cq->dmem.phys_base));
1305289550Szbb
1306289550Szbb	/* Enable Completion queue */
1307289550Szbb	cq_cfg.ena = 1;
1308289550Szbb	cq_cfg.reset = 0;
1309289550Szbb	cq_cfg.caching = 0;
1310289550Szbb	cq_cfg.qsize = CMP_QSIZE;
1311289550Szbb	cq_cfg.avg_con = 0;
1312289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(uint64_t *)&cq_cfg);
1313289550Szbb
1314289550Szbb	/* Set threshold value for interrupt generation */
1315289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
1316289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx,
1317289551Szbb	    nic->cq_coalesce_usecs);
1318289550Szbb}
1319289550Szbb
1320289550Szbb/* Configures transmit queue */
1321289551Szbbstatic void
1322289551Szbbnicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx,
1323289551Szbb    boolean_t enable)
1324289550Szbb{
1325289550Szbb	union nic_mbx mbx = {};
1326289550Szbb	struct snd_queue *sq;
1327289550Szbb	struct sq_cfg sq_cfg;
1328289550Szbb
1329289550Szbb	sq = &qs->sq[qidx];
1330289550Szbb	sq->enable = enable;
1331289550Szbb
1332289550Szbb	if (!sq->enable) {
1333289550Szbb		nicvf_reclaim_snd_queue(nic, qs, qidx);
1334289550Szbb		return;
1335289550Szbb	}
1336289550Szbb
1337289550Szbb	/* Reset send queue */
1338289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
1339289550Szbb
1340289550Szbb	sq->cq_qs = qs->vnic_id;
1341289550Szbb	sq->cq_idx = qidx;
1342289550Szbb
1343289550Szbb	/* Send a mailbox msg to PF to config SQ */
1344289550Szbb	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
1345289550Szbb	mbx.sq.qs_num = qs->vnic_id;
1346289550Szbb	mbx.sq.sq_num = qidx;
1347289550Szbb	mbx.sq.sqs_mode = nic->sqs_mode;
1348289550Szbb	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
1349289550Szbb	nicvf_send_msg_to_pf(nic, &mbx);
1350289550Szbb
1351289550Szbb	/* Set queue base address */
1352289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx,
1353289551Szbb	    (uint64_t)(sq->dmem.phys_base));
1354289550Szbb
1355289550Szbb	/* Enable send queue  & set queue size */
1356289550Szbb	sq_cfg.ena = 1;
1357289550Szbb	sq_cfg.reset = 0;
1358289550Szbb	sq_cfg.ldwb = 0;
1359289550Szbb	sq_cfg.qsize = SND_QSIZE;
1360289550Szbb	sq_cfg.tstmp_bgx_intf = 0;
1361289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(uint64_t *)&sq_cfg);
1362289550Szbb
1363289550Szbb	/* Set threshold value for interrupt generation */
1364289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
1365289550Szbb}
1366289550Szbb
1367289550Szbb/* Configures receive buffer descriptor ring */
1368289551Szbbstatic void
1369289551Szbbnicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, int qidx,
1370289551Szbb    boolean_t enable)
1371289550Szbb{
1372289550Szbb	struct rbdr *rbdr;
1373289550Szbb	struct rbdr_cfg rbdr_cfg;
1374289550Szbb
1375289550Szbb	rbdr = &qs->rbdr[qidx];
1376289550Szbb	nicvf_reclaim_rbdr(nic, rbdr, qidx);
1377289550Szbb	if (!enable)
1378289550Szbb		return;
1379289550Szbb
1380289550Szbb	/* Set descriptor base address */
1381289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx,
1382289551Szbb	    (uint64_t)(rbdr->dmem.phys_base));
1383289550Szbb
1384289550Szbb	/* Enable RBDR  & set queue size */
1385289550Szbb	/* Buffer size should be in multiples of 128 bytes */
1386289550Szbb	rbdr_cfg.ena = 1;
1387289550Szbb	rbdr_cfg.reset = 0;
1388289550Szbb	rbdr_cfg.ldwb = 0;
1389289550Szbb	rbdr_cfg.qsize = RBDR_SIZE;
1390289550Szbb	rbdr_cfg.avg_con = 0;
1391289550Szbb	rbdr_cfg.lines = rbdr->dma_size / 128;
1392289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
1393289551Szbb	    *(uint64_t *)&rbdr_cfg);
1394289550Szbb
1395289550Szbb	/* Notify HW */
1396289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, qidx,
1397289551Szbb	    qs->rbdr_len - 1);
1398289550Szbb
1399289550Szbb	/* Set threshold value for interrupt generation */
1400289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, qidx,
1401289551Szbb	    rbdr->thresh - 1);
1402289550Szbb}
1403289550Szbb
1404289550Szbb/* Requests PF to assign and enable Qset */
1405289551Szbbvoid
1406289551Szbbnicvf_qset_config(struct nicvf *nic, boolean_t enable)
1407289550Szbb{
1408289550Szbb	union nic_mbx mbx = {};
1409289551Szbb	struct queue_set *qs;
1410289550Szbb	struct qs_cfg *qs_cfg;
1411289550Szbb
1412289551Szbb	qs = nic->qs;
1413289551Szbb	if (qs == NULL) {
1414289551Szbb		device_printf(nic->dev,
1415289551Szbb		    "Qset is not allocated yet, cannot init queues\n");
1416289550Szbb		return;
1417289550Szbb	}
1418289550Szbb
1419289550Szbb	qs->enable = enable;
1420289550Szbb	qs->vnic_id = nic->vf_id;
1421289550Szbb
1422289550Szbb	/* Send a mailbox msg to PF to config Qset */
1423289550Szbb	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
1424289550Szbb	mbx.qs.num = qs->vnic_id;
1425289550Szbb
1426289550Szbb	mbx.qs.cfg = 0;
1427289550Szbb	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
1428289550Szbb	if (qs->enable) {
1429289550Szbb		qs_cfg->ena = 1;
1430289550Szbb		qs_cfg->vnic = qs->vnic_id;
1431289550Szbb	}
1432289550Szbb	nicvf_send_msg_to_pf(nic, &mbx);
1433289550Szbb}
1434289550Szbb
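/* Tear down all Qset resources: QS error task, RBDRs, CQs and SQs. */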
1435289551Szbbstatic void
1436289551Szbbnicvf_free_resources(struct nicvf *nic)
1437289550Szbb{
1438289550Szbb	int qidx;
1439289551Szbb	struct queue_set *qs;
1440289550Szbb
1441289551Szbb	qs = nic->qs;
1442289551Szbb	/*
1443289551Szbb	 * Remove QS error task first since it has to be dead
1444289551Szbb	 * to safely free completion queue tasks.
1445289551Szbb	 */
1446289551Szbb	if (qs->qs_err_taskq != NULL) {
1447289551Szbb		/* Shut down QS error tasks */
1448289551Szbb		while (taskqueue_cancel(qs->qs_err_taskq,
1449289551Szbb		    &qs->qs_err_task, NULL) != 0) {
1450289551Szbb			taskqueue_drain(qs->qs_err_taskq, &qs->qs_err_task);
1452289551Szbb		}
1453289551Szbb		taskqueue_free(qs->qs_err_taskq);
1454289551Szbb		qs->qs_err_taskq = NULL;
1455289551Szbb	}
1456289550Szbb	/* Free receive buffer descriptor ring */
1457289550Szbb	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1458289550Szbb		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
1459289550Szbb
1460289550Szbb	/* Free completion queue */
1461289550Szbb	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1462289550Szbb		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
1463289550Szbb
1464289550Szbb	/* Free send queue */
1465289550Szbb	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1466289550Szbb		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
1467289550Szbb}
1468289550Szbb
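/*
 * Allocate RBDRs, send and completion queues and the QS error taskqueue.
 * On failure everything allocated so far is released.
 */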
1469289551Szbbstatic int
1470289551Szbbnicvf_alloc_resources(struct nicvf *nic)
1471289550Szbb{
1472289551Szbb	struct queue_set *qs = nic->qs;
1473289550Szbb	int qidx;
1474289550Szbb
1475289550Szbb	/* Alloc receive buffer descriptor ring */
1476289550Szbb	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
1477289550Szbb		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
1478289551Szbb				    DMA_BUFFER_LEN, qidx))
1479289550Szbb			goto alloc_fail;
1480289550Szbb	}
1481289550Szbb
1482289550Szbb	/* Alloc send queue */
1483289550Szbb	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
1484289551Szbb		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
1485289550Szbb			goto alloc_fail;
1486289550Szbb	}
1487289550Szbb
1488289550Szbb	/* Alloc completion queue */
1489289550Szbb	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
1490289551Szbb		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len, qidx))
1491289550Szbb			goto alloc_fail;
1492289550Szbb	}
1493289550Szbb
1494289551Szbb	/* Allocate QS error taskqueue */
1495289551Szbb	TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic);
1496289551Szbb	qs->qs_err_taskq = taskqueue_create_fast("nicvf_qs_err_taskq", M_WAITOK,
1497289551Szbb	    taskqueue_thread_enqueue, &qs->qs_err_taskq);
1498289551Szbb	taskqueue_start_threads(&qs->qs_err_taskq, 1, PI_NET, "%s: qs_taskq",
1499289551Szbb	    device_get_nameunit(nic->dev));
1500289551Szbb
1501289551Szbb	return (0);
1502289550Szbballoc_fail:
1503289550Szbb	nicvf_free_resources(nic);
1504289551Szbb	return (ENOMEM);
1505289550Szbb}
1506289550Szbb
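/* Allocate the queue set and set the number and length of each queue type */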
1507289551Szbbint
1508289551Szbbnicvf_set_qset_resources(struct nicvf *nic)
1509289550Szbb{
1510289550Szbb	struct queue_set *qs;
1511289550Szbb
1512289551Szbb	qs = malloc(sizeof(*qs), M_NICVF, (M_ZERO | M_WAITOK));
1513289550Szbb	nic->qs = qs;
1514289550Szbb
1515289550Szbb	/* Set count of each queue */
1516289550Szbb	qs->rbdr_cnt = RBDR_CNT;
1517289551Szbb	/* Without RSS support, stay with a single receive queue */
1518289550Szbb	qs->rq_cnt = 1;
1519289551Szbb
1520289550Szbb	qs->sq_cnt = SND_QUEUE_CNT;
1521289550Szbb	qs->cq_cnt = CMP_QUEUE_CNT;
1522289550Szbb
1523289550Szbb	/* Set queue lengths */
1524289550Szbb	qs->rbdr_len = RCV_BUF_COUNT;
1525289550Szbb	qs->sq_len = SND_QUEUE_LEN;
1526289550Szbb	qs->cq_len = CMP_QUEUE_LEN;
1527289550Szbb
1528289550Szbb	nic->rx_queues = qs->rq_cnt;
1529289550Szbb	nic->tx_queues = qs->sq_cnt;
1530289550Szbb
1531289551Szbb	return (0);
1532289550Szbb}
1533289550Szbb
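/*
 * Bring the data path up or down. On enable, allocate all queue resources
 * and configure the send, completion, RBDR and receive queues; on disable,
 * deconfigure the queues and free the resources.
 */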
1534289551Szbbint
1535289551Szbbnicvf_config_data_transfer(struct nicvf *nic, boolean_t enable)
1536289550Szbb{
1537289551Szbb	boolean_t disable = FALSE;
1538289551Szbb	struct queue_set *qs;
1539289550Szbb	int qidx;
1540289550Szbb
1541289551Szbb	qs = nic->qs;
1542289551Szbb	if (qs == NULL)
1543289551Szbb		return (0);
1544289550Szbb
1545289550Szbb	if (enable) {
1546289551Szbb		if (nicvf_alloc_resources(nic) != 0)
1547289551Szbb			return (ENOMEM);
1548289550Szbb
1549289550Szbb		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1550289550Szbb			nicvf_snd_queue_config(nic, qs, qidx, enable);
1551289550Szbb		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1552289550Szbb			nicvf_cmp_queue_config(nic, qs, qidx, enable);
1553289550Szbb		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1554289550Szbb			nicvf_rbdr_config(nic, qs, qidx, enable);
1555289550Szbb		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1556289550Szbb			nicvf_rcv_queue_config(nic, qs, qidx, enable);
1557289550Szbb	} else {
1558289550Szbb		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1559289550Szbb			nicvf_rcv_queue_config(nic, qs, qidx, disable);
1560289550Szbb		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1561289550Szbb			nicvf_rbdr_config(nic, qs, qidx, disable);
1562289550Szbb		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1563289550Szbb			nicvf_snd_queue_config(nic, qs, qidx, disable);
1564289550Szbb		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1565289550Szbb			nicvf_cmp_queue_config(nic, qs, qidx, disable);
1566289550Szbb
1567289550Szbb		nicvf_free_resources(nic);
1568289550Szbb	}
1569289550Szbb
1570289551Szbb	return (0);
1571289550Szbb}
1572289550Szbb
1573289551Szbb/*
1574289551Szbb * Reserve free descriptors from the SQ
1575289550Szbb * and return the index of the first one.
1576289550Szbb */
1577289551Szbbstatic __inline int
1578289551Szbbnicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
1579289550Szbb{
1580289550Szbb	int qentry;
1581289550Szbb
1582289550Szbb	qentry = sq->tail;
1583289551Szbb	atomic_subtract_int(&sq->free_cnt, desc_cnt);
1584289550Szbb	sq->tail += desc_cnt;
1585289550Szbb	sq->tail &= (sq->dmem.q_len - 1);
1586289550Szbb
1587289551Szbb	return (qentry);
1588289550Szbb}
1589289550Szbb
1590289550Szbb/* Free descriptor back to SQ for future use */
1591289551Szbbstatic void
1592289551Szbbnicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
1593289550Szbb{
1594289551Szbb
1595289551Szbb	atomic_add_int(&sq->free_cnt, desc_cnt);
1596289550Szbb	sq->head += desc_cnt;
1597289550Szbb	sq->head &= (sq->dmem.q_len - 1);
1598289550Szbb}
1599289550Szbb
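/*
 * Advance to the next SQ entry index, wrapping around the queue length
 * (which is a power of two).
 */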
1600289551Szbbstatic __inline int
1601289551Szbbnicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
1602289550Szbb{
1603289550Szbb	qentry++;
1604289550Szbb	qentry &= (sq->dmem.q_len - 1);
1605289551Szbb	return (qentry);
1606289550Szbb}
1607289550Szbb
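/* Enable a send queue and kick its doorbell so the HW resumes processing */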
1608289551Szbbstatic void
1609289551Szbbnicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
1610289550Szbb{
1611289551Szbb	uint64_t sq_cfg;
1612289550Szbb
1613289550Szbb	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
1614289550Szbb	sq_cfg |= NICVF_SQ_EN;
1615289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
1616289550Szbb	/* Ring doorbell so that H/W restarts processing SQEs */
1617289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
1618289550Szbb}
1619289550Szbb
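/*
 * Disable a send queue. Any mbufs left on the queue are reclaimed
 * separately by nicvf_sq_free_used_descs().
 */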
1620289551Szbbstatic void
1621289551Szbbnicvf_sq_disable(struct nicvf *nic, int qidx)
1622289550Szbb{
1623289551Szbb	uint64_t sq_cfg;
1624289550Szbb
1625289550Szbb	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
1626289550Szbb	sq_cfg &= ~NICVF_SQ_EN;
1627289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
1628289550Szbb}
1629289550Szbb
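/*
 * Walk the SQ from the software head up to the hardware head and release
 * completed descriptors: unload the DMA map and free the mbuf attached to
 * each HEADER subdescriptor, then return the descriptors to the free list.
 */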
1630289551Szbbstatic void
1631289551Szbbnicvf_sq_free_used_descs(struct nicvf *nic, struct snd_queue *sq, int qidx)
1632289550Szbb{
1633289551Szbb	uint64_t head, tail;
1634289551Szbb	struct snd_buff *snd_buff;
1635289550Szbb	struct sq_hdr_subdesc *hdr;
1636289550Szbb
1637289551Szbb	NICVF_TX_LOCK(sq);
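	/*
	 * The HEAD/TAIL registers hold the descriptor index shifted left
	 * by 4 (subdescriptors are 16 bytes each), hence the shift back.
	 */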
1638289550Szbb	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
1639289550Szbb	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
1640289550Szbb	while (sq->head != head) {
1641289550Szbb		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
1642289550Szbb		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
1643289550Szbb			nicvf_put_sq_desc(sq, 1);
1644289550Szbb			continue;
1645289550Szbb		}
1646289551Szbb		snd_buff = &sq->snd_buff[sq->head];
1647289551Szbb		if (snd_buff->mbuf != NULL) {
1648289551Szbb			bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
1649289551Szbb			m_freem(snd_buff->mbuf);
1650289551Szbb			sq->snd_buff[sq->head].mbuf = NULL;
1651289551Szbb		}
1652289550Szbb		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
1653289550Szbb	}
1654289551Szbb	NICVF_TX_UNLOCK(sq);
1655289550Szbb}
1656289550Szbb
1657289551Szbb/*
1658289551Szbb * Add SQ HEADER subdescriptor.
1659289550Szbb * First subdescriptor for every send descriptor.
1660289550Szbb */
1661289551Szbbstatic __inline void
1662289550Szbbnicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
1663289551Szbb    int subdesc_cnt, struct mbuf *mbuf, int len)
1664289550Szbb{
1665289550Szbb	struct sq_hdr_subdesc *hdr;
1666289550Szbb
1667289550Szbb	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
1668289551Szbb	sq->snd_buff[qentry].mbuf = mbuf;
1669289550Szbb
1670289550Szbb	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
1671289550Szbb	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
1672289550Szbb	/* Enable notification via CQE after processing SQE */
1673289550Szbb	hdr->post_cqe = 1;
1674289550Szbb	/* No of subdescriptors following this */
1675289550Szbb	hdr->subdesc_cnt = subdesc_cnt;
1676289550Szbb	hdr->tot_len = len;
1677289550Szbb
1678289551Szbb	/* ARM64TODO: Implement HW checksums calculation */
1679289550Szbb}
1680289550Szbb
1681289551Szbb/*
1682289551Szbb * SQ GATHER subdescriptor
1683289550Szbb * Must follow HDR descriptor
1684289550Szbb */
1685289550Szbbstatic inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
1686289551Szbb					       int size, uint64_t data)
1687289550Szbb{
1688289550Szbb	struct sq_gather_subdesc *gather;
1689289550Szbb
1690289550Szbb	qentry &= (sq->dmem.q_len - 1);
1691289550Szbb	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
1692289550Szbb
1693289550Szbb	memset(gather, 0, SND_QUEUE_DESC_SIZE);
1694289550Szbb	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
1695289550Szbb	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
1696289550Szbb	gather->size = size;
1697289550Szbb	gather->addr = data;
1698289550Szbb}
1699289550Szbb
1700289551Szbb/* Put an mbuf to a SQ for packet transfer. */
1701289551Szbbstatic int
1702289551Szbbnicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf *mbuf)
1703289550Szbb{
1704289551Szbb	bus_dma_segment_t segs[256];
1705289551Szbb	struct snd_buff *snd_buff;
1706289551Szbb	size_t seg;
1707289551Szbb	int nsegs, qentry;
1708289551Szbb	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT - 1;
1709289551Szbb	int err;
1710289550Szbb
1711289551Szbb	NICVF_TX_LOCK_ASSERT(sq);
1712289551Szbb
1713289551Szbb	if (sq->free_cnt == 0)
1714289551Szbb		return (ENOBUFS);
1715289551Szbb
1716289551Szbb	snd_buff = &sq->snd_buff[sq->tail];
1717289551Szbb
1718289551Szbb	err = bus_dmamap_load_mbuf_sg(sq->snd_buff_dmat, snd_buff->dmap,
1719289551Szbb	    mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
1720289551Szbb	if (err != 0) {
1721289551Szbb		/* ARM64TODO: Add mbuf defragmenting if we lack maps */
1722289551Szbb		return (err);
1723289550Szbb	}
1724289550Szbb
1725289551Szbb	/* Set how many subdescriptors are required */
1726289551Szbb	subdesc_cnt += nsegs;
1727289550Szbb
1728289551Szbb	if (subdesc_cnt > sq->free_cnt) {
1729289551Szbb		/* ARM64TODO: Add mbuf defragmentation if we lack descriptors */
1730289551Szbb		bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
1731289551Szbb		return (ENOBUFS);
1732289551Szbb	}
1733289550Szbb
1734289550Szbb	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
1735289550Szbb
1736289550Szbb	/* Add SQ header subdesc */
1737289551Szbb	nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, mbuf,
1738289551Szbb	    mbuf->m_pkthdr.len);
1739289550Szbb
1740289550Szbb	/* Add SQ gather subdescs */
1741289551Szbb	for (seg = 0; seg < nsegs; seg++) {
1742289550Szbb		qentry = nicvf_get_nxt_sqentry(sq, qentry);
1743289551Szbb		nicvf_sq_add_gather_subdesc(sq, qentry, segs[seg].ds_len,
1744289551Szbb		    segs[seg].ds_addr);
1745289550Szbb	}
1746289550Szbb
1747289550Szbb	/* Make sure all memory stores are done before ringing the doorbell */
1748289551Szbb	bus_dmamap_sync(sq->dmem.dmat, sq->dmem.dmap, BUS_DMASYNC_PREWRITE);
1749289550Szbb
1750289551Szbb	dprintf(sq->nic->dev, "%s: sq->idx: %d, subdesc_cnt: %d\n",
1751289551Szbb	    __func__, sq->idx, subdesc_cnt);
1752289550Szbb	/* Inform HW to xmit new packet */
1753289551Szbb	nicvf_queue_reg_write(sq->nic, NIC_QSET_SQ_0_7_DOOR,
1754289551Szbb	    sq->idx, subdesc_cnt);
1755289551Szbb	return (0);
1756289550Szbb}
1757289550Szbb
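/*
 * rb_lens holds 16-bit receive buffer lengths packed four to a 64-bit
 * word; on big-endian hosts the entries within each word appear in
 * reverse order, so remap the index accordingly.
 */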
1758289551Szbbstatic __inline u_int
1759289551Szbbfrag_num(u_int i)
1760289550Szbb{
1761289551Szbb#if BYTE_ORDER == BIG_ENDIAN
1762289551Szbb	return ((i & ~3) + 3 - (i & 3));
1763289550Szbb#else
1764289551Szbb	return (i);
1765289550Szbb#endif
1766289550Szbb}
1767289550Szbb
1768289551Szbb/* Returns MBUF for a received packet */
1769289551Szbbstruct mbuf *
1770289551Szbbnicvf_get_rcv_mbuf(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1771289550Szbb{
1772289550Szbb	int frag;
1773289550Szbb	int payload_len = 0;
1774289551Szbb	struct mbuf *mbuf;
1775289551Szbb	struct mbuf *mbuf_frag;
1776289551Szbb	uint16_t *rb_lens = NULL;
1777289551Szbb	uint64_t *rb_ptrs = NULL;
1778289550Szbb
1779289551Szbb	mbuf = NULL;
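	/*
	 * Buffer lengths start at the 4th 64-bit word of the CQE and the
	 * buffer pointers at the 7th; see the struct cqe_rx_t layout.
	 */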
1780289551Szbb	rb_lens = (uint16_t *)((uint8_t *)cqe_rx + (3 * sizeof(uint64_t)));
1781289551Szbb	rb_ptrs = (uint64_t *)((uint8_t *)cqe_rx + (6 * sizeof(uint64_t)));
1782289550Szbb
1783289551Szbb	dprintf(nic->dev, "%s rb_cnt %d rb0_ptr %lx rb0_sz %d\n",
1784289551Szbb	    __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
1785289550Szbb
1786289550Szbb	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
1787289550Szbb		payload_len = rb_lens[frag_num(frag)];
1788289551Szbb		if (frag == 0) {
1789289550Szbb			/* First fragment */
1790289551Szbb			mbuf = nicvf_rb_ptr_to_mbuf(nic,
1791289551Szbb			    (*rb_ptrs - cqe_rx->align_pad));
1792289551Szbb			mbuf->m_len = payload_len;
1793289551Szbb			mbuf->m_data += cqe_rx->align_pad;
1794289551Szbb			if_setrcvif(mbuf, nic->ifp);
1795289550Szbb		} else {
1796289550Szbb			/* Add fragments */
1797289551Szbb			mbuf_frag = nicvf_rb_ptr_to_mbuf(nic, *rb_ptrs);
1798289551Szbb			m_append(mbuf, payload_len, mbuf_frag->m_data);
1799289551Szbb			m_freem(mbuf_frag);
1800289550Szbb		}
1801289550Szbb		/* Next buffer pointer */
1802289550Szbb		rb_ptrs++;
1803289550Szbb	}
1804289551Szbb
1805289551Szbb	if (__predict_true(mbuf != NULL)) {
1806289551Szbb		m_fixhdr(mbuf);
1807289551Szbb		mbuf->m_pkthdr.flowid = cqe_rx->rq_idx;
1808289551Szbb		M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
1809289551Szbb	}
1810289551Szbb
1811289551Szbb	return (mbuf);
1812289550Szbb}
1813289550Szbb
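/*
 * Interrupt enable bits are set via the ENA_W1S (write-1-to-set) register,
 * cleared via ENA_W1C (write-1-to-clear), and pending interrupts are
 * acknowledged by writing their bits to NIC_VF_INT.
 */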
1814289550Szbb/* Enable interrupt */
1815289551Szbbvoid
1816289551Szbbnicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
1817289550Szbb{
1818289551Szbb	uint64_t reg_val;
1819289550Szbb
1820289550Szbb	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
1821289550Szbb
1822289550Szbb	switch (int_type) {
1823289550Szbb	case NICVF_INTR_CQ:
1824289551Szbb		reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
1825289550Szbb		break;
1826289550Szbb	case NICVF_INTR_SQ:
1827289551Szbb		reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
1828289550Szbb		break;
1829289550Szbb	case NICVF_INTR_RBDR:
1830289551Szbb		reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1831289550Szbb		break;
1832289550Szbb	case NICVF_INTR_PKT_DROP:
1833289551Szbb		reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
1834289550Szbb		break;
1835289550Szbb	case NICVF_INTR_TCP_TIMER:
1836289551Szbb		reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
1837289550Szbb		break;
1838289550Szbb	case NICVF_INTR_MBOX:
1839289551Szbb		reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
1840289550Szbb		break;
1841289550Szbb	case NICVF_INTR_QS_ERR:
1842289551Szbb		reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
1843289550Szbb		break;
1844289550Szbb	default:
1845289551Szbb		device_printf(nic->dev,
1846289550Szbb			   "Failed to enable interrupt: unknown type\n");
1847289550Szbb		break;
1848289550Szbb	}
1849289550Szbb
1850289550Szbb	nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
1851289550Szbb}
1852289550Szbb
1853289550Szbb/* Disable interrupt */
1854289551Szbbvoid
1855289551Szbbnicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
1856289550Szbb{
1857289551Szbb	uint64_t reg_val = 0;
1858289550Szbb
1859289550Szbb	switch (int_type) {
1860289550Szbb	case NICVF_INTR_CQ:
1861289551Szbb		reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
1862289550Szbb		break;
1863289550Szbb	case NICVF_INTR_SQ:
1864289551Szbb		reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
1865289550Szbb		break;
1866289550Szbb	case NICVF_INTR_RBDR:
1867289551Szbb		reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1868289550Szbb		break;
1869289550Szbb	case NICVF_INTR_PKT_DROP:
1870289551Szbb		reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
1871289550Szbb		break;
1872289550Szbb	case NICVF_INTR_TCP_TIMER:
1873289551Szbb		reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
1874289550Szbb		break;
1875289550Szbb	case NICVF_INTR_MBOX:
1876289551Szbb		reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
1877289550Szbb		break;
1878289550Szbb	case NICVF_INTR_QS_ERR:
1879289551Szbb		reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
1880289550Szbb		break;
1881289550Szbb	default:
1882289551Szbb		device_printf(nic->dev,
1883289550Szbb			   "Failed to disable interrupt: unknown type\n");
1884289550Szbb		break;
1885289550Szbb	}
1886289550Szbb
1887289550Szbb	nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
1888289550Szbb}
1889289550Szbb
1890289550Szbb/* Clear interrupt */
1891289551Szbbvoid
1892289551Szbbnicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
1893289550Szbb{
1894289551Szbb	uint64_t reg_val = 0;
1895289550Szbb
1896289550Szbb	switch (int_type) {
1897289550Szbb	case NICVF_INTR_CQ:
1898289551Szbb		reg_val = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
1899289550Szbb		break;
1900289550Szbb	case NICVF_INTR_SQ:
1901289551Szbb		reg_val = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
1902289550Szbb		break;
1903289550Szbb	case NICVF_INTR_RBDR:
1904289551Szbb		reg_val = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1905289550Szbb		break;
1906289550Szbb	case NICVF_INTR_PKT_DROP:
1907289551Szbb		reg_val = (1UL << NICVF_INTR_PKT_DROP_SHIFT);
1908289550Szbb		break;
1909289550Szbb	case NICVF_INTR_TCP_TIMER:
1910289551Szbb		reg_val = (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
1911289550Szbb		break;
1912289550Szbb	case NICVF_INTR_MBOX:
1913289551Szbb		reg_val = (1UL << NICVF_INTR_MBOX_SHIFT);
1914289550Szbb		break;
1915289550Szbb	case NICVF_INTR_QS_ERR:
1916289551Szbb		reg_val = (1UL << NICVF_INTR_QS_ERR_SHIFT);
1917289550Szbb		break;
1918289550Szbb	default:
1919289551Szbb		device_printf(nic->dev,
1920289550Szbb			   "Failed to clear interrupt: unknown type\n");
1921289550Szbb		break;
1922289550Szbb	}
1923289550Szbb
1924289550Szbb	nicvf_reg_write(nic, NIC_VF_INT, reg_val);
1925289550Szbb}
1926289550Szbb
1927289550Szbb/* Check if interrupt is enabled */
1928289551Szbbint
1929289551Szbbnicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
1930289550Szbb{
1931289551Szbb	uint64_t reg_val;
1932289551Szbb	uint64_t mask = 0xff;
1933289550Szbb
1934289550Szbb	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
1935289550Szbb
1936289550Szbb	switch (int_type) {
1937289550Szbb	case NICVF_INTR_CQ:
1938289551Szbb		mask = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
1939289550Szbb		break;
1940289550Szbb	case NICVF_INTR_SQ:
1941289551Szbb		mask = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
1942289550Szbb		break;
1943289550Szbb	case NICVF_INTR_RBDR:
1944289551Szbb		mask = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1945289550Szbb		break;
1946289550Szbb	case NICVF_INTR_PKT_DROP:
1947289550Szbb		mask = NICVF_INTR_PKT_DROP_MASK;
1948289550Szbb		break;
1949289550Szbb	case NICVF_INTR_TCP_TIMER:
1950289550Szbb		mask = NICVF_INTR_TCP_TIMER_MASK;
1951289550Szbb		break;
1952289550Szbb	case NICVF_INTR_MBOX:
1953289550Szbb		mask = NICVF_INTR_MBOX_MASK;
1954289550Szbb		break;
1955289550Szbb	case NICVF_INTR_QS_ERR:
1956289550Szbb		mask = NICVF_INTR_QS_ERR_MASK;
1957289550Szbb		break;
1958289550Szbb	default:
1959289551Szbb		device_printf(nic->dev,
1960289550Szbb			   "Failed to check interrupt enable: unknown type\n");
1961289550Szbb		break;
1962289550Szbb	}
1963289550Szbb
1964289550Szbb	return (reg_val & mask);
1965289550Szbb}
1966289550Szbb
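/*
 * Read the hardware octet and packet counters of a receive queue; the
 * per-statistic register address is formed from the queue-set base, the
 * queue index and the statistic number.
 */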
1967289551Szbbvoid
1968289551Szbbnicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
1969289550Szbb{
1970289550Szbb	struct rcv_queue *rq;
1971289550Szbb
1972289550Szbb#define GET_RQ_STATS(reg) \
1973289550Szbb	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
1974289550Szbb			    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
1975289550Szbb
1976289550Szbb	rq = &nic->qs->rq[rq_idx];
1977289550Szbb	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
1978289550Szbb	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
1979289550Szbb}
1980289550Szbb
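/* Read the hardware octet and packet counters of a send queue */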
1981289551Szbbvoid
1982289551Szbbnicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
1983289550Szbb{
1984289550Szbb	struct snd_queue *sq;
1985289550Szbb
1986289550Szbb#define GET_SQ_STATS(reg) \
1987289550Szbb	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
1988289550Szbb			    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
1989289550Szbb
1990289550Szbb	sq = &nic->qs->sq[sq_idx];
1991289550Szbb	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
1992289550Szbb	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
1993289550Szbb}
1994289550Szbb
1995289550Szbb/* Check for errors in the receive completion queue entry; 0 means no error */
1996289551Szbbint
1997289551Szbbnicvf_check_cqe_rx_errs(struct nicvf *nic, struct cmp_queue *cq,
1998289551Szbb    struct cqe_rx_t *cqe_rx)
1999289550Szbb{
2000289550Szbb	struct nicvf_hw_stats *stats = &nic->hw_stats;
2001289550Szbb	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
2002289550Szbb
2003289550Szbb	if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
2004289550Szbb		drv_stats->rx_frames_ok++;
2005289551Szbb		return (0);
2006289550Szbb	}
2007289550Szbb
2008289550Szbb	switch (cqe_rx->err_opcode) {
2009289550Szbb	case CQ_RX_ERROP_RE_PARTIAL:
2010289550Szbb		stats->rx_bgx_truncated_pkts++;
2011289550Szbb		break;
2012289550Szbb	case CQ_RX_ERROP_RE_JABBER:
2013289550Szbb		stats->rx_jabber_errs++;
2014289550Szbb		break;
2015289550Szbb	case CQ_RX_ERROP_RE_FCS:
2016289550Szbb		stats->rx_fcs_errs++;
2017289550Szbb		break;
2018289550Szbb	case CQ_RX_ERROP_RE_RX_CTL:
2019289550Szbb		stats->rx_bgx_errs++;
2020289550Szbb		break;
2021289550Szbb	case CQ_RX_ERROP_PREL2_ERR:
2022289550Szbb		stats->rx_prel2_errs++;
2023289550Szbb		break;
2024289550Szbb	case CQ_RX_ERROP_L2_MAL:
2025289550Szbb		stats->rx_l2_hdr_malformed++;
2026289550Szbb		break;
2027289550Szbb	case CQ_RX_ERROP_L2_OVERSIZE:
2028289550Szbb		stats->rx_oversize++;
2029289550Szbb		break;
2030289550Szbb	case CQ_RX_ERROP_L2_UNDERSIZE:
2031289550Szbb		stats->rx_undersize++;
2032289550Szbb		break;
2033289550Szbb	case CQ_RX_ERROP_L2_LENMISM:
2034289550Szbb		stats->rx_l2_len_mismatch++;
2035289550Szbb		break;
2036289550Szbb	case CQ_RX_ERROP_L2_PCLP:
2037289550Szbb		stats->rx_l2_pclp++;
2038289550Szbb		break;
2039289550Szbb	case CQ_RX_ERROP_IP_NOT:
2040289550Szbb		stats->rx_ip_ver_errs++;
2041289550Szbb		break;
2042289550Szbb	case CQ_RX_ERROP_IP_CSUM_ERR:
2043289550Szbb		stats->rx_ip_csum_errs++;
2044289550Szbb		break;
2045289550Szbb	case CQ_RX_ERROP_IP_MAL:
2046289550Szbb		stats->rx_ip_hdr_malformed++;
2047289550Szbb		break;
2048289550Szbb	case CQ_RX_ERROP_IP_MALD:
2049289550Szbb		stats->rx_ip_payload_malformed++;
2050289550Szbb		break;
2051289550Szbb	case CQ_RX_ERROP_IP_HOP:
2052289550Szbb		stats->rx_ip_ttl_errs++;
2053289550Szbb		break;
2054289550Szbb	case CQ_RX_ERROP_L3_PCLP:
2055289550Szbb		stats->rx_l3_pclp++;
2056289550Szbb		break;
2057289550Szbb	case CQ_RX_ERROP_L4_MAL:
2058289550Szbb		stats->rx_l4_malformed++;
2059289550Szbb		break;
2060289550Szbb	case CQ_RX_ERROP_L4_CHK:
2061289550Szbb		stats->rx_l4_csum_errs++;
2062289550Szbb		break;
2063289550Szbb	case CQ_RX_ERROP_UDP_LEN:
2064289550Szbb		stats->rx_udp_len_errs++;
2065289550Szbb		break;
2066289550Szbb	case CQ_RX_ERROP_L4_PORT:
2067289550Szbb		stats->rx_l4_port_errs++;
2068289550Szbb		break;
2069289550Szbb	case CQ_RX_ERROP_TCP_FLAG:
2070289550Szbb		stats->rx_tcp_flag_errs++;
2071289550Szbb		break;
2072289550Szbb	case CQ_RX_ERROP_TCP_OFFSET:
2073289550Szbb		stats->rx_tcp_offset_errs++;
2074289550Szbb		break;
2075289550Szbb	case CQ_RX_ERROP_L4_PCLP:
2076289550Szbb		stats->rx_l4_pclp++;
2077289550Szbb		break;
2078289550Szbb	case CQ_RX_ERROP_RBDR_TRUNC:
2079289550Szbb		stats->rx_truncated_pkts++;
2080289550Szbb		break;
2081289550Szbb	}
2082289550Szbb
2083289551Szbb	return (1);
2084289550Szbb}
2085289550Szbb
2086289550Szbb/* Check for errors in the send completion queue entry; 0 means success */
2087289551Szbbint
2088289551Szbbnicvf_check_cqe_tx_errs(struct nicvf *nic, struct cmp_queue *cq,
2089289551Szbb    struct cqe_send_t *cqe_tx)
2090289550Szbb{
2091289550Szbb	struct cmp_queue_stats *stats = &cq->stats;
2092289550Szbb
2093289550Szbb	switch (cqe_tx->send_status) {
2094289550Szbb	case CQ_TX_ERROP_GOOD:
2095289550Szbb		stats->tx.good++;
2096289551Szbb		return (0);
2097289550Szbb	case CQ_TX_ERROP_DESC_FAULT:
2098289550Szbb		stats->tx.desc_fault++;
2099289550Szbb		break;
2100289550Szbb	case CQ_TX_ERROP_HDR_CONS_ERR:
2101289550Szbb		stats->tx.hdr_cons_err++;
2102289550Szbb		break;
2103289550Szbb	case CQ_TX_ERROP_SUBDC_ERR:
2104289550Szbb		stats->tx.subdesc_err++;
2105289550Szbb		break;
2106289550Szbb	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
2107289550Szbb		stats->tx.imm_size_oflow++;
2108289550Szbb		break;
2109289550Szbb	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
2110289550Szbb		stats->tx.data_seq_err++;
2111289550Szbb		break;
2112289550Szbb	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
2113289550Szbb		stats->tx.mem_seq_err++;
2114289550Szbb		break;
2115289550Szbb	case CQ_TX_ERROP_LOCK_VIOL:
2116289550Szbb		stats->tx.lock_viol++;
2117289550Szbb		break;
2118289550Szbb	case CQ_TX_ERROP_DATA_FAULT:
2119289550Szbb		stats->tx.data_fault++;
2120289550Szbb		break;
2121289550Szbb	case CQ_TX_ERROP_TSTMP_CONFLICT:
2122289550Szbb		stats->tx.tstmp_conflict++;
2123289550Szbb		break;
2124289550Szbb	case CQ_TX_ERROP_TSTMP_TIMEOUT:
2125289550Szbb		stats->tx.tstmp_timeout++;
2126289550Szbb		break;
2127289550Szbb	case CQ_TX_ERROP_MEM_FAULT:
2128289550Szbb		stats->tx.mem_fault++;
2129289550Szbb		break;
2130289550Szbb	case CQ_TX_ERROP_CK_OVERLAP:
2131289550Szbb		stats->tx.csum_overlap++;
2132289550Szbb		break;
2133289550Szbb	case CQ_TX_ERROP_CK_OFLOW:
2134289550Szbb		stats->tx.csum_overflow++;
2135289550Szbb		break;
2136289550Szbb	}
2137289550Szbb
2138289551Szbb	return (1);
2139289550Szbb}
2140