nicvf_queues.c revision 299444
/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/vnic/nicvf_queues.c 299444 2016-05-11 13:22:13Z zbb $
 *
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/vnic/nicvf_queues.c 299444 2016-05-11 13:22:13Z zbb $");

32296030Szbb#include "opt_inet.h"
33296030Szbb#include "opt_inet6.h"
34296030Szbb
35289551Szbb#include <sys/param.h>
36289551Szbb#include <sys/systm.h>
37289551Szbb#include <sys/bitset.h>
38289551Szbb#include <sys/bitstring.h>
39289551Szbb#include <sys/buf_ring.h>
40289551Szbb#include <sys/bus.h>
41289551Szbb#include <sys/endian.h>
42289551Szbb#include <sys/kernel.h>
43289551Szbb#include <sys/malloc.h>
44289551Szbb#include <sys/module.h>
45289551Szbb#include <sys/rman.h>
46289551Szbb#include <sys/pciio.h>
47289551Szbb#include <sys/pcpu.h>
48289551Szbb#include <sys/proc.h>
49289551Szbb#include <sys/sockio.h>
50289551Szbb#include <sys/socket.h>
51289551Szbb#include <sys/cpuset.h>
52289551Szbb#include <sys/lock.h>
53289551Szbb#include <sys/mutex.h>
54289551Szbb#include <sys/smp.h>
55289551Szbb#include <sys/taskqueue.h>
56289550Szbb
57289551Szbb#include <vm/vm.h>
58289551Szbb#include <vm/pmap.h>
59289551Szbb
60289551Szbb#include <machine/bus.h>
61289551Szbb#include <machine/vmparam.h>
62289551Szbb
63289551Szbb#include <net/if.h>
64289551Szbb#include <net/if_var.h>
65289551Szbb#include <net/if_media.h>
66289551Szbb#include <net/ifq.h>
67297450Szbb#include <net/bpf.h>
68297450Szbb#include <net/ethernet.h>
69289551Szbb
70296030Szbb#include <netinet/in_systm.h>
71296030Szbb#include <netinet/in.h>
72296030Szbb#include <netinet/if_ether.h>
73296030Szbb#include <netinet/ip.h>
74296030Szbb#include <netinet/ip6.h>
75296030Szbb#include <netinet/sctp.h>
76296030Szbb#include <netinet/tcp.h>
77296030Szbb#include <netinet/tcp_lro.h>
78296030Szbb#include <netinet/udp.h>
79296030Szbb
80289551Szbb#include <dev/pci/pcireg.h>
81289551Szbb#include <dev/pci/pcivar.h>
82289551Szbb
83289551Szbb#include "thunder_bgx.h"
84289550Szbb#include "nic_reg.h"
85289550Szbb#include "nic.h"
86289550Szbb#include "q_struct.h"
87289550Szbb#include "nicvf_queues.h"
88289550Szbb
89289551Szbb#define	DEBUG
90289551Szbb#undef DEBUG
91289551Szbb
92289551Szbb#ifdef DEBUG
93289551Szbb#define	dprintf(dev, fmt, ...)	device_printf(dev, fmt, ##__VA_ARGS__)
94289551Szbb#else
95289551Szbb#define	dprintf(dev, fmt, ...)
96289551Szbb#endif
97289551Szbb
98289551SzbbMALLOC_DECLARE(M_NICVF);
99289551Szbb
100289551Szbbstatic void nicvf_free_snd_queue(struct nicvf *, struct snd_queue *);
101289551Szbbstatic struct mbuf * nicvf_get_rcv_mbuf(struct nicvf *, struct cqe_rx_t *);
102289551Szbbstatic void nicvf_sq_disable(struct nicvf *, int);
103289551Szbbstatic void nicvf_sq_enable(struct nicvf *, struct snd_queue *, int);
104289551Szbbstatic void nicvf_put_sq_desc(struct snd_queue *, int);
105289551Szbbstatic void nicvf_cmp_queue_config(struct nicvf *, struct queue_set *, int,
106289551Szbb    boolean_t);
107289551Szbbstatic void nicvf_sq_free_used_descs(struct nicvf *, struct snd_queue *, int);
108289551Szbb
109297450Szbbstatic int nicvf_tx_mbuf_locked(struct snd_queue *, struct mbuf **);
110297450Szbb
111289551Szbbstatic void nicvf_rbdr_task(void *, int);
112289551Szbbstatic void nicvf_rbdr_task_nowait(void *, int);
113289551Szbb
114289550Szbbstruct rbuf_info {
115289551Szbb	bus_dma_tag_t	dmat;
116289551Szbb	bus_dmamap_t	dmap;
117289551Szbb	struct mbuf *	mbuf;
118289550Szbb};
119289550Szbb
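/*
 * Each receive buffer starts with one 128-byte line holding the rbuf_info
 * metadata; the address handed to the hardware points just past that line,
 * so stepping back by NICVF_RCV_BUF_ALIGN_BYTES recovers the metadata.
 */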
120289551Szbb#define GET_RBUF_INFO(x) ((struct rbuf_info *)((x) - NICVF_RCV_BUF_ALIGN_BYTES))
121289550Szbb
122289550Szbb/* Poll a register for a specific value */
123289550Szbbstatic int nicvf_poll_reg(struct nicvf *nic, int qidx,
124289551Szbb			  uint64_t reg, int bit_pos, int bits, int val)
125289550Szbb{
126289551Szbb	uint64_t bit_mask;
127289551Szbb	uint64_t reg_val;
128289550Szbb	int timeout = 10;
129289550Szbb
130289551Szbb	bit_mask = (1UL << bits) - 1;
131289550Szbb	bit_mask = (bit_mask << bit_pos);
132289550Szbb
133289550Szbb	while (timeout) {
134289550Szbb		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
135289550Szbb		if (((reg_val & bit_mask) >> bit_pos) == val)
136289551Szbb			return (0);
137289551Szbb
138289551Szbb		DELAY(1000);
139289550Szbb		timeout--;
140289550Szbb	}
141289551Szbb	device_printf(nic->dev, "Poll on reg 0x%lx failed\n", reg);
142289551Szbb	return (ETIMEDOUT);
143289550Szbb}
144289550Szbb
145289551Szbb/* Callback for bus_dmamap_load() */
146289551Szbbstatic void
147289551Szbbnicvf_dmamap_q_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
148289551Szbb{
149289551Szbb	bus_addr_t *paddr;
150289551Szbb
151289551Szbb	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
152289551Szbb	paddr = arg;
153289551Szbb	*paddr = segs->ds_addr;
154289551Szbb}
155289551Szbb
156289550Szbb/* Allocate memory for a queue's descriptors */
157289551Szbbstatic int
158289551Szbbnicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
159289551Szbb    int q_len, int desc_size, int align_bytes)
160289550Szbb{
161289551Szbb	int err, err_dmat;
162289551Szbb
163289551Szbb	/* Create DMA tag first */
164289551Szbb	err = bus_dma_tag_create(
165289551Szbb	    bus_get_dma_tag(nic->dev),		/* parent tag */
166289551Szbb	    align_bytes,			/* alignment */
167289551Szbb	    0,					/* boundary */
168289551Szbb	    BUS_SPACE_MAXADDR,			/* lowaddr */
169289551Szbb	    BUS_SPACE_MAXADDR,			/* highaddr */
170289551Szbb	    NULL, NULL,				/* filtfunc, filtfuncarg */
171289551Szbb	    (q_len * desc_size),		/* maxsize */
172289551Szbb	    1,					/* nsegments */
173289551Szbb	    (q_len * desc_size),		/* maxsegsize */
174289551Szbb	    0,					/* flags */
175289551Szbb	    NULL, NULL,				/* lockfunc, lockfuncarg */
176289551Szbb	    &dmem->dmat);			/* dmat */
177289551Szbb
178289551Szbb	if (err != 0) {
179289551Szbb		device_printf(nic->dev,
180289551Szbb		    "Failed to create busdma tag for descriptors ring\n");
181289551Szbb		return (err);
182289551Szbb	}
183289551Szbb
184289551Szbb	/* Allocate segment of continuous DMA safe memory */
185289551Szbb	err = bus_dmamem_alloc(
186289551Szbb	    dmem->dmat,				/* DMA tag */
187289551Szbb	    &dmem->base,			/* virtual address */
188289551Szbb	    (BUS_DMA_NOWAIT | BUS_DMA_ZERO),	/* flags */
189289551Szbb	    &dmem->dmap);			/* DMA map */
190289551Szbb	if (err != 0) {
		device_printf(nic->dev, "Failed to allocate DMA safe memory for "
		    "descriptors ring\n");
193289551Szbb		goto dmamem_fail;
194289551Szbb	}
195289551Szbb
196289551Szbb	err = bus_dmamap_load(
197289551Szbb	    dmem->dmat,
198289551Szbb	    dmem->dmap,
199289551Szbb	    dmem->base,
200289551Szbb	    (q_len * desc_size),		/* allocation size */
201289551Szbb	    nicvf_dmamap_q_cb,			/* map to DMA address cb. */
202289551Szbb	    &dmem->phys_base,			/* physical address */
203289551Szbb	    BUS_DMA_NOWAIT);
204289551Szbb	if (err != 0) {
205289551Szbb		device_printf(nic->dev,
206289551Szbb		    "Cannot load DMA map of descriptors ring\n");
207289551Szbb		goto dmamap_fail;
208289551Szbb	}
209289551Szbb
210289550Szbb	dmem->q_len = q_len;
211289551Szbb	dmem->size = (desc_size * q_len);
212289550Szbb
213289551Szbb	return (0);
214289551Szbb
215289551Szbbdmamap_fail:
216289551Szbb	bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
217289551Szbb	dmem->phys_base = 0;
218289551Szbbdmamem_fail:
219289551Szbb	err_dmat = bus_dma_tag_destroy(dmem->dmat);
220289551Szbb	dmem->base = NULL;
221289551Szbb	KASSERT(err_dmat == 0,
222289551Szbb	    ("%s: Trying to destroy BUSY DMA tag", __func__));
223289551Szbb
224289551Szbb	return (err);
225289550Szbb}
226289550Szbb
227289550Szbb/* Free queue's descriptor memory */
228289551Szbbstatic void
229289551Szbbnicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
230289550Szbb{
231289551Szbb	int err;
232289551Szbb
233289551Szbb	if ((dmem == NULL) || (dmem->base == NULL))
234289550Szbb		return;
235289550Szbb
236289551Szbb	/* Unload a map */
237289551Szbb	bus_dmamap_sync(dmem->dmat, dmem->dmap, BUS_DMASYNC_POSTREAD);
238289551Szbb	bus_dmamap_unload(dmem->dmat, dmem->dmap);
239289551Szbb	/* Free DMA memory */
240289551Szbb	bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
241289551Szbb	/* Destroy DMA tag */
242289551Szbb	err = bus_dma_tag_destroy(dmem->dmat);
243289551Szbb
244289551Szbb	KASSERT(err == 0,
245289551Szbb	    ("%s: Trying to destroy BUSY DMA tag", __func__));
246289551Szbb
247289551Szbb	dmem->phys_base = 0;
248289550Szbb	dmem->base = NULL;
249289550Szbb}
250289550Szbb
251289551Szbb/*
252289551Szbb * Allocate buffer for packet reception
253289550Szbb * HW returns memory address where packet is DMA'ed but not a pointer
254289550Szbb * into RBDR ring, so save buffer address at the start of fragment and
255289550Szbb * align the start address to a cache aligned address
256289550Szbb */
257289551Szbbstatic __inline int
258289551Szbbnicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
259289551Szbb    bus_dmamap_t dmap, int mflags, uint32_t buf_len, bus_addr_t *rbuf)
260289550Szbb{
261289551Szbb	struct mbuf *mbuf;
262289550Szbb	struct rbuf_info *rinfo;
263289551Szbb	bus_dma_segment_t segs[1];
264289551Szbb	int nsegs;
265289551Szbb	int err;
266289550Szbb
267289551Szbb	mbuf = m_getjcl(mflags, MT_DATA, M_PKTHDR, MCLBYTES);
268289551Szbb	if (mbuf == NULL)
269289551Szbb		return (ENOMEM);
270289550Szbb
	/*
	 * The length is the actual payload length plus one 128-byte
	 * cache line used as room for the rbuf_info structure.
	 */
275289551Szbb	mbuf->m_len = mbuf->m_pkthdr.len = buf_len;
276289551Szbb
277289551Szbb	err = bus_dmamap_load_mbuf_sg(rbdr->rbdr_buff_dmat, dmap, mbuf, segs,
278289551Szbb	    &nsegs, BUS_DMA_NOWAIT);
279289551Szbb	if (err != 0) {
280289551Szbb		device_printf(nic->dev,
281289551Szbb		    "Failed to map mbuf into DMA visible memory, err: %d\n",
282289551Szbb		    err);
283289551Szbb		m_freem(mbuf);
284289551Szbb		bus_dmamap_destroy(rbdr->rbdr_buff_dmat, dmap);
285289551Szbb		return (err);
286289550Szbb	}
287289551Szbb	if (nsegs != 1)
288289551Szbb		panic("Unexpected number of DMA segments for RB: %d", nsegs);
289289551Szbb	/*
290289551Szbb	 * Now use the room for rbuf_info structure
291289551Szbb	 * and adjust mbuf data and length.
292289551Szbb	 */
293289551Szbb	rinfo = (struct rbuf_info *)mbuf->m_data;
294289551Szbb	m_adj(mbuf, NICVF_RCV_BUF_ALIGN_BYTES);
295289550Szbb
296289551Szbb	rinfo->dmat = rbdr->rbdr_buff_dmat;
297289551Szbb	rinfo->dmap = dmap;
298289551Szbb	rinfo->mbuf = mbuf;
299289550Szbb
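	/*
	 * Report the DMA address just past the metadata line, so that the
	 * NIC places packet data right after the rbuf_info structure.
	 */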
300289551Szbb	*rbuf = segs[0].ds_addr + NICVF_RCV_BUF_ALIGN_BYTES;
301289550Szbb
302289551Szbb	return (0);
303289550Szbb}
304289550Szbb
305289551Szbb/* Retrieve mbuf for received packet */
306289551Szbbstatic struct mbuf *
307289551Szbbnicvf_rb_ptr_to_mbuf(struct nicvf *nic, bus_addr_t rb_ptr)
308289550Szbb{
309289551Szbb	struct mbuf *mbuf;
310289550Szbb	struct rbuf_info *rinfo;
311289550Szbb
312289550Szbb	/* Get buffer start address and alignment offset */
313289551Szbb	rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(rb_ptr));
314289550Szbb
315289551Szbb	/* Now retrieve mbuf to give to stack */
316289551Szbb	mbuf = rinfo->mbuf;
317289551Szbb	if (__predict_false(mbuf == NULL)) {
318289551Szbb		panic("%s: Received packet fragment with NULL mbuf",
319289551Szbb		    device_get_nameunit(nic->dev));
320289550Szbb	}
321289551Szbb	/*
322289551Szbb	 * Clear the mbuf in the descriptor to indicate
323289551Szbb	 * that this slot is processed and free to use.
324289551Szbb	 */
325289551Szbb	rinfo->mbuf = NULL;
326289550Szbb
327289551Szbb	bus_dmamap_sync(rinfo->dmat, rinfo->dmap, BUS_DMASYNC_POSTREAD);
328289551Szbb	bus_dmamap_unload(rinfo->dmat, rinfo->dmap);
329289550Szbb
330289551Szbb	return (mbuf);
331289550Szbb}
332289550Szbb
333289550Szbb/* Allocate RBDR ring and populate receive buffers */
334289551Szbbstatic int
335289551Szbbnicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, int ring_len,
336289551Szbb    int buf_size, int qidx)
337289550Szbb{
338289551Szbb	bus_dmamap_t dmap;
339289551Szbb	bus_addr_t rbuf;
340289551Szbb	struct rbdr_entry_t *desc;
341289550Szbb	int idx;
342289550Szbb	int err;
343289550Szbb
344289551Szbb	/* Allocate rbdr descriptors ring */
345289550Szbb	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
346289551Szbb	    sizeof(struct rbdr_entry_t), NICVF_RCV_BUF_ALIGN_BYTES);
347289551Szbb	if (err != 0) {
348289551Szbb		device_printf(nic->dev,
349289551Szbb		    "Failed to create RBDR descriptors ring\n");
350289551Szbb		return (err);
351289551Szbb	}
352289550Szbb
353289550Szbb	rbdr->desc = rbdr->dmem.base;
	/*
	 * The buffer size has to be a multiple of 128 bytes.
	 * Make room for metadata the size of one cache line (128 bytes).
	 */
358289551Szbb	rbdr->dma_size = buf_size - NICVF_RCV_BUF_ALIGN_BYTES;
359289551Szbb	rbdr->enable = TRUE;
360289550Szbb	rbdr->thresh = RBDR_THRESH;
361289551Szbb	rbdr->nic = nic;
362289551Szbb	rbdr->idx = qidx;
363289550Szbb
	/*
	 * Create a DMA tag for Rx buffers.
	 * Each map created with this tag is meant to hold the Rx payload of
	 * one fragment plus a header structure containing rbuf_info (hence
	 * the additional 128-byte line, since the receive buffer must be a
	 * multiple of the 128-byte cache line).
	 */
	if (buf_size > MCLBYTES) {
		device_printf(nic->dev,
		    "Buffer size too large for mbuf cluster\n");
		return (EINVAL);
	}
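	/*
	 * Note: the lowaddr/highaddr window below keeps the buffers within
	 * the physical range covered by the direct map, presumably so that
	 * PHYS_TO_DMAP() can later be applied to addresses returned by the
	 * hardware.
	 */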
376289551Szbb	err = bus_dma_tag_create(
377289551Szbb	    bus_get_dma_tag(nic->dev),		/* parent tag */
378289551Szbb	    NICVF_RCV_BUF_ALIGN_BYTES,		/* alignment */
379289551Szbb	    0,					/* boundary */
380289551Szbb	    DMAP_MAX_PHYSADDR,			/* lowaddr */
381289551Szbb	    DMAP_MIN_PHYSADDR,			/* highaddr */
382289551Szbb	    NULL, NULL,				/* filtfunc, filtfuncarg */
383289551Szbb	    roundup2(buf_size, MCLBYTES),	/* maxsize */
384289551Szbb	    1,					/* nsegments */
385289551Szbb	    roundup2(buf_size, MCLBYTES),	/* maxsegsize */
386289551Szbb	    0,					/* flags */
387289551Szbb	    NULL, NULL,				/* lockfunc, lockfuncarg */
388289551Szbb	    &rbdr->rbdr_buff_dmat);		/* dmat */
389289551Szbb
390289551Szbb	if (err != 0) {
391289551Szbb		device_printf(nic->dev,
392289551Szbb		    "Failed to create busdma tag for RBDR buffers\n");
393289551Szbb		return (err);
394289551Szbb	}
395289551Szbb
396289551Szbb	rbdr->rbdr_buff_dmaps = malloc(sizeof(*rbdr->rbdr_buff_dmaps) *
397289551Szbb	    ring_len, M_NICVF, (M_WAITOK | M_ZERO));
398289551Szbb
399289550Szbb	for (idx = 0; idx < ring_len; idx++) {
400289551Szbb		err = bus_dmamap_create(rbdr->rbdr_buff_dmat, 0, &dmap);
401289551Szbb		if (err != 0) {
402289551Szbb			device_printf(nic->dev,
403289551Szbb			    "Failed to create DMA map for RB\n");
404289551Szbb			return (err);
405289551Szbb		}
406289551Szbb		rbdr->rbdr_buff_dmaps[idx] = dmap;
407289550Szbb
408289551Szbb		err = nicvf_alloc_rcv_buffer(nic, rbdr, dmap, M_WAITOK,
409289551Szbb		    DMA_BUFFER_LEN, &rbuf);
410289551Szbb		if (err != 0)
411289551Szbb			return (err);
412289551Szbb
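		/*
		 * The descriptor stores the buffer address shifted right by
		 * NICVF_RCV_BUF_ALIGN (the reverse shift is applied when the
		 * buffer is freed), so only cache-line aligned addresses can
		 * be described here.
		 */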
413289550Szbb		desc = GET_RBDR_DESC(rbdr, idx);
414289551Szbb		desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
415289550Szbb	}
416289551Szbb
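	/*
	 * Two refill tasks are used: a fast one that must not sleep
	 * (M_NOWAIT) and a fallback, run from the generic thread taskqueue,
	 * that may sleep (M_WAITOK) until memory becomes available.
	 */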
417289551Szbb	/* Allocate taskqueue */
418289551Szbb	TASK_INIT(&rbdr->rbdr_task, 0, nicvf_rbdr_task, rbdr);
419289551Szbb	TASK_INIT(&rbdr->rbdr_task_nowait, 0, nicvf_rbdr_task_nowait, rbdr);
420289551Szbb	rbdr->rbdr_taskq = taskqueue_create_fast("nicvf_rbdr_taskq", M_WAITOK,
421289551Szbb	    taskqueue_thread_enqueue, &rbdr->rbdr_taskq);
422289551Szbb	taskqueue_start_threads(&rbdr->rbdr_taskq, 1, PI_NET, "%s: rbdr_taskq",
423289551Szbb	    device_get_nameunit(nic->dev));
424289551Szbb
425289551Szbb	return (0);
426289550Szbb}
427289550Szbb
428289550Szbb/* Free RBDR ring and its receive buffers */
429289551Szbbstatic void
430289551Szbbnicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
431289550Szbb{
432289551Szbb	struct mbuf *mbuf;
433289551Szbb	struct queue_set *qs;
434289550Szbb	struct rbdr_entry_t *desc;
435289550Szbb	struct rbuf_info *rinfo;
436289551Szbb	bus_addr_t buf_addr;
437289551Szbb	int head, tail, idx;
438289551Szbb	int err;
439289550Szbb
440289551Szbb	qs = nic->qs;
441289550Szbb
442289551Szbb	if ((qs == NULL) || (rbdr == NULL))
443289550Szbb		return;
444289550Szbb
445289551Szbb	rbdr->enable = FALSE;
446289551Szbb	if (rbdr->rbdr_taskq != NULL) {
447289551Szbb		/* Remove tasks */
448289551Szbb		while (taskqueue_cancel(rbdr->rbdr_taskq,
449289551Szbb		    &rbdr->rbdr_task_nowait, NULL) != 0) {
450289551Szbb			/* Finish the nowait task first */
451289551Szbb			taskqueue_drain(rbdr->rbdr_taskq,
452289551Szbb			    &rbdr->rbdr_task_nowait);
453289551Szbb		}
454289551Szbb		taskqueue_free(rbdr->rbdr_taskq);
455289551Szbb		rbdr->rbdr_taskq = NULL;
456289550Szbb
457289551Szbb		while (taskqueue_cancel(taskqueue_thread,
458289551Szbb		    &rbdr->rbdr_task, NULL) != 0) {
459289551Szbb			/* Now finish the sleepable task */
460289551Szbb			taskqueue_drain(taskqueue_thread, &rbdr->rbdr_task);
461289551Szbb		}
462289551Szbb	}
463289551Szbb
	/*
	 * Free all of the memory under the RB descriptors.
	 * There are assumptions here:
	 * 1. The corresponding RBDR is disabled
	 *    - it is safe to operate using the head and tail indexes
	 * 2. All buffers that were received have already been freed by
	 *    the receive handler
	 *    - only the unused descriptors (between head and tail) still
	 *      need their DMA maps unloaded and mbufs freed
	 */
474289551Szbb	if (rbdr->rbdr_buff_dmat != NULL) {
475289551Szbb		head = rbdr->head;
476289551Szbb		tail = rbdr->tail;
477289551Szbb		while (head != tail) {
478289551Szbb			desc = GET_RBDR_DESC(rbdr, head);
479289551Szbb			buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
480289551Szbb			rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
481289551Szbb			bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
482289551Szbb			mbuf = rinfo->mbuf;
483289551Szbb			/* This will destroy everything including rinfo! */
484289551Szbb			m_freem(mbuf);
485289551Szbb			head++;
486289551Szbb			head &= (rbdr->dmem.q_len - 1);
487289551Szbb		}
488289551Szbb		/* Free tail descriptor */
489289551Szbb		desc = GET_RBDR_DESC(rbdr, tail);
490289550Szbb		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
491289551Szbb		rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
492289551Szbb		bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
493289551Szbb		mbuf = rinfo->mbuf;
494289551Szbb		/* This will destroy everything including rinfo! */
495289551Szbb		m_freem(mbuf);
496289551Szbb
497289551Szbb		/* Destroy DMA maps */
498289551Szbb		for (idx = 0; idx < qs->rbdr_len; idx++) {
499289551Szbb			if (rbdr->rbdr_buff_dmaps[idx] == NULL)
500289551Szbb				continue;
501289551Szbb			err = bus_dmamap_destroy(rbdr->rbdr_buff_dmat,
502289551Szbb			    rbdr->rbdr_buff_dmaps[idx]);
503289551Szbb			KASSERT(err == 0,
504289551Szbb			    ("%s: Could not destroy DMA map for RB, desc: %d",
505289551Szbb			    __func__, idx));
506289551Szbb			rbdr->rbdr_buff_dmaps[idx] = NULL;
507289551Szbb		}
508289551Szbb
509289551Szbb		/* Now destroy the tag */
510289551Szbb		err = bus_dma_tag_destroy(rbdr->rbdr_buff_dmat);
511289551Szbb		KASSERT(err == 0,
512289551Szbb		    ("%s: Trying to destroy BUSY DMA tag", __func__));
513289551Szbb
514289551Szbb		rbdr->head = 0;
515289551Szbb		rbdr->tail = 0;
516289550Szbb	}
517289550Szbb
518289550Szbb	/* Free RBDR ring */
519289550Szbb	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
520289550Szbb}
521289550Szbb
522289551Szbb/*
523289551Szbb * Refill receive buffer descriptors with new buffers.
524289550Szbb */
525289551Szbbstatic int
526289551Szbbnicvf_refill_rbdr(struct rbdr *rbdr, int mflags)
527289550Szbb{
528289551Szbb	struct nicvf *nic;
529289551Szbb	struct queue_set *qs;
530289551Szbb	int rbdr_idx;
531289550Szbb	int tail, qcount;
532289550Szbb	int refill_rb_cnt;
533289550Szbb	struct rbdr_entry_t *desc;
534289551Szbb	bus_dmamap_t dmap;
535289551Szbb	bus_addr_t rbuf;
536289551Szbb	boolean_t rb_alloc_fail;
537289551Szbb	int new_rb;
538289550Szbb
539289551Szbb	rb_alloc_fail = TRUE;
540289551Szbb	new_rb = 0;
541289551Szbb	nic = rbdr->nic;
542289551Szbb	qs = nic->qs;
543289551Szbb	rbdr_idx = rbdr->idx;
544289551Szbb
545289550Szbb	/* Check if it's enabled */
546289550Szbb	if (!rbdr->enable)
547289551Szbb		return (0);
548289550Szbb
	/* Get the number of descriptors to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
	qcount &= 0x7FFFF;
	/* The doorbell can be rung with at most the ring size minus 1 */
	if (qcount >= (qs->rbdr_len - 1)) {
		rb_alloc_fail = FALSE;
		goto out;
	} else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;
558289550Szbb
559289550Szbb	/* Start filling descs from tail */
560289550Szbb	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
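	/*
	 * The TAIL register appears to keep the descriptor index in the bits
	 * above the lowest three, hence the shift here and in
	 * nicvf_reclaim_rbdr().
	 */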
561289550Szbb	while (refill_rb_cnt) {
562289550Szbb		tail++;
563289550Szbb		tail &= (rbdr->dmem.q_len - 1);
564289550Szbb
565289551Szbb		dmap = rbdr->rbdr_buff_dmaps[tail];
566289551Szbb		if (nicvf_alloc_rcv_buffer(nic, rbdr, dmap, mflags,
567289551Szbb		    DMA_BUFFER_LEN, &rbuf)) {
			/* Something went wrong. Give up. */
569289550Szbb			break;
570289551Szbb		}
571289550Szbb		desc = GET_RBDR_DESC(rbdr, tail);
572289551Szbb		desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
573289550Szbb		refill_rb_cnt--;
574289550Szbb		new_rb++;
575289550Szbb	}
576289550Szbb
577289550Szbb	/* make sure all memory stores are done before ringing doorbell */
578289551Szbb	wmb();
579289550Szbb
580289550Szbb	/* Check if buffer allocation failed */
581289551Szbb	if (refill_rb_cnt == 0)
582289551Szbb		rb_alloc_fail = FALSE;
583289550Szbb
584289550Szbb	/* Notify HW */
585289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
586289550Szbb			      rbdr_idx, new_rb);
587289551Szbbout:
588289551Szbb	if (!rb_alloc_fail) {
589289551Szbb		/*
590289551Szbb		 * Re-enable RBDR interrupts only
591289551Szbb		 * if buffer allocation is success.
592289551Szbb		 */
593289550Szbb		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
594289550Szbb
595289551Szbb		return (0);
596289551Szbb	}
597289551Szbb
598289551Szbb	return (ENOMEM);
599289550Szbb}
600289550Szbb
601289551Szbb/* Refill RBs even if sleep is needed to reclaim memory */
602289551Szbbstatic void
603289551Szbbnicvf_rbdr_task(void *arg, int pending)
604289550Szbb{
605289551Szbb	struct rbdr *rbdr;
606289551Szbb	int err;
607289550Szbb
608289551Szbb	rbdr = (struct rbdr *)arg;
609289551Szbb
610289551Szbb	err = nicvf_refill_rbdr(rbdr, M_WAITOK);
611289551Szbb	if (__predict_false(err != 0)) {
612289551Szbb		panic("%s: Failed to refill RBs even when sleep enabled",
613289551Szbb		    __func__);
614289551Szbb	}
615289550Szbb}
616289550Szbb
617289551Szbb/* Refill RBs as soon as possible without waiting */
618289551Szbbstatic void
619289551Szbbnicvf_rbdr_task_nowait(void *arg, int pending)
620289550Szbb{
621289551Szbb	struct rbdr *rbdr;
622289551Szbb	int err;
623289550Szbb
624289551Szbb	rbdr = (struct rbdr *)arg;
625289551Szbb
626289551Szbb	err = nicvf_refill_rbdr(rbdr, M_NOWAIT);
627289551Szbb	if (err != 0) {
628289551Szbb		/*
		 * Schedule another, sleepable kernel task that is
		 * guaranteed to refill the buffers.
631289551Szbb		 */
632289551Szbb		taskqueue_enqueue(taskqueue_thread, &rbdr->rbdr_task);
633289550Szbb	}
634289550Szbb}
635289550Szbb
636289551Szbbstatic int
637289551Szbbnicvf_rcv_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
638289551Szbb    struct cqe_rx_t *cqe_rx, int cqe_type)
639289551Szbb{
640289551Szbb	struct mbuf *mbuf;
641296031Szbb	struct rcv_queue *rq;
642289551Szbb	int rq_idx;
643289551Szbb	int err = 0;
644289551Szbb
645289551Szbb	rq_idx = cqe_rx->rq_idx;
646296031Szbb	rq = &nic->qs->rq[rq_idx];
647289551Szbb
648289551Szbb	/* Check for errors */
649289551Szbb	err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
650289551Szbb	if (err && !cqe_rx->rb_cnt)
651289551Szbb		return (0);
652289551Szbb
653289551Szbb	mbuf = nicvf_get_rcv_mbuf(nic, cqe_rx);
654289551Szbb	if (mbuf == NULL) {
655289551Szbb		dprintf(nic->dev, "Packet not received\n");
656289551Szbb		return (0);
657289551Szbb	}
658289551Szbb
659289551Szbb	/* If error packet */
660289551Szbb	if (err != 0) {
661289551Szbb		m_freem(mbuf);
662289551Szbb		return (0);
663289551Szbb	}
664289551Szbb
665296031Szbb	if (rq->lro_enabled &&
666296031Szbb	    ((cqe_rx->l3_type == L3TYPE_IPV4) && (cqe_rx->l4_type == L4TYPE_TCP)) &&
667296031Szbb	    (mbuf->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
668296031Szbb            (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
669296031Szbb		/*
670296031Szbb		 * At this point it is known that there are no errors in the
671296031Szbb		 * packet. Attempt to LRO enqueue. Send to stack if no resources
672296031Szbb		 * or enqueue error.
673296031Szbb		 */
674296031Szbb		if ((rq->lro.lro_cnt != 0) &&
675296031Szbb		    (tcp_lro_rx(&rq->lro, mbuf, 0) == 0))
676296031Szbb			return (0);
677296031Szbb	}
678289551Szbb	/*
	 * Push this packet to the stack later, to avoid unlocking
	 * the completion task in the middle of its work.
681289551Szbb	 */
682289551Szbb	err = buf_ring_enqueue(cq->rx_br, mbuf);
683289551Szbb	if (err != 0) {
684289551Szbb		/*
685289551Szbb		 * Failed to enqueue this mbuf.
686289551Szbb		 * We don't drop it, just schedule another task.
687289551Szbb		 */
688289551Szbb		return (err);
689289551Szbb	}
690289551Szbb
691289551Szbb	return (0);
692289551Szbb}
693289551Szbb
694289551Szbbstatic int
695289551Szbbnicvf_snd_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
696289551Szbb    struct cqe_send_t *cqe_tx, int cqe_type)
697289551Szbb{
698289551Szbb	bus_dmamap_t dmap;
699289551Szbb	struct mbuf *mbuf;
700289551Szbb	struct snd_queue *sq;
701289551Szbb	struct sq_hdr_subdesc *hdr;
702289551Szbb
703289551Szbb	mbuf = NULL;
704289551Szbb	sq = &nic->qs->sq[cqe_tx->sq_idx];
705289551Szbb	/* Avoid blocking here since we hold a non-sleepable NICVF_CMP_LOCK */
706289551Szbb	if (NICVF_TX_TRYLOCK(sq) == 0)
707289551Szbb		return (EAGAIN);
708289551Szbb
709289551Szbb	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
710289551Szbb	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
711289551Szbb		NICVF_TX_UNLOCK(sq);
712289551Szbb		return (0);
713289551Szbb	}
714289551Szbb
715289551Szbb	dprintf(nic->dev,
716289551Szbb	    "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
717289551Szbb	    __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
718289551Szbb	    cqe_tx->sqe_ptr, hdr->subdesc_cnt);
719289551Szbb
720289551Szbb	dmap = (bus_dmamap_t)sq->snd_buff[cqe_tx->sqe_ptr].dmap;
721289551Szbb	bus_dmamap_unload(sq->snd_buff_dmat, dmap);
722289551Szbb
723289551Szbb	mbuf = (struct mbuf *)sq->snd_buff[cqe_tx->sqe_ptr].mbuf;
724289551Szbb	if (mbuf != NULL) {
725289551Szbb		m_freem(mbuf);
726289551Szbb		sq->snd_buff[cqe_tx->sqe_ptr].mbuf = NULL;
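		/*
		 * Release the header subdescriptor together with the
		 * subdesc_cnt subdescriptors that follow it.
		 */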
727296602Szbb		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
728289551Szbb	}
729289551Szbb
730289551Szbb	nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
731289551Szbb
732289551Szbb	NICVF_TX_UNLOCK(sq);
733289551Szbb	return (0);
734289551Szbb}
735289551Szbb
736289551Szbbstatic int
737289551Szbbnicvf_cq_intr_handler(struct nicvf *nic, uint8_t cq_idx)
738289551Szbb{
739289551Szbb	struct mbuf *mbuf;
740289551Szbb	struct ifnet *ifp;
741289551Szbb	int processed_cqe, work_done = 0, tx_done = 0;
742289551Szbb	int cqe_count, cqe_head;
743289551Szbb	struct queue_set *qs = nic->qs;
744289551Szbb	struct cmp_queue *cq = &qs->cq[cq_idx];
745297450Szbb	struct snd_queue *sq = &qs->sq[cq_idx];
746296031Szbb	struct rcv_queue *rq;
747289551Szbb	struct cqe_rx_t *cq_desc;
748296031Szbb	struct lro_ctrl	*lro;
749296031Szbb	int rq_idx;
750289551Szbb	int cmp_err;
751289551Szbb
752289551Szbb	NICVF_CMP_LOCK(cq);
753289551Szbb	cmp_err = 0;
754289551Szbb	processed_cqe = 0;
	/* Get the number of valid CQ entries to process */
756289551Szbb	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
757289551Szbb	cqe_count &= CQ_CQE_COUNT;
758289551Szbb	if (cqe_count == 0)
759289551Szbb		goto out;
760289551Szbb
761289551Szbb	/* Get head of the valid CQ entries */
762289551Szbb	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
763289551Szbb	cqe_head &= 0xFFFF;
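	/*
	 * The HEAD register seems to hold a byte offset; with CQEs of
	 * CMP_QUEUE_DESC_SIZE (512) bytes the shift by 9 turns it into a
	 * descriptor index.
	 */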
764289551Szbb
765289551Szbb	dprintf(nic->dev, "%s CQ%d cqe_count %d cqe_head %d\n",
766289551Szbb	    __func__, cq_idx, cqe_count, cqe_head);
767289551Szbb	while (processed_cqe < cqe_count) {
768289551Szbb		/* Get the CQ descriptor */
769289551Szbb		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
770289551Szbb		cqe_head++;
771289551Szbb		cqe_head &= (cq->dmem.q_len - 1);
772296032Szbb		/* Prefetch next CQ descriptor */
773296032Szbb		__builtin_prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));
774289551Szbb
775289551Szbb		dprintf(nic->dev, "CQ%d cq_desc->cqe_type %d\n", cq_idx,
776289551Szbb		    cq_desc->cqe_type);
777289551Szbb		switch (cq_desc->cqe_type) {
778289551Szbb		case CQE_TYPE_RX:
779289551Szbb			cmp_err = nicvf_rcv_pkt_handler(nic, cq, cq_desc,
780289551Szbb			    CQE_TYPE_RX);
781289551Szbb			if (__predict_false(cmp_err != 0)) {
782289551Szbb				/*
				 * Oops. Cannot finish now.
784289551Szbb				 * Let's try again later.
785289551Szbb				 */
786289551Szbb				goto done;
787289551Szbb			}
788289551Szbb			work_done++;
789289551Szbb			break;
790289551Szbb		case CQE_TYPE_SEND:
791289551Szbb			cmp_err = nicvf_snd_pkt_handler(nic, cq,
792289551Szbb			    (void *)cq_desc, CQE_TYPE_SEND);
793289551Szbb			if (__predict_false(cmp_err != 0)) {
794289551Szbb				/*
				 * Oops. Cannot finish now.
796289551Szbb				 * Let's try again later.
797289551Szbb				 */
798289551Szbb				goto done;
799289551Szbb			}
800289551Szbb
801289551Szbb			tx_done++;
802289551Szbb			break;
803289551Szbb		case CQE_TYPE_INVALID:
804289551Szbb		case CQE_TYPE_RX_SPLIT:
805289551Szbb		case CQE_TYPE_RX_TCP:
806289551Szbb		case CQE_TYPE_SEND_PTP:
807289551Szbb			/* Ignore for now */
808289551Szbb			break;
809289551Szbb		}
810289551Szbb		processed_cqe++;
811289551Szbb	}
812289551Szbbdone:
813289551Szbb	dprintf(nic->dev,
814289551Szbb	    "%s CQ%d processed_cqe %d work_done %d\n",
815289551Szbb	    __func__, cq_idx, processed_cqe, work_done);
816289551Szbb
817289551Szbb	/* Ring doorbell to inform H/W to reuse processed CQEs */
818289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, cq_idx, processed_cqe);
819289551Szbb
820289551Szbb	if ((tx_done > 0) &&
821289551Szbb	    ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0)) {
822289551Szbb		/* Reenable TXQ if its stopped earlier due to SQ full */
823289551Szbb		if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
824297450Szbb		taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
825289551Szbb	}
826289551Szbbout:
827296031Szbb	/*
828296031Szbb	 * Flush any outstanding LRO work
829296031Szbb	 */
830296031Szbb	rq_idx = cq_idx;
831296031Szbb	rq = &nic->qs->rq[rq_idx];
832296031Szbb	lro = &rq->lro;
833297482Ssephe	tcp_lro_flush_all(lro);
834296031Szbb
835289551Szbb	NICVF_CMP_UNLOCK(cq);
836289551Szbb
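	/*
	 * The CQ lock was dropped above so the potentially expensive calls
	 * into the network stack below are not made while holding it.
	 */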
837289551Szbb	ifp = nic->ifp;
838289551Szbb	/* Push received MBUFs to the stack */
839289551Szbb	while (!buf_ring_empty(cq->rx_br)) {
840289551Szbb		mbuf = buf_ring_dequeue_mc(cq->rx_br);
841289551Szbb		if (__predict_true(mbuf != NULL))
842289551Szbb			(*ifp->if_input)(ifp, mbuf);
843289551Szbb	}
844289551Szbb
845289551Szbb	return (cmp_err);
846289551Szbb}
847289551Szbb
848289551Szbb/*
849289551Szbb * Qset error interrupt handler
850289551Szbb *
851289551Szbb * As of now only CQ errors are handled
852289551Szbb */
853289551Szbbstatic void
854289551Szbbnicvf_qs_err_task(void *arg, int pending)
855289551Szbb{
856289551Szbb	struct nicvf *nic;
857289551Szbb	struct queue_set *qs;
858289551Szbb	int qidx;
859289551Szbb	uint64_t status;
860289551Szbb	boolean_t enable = TRUE;
861289551Szbb
862289551Szbb	nic = (struct nicvf *)arg;
863289551Szbb	qs = nic->qs;
864289551Szbb
865289551Szbb	/* Deactivate network interface */
866289551Szbb	if_setdrvflagbits(nic->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
867289551Szbb
868289551Szbb	/* Check if it is CQ err */
869289551Szbb	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
870289551Szbb		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
871289551Szbb		    qidx);
872289551Szbb		if ((status & CQ_ERR_MASK) == 0)
873289551Szbb			continue;
874289551Szbb		/* Process already queued CQEs and reconfig CQ */
875289551Szbb		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
876289551Szbb		nicvf_sq_disable(nic, qidx);
877289551Szbb		(void)nicvf_cq_intr_handler(nic, qidx);
878289551Szbb		nicvf_cmp_queue_config(nic, qs, qidx, enable);
879289551Szbb		nicvf_sq_free_used_descs(nic, &qs->sq[qidx], qidx);
880289551Szbb		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
881289551Szbb		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
882289551Szbb	}
883289551Szbb
884289551Szbb	if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
885289551Szbb	/* Re-enable Qset error interrupt */
886289551Szbb	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
887289551Szbb}
888289551Szbb
889289551Szbbstatic void
890289551Szbbnicvf_cmp_task(void *arg, int pending)
891289551Szbb{
892289551Szbb	struct cmp_queue *cq;
893289551Szbb	struct nicvf *nic;
894289551Szbb	int cmp_err;
895289551Szbb
896289551Szbb	cq = (struct cmp_queue *)arg;
897289551Szbb	nic = cq->nic;
898289551Szbb
899289551Szbb	/* Handle CQ descriptors */
900289551Szbb	cmp_err = nicvf_cq_intr_handler(nic, cq->idx);
901289551Szbb	if (__predict_false(cmp_err != 0)) {
902289551Szbb		/*
903289551Szbb		 * Schedule another thread here since we did not
904289551Szbb		 * process the entire CQ due to Tx or Rx CQ parse error.
905289551Szbb		 */
906289551Szbb		taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task);
907289551Szbb
908289551Szbb	}
909289551Szbb
910296601Szbb	nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
	/* Reenable the interrupt (previously disabled in nicvf_intr_handler()) */
912289551Szbb	nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->idx);
913289551Szbb
914289551Szbb}
915289551Szbb
916289550Szbb/* Initialize completion queue */
917289551Szbbstatic int
918289551Szbbnicvf_init_cmp_queue(struct nicvf *nic, struct cmp_queue *cq, int q_len,
919289551Szbb    int qidx)
920289550Szbb{
921289550Szbb	int err;
922289550Szbb
	/* Initialize the lock */
924289551Szbb	snprintf(cq->mtx_name, sizeof(cq->mtx_name), "%s: CQ(%d) lock",
925289551Szbb	    device_get_nameunit(nic->dev), qidx);
926289551Szbb	mtx_init(&cq->mtx, cq->mtx_name, NULL, MTX_DEF);
927289551Szbb
928289550Szbb	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
929289550Szbb				     NICVF_CQ_BASE_ALIGN_BYTES);
930289550Szbb
931289551Szbb	if (err != 0) {
932289551Szbb		device_printf(nic->dev,
933289551Szbb		    "Could not allocate DMA memory for CQ\n");
934289551Szbb		return (err);
935289551Szbb	}
936289551Szbb
937289550Szbb	cq->desc = cq->dmem.base;
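	/*
	 * On pass-1 silicon the CQE count threshold is left at 0 (threshold
	 * interrupt disabled), presumably to work around an early-silicon
	 * limitation.
	 */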
938296038Szbb	cq->thresh = pass1_silicon(nic->dev) ? 0 : CMP_QUEUE_CQE_THRESH;
939289551Szbb	cq->nic = nic;
940289551Szbb	cq->idx = qidx;
941289550Szbb	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
942289550Szbb
943289551Szbb	cq->rx_br = buf_ring_alloc(CMP_QUEUE_LEN * 8, M_DEVBUF, M_WAITOK,
944289551Szbb	    &cq->mtx);
945289551Szbb
946289551Szbb	/* Allocate taskqueue */
947289551Szbb	TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq);
948289551Szbb	cq->cmp_taskq = taskqueue_create_fast("nicvf_cmp_taskq", M_WAITOK,
949289551Szbb	    taskqueue_thread_enqueue, &cq->cmp_taskq);
950289551Szbb	taskqueue_start_threads(&cq->cmp_taskq, 1, PI_NET, "%s: cmp_taskq(%d)",
951289551Szbb	    device_get_nameunit(nic->dev), qidx);
952289551Szbb
953289551Szbb	return (0);
954289550Szbb}
955289550Szbb
956289551Szbbstatic void
957289551Szbbnicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
958289550Szbb{
959289551Szbb
960289551Szbb	if (cq == NULL)
961289550Szbb		return;
962289551Szbb	/*
963289551Szbb	 * The completion queue itself should be disabled by now
964289551Szbb	 * (ref. nicvf_snd_queue_config()).
	 * Ensure that it is safe to free it, or panic otherwise.
966289551Szbb	 */
967289551Szbb	if (cq->enable)
968289551Szbb		panic("%s: Trying to free working CQ(%d)", __func__, cq->idx);
969289550Szbb
970289551Szbb	if (cq->cmp_taskq != NULL) {
971289551Szbb		/* Remove task */
972289551Szbb		while (taskqueue_cancel(cq->cmp_taskq, &cq->cmp_task, NULL) != 0)
973289551Szbb			taskqueue_drain(cq->cmp_taskq, &cq->cmp_task);
974289551Szbb
975289551Szbb		taskqueue_free(cq->cmp_taskq);
976289551Szbb		cq->cmp_taskq = NULL;
977289551Szbb	}
978289551Szbb	/*
	 * The completion handler may have re-enabled the interrupt,
	 * so disable it now that the completion task has finished.
	 * It is safe to do so since the corresponding CQ
	 * was already disabled.
983289551Szbb	 */
984289551Szbb	nicvf_disable_intr(nic, NICVF_INTR_CQ, cq->idx);
985289551Szbb	nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
986289551Szbb
987289551Szbb	NICVF_CMP_LOCK(cq);
988289550Szbb	nicvf_free_q_desc_mem(nic, &cq->dmem);
989289551Szbb	drbr_free(cq->rx_br, M_DEVBUF);
990289551Szbb	NICVF_CMP_UNLOCK(cq);
991289551Szbb	mtx_destroy(&cq->mtx);
992289551Szbb	memset(cq->mtx_name, 0, sizeof(cq->mtx_name));
993289550Szbb}
994289550Szbb
995297450Szbbint
996297450Szbbnicvf_xmit_locked(struct snd_queue *sq)
997297450Szbb{
998297450Szbb	struct nicvf *nic;
999297450Szbb	struct ifnet *ifp;
1000297450Szbb	struct mbuf *next;
1001297450Szbb	int err;
1002297450Szbb
1003297450Szbb	NICVF_TX_LOCK_ASSERT(sq);
1004297450Szbb
1005297450Szbb	nic = sq->nic;
1006297450Szbb	ifp = nic->ifp;
1007297450Szbb	err = 0;
1008297450Szbb
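	/*
	 * Standard drbr peek/advance/putback pattern: if the transmit
	 * attempt consumed (or freed) the mbuf, advance past it; otherwise
	 * put the untouched mbuf back so that it can be retried later.
	 */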
1009297450Szbb	while ((next = drbr_peek(ifp, sq->br)) != NULL) {
1010297450Szbb		err = nicvf_tx_mbuf_locked(sq, &next);
1011297450Szbb		if (err != 0) {
1012297450Szbb			if (next == NULL)
1013297450Szbb				drbr_advance(ifp, sq->br);
1014297450Szbb			else
1015297450Szbb				drbr_putback(ifp, sq->br, next);
1016297450Szbb
1017297450Szbb			break;
1018297450Szbb		}
1019297450Szbb		drbr_advance(ifp, sq->br);
1020297450Szbb		/* Send a copy of the frame to the BPF listener */
1021297450Szbb		ETHER_BPF_MTAP(ifp, next);
1022297450Szbb	}
1023297450Szbb	return (err);
1024297450Szbb}
1025297450Szbb
1026289551Szbbstatic void
1027289551Szbbnicvf_snd_task(void *arg, int pending)
1028289551Szbb{
1029289551Szbb	struct snd_queue *sq = (struct snd_queue *)arg;
1030297450Szbb	struct nicvf *nic;
1031297450Szbb	struct ifnet *ifp;
1032297450Szbb	int err;
1033289551Szbb
1034297450Szbb	nic = sq->nic;
1035297450Szbb	ifp = nic->ifp;
1036297450Szbb
1037297450Szbb	/*
1038297450Szbb	 * Skip sending anything if the driver is not running,
	 * the SQ is full, or the link is down.
1040297450Szbb	 */
1041297450Szbb	if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1042297450Szbb	    IFF_DRV_RUNNING) || !nic->link_up)
1043297450Szbb		return;
1044297450Szbb
1045289551Szbb	NICVF_TX_LOCK(sq);
1046297450Szbb	err = nicvf_xmit_locked(sq);
1047289551Szbb	NICVF_TX_UNLOCK(sq);
1048297450Szbb	/* Try again */
1049297450Szbb	if (err != 0)
1050297450Szbb		taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
1051289551Szbb}
1052289551Szbb
1053289550Szbb/* Initialize transmit queue */
1054289551Szbbstatic int
1055289551Szbbnicvf_init_snd_queue(struct nicvf *nic, struct snd_queue *sq, int q_len,
1056289551Szbb    int qidx)
1057289550Szbb{
1058289551Szbb	size_t i;
1059289550Szbb	int err;
1060289550Szbb
	/* Initialize the TX lock for this queue */
1062289551Szbb	snprintf(sq->mtx_name, sizeof(sq->mtx_name), "%s: SQ(%d) lock",
1063289551Szbb	    device_get_nameunit(nic->dev), qidx);
1064289551Szbb	mtx_init(&sq->mtx, sq->mtx_name, NULL, MTX_DEF);
1065289551Szbb
1066289551Szbb	NICVF_TX_LOCK(sq);
1067289551Szbb	/* Allocate buffer ring */
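	/*
	 * Each packet consumes at least MIN_SQ_DESC_PER_PKT_XMIT SQ
	 * descriptors, which is presumably why the ring is sized to
	 * q_len / MIN_SQ_DESC_PER_PKT_XMIT entries.
	 */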
1068289551Szbb	sq->br = buf_ring_alloc(q_len / MIN_SQ_DESC_PER_PKT_XMIT, M_DEVBUF,
1069289551Szbb	    M_NOWAIT, &sq->mtx);
1070289551Szbb	if (sq->br == NULL) {
1071289551Szbb		device_printf(nic->dev,
1072289551Szbb		    "ERROR: Could not set up buf ring for SQ(%d)\n", qidx);
1073289551Szbb		err = ENOMEM;
1074289551Szbb		goto error;
1075289551Szbb	}
1076289551Szbb
1077289551Szbb	/* Allocate DMA memory for Tx descriptors */
1078289550Szbb	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
1079289550Szbb				     NICVF_SQ_BASE_ALIGN_BYTES);
1080289551Szbb	if (err != 0) {
1081289551Szbb		device_printf(nic->dev,
1082289551Szbb		    "Could not allocate DMA memory for SQ\n");
1083289551Szbb		goto error;
1084289551Szbb	}
1085289550Szbb
1086289550Szbb	sq->desc = sq->dmem.base;
1087289551Szbb	sq->head = sq->tail = 0;
1088297388Szbb	sq->free_cnt = q_len - 1;
1089289550Szbb	sq->thresh = SND_QUEUE_THRESH;
1090289551Szbb	sq->idx = qidx;
1091289551Szbb	sq->nic = nic;
1092289550Szbb
1093289551Szbb	/*
1094289551Szbb	 * Allocate DMA maps for Tx buffers
1095289551Szbb	 */
1096289550Szbb
1097289551Szbb	/* Create DMA tag first */
1098289551Szbb	err = bus_dma_tag_create(
1099289551Szbb	    bus_get_dma_tag(nic->dev),		/* parent tag */
1100289551Szbb	    1,					/* alignment */
1101289551Szbb	    0,					/* boundary */
1102289551Szbb	    BUS_SPACE_MAXADDR,			/* lowaddr */
1103289551Szbb	    BUS_SPACE_MAXADDR,			/* highaddr */
1104289551Szbb	    NULL, NULL,				/* filtfunc, filtfuncarg */
1105296039Szbb	    NICVF_TSO_MAXSIZE,			/* maxsize */
1106296039Szbb	    NICVF_TSO_NSEGS,			/* nsegments */
1107289551Szbb	    MCLBYTES,				/* maxsegsize */
1108289551Szbb	    0,					/* flags */
1109289551Szbb	    NULL, NULL,				/* lockfunc, lockfuncarg */
1110289551Szbb	    &sq->snd_buff_dmat);		/* dmat */
1111289551Szbb
1112289551Szbb	if (err != 0) {
1113289551Szbb		device_printf(nic->dev,
1114289551Szbb		    "Failed to create busdma tag for Tx buffers\n");
1115289551Szbb		goto error;
1116289551Szbb	}
1117289551Szbb
1118289551Szbb	/* Allocate send buffers array */
1119289551Szbb	sq->snd_buff = malloc(sizeof(*sq->snd_buff) * q_len, M_NICVF,
1120289551Szbb	    (M_NOWAIT | M_ZERO));
1121289551Szbb	if (sq->snd_buff == NULL) {
1122289551Szbb		device_printf(nic->dev,
1123289551Szbb		    "Could not allocate memory for Tx buffers array\n");
1124289551Szbb		err = ENOMEM;
1125289551Szbb		goto error;
1126289551Szbb	}
1127289551Szbb
1128289551Szbb	/* Now populate maps */
1129289551Szbb	for (i = 0; i < q_len; i++) {
1130289551Szbb		err = bus_dmamap_create(sq->snd_buff_dmat, 0,
1131289551Szbb		    &sq->snd_buff[i].dmap);
1132289551Szbb		if (err != 0) {
1133289551Szbb			device_printf(nic->dev,
1134289551Szbb			    "Failed to create DMA maps for Tx buffers\n");
1135289551Szbb			goto error;
1136289551Szbb		}
1137289551Szbb	}
1138289551Szbb	NICVF_TX_UNLOCK(sq);
1139289551Szbb
1140289551Szbb	/* Allocate taskqueue */
1141289551Szbb	TASK_INIT(&sq->snd_task, 0, nicvf_snd_task, sq);
1142289551Szbb	sq->snd_taskq = taskqueue_create_fast("nicvf_snd_taskq", M_WAITOK,
1143289551Szbb	    taskqueue_thread_enqueue, &sq->snd_taskq);
1144289551Szbb	taskqueue_start_threads(&sq->snd_taskq, 1, PI_NET, "%s: snd_taskq(%d)",
1145289551Szbb	    device_get_nameunit(nic->dev), qidx);
1146289551Szbb
1147289551Szbb	return (0);
1148289551Szbberror:
1149289551Szbb	NICVF_TX_UNLOCK(sq);
1150289551Szbb	return (err);
1151289550Szbb}
1152289550Szbb
1153289551Szbbstatic void
1154289551Szbbnicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
1155289550Szbb{
1156289551Szbb	struct queue_set *qs = nic->qs;
1157289551Szbb	size_t i;
1158289551Szbb	int err;
1159289551Szbb
1160289551Szbb	if (sq == NULL)
1161289550Szbb		return;
1162289550Szbb
1163289551Szbb	if (sq->snd_taskq != NULL) {
1164289551Szbb		/* Remove task */
1165289551Szbb		while (taskqueue_cancel(sq->snd_taskq, &sq->snd_task, NULL) != 0)
1166289551Szbb			taskqueue_drain(sq->snd_taskq, &sq->snd_task);
1167289550Szbb
1168289551Szbb		taskqueue_free(sq->snd_taskq);
1169289551Szbb		sq->snd_taskq = NULL;
1170289551Szbb	}
1171289551Szbb
1172289551Szbb	NICVF_TX_LOCK(sq);
1173289551Szbb	if (sq->snd_buff_dmat != NULL) {
1174289551Szbb		if (sq->snd_buff != NULL) {
1175289551Szbb			for (i = 0; i < qs->sq_len; i++) {
1176289551Szbb				m_freem(sq->snd_buff[i].mbuf);
1177289551Szbb				sq->snd_buff[i].mbuf = NULL;
1178289551Szbb
1179289551Szbb				bus_dmamap_unload(sq->snd_buff_dmat,
1180289551Szbb				    sq->snd_buff[i].dmap);
1181289551Szbb				err = bus_dmamap_destroy(sq->snd_buff_dmat,
1182289551Szbb				    sq->snd_buff[i].dmap);
1183289551Szbb				/*
				 * If bus_dmamap_destroy() fails, it can cause
				 * a random panic later if the tag is also
				 * destroyed in the process.
1187289551Szbb				 */
1188289551Szbb				KASSERT(err == 0,
1189289551Szbb				    ("%s: Could not destroy DMA map for SQ",
1190289551Szbb				    __func__));
1191289551Szbb			}
1192289551Szbb		}
1193289551Szbb
1194289551Szbb		free(sq->snd_buff, M_NICVF);
1195289551Szbb
1196289551Szbb		err = bus_dma_tag_destroy(sq->snd_buff_dmat);
1197289551Szbb		KASSERT(err == 0,
1198289551Szbb		    ("%s: Trying to destroy BUSY DMA tag", __func__));
1199289551Szbb	}
1200289551Szbb
1201289551Szbb	/* Free private driver ring for this send queue */
1202289551Szbb	if (sq->br != NULL)
1203289551Szbb		drbr_free(sq->br, M_DEVBUF);
1204289551Szbb
1205289551Szbb	if (sq->dmem.base != NULL)
1206289551Szbb		nicvf_free_q_desc_mem(nic, &sq->dmem);
1207289551Szbb
1208289551Szbb	NICVF_TX_UNLOCK(sq);
1209289551Szbb	/* Destroy Tx lock */
1210289551Szbb	mtx_destroy(&sq->mtx);
1211289551Szbb	memset(sq->mtx_name, 0, sizeof(sq->mtx_name));
1212289550Szbb}
1213289550Szbb
1214289551Szbbstatic void
1215289551Szbbnicvf_reclaim_snd_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
1216289550Szbb{
1217289551Szbb
1218289550Szbb	/* Disable send queue */
1219289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
1220289550Szbb	/* Check if SQ is stopped */
1221289550Szbb	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
1222289550Szbb		return;
1223289550Szbb	/* Reset send queue */
1224289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
1225289550Szbb}
1226289550Szbb
1227289551Szbbstatic void
1228289551Szbbnicvf_reclaim_rcv_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
1229289550Szbb{
1230289550Szbb	union nic_mbx mbx = {};
1231289550Szbb
1232289550Szbb	/* Make sure all packets in the pipeline are written back into mem */
1233289550Szbb	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
1234289550Szbb	nicvf_send_msg_to_pf(nic, &mbx);
1235289550Szbb}
1236289550Szbb
1237289551Szbbstatic void
1238289551Szbbnicvf_reclaim_cmp_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
1239289550Szbb{
1240289551Szbb
	/* Disable timer threshold (doesn't get reset upon CQ reset) */
1242289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
1243289550Szbb	/* Disable completion queue */
1244289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
1245289550Szbb	/* Reset completion queue */
1246289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
1247289550Szbb}
1248289550Szbb
1249289551Szbbstatic void
1250289551Szbbnicvf_reclaim_rbdr(struct nicvf *nic, struct rbdr *rbdr, int qidx)
1251289550Szbb{
1252289551Szbb	uint64_t tmp, fifo_state;
1253289550Szbb	int timeout = 10;
1254289550Szbb
	/* Save head and tail pointers for freeing up buffers */
1256289551Szbb	rbdr->head =
1257289551Szbb	    nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
1258289551Szbb	rbdr->tail =
1259289551Szbb	    nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
1260289550Szbb
1261289551Szbb	/*
	 * If the RBDR FIFO is in the 'FAIL' state then do a reset first
	 * before reclaiming.
1264289550Szbb	 */
1265289550Szbb	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
1266289551Szbb	if (((fifo_state >> 62) & 0x03) == 0x3) {
1267289550Szbb		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
1268289551Szbb		    qidx, NICVF_RBDR_RESET);
1269289551Szbb	}
1270289550Szbb
1271289550Szbb	/* Disable RBDR */
1272289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
1273289550Szbb	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
1274289550Szbb		return;
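	/*
	 * The two 32-bit halves of the prefetch status register appear to
	 * count prefetched vs. consumed descriptors; wait until they match,
	 * i.e. until no prefetches are outstanding.
	 */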
1275289550Szbb	while (1) {
1276289550Szbb		tmp = nicvf_queue_reg_read(nic,
1277289551Szbb		    NIC_QSET_RBDR_0_1_PREFETCH_STATUS, qidx);
1278289550Szbb		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
1279289550Szbb			break;
1280289551Szbb
1281289551Szbb		DELAY(1000);
1282289550Szbb		timeout--;
1283289550Szbb		if (!timeout) {
1284289551Szbb			device_printf(nic->dev,
1285289551Szbb			    "Failed polling on prefetch status\n");
1286289550Szbb			return;
1287289550Szbb		}
1288289550Szbb	}
1289289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
1290289551Szbb	    NICVF_RBDR_RESET);
1291289550Szbb
1292289550Szbb	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
1293289550Szbb		return;
1294289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
1295289550Szbb	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
1296289550Szbb		return;
1297289550Szbb}
1298289550Szbb
1299289550Szbb/* Configures receive queue */
1300289551Szbbstatic void
1301289551Szbbnicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
1302289551Szbb    int qidx, bool enable)
1303289550Szbb{
1304289550Szbb	union nic_mbx mbx = {};
1305289550Szbb	struct rcv_queue *rq;
1306289550Szbb	struct rq_cfg rq_cfg;
1307296031Szbb	struct ifnet *ifp;
1308296031Szbb	struct lro_ctrl	*lro;
1309289550Szbb
1310296031Szbb	ifp = nic->ifp;
1311296031Szbb
1312289550Szbb	rq = &qs->rq[qidx];
1313289550Szbb	rq->enable = enable;
1314289550Szbb
1315296031Szbb	lro = &rq->lro;
1316296031Szbb
1317289550Szbb	/* Disable receive queue */
1318289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
1319289550Szbb
1320289550Szbb	if (!rq->enable) {
1321289550Szbb		nicvf_reclaim_rcv_queue(nic, qs, qidx);
1322296031Szbb		/* Free LRO memory */
1323296031Szbb		tcp_lro_free(lro);
1324296031Szbb		rq->lro_enabled = FALSE;
1325289550Szbb		return;
1326289550Szbb	}
1327289550Szbb
1328296031Szbb	/* Configure LRO if enabled */
1329296031Szbb	rq->lro_enabled = FALSE;
1330296031Szbb	if ((if_getcapenable(ifp) & IFCAP_LRO) != 0) {
1331296031Szbb		if (tcp_lro_init(lro) != 0) {
1332296031Szbb			device_printf(nic->dev,
1333296031Szbb			    "Failed to initialize LRO for RXQ%d\n", qidx);
1334296031Szbb		} else {
1335296031Szbb			rq->lro_enabled = TRUE;
1336296031Szbb			lro->ifp = nic->ifp;
1337296031Szbb		}
1338296031Szbb	}
1339296031Szbb
1340289550Szbb	rq->cq_qs = qs->vnic_id;
1341289550Szbb	rq->cq_idx = qidx;
1342289550Szbb	rq->start_rbdr_qs = qs->vnic_id;
1343289550Szbb	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
1344289550Szbb	rq->cont_rbdr_qs = qs->vnic_id;
1345289550Szbb	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
	/* All writes of RBDR data are to be loaded into the L2 cache as well */
1347289550Szbb	rq->caching = 1;
1348289550Szbb
1349289550Szbb	/* Send a mailbox msg to PF to config RQ */
1350289550Szbb	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
1351289550Szbb	mbx.rq.qs_num = qs->vnic_id;
1352289550Szbb	mbx.rq.rq_num = qidx;
1353289550Szbb	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
1354289551Szbb	    (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
1355289551Szbb	    (rq->cont_qs_rbdr_idx << 8) | (rq->start_rbdr_qs << 1) |
1356289551Szbb	    (rq->start_qs_rbdr_idx);
1357289550Szbb	nicvf_send_msg_to_pf(nic, &mbx);
1358289550Szbb
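	/*
	 * RQ backpressure config: the two topmost bits of the config word
	 * appear to enable RBDR and CQ backpressure for this VNIC.
	 */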
1359289550Szbb	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
1360289551Szbb	mbx.rq.cfg = (1UL << 63) | (1UL << 62) | (qs->vnic_id << 0);
1361289550Szbb	nicvf_send_msg_to_pf(nic, &mbx);
1362289550Szbb
1363289551Szbb	/*
1364289551Szbb	 * RQ drop config
1365289550Szbb	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
1366289550Szbb	 */
1367289550Szbb	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
1368289551Szbb	mbx.rq.cfg = (1UL << 62) | (RQ_CQ_DROP << 8);
1369289550Szbb	nicvf_send_msg_to_pf(nic, &mbx);
1370289550Szbb
1371289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
1372289550Szbb
1373289550Szbb	/* Enable Receive queue */
1374289550Szbb	rq_cfg.ena = 1;
1375289550Szbb	rq_cfg.tcp_ena = 0;
1376289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx,
1377289551Szbb	    *(uint64_t *)&rq_cfg);
1378289550Szbb}
1379289550Szbb
1380289550Szbb/* Configures completion queue */
1381289551Szbbstatic void
1382289551Szbbnicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
1383289551Szbb    int qidx, boolean_t enable)
1384289550Szbb{
1385289550Szbb	struct cmp_queue *cq;
1386289550Szbb	struct cq_cfg cq_cfg;
1387289550Szbb
1388289550Szbb	cq = &qs->cq[qidx];
1389289550Szbb	cq->enable = enable;
1390289550Szbb
1391289550Szbb	if (!cq->enable) {
1392289550Szbb		nicvf_reclaim_cmp_queue(nic, qs, qidx);
1393289550Szbb		return;
1394289550Szbb	}
1395289550Szbb
1396289550Szbb	/* Reset completion queue */
1397289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
1398289550Szbb
1399289550Szbb	/* Set completion queue base address */
1400289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx,
1401289551Szbb	    (uint64_t)(cq->dmem.phys_base));
1402289550Szbb
1403289550Szbb	/* Enable Completion queue */
1404289550Szbb	cq_cfg.ena = 1;
1405289550Szbb	cq_cfg.reset = 0;
1406289550Szbb	cq_cfg.caching = 0;
1407289550Szbb	cq_cfg.qsize = CMP_QSIZE;
1408289550Szbb	cq_cfg.avg_con = 0;
1409289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(uint64_t *)&cq_cfg);
1410289550Szbb
1411289550Szbb	/* Set threshold value for interrupt generation */
1412289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
1413289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx,
1414289551Szbb	    nic->cq_coalesce_usecs);
1415289550Szbb}
1416289550Szbb
1417289550Szbb/* Configures transmit queue */
1418289551Szbbstatic void
1419289551Szbbnicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx,
1420289551Szbb    boolean_t enable)
1421289550Szbb{
1422289550Szbb	union nic_mbx mbx = {};
1423289550Szbb	struct snd_queue *sq;
1424289550Szbb	struct sq_cfg sq_cfg;
1425289550Szbb
1426289550Szbb	sq = &qs->sq[qidx];
1427289550Szbb	sq->enable = enable;
1428289550Szbb
1429289550Szbb	if (!sq->enable) {
1430289550Szbb		nicvf_reclaim_snd_queue(nic, qs, qidx);
1431289550Szbb		return;
1432289550Szbb	}
1433289550Szbb
1434289550Szbb	/* Reset send queue */
1435289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
1436289550Szbb
1437289550Szbb	sq->cq_qs = qs->vnic_id;
1438289550Szbb	sq->cq_idx = qidx;
1439289550Szbb
1440289550Szbb	/* Send a mailbox msg to PF to config SQ */
1441289550Szbb	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
1442289550Szbb	mbx.sq.qs_num = qs->vnic_id;
1443289550Szbb	mbx.sq.sq_num = qidx;
1444289550Szbb	mbx.sq.sqs_mode = nic->sqs_mode;
1445289550Szbb	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
1446289550Szbb	nicvf_send_msg_to_pf(nic, &mbx);
1447289550Szbb
1448289550Szbb	/* Set queue base address */
1449289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx,
1450289551Szbb	    (uint64_t)(sq->dmem.phys_base));
1451289550Szbb
1452289550Szbb	/* Enable send queue & set queue size */
1453289550Szbb	sq_cfg.ena = 1;
1454289550Szbb	sq_cfg.reset = 0;
1455289550Szbb	sq_cfg.ldwb = 0;
1456289550Szbb	sq_cfg.qsize = SND_QSIZE;
1457289550Szbb	sq_cfg.tstmp_bgx_intf = 0;
1458289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(uint64_t *)&sq_cfg);
1459289550Szbb
1460289550Szbb	/* Set threshold value for interrupt generation */
1461289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
1462289550Szbb}
1463289550Szbb
1464289550Szbb/* Configures receive buffer descriptor ring */
1465289551Szbbstatic void
1466289551Szbbnicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, int qidx,
1467289551Szbb    boolean_t enable)
1468289550Szbb{
1469289550Szbb	struct rbdr *rbdr;
1470289550Szbb	struct rbdr_cfg rbdr_cfg;
1471289550Szbb
1472289550Szbb	rbdr = &qs->rbdr[qidx];
1473289550Szbb	nicvf_reclaim_rbdr(nic, rbdr, qidx);
1474289550Szbb	if (!enable)
1475289550Szbb		return;
1476289550Szbb
1477289550Szbb	/* Set descriptor base address */
1478289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx,
1479289551Szbb	    (uint64_t)(rbdr->dmem.phys_base));
1480289550Szbb
1481289550Szbb	/* Enable RBDR & set queue size */
1482289550Szbb	/* Buffer size should be in multiples of 128 bytes */
1483289550Szbb	rbdr_cfg.ena = 1;
1484289550Szbb	rbdr_cfg.reset = 0;
1485289550Szbb	rbdr_cfg.ldwb = 0;
1486289550Szbb	rbdr_cfg.qsize = RBDR_SIZE;
1487289550Szbb	rbdr_cfg.avg_con = 0;
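	/*
	 * The buffer size is programmed in units of 128 bytes, hence the
	 * division below.
	 */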
1488289550Szbb	rbdr_cfg.lines = rbdr->dma_size / 128;
1489289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
1490289551Szbb	    *(uint64_t *)&rbdr_cfg);
1491289550Szbb
1492289550Szbb	/* Notify HW */
1493289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, qidx,
1494289551Szbb	    qs->rbdr_len - 1);
1495289550Szbb
1496289550Szbb	/* Set threshold value for interrupt generation */
1497289551Szbb	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, qidx,
1498289551Szbb	    rbdr->thresh - 1);
1499289550Szbb}
1500289550Szbb
1501289550Szbb/* Requests PF to assign and enable Qset */
1502289551Szbbvoid
1503289551Szbbnicvf_qset_config(struct nicvf *nic, boolean_t enable)
1504289550Szbb{
1505289550Szbb	union nic_mbx mbx = {};
1506289551Szbb	struct queue_set *qs;
1507289550Szbb	struct qs_cfg *qs_cfg;
1508289550Szbb
1509289551Szbb	qs = nic->qs;
1510289551Szbb	if (qs == NULL) {
1511289551Szbb		device_printf(nic->dev,
1512289551Szbb		    "Qset is not allocated, skipping queue initialization\n");
1513289550Szbb		return;
1514289550Szbb	}
1515289550Szbb
1516289550Szbb	qs->enable = enable;
1517289550Szbb	qs->vnic_id = nic->vf_id;
1518289550Szbb
1519289550Szbb	/* Send a mailbox msg to PF to config Qset */
1520289550Szbb	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
1521289550Szbb	mbx.qs.num = qs->vnic_id;
1522289550Szbb
1523289550Szbb	mbx.qs.cfg = 0;
1524289550Szbb	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
1525289550Szbb	if (qs->enable) {
1526289550Szbb		qs_cfg->ena = 1;
1527289550Szbb		qs_cfg->vnic = qs->vnic_id;
1528289550Szbb	}
1529289550Szbb	nicvf_send_msg_to_pf(nic, &mbx);
1530289550Szbb}
1531289550Szbb
1532289551Szbbstatic void
1533289551Szbbnicvf_free_resources(struct nicvf *nic)
1534289550Szbb{
1535289550Szbb	int qidx;
1536289551Szbb	struct queue_set *qs;
1537289550Szbb
1538289551Szbb	qs = nic->qs;
1539289551Szbb	/*
1540289551Szbb	 * Remove QS error task first since it has to be dead
1541289551Szbb	 * to safely free completion queue tasks.
1542289551Szbb	 */
1543289551Szbb	if (qs->qs_err_taskq != NULL) {
1544289551Szbb		/* Shut down QS error tasks */
1545289551Szbb		while (taskqueue_cancel(qs->qs_err_taskq,
1546289551Szbb		    &qs->qs_err_task, NULL) != 0) {
1547289551Szbb			taskqueue_drain(qs->qs_err_taskq, &qs->qs_err_task);
1549289551Szbb		}
1550289551Szbb		taskqueue_free(qs->qs_err_taskq);
1551289551Szbb		qs->qs_err_taskq = NULL;
1552289551Szbb	}
1553289550Szbb	/* Free receive buffer descriptor ring */
1554289550Szbb	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1555289550Szbb		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
1556289550Szbb
1557289550Szbb	/* Free completion queue */
1558289550Szbb	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1559289550Szbb		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
1560289550Szbb
1561289550Szbb	/* Free send queue */
1562289550Szbb	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1563289550Szbb		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
1564289550Szbb}
1565289550Szbb
1566289551Szbbstatic int
1567289551Szbbnicvf_alloc_resources(struct nicvf *nic)
1568289550Szbb{
1569289551Szbb	struct queue_set *qs = nic->qs;
1570289550Szbb	int qidx;
1571289550Szbb
1572289550Szbb	/* Alloc receive buffer descriptor ring */
1573289550Szbb	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
1574289550Szbb		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
1575289551Szbb				    DMA_BUFFER_LEN, qidx))
1576289550Szbb			goto alloc_fail;
1577289550Szbb	}
1578289550Szbb
1579289550Szbb	/* Alloc send queue */
1580289550Szbb	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
1581289551Szbb		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
1582289550Szbb			goto alloc_fail;
1583289550Szbb	}
1584289550Szbb
1585289550Szbb	/* Alloc completion queue */
1586289550Szbb	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
1587289551Szbb		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len, qidx))
1588289550Szbb			goto alloc_fail;
1589289550Szbb	}
1590289550Szbb
1591289551Szbb	/* Allocate QS error taskqueue */
1592289551Szbb	TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic);
1593289551Szbb	qs->qs_err_taskq = taskqueue_create_fast("nicvf_qs_err_taskq", M_WAITOK,
1594289551Szbb	    taskqueue_thread_enqueue, &qs->qs_err_taskq);
1595289551Szbb	taskqueue_start_threads(&qs->qs_err_taskq, 1, PI_NET, "%s: qs_taskq",
1596289551Szbb	    device_get_nameunit(nic->dev));
1597289551Szbb
1598289551Szbb	return (0);
1599289550Szbballoc_fail:
1600289550Szbb	nicvf_free_resources(nic);
1601289551Szbb	return (ENOMEM);
1602289550Szbb}
1603289550Szbb
1604289551Szbbint
1605289551Szbbnicvf_set_qset_resources(struct nicvf *nic)
1606289550Szbb{
1607289550Szbb	struct queue_set *qs;
1608289550Szbb
1609289551Szbb	qs = malloc(sizeof(*qs), M_NICVF, (M_ZERO | M_WAITOK));
1610289550Szbb	nic->qs = qs;
1611289550Szbb
1612289550Szbb	/* Set count of each queue */
1613289550Szbb	qs->rbdr_cnt = RBDR_CNT;
1614299444Szbb	qs->rq_cnt = RCV_QUEUE_CNT;
1615289551Szbb
1616289550Szbb	qs->sq_cnt = SND_QUEUE_CNT;
1617289550Szbb	qs->cq_cnt = CMP_QUEUE_CNT;
1618289550Szbb
1619289550Szbb	/* Set queue lengths */
1620289550Szbb	qs->rbdr_len = RCV_BUF_COUNT;
1621289550Szbb	qs->sq_len = SND_QUEUE_LEN;
1622289550Szbb	qs->cq_len = CMP_QUEUE_LEN;
1623289550Szbb
1624289550Szbb	nic->rx_queues = qs->rq_cnt;
1625289550Szbb	nic->tx_queues = qs->sq_cnt;
1626289550Szbb
1627289551Szbb	return (0);
1628289550Szbb}
1629289550Szbb
1630289551Szbbint
1631289551Szbbnicvf_config_data_transfer(struct nicvf *nic, boolean_t enable)
1632289550Szbb{
1633289551Szbb	boolean_t disable = FALSE;
1634289551Szbb	struct queue_set *qs;
1635289550Szbb	int qidx;
1636289550Szbb
1637289551Szbb	qs = nic->qs;
1638289551Szbb	if (qs == NULL)
1639289551Szbb		return (0);
1640289550Szbb
1641289550Szbb	if (enable) {
1642289551Szbb		if (nicvf_alloc_resources(nic) != 0)
1643289551Szbb			return (ENOMEM);
1644289550Szbb
1645289550Szbb		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1646289550Szbb			nicvf_snd_queue_config(nic, qs, qidx, enable);
1647289550Szbb		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1648289550Szbb			nicvf_cmp_queue_config(nic, qs, qidx, enable);
1649289550Szbb		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1650289550Szbb			nicvf_rbdr_config(nic, qs, qidx, enable);
1651289550Szbb		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1652289550Szbb			nicvf_rcv_queue_config(nic, qs, qidx, enable);
1653289550Szbb	} else {
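		/*
		 * Disable in roughly the reverse order of bring-up: receive
		 * queues first, then RBDRs, send queues and finally
		 * completion queues, before releasing the resources.
		 */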
1654289550Szbb		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1655289550Szbb			nicvf_rcv_queue_config(nic, qs, qidx, disable);
1656289550Szbb		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1657289550Szbb			nicvf_rbdr_config(nic, qs, qidx, disable);
1658289550Szbb		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1659289550Szbb			nicvf_snd_queue_config(nic, qs, qidx, disable);
1660289550Szbb		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1661289550Szbb			nicvf_cmp_queue_config(nic, qs, qidx, disable);
1662289550Szbb
1663289550Szbb		nicvf_free_resources(nic);
1664289550Szbb	}
1665289550Szbb
1666289551Szbb	return (0);
1667289550Szbb}
1668289550Szbb
1669289551Szbb/*
1670289551Szbb * Get free descriptors from the SQ and return the index
1671289550Szbb * of the first one allocated.
1672289550Szbb */
1673289551Szbbstatic __inline int
1674289551Szbbnicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
1675289550Szbb{
1676289550Szbb	int qentry;
1677289550Szbb
1678289550Szbb	qentry = sq->tail;
1679297388Szbb	sq->free_cnt -= desc_cnt;
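	/* q_len is assumed to be a power of two, so masking wraps the tail. */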
1680289550Szbb	sq->tail += desc_cnt;
1681289550Szbb	sq->tail &= (sq->dmem.q_len - 1);
1682289550Szbb
1683289551Szbb	return (qentry);
1684289550Szbb}
1685289550Szbb
1686289550Szbb/* Free descriptor back to SQ for future use */
1687289551Szbbstatic void
1688289551Szbbnicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
1689289550Szbb{
1690289551Szbb
1691297388Szbb	sq->free_cnt += desc_cnt;
1692289550Szbb	sq->head += desc_cnt;
1693289550Szbb	sq->head &= (sq->dmem.q_len - 1);
1694289550Szbb}
1695289550Szbb
1696289551Szbbstatic __inline int
1697289551Szbbnicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
1698289550Szbb{
1699289550Szbb	qentry++;
1700289550Szbb	qentry &= (sq->dmem.q_len - 1);
1701289551Szbb	return (qentry);
1702289550Szbb}
1703289550Szbb
1704289551Szbbstatic void
1705289551Szbbnicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
1706289550Szbb{
1707289551Szbb	uint64_t sq_cfg;
1708289550Szbb
1709289550Szbb	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
1710289550Szbb	sq_cfg |= NICVF_SQ_EN;
1711289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
1712289550Szbb	/* Ring doorbell so that H/W restarts processing SQEs */
1713289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
1714289550Szbb}
1715289550Szbb
1716289551Szbbstatic void
1717289551Szbbnicvf_sq_disable(struct nicvf *nic, int qidx)
1718289550Szbb{
1719289551Szbb	uint64_t sq_cfg;
1720289550Szbb
1721289550Szbb	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
1722289550Szbb	sq_cfg &= ~NICVF_SQ_EN;
1723289550Szbb	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
1724289550Szbb}
1725289550Szbb
1726289551Szbbstatic void
1727289551Szbbnicvf_sq_free_used_descs(struct nicvf *nic, struct snd_queue *sq, int qidx)
1728289550Szbb{
1729289551Szbb	uint64_t head, tail;
1730289551Szbb	struct snd_buff *snd_buff;
1731289550Szbb	struct sq_hdr_subdesc *hdr;
1732289550Szbb
1733289551Szbb	NICVF_TX_LOCK(sq);
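	/*
	 * The hardware head/tail registers appear to hold byte offsets;
	 * shifting by 4 converts them to descriptor indices, descriptors
	 * being 16 bytes each.
	 */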
1734289550Szbb	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
1735289550Szbb	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
1736289550Szbb	while (sq->head != head) {
1737289550Szbb		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
1738289550Szbb		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
1739289550Szbb			nicvf_put_sq_desc(sq, 1);
1740289550Szbb			continue;
1741289550Szbb		}
1742289551Szbb		snd_buff = &sq->snd_buff[sq->head];
1743289551Szbb		if (snd_buff->mbuf != NULL) {
1744289551Szbb			bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
1745289551Szbb			m_freem(snd_buff->mbuf);
1746289551Szbb			sq->snd_buff[sq->head].mbuf = NULL;
1747289551Szbb		}
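		/*
		 * Release the header subdescriptor together with all of its
		 * gather subdescriptors.
		 */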
1748289550Szbb		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
1749289550Szbb	}
1750289551Szbb	NICVF_TX_UNLOCK(sq);
1751289550Szbb}
1752289550Szbb
1753289551Szbb/*
1754289551Szbb * Add SQ HEADER subdescriptor.
1755289550Szbb * First subdescriptor for every send descriptor.
1756289550Szbb */
1757296030Szbbstatic __inline int
1758289550Szbbnicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
1759289551Szbb    int subdesc_cnt, struct mbuf *mbuf, int len)
1760289550Szbb{
1761296039Szbb	struct nicvf *nic;
1762289550Szbb	struct sq_hdr_subdesc *hdr;
1763296030Szbb	struct ether_vlan_header *eh;
1764296030Szbb#ifdef INET
1765296030Szbb	struct ip *ip;
1766296039Szbb	struct tcphdr *th;
1767296030Szbb#endif
1768296030Szbb	uint16_t etype;
1769296030Szbb	int ehdrlen, iphlen, poff;
1770289550Szbb
1771296039Szbb	nic = sq->nic;
1772296039Szbb
1773289550Szbb	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
1774289551Szbb	sq->snd_buff[qentry].mbuf = mbuf;
1775289550Szbb
1776289550Szbb	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
1777289550Szbb	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
1778289550Szbb	/* Enable notification via CQE after processing SQE */
1779289550Szbb	hdr->post_cqe = 1;
1780289550Szbb	/* No of subdescriptors following this */
1781289550Szbb	hdr->subdesc_cnt = subdesc_cnt;
1782289550Szbb	hdr->tot_len = len;
1783289550Szbb
1784296039Szbb	eh = mtod(mbuf, struct ether_vlan_header *);
1785296039Szbb	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1786296039Szbb		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1787296039Szbb		etype = ntohs(eh->evl_proto);
1788296039Szbb	} else {
1789296039Szbb		ehdrlen = ETHER_HDR_LEN;
1790296039Szbb		etype = ntohs(eh->evl_encap_proto);
1791296039Szbb	}
1792296030Szbb
1793296039Szbb	switch (etype) {
1794296039Szbb#ifdef INET6
1795296039Szbb	case ETHERTYPE_IPV6:
1796296039Szbb		/* ARM64TODO: Add support for IPv6 */
1797296039Szbb		hdr->csum_l3 = 0;
1798296039Szbb		sq->snd_buff[qentry].mbuf = NULL;
1799296039Szbb		return (ENXIO);
1800296039Szbb#endif
1801296039Szbb#ifdef INET
1802296039Szbb	case ETHERTYPE_IP:
1803296030Szbb		if (mbuf->m_len < ehdrlen + sizeof(struct ip)) {
1804296030Szbb			mbuf = m_pullup(mbuf, ehdrlen + sizeof(struct ip));
1805296030Szbb			sq->snd_buff[qentry].mbuf = mbuf;
1806296030Szbb			if (mbuf == NULL)
1807296030Szbb				return (ENOBUFS);
1808296030Szbb		}
1809296030Szbb
1810296039Szbb		ip = (struct ip *)(mbuf->m_data + ehdrlen);
1811296039Szbb		iphlen = ip->ip_hl << 2;
1812296039Szbb		poff = ehdrlen + iphlen;
1813296030Szbb
1814296039Szbb		if (mbuf->m_pkthdr.csum_flags != 0) {
1815296039Szbb			hdr->csum_l3 = 1; /* Enable IP csum calculation */
1816296030Szbb			switch (ip->ip_p) {
1817296030Szbb			case IPPROTO_TCP:
1818296030Szbb				if ((mbuf->m_pkthdr.csum_flags & CSUM_TCP) == 0)
1819296030Szbb					break;
1820296030Szbb
1821296030Szbb				if (mbuf->m_len < (poff + sizeof(struct tcphdr))) {
1822296030Szbb					mbuf = m_pullup(mbuf, poff + sizeof(struct tcphdr));
1823296030Szbb					sq->snd_buff[qentry].mbuf = mbuf;
1824296030Szbb					if (mbuf == NULL)
1825296030Szbb						return (ENOBUFS);
1826296030Szbb				}
1827296030Szbb				hdr->csum_l4 = SEND_L4_CSUM_TCP;
1828296030Szbb				break;
1829296030Szbb			case IPPROTO_UDP:
1830296030Szbb				if ((mbuf->m_pkthdr.csum_flags & CSUM_UDP) == 0)
1831296030Szbb					break;
1832296030Szbb
1833296030Szbb				if (mbuf->m_len < (poff + sizeof(struct udphdr))) {
1834296030Szbb					mbuf = m_pullup(mbuf, poff + sizeof(struct udphdr));
1835296030Szbb					sq->snd_buff[qentry].mbuf = mbuf;
1836296030Szbb					if (mbuf == NULL)
1837296030Szbb						return (ENOBUFS);
1838296030Szbb				}
1839296030Szbb				hdr->csum_l4 = SEND_L4_CSUM_UDP;
1840296030Szbb				break;
1841296030Szbb			case IPPROTO_SCTP:
1842296030Szbb				if ((mbuf->m_pkthdr.csum_flags & CSUM_SCTP) == 0)
1843296030Szbb					break;
1844296030Szbb
1845296030Szbb				if (mbuf->m_len < (poff + sizeof(struct sctphdr))) {
1846296030Szbb					mbuf = m_pullup(mbuf, poff + sizeof(struct sctphdr));
1847296030Szbb					sq->snd_buff[qentry].mbuf = mbuf;
1848296030Szbb					if (mbuf == NULL)
1849296030Szbb						return (ENOBUFS);
1850296030Szbb				}
1851296030Szbb				hdr->csum_l4 = SEND_L4_CSUM_SCTP;
1852296030Szbb				break;
1853296030Szbb			default:
1854296030Szbb				break;
1855296030Szbb			}
1856296039Szbb			hdr->l3_offset = ehdrlen;
1857296039Szbb			hdr->l4_offset = ehdrlen + iphlen;
1858296030Szbb		}
1859296030Szbb
1860296039Szbb		if ((mbuf->m_pkthdr.tso_segsz != 0) && nic->hw_tso) {
1861296039Szbb			/*
1862296039Szbb			 * Extract ip again as m_data could have been modified.
1863296039Szbb			 */
1864296039Szbb			ip = (struct ip *)(mbuf->m_data + ehdrlen);
1865296039Szbb			th = (struct tcphdr *)((caddr_t)ip + iphlen);
1866296039Szbb
1867296039Szbb			hdr->tso = 1;
1868296039Szbb			hdr->tso_start = ehdrlen + iphlen + (th->th_off * 4);
1869296039Szbb			hdr->tso_max_paysize = mbuf->m_pkthdr.tso_segsz;
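			/*
			 * Point inner_l3_offset at the L2 EtherType field,
			 * two bytes before the IP header, as the Linux nicvf
			 * driver does for non-tunneled packets.
			 */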
1870296039Szbb			hdr->inner_l3_offset = ehdrlen - 2;
1871296039Szbb			nic->drv_stats.tx_tso++;
1872296039Szbb		}
1873296039Szbb		break;
1874296039Szbb#endif
1875296039Szbb	default:
1876296030Szbb		hdr->csum_l3 = 0;
1877296039Szbb	}
1878296030Szbb
1879296030Szbb	return (0);
1880289550Szbb}
1881289550Szbb
1882289551Szbb/*
1883289551Szbb * SQ GATHER subdescriptor
1884289550Szbb * Must follow HDR descriptor
1885289550Szbb */
1886289550Szbbstatic __inline void
1887289551Szbbnicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, int size,
    uint64_t data)
1888289550Szbb{
1889289550Szbb	struct sq_gather_subdesc *gather;
1890289550Szbb
1891289550Szbb	qentry &= (sq->dmem.q_len - 1);
1892289550Szbb	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
1893289550Szbb
1894289550Szbb	memset(gather, 0, SND_QUEUE_DESC_SIZE);
1895289550Szbb	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
1896289550Szbb	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
1897289550Szbb	gather->size = size;
1898289550Szbb	gather->addr = data;
1899289550Szbb}
1900289550Szbb
1901289551Szbb/* Put an mbuf to a SQ for packet transfer. */
1902297450Szbbstatic int
1903297450Szbbnicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf **mbufp)
1904289550Szbb{
1905289551Szbb	bus_dma_segment_t segs[256];
1906289551Szbb	struct snd_buff *snd_buff;
1907289551Szbb	size_t seg;
1908289551Szbb	int nsegs, qentry;
1909296039Szbb	int subdesc_cnt;
1910289551Szbb	int err;
1911289550Szbb
1912289551Szbb	NICVF_TX_LOCK_ASSERT(sq);
1913289551Szbb
1914289551Szbb	if (sq->free_cnt == 0)
1915289551Szbb		return (ENOBUFS);
1916289551Szbb
1917289551Szbb	snd_buff = &sq->snd_buff[sq->tail];
1918289551Szbb
1919289551Szbb	err = bus_dmamap_load_mbuf_sg(sq->snd_buff_dmat, snd_buff->dmap,
1920297450Szbb	    *mbufp, segs, &nsegs, BUS_DMA_NOWAIT);
1921297450Szbb	if (__predict_false(err != 0)) {
1922289551Szbb		/* ARM64TODO: Add mbuf defragmenting if we lack maps */
1923297450Szbb		m_freem(*mbufp);
1924297450Szbb		*mbufp = NULL;
1925289551Szbb		return (err);
1926289550Szbb	}
1927289550Szbb
1928289551Szbb	/* Set how many subdescriptors are required */
1929297721Szbb	subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT + nsegs - 1;
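	/*
	 * That is one header subdescriptor plus one gather subdescriptor per
	 * DMA segment; MIN_SQ_DESC_PER_PKT_XMIT presumably covers the header
	 * and the first gather entry.
	 */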
1930289551Szbb	if (subdesc_cnt > sq->free_cnt) {
1931289551Szbb		/* ARM64TODO: Add mbuf defragmentation if we lack descriptors */
1932289551Szbb		bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
1933289551Szbb		return (ENOBUFS);
1934289551Szbb	}
1935289550Szbb
1936289550Szbb	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
1937289550Szbb
1938289550Szbb	/* Add SQ header subdesc */
1939297450Szbb	err = nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, *mbufp,
1940297450Szbb	    (*mbufp)->m_pkthdr.len);
1941296030Szbb	if (err != 0) {
1942297450Szbb		nicvf_put_sq_desc(sq, subdesc_cnt);
1943296030Szbb		bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
1944297450Szbb		if (err == ENOBUFS) {
1945297450Szbb			m_freem(*mbufp);
1946297450Szbb			*mbufp = NULL;
1947297450Szbb		}
1948296030Szbb		return (err);
1949296030Szbb	}
1950289550Szbb
1951289550Szbb	/* Add SQ gather subdescs */
1952289551Szbb	for (seg = 0; seg < nsegs; seg++) {
1953289550Szbb		qentry = nicvf_get_nxt_sqentry(sq, qentry);
1954289551Szbb		nicvf_sq_add_gather_subdesc(sq, qentry, segs[seg].ds_len,
1955289551Szbb		    segs[seg].ds_addr);
1956289550Szbb	}
1957289550Szbb
1958289550Szbb	/* make sure all memory stores are done before ringing doorbell */
1959289551Szbb	bus_dmamap_sync(sq->dmem.dmat, sq->dmem.dmap, BUS_DMASYNC_PREWRITE);
1960289550Szbb
1961289551Szbb	dprintf(sq->nic->dev, "%s: sq->idx: %d, subdesc_cnt: %d\n",
1962289551Szbb	    __func__, sq->idx, subdesc_cnt);
1963289550Szbb	/* Inform HW to xmit new packet */
1964289551Szbb	nicvf_queue_reg_write(sq->nic, NIC_QSET_SQ_0_7_DOOR,
1965289551Szbb	    sq->idx, subdesc_cnt);
1966289551Szbb	return (0);
1967289550Szbb}
1968289550Szbb
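/*
 * The CQE stores per-fragment buffer lengths as 16-bit values, presumably
 * packed four to a 64-bit word. On big-endian hosts the entries within each
 * word appear in reverse order, so the index is flipped within each group of
 * four: 0,1,2,3 -> 3,2,1,0 and 4,5,6,7 -> 7,6,5,4. Little-endian hosts use
 * the index unchanged.
 */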
1969289551Szbbstatic __inline u_int
1970289551Szbbfrag_num(u_int i)
1971289550Szbb{
1972289551Szbb#if BYTE_ORDER == BIG_ENDIAN
1973289551Szbb	return ((i & ~3) + 3 - (i & 3));
1974289550Szbb#else
1975289551Szbb	return (i);
1976289550Szbb#endif
1977289550Szbb}
1978289550Szbb
1979289551Szbb/* Returns MBUF for a received packet */
1980289551Szbbstruct mbuf *
1981289551Szbbnicvf_get_rcv_mbuf(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1982289550Szbb{
1983289550Szbb	int frag;
1984289550Szbb	int payload_len = 0;
1985289551Szbb	struct mbuf *mbuf;
1986289551Szbb	struct mbuf *mbuf_frag;
1987289551Szbb	uint16_t *rb_lens = NULL;
1988289551Szbb	uint64_t *rb_ptrs = NULL;
1989289550Szbb
1990289551Szbb	mbuf = NULL;
1991289551Szbb	rb_lens = (uint16_t *)((uint8_t *)cqe_rx + (3 * sizeof(uint64_t)));
1992289551Szbb	rb_ptrs = (uint64_t *)((uint8_t *)cqe_rx + (6 * sizeof(uint64_t)));
1993289550Szbb
1994289551Szbb	dprintf(nic->dev, "%s rb_cnt %d rb0_ptr %lx rb0_sz %d\n",
1995289551Szbb	    __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
1996289550Szbb
1997289550Szbb	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
1998289550Szbb		payload_len = rb_lens[frag_num(frag)];
1999289551Szbb		if (frag == 0) {
2000289550Szbb			/* First fragment */
2001289551Szbb			mbuf = nicvf_rb_ptr_to_mbuf(nic,
2002289551Szbb			    (*rb_ptrs - cqe_rx->align_pad));
2003289551Szbb			mbuf->m_len = payload_len;
2004289551Szbb			mbuf->m_data += cqe_rx->align_pad;
2005289551Szbb			if_setrcvif(mbuf, nic->ifp);
2006289550Szbb		} else {
2007289550Szbb			/* Add fragments */
2008289551Szbb			mbuf_frag = nicvf_rb_ptr_to_mbuf(nic, *rb_ptrs);
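			/*
			 * m_append() copies the fragment payload into the
			 * chain, so the source mbuf can be freed right away.
			 */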
2009289551Szbb			m_append(mbuf, payload_len, mbuf_frag->m_data);
2010289551Szbb			m_freem(mbuf_frag);
2011289550Szbb		}
2012289550Szbb		/* Next buffer pointer */
2013289550Szbb		rb_ptrs++;
2014289550Szbb	}
2015289551Szbb
2016289551Szbb	if (__predict_true(mbuf != NULL)) {
2017289551Szbb		m_fixhdr(mbuf);
2018289551Szbb		mbuf->m_pkthdr.flowid = cqe_rx->rq_idx;
2019289551Szbb		M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
2020296030Szbb		if (__predict_true((if_getcapenable(nic->ifp) & IFCAP_RXCSUM) != 0)) {
2021296030Szbb			/*
2022296030Szbb			 * HW by default verifies IP & TCP/UDP/SCTP checksums
2023296030Szbb			 */
2024297389Szbb			if (__predict_true(cqe_rx->l3_type == L3TYPE_IPV4)) {
2025296030Szbb				mbuf->m_pkthdr.csum_flags =
2026296030Szbb				    (CSUM_IP_CHECKED | CSUM_IP_VALID);
2027296030Szbb			}
2028297389Szbb
2029297389Szbb			switch (cqe_rx->l4_type) {
2030297389Szbb			case L4TYPE_UDP:
2031297389Szbb			case L4TYPE_TCP: /* fall through */
2032296030Szbb				mbuf->m_pkthdr.csum_flags |=
2033296030Szbb				    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2034297389Szbb				mbuf->m_pkthdr.csum_data = 0xffff;
2035297389Szbb				break;
2036297389Szbb			case L4TYPE_SCTP:
2037297389Szbb				mbuf->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
2038297389Szbb				break;
2039297389Szbb			default:
2040297389Szbb				break;
2041296030Szbb			}
2042296030Szbb		}
2043289551Szbb	}
2044289551Szbb
2045289551Szbb	return (mbuf);
2046289550Szbb}
2047289550Szbb
2048289550Szbb/* Enable interrupt */
2049289551Szbbvoid
2050289551Szbbnicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
2051289550Szbb{
2052289551Szbb	uint64_t reg_val;
2053289550Szbb
2054289550Szbb	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
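	/*
	 * Judging by its name, NIC_VF_ENA_W1S is a write-1-to-set register,
	 * so writing back the read value with the new bit OR-ed in only
	 * enables the requested interrupt source.
	 */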
2055289550Szbb
2056289550Szbb	switch (int_type) {
2057289550Szbb	case NICVF_INTR_CQ:
2058289551Szbb		reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2059289550Szbb		break;
2060289550Szbb	case NICVF_INTR_SQ:
2061289551Szbb		reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2062289550Szbb		break;
2063289550Szbb	case NICVF_INTR_RBDR:
2064289551Szbb		reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2065289550Szbb		break;
2066289550Szbb	case NICVF_INTR_PKT_DROP:
2067289551Szbb		reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
2068289550Szbb		break;
2069289550Szbb	case NICVF_INTR_TCP_TIMER:
2070289551Szbb		reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
2071289550Szbb		break;
2072289550Szbb	case NICVF_INTR_MBOX:
2073289551Szbb		reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
2074289550Szbb		break;
2075289550Szbb	case NICVF_INTR_QS_ERR:
2076289551Szbb		reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
2077289550Szbb		break;
2078289550Szbb	default:
2079289551Szbb		device_printf(nic->dev,
2080289550Szbb			   "Failed to enable interrupt: unknown type\n");
2081289550Szbb		break;
2082289550Szbb	}
2083289550Szbb
2084289550Szbb	nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
2085289550Szbb}
2086289550Szbb
2087289550Szbb/* Disable interrupt */
2088289551Szbbvoid
2089289551Szbbnicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
2090289550Szbb{
2091289551Szbb	uint64_t reg_val = 0;
2092289550Szbb
2093289550Szbb	switch (int_type) {
2094289550Szbb	case NICVF_INTR_CQ:
2095289551Szbb		reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2096289550Szbb		break;
2097289550Szbb	case NICVF_INTR_SQ:
2098289551Szbb		reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2099289550Szbb		break;
2100289550Szbb	case NICVF_INTR_RBDR:
2101289551Szbb		reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2102289550Szbb		break;
2103289550Szbb	case NICVF_INTR_PKT_DROP:
2104289551Szbb		reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
2105289550Szbb		break;
2106289550Szbb	case NICVF_INTR_TCP_TIMER:
2107289551Szbb		reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
2108289550Szbb		break;
2109289550Szbb	case NICVF_INTR_MBOX:
2110289551Szbb		reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
2111289550Szbb		break;
2112289550Szbb	case NICVF_INTR_QS_ERR:
2113289551Szbb		reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
2114289550Szbb		break;
2115289550Szbb	default:
2116289551Szbb		device_printf(nic->dev,
2117289550Szbb			   "Failed to disable interrupt: unknown type\n");
2118289550Szbb		break;
2119289550Szbb	}
2120289550Szbb
2121289550Szbb	nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
2122289550Szbb}
2123289550Szbb
2124289550Szbb/* Clear interrupt */
2125289551Szbbvoid
2126289551Szbbnicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
2127289550Szbb{
2128289551Szbb	uint64_t reg_val = 0;
2129289550Szbb
2130289550Szbb	switch (int_type) {
2131289550Szbb	case NICVF_INTR_CQ:
2132289551Szbb		reg_val = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2133289550Szbb		break;
2134289550Szbb	case NICVF_INTR_SQ:
2135289551Szbb		reg_val = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2136289550Szbb		break;
2137289550Szbb	case NICVF_INTR_RBDR:
2138289551Szbb		reg_val = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2139289550Szbb		break;
2140289550Szbb	case NICVF_INTR_PKT_DROP:
2141289551Szbb		reg_val = (1UL << NICVF_INTR_PKT_DROP_SHIFT);
2142289550Szbb		break;
2143289550Szbb	case NICVF_INTR_TCP_TIMER:
2144289551Szbb		reg_val = (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
2145289550Szbb		break;
2146289550Szbb	case NICVF_INTR_MBOX:
2147289551Szbb		reg_val = (1UL << NICVF_INTR_MBOX_SHIFT);
2148289550Szbb		break;
2149289550Szbb	case NICVF_INTR_QS_ERR:
2150289551Szbb		reg_val = (1UL << NICVF_INTR_QS_ERR_SHIFT);
2151289550Szbb		break;
2152289550Szbb	default:
2153289551Szbb		device_printf(nic->dev,
2154289550Szbb			   "Failed to clear interrupt: unknown type\n");
2155289550Szbb		break;
2156289550Szbb	}
2157289550Szbb
2158289550Szbb	nicvf_reg_write(nic, NIC_VF_INT, reg_val);
2159289550Szbb}
2160289550Szbb
2161289550Szbb/* Check if interrupt is enabled */
2162289551Szbbint
2163289551Szbbnicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
2164289550Szbb{
2165289551Szbb	uint64_t reg_val;
2166289551Szbb	uint64_t mask = 0xff;
2167289550Szbb
2168289550Szbb	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
2169289550Szbb
2170289550Szbb	switch (int_type) {
2171289550Szbb	case NICVF_INTR_CQ:
2172289551Szbb		mask = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2173289550Szbb		break;
2174289550Szbb	case NICVF_INTR_SQ:
2175289551Szbb		mask = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2176289550Szbb		break;
2177289550Szbb	case NICVF_INTR_RBDR:
2178289551Szbb		mask = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2179289550Szbb		break;
2180289550Szbb	case NICVF_INTR_PKT_DROP:
2181289550Szbb		mask = NICVF_INTR_PKT_DROP_MASK;
2182289550Szbb		break;
2183289550Szbb	case NICVF_INTR_TCP_TIMER:
2184289550Szbb		mask = NICVF_INTR_TCP_TIMER_MASK;
2185289550Szbb		break;
2186289550Szbb	case NICVF_INTR_MBOX:
2187289550Szbb		mask = NICVF_INTR_MBOX_MASK;
2188289550Szbb		break;
2189289550Szbb	case NICVF_INTR_QS_ERR:
2190289550Szbb		mask = NICVF_INTR_QS_ERR_MASK;
2191289550Szbb		break;
2192289550Szbb	default:
2193289551Szbb		device_printf(nic->dev,
2194289550Szbb			   "Failed to check interrupt enable: unknown type\n");
2195289550Szbb		break;
2196289550Szbb	}
2197289550Szbb
2198289550Szbb	return (reg_val & mask);
2199289550Szbb}
2200289550Szbb
2201289551Szbbvoid
2202289551Szbbnicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
2203289550Szbb{
2204289550Szbb	struct rcv_queue *rq;
2205289550Szbb
2206289550Szbb#define GET_RQ_STATS(reg) \
2207289550Szbb	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
2208289550Szbb			    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
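/*
 * The statistics registers appear to be 8 bytes apart, hence the (reg << 3)
 * offset combined with the queue index in the address.
 */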
2209289550Szbb
2210289550Szbb	rq = &nic->qs->rq[rq_idx];
2211289550Szbb	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
2212289550Szbb	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
2213289550Szbb}
2214289550Szbb
2215289551Szbbvoid
2216289551Szbbnicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
2217289550Szbb{
2218289550Szbb	struct snd_queue *sq;
2219289550Szbb
2220289550Szbb#define GET_SQ_STATS(reg) \
2221289550Szbb	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
2222289550Szbb			    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
2223289550Szbb
2224289550Szbb	sq = &nic->qs->sq[sq_idx];
2225289550Szbb	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
2226289550Szbb	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
2227289550Szbb}
2228289550Szbb
2229289550Szbb/* Check for errors in the receive cmp.queue entry */
2230289551Szbbint
2231289551Szbbnicvf_check_cqe_rx_errs(struct nicvf *nic, struct cmp_queue *cq,
2232289551Szbb    struct cqe_rx_t *cqe_rx)
2233289550Szbb{
2234289550Szbb	struct nicvf_hw_stats *stats = &nic->hw_stats;
2235289550Szbb	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
2236289550Szbb
2237289550Szbb	if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
2238289550Szbb		drv_stats->rx_frames_ok++;
2239289551Szbb		return (0);
2240289550Szbb	}
2241289550Szbb
2242289550Szbb	switch (cqe_rx->err_opcode) {
2243289550Szbb	case CQ_RX_ERROP_RE_PARTIAL:
2244289550Szbb		stats->rx_bgx_truncated_pkts++;
2245289550Szbb		break;
2246289550Szbb	case CQ_RX_ERROP_RE_JABBER:
2247289550Szbb		stats->rx_jabber_errs++;
2248289550Szbb		break;
2249289550Szbb	case CQ_RX_ERROP_RE_FCS:
2250289550Szbb		stats->rx_fcs_errs++;
2251289550Szbb		break;
2252289550Szbb	case CQ_RX_ERROP_RE_RX_CTL:
2253289550Szbb		stats->rx_bgx_errs++;
2254289550Szbb		break;
2255289550Szbb	case CQ_RX_ERROP_PREL2_ERR:
2256289550Szbb		stats->rx_prel2_errs++;
2257289550Szbb		break;
2258289550Szbb	case CQ_RX_ERROP_L2_MAL:
2259289550Szbb		stats->rx_l2_hdr_malformed++;
2260289550Szbb		break;
2261289550Szbb	case CQ_RX_ERROP_L2_OVERSIZE:
2262289550Szbb		stats->rx_oversize++;
2263289550Szbb		break;
2264289550Szbb	case CQ_RX_ERROP_L2_UNDERSIZE:
2265289550Szbb		stats->rx_undersize++;
2266289550Szbb		break;
2267289550Szbb	case CQ_RX_ERROP_L2_LENMISM:
2268289550Szbb		stats->rx_l2_len_mismatch++;
2269289550Szbb		break;
2270289550Szbb	case CQ_RX_ERROP_L2_PCLP:
2271289550Szbb		stats->rx_l2_pclp++;
2272289550Szbb		break;
2273289550Szbb	case CQ_RX_ERROP_IP_NOT:
2274289550Szbb		stats->rx_ip_ver_errs++;
2275289550Szbb		break;
2276289550Szbb	case CQ_RX_ERROP_IP_CSUM_ERR:
2277289550Szbb		stats->rx_ip_csum_errs++;
2278289550Szbb		break;
2279289550Szbb	case CQ_RX_ERROP_IP_MAL:
2280289550Szbb		stats->rx_ip_hdr_malformed++;
2281289550Szbb		break;
2282289550Szbb	case CQ_RX_ERROP_IP_MALD:
2283289550Szbb		stats->rx_ip_payload_malformed++;
2284289550Szbb		break;
2285289550Szbb	case CQ_RX_ERROP_IP_HOP:
2286289550Szbb		stats->rx_ip_ttl_errs++;
2287289550Szbb		break;
2288289550Szbb	case CQ_RX_ERROP_L3_PCLP:
2289289550Szbb		stats->rx_l3_pclp++;
2290289550Szbb		break;
2291289550Szbb	case CQ_RX_ERROP_L4_MAL:
2292289550Szbb		stats->rx_l4_malformed++;
2293289550Szbb		break;
2294289550Szbb	case CQ_RX_ERROP_L4_CHK:
2295289550Szbb		stats->rx_l4_csum_errs++;
2296289550Szbb		break;
2297289550Szbb	case CQ_RX_ERROP_UDP_LEN:
2298289550Szbb		stats->rx_udp_len_errs++;
2299289550Szbb		break;
2300289550Szbb	case CQ_RX_ERROP_L4_PORT:
2301289550Szbb		stats->rx_l4_port_errs++;
2302289550Szbb		break;
2303289550Szbb	case CQ_RX_ERROP_TCP_FLAG:
2304289550Szbb		stats->rx_tcp_flag_errs++;
2305289550Szbb		break;
2306289550Szbb	case CQ_RX_ERROP_TCP_OFFSET:
2307289550Szbb		stats->rx_tcp_offset_errs++;
2308289550Szbb		break;
2309289550Szbb	case CQ_RX_ERROP_L4_PCLP:
2310289550Szbb		stats->rx_l4_pclp++;
2311289550Szbb		break;
2312289550Szbb	case CQ_RX_ERROP_RBDR_TRUNC:
2313289550Szbb		stats->rx_truncated_pkts++;
2314289550Szbb		break;
2315289550Szbb	}
2316289550Szbb
2317289551Szbb	return (1);
2318289550Szbb}
2319289550Szbb
2320289550Szbb/* Check for errors in the send cmp.queue entry */
2321289551Szbbint
2322289551Szbbnicvf_check_cqe_tx_errs(struct nicvf *nic, struct cmp_queue *cq,
2323289551Szbb    struct cqe_send_t *cqe_tx)
2324289550Szbb{
2325289550Szbb	struct cmp_queue_stats *stats = &cq->stats;
2326289550Szbb
2327289550Szbb	switch (cqe_tx->send_status) {
2328289550Szbb	case CQ_TX_ERROP_GOOD:
2329289550Szbb		stats->tx.good++;
2330289551Szbb		return (0);
2331289550Szbb	case CQ_TX_ERROP_DESC_FAULT:
2332289550Szbb		stats->tx.desc_fault++;
2333289550Szbb		break;
2334289550Szbb	case CQ_TX_ERROP_HDR_CONS_ERR:
2335289550Szbb		stats->tx.hdr_cons_err++;
2336289550Szbb		break;
2337289550Szbb	case CQ_TX_ERROP_SUBDC_ERR:
2338289550Szbb		stats->tx.subdesc_err++;
2339289550Szbb		break;
2340289550Szbb	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
2341289550Szbb		stats->tx.imm_size_oflow++;
2342289550Szbb		break;
2343289550Szbb	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
2344289550Szbb		stats->tx.data_seq_err++;
2345289550Szbb		break;
2346289550Szbb	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
2347289550Szbb		stats->tx.mem_seq_err++;
2348289550Szbb		break;
2349289550Szbb	case CQ_TX_ERROP_LOCK_VIOL:
2350289550Szbb		stats->tx.lock_viol++;
2351289550Szbb		break;
2352289550Szbb	case CQ_TX_ERROP_DATA_FAULT:
2353289550Szbb		stats->tx.data_fault++;
2354289550Szbb		break;
2355289550Szbb	case CQ_TX_ERROP_TSTMP_CONFLICT:
2356289550Szbb		stats->tx.tstmp_conflict++;
2357289550Szbb		break;
2358289550Szbb	case CQ_TX_ERROP_TSTMP_TIMEOUT:
2359289550Szbb		stats->tx.tstmp_timeout++;
2360289550Szbb		break;
2361289550Szbb	case CQ_TX_ERROP_MEM_FAULT:
2362289550Szbb		stats->tx.mem_fault++;
2363289550Szbb		break;
2364289550Szbb	case CQ_TX_ERROP_CK_OVERLAP:
2365289550Szbb		stats->tx.csum_overlap++;
2366289550Szbb		break;
2367289550Szbb	case CQ_TX_ERROP_CK_OFLOW:
2368289550Szbb		stats->tx.csum_overflow++;
2369289550Szbb		break;
2370289550Szbb	}
2371289550Szbb
2372289551Szbb	return (1);
2373289550Szbb}
2374