/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Implements the virtqueue interface as basically described
 * in the original VirtIO paper.
 */
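
/*
 * A virtqueue is backed by a single physically contiguous "vring"
 * with three parts: a descriptor table of buffer address/length
 * pairs, an available ring the guest writes to post descriptor
 * chains, and a used ring the host writes to return completed
 * chains. Roughly, the layout constructed by vring_init() is:
 *
 *	+------------------+----------------+-- pad to align --+-----------+
 *	| descriptor table | available ring |                  | used ring |
 *	+------------------+----------------+------------------+-----------+
 */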

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sglist.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/atomic.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/virtio_ring.h>

#include "virtio_bus_if.h"

struct virtqueue {
	device_t		 vq_dev;
	char			 vq_name[VIRTQUEUE_MAX_NAME_SZ];
	uint16_t		 vq_queue_index;
	uint16_t		 vq_nentries;
	uint32_t		 vq_flags;
#define	VIRTQUEUE_FLAG_INDIRECT	 0x0001
#define	VIRTQUEUE_FLAG_EVENT_IDX 0x0002

	int			 vq_alignment;
	int			 vq_ring_size;
	void			*vq_ring_mem;
	int			 vq_max_indirect_size;
	int			 vq_indirect_mem_size;
	virtqueue_intr_t	*vq_intrhand;
	void			*vq_intrhand_arg;

	struct vring		 vq_ring;
	uint16_t		 vq_free_cnt;
	uint16_t		 vq_queued_cnt;
	/*
	 * Head of the free chain in the descriptor table. If
	 * there are no free descriptors, this will be set to
	 * VQ_RING_DESC_CHAIN_END.
	 */
	uint16_t		 vq_desc_head_idx;
	/*
	 * Last consumed descriptor in the used table,
	 * trails vq_ring.used->idx.
	 */
	uint16_t		 vq_used_cons_idx;

	struct vq_desc_extra {
		void		  *cookie;
		struct vring_desc *indirect;
		vm_paddr_t	   indirect_paddr;
		uint16_t	   ndescs;
	} vq_descx[0];
};

/*
 * The maximum virtqueue size is 2^15. Use that value as the
 * end-of-chain terminator since it will never be a valid index
 * into the descriptor table. This is used to verify we are
 * correctly handling vq_free_cnt.
 */
#define VQ_RING_DESC_CHAIN_END 32768

#define VQASSERT(_vq, _exp, _msg, ...)				\
    KASSERT((_exp),("%s: %s - "_msg, __func__, (_vq)->vq_name,	\
	##__VA_ARGS__))

#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx)			\
    VQASSERT((_vq), (_idx) < (_vq)->vq_nentries,		\
	"invalid ring index: %d, max: %d", (_idx),		\
	(_vq)->vq_nentries)

#define VQ_RING_ASSERT_CHAIN_TERM(_vq)				\
    VQASSERT((_vq), (_vq)->vq_desc_head_idx ==			\
	VQ_RING_DESC_CHAIN_END,	"full ring terminated "		\
	"incorrectly: head idx: %d", (_vq)->vq_desc_head_idx)

static int	virtqueue_init_indirect(struct virtqueue *vq, int);
static void	virtqueue_free_indirect(struct virtqueue *vq);
static void	virtqueue_init_indirect_list(struct virtqueue *,
		    struct vring_desc *);

static void	vq_ring_init(struct virtqueue *);
static void	vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t	vq_ring_enqueue_segments(struct virtqueue *,
		    struct vring_desc *, uint16_t, struct sglist *, int, int);
static int	vq_ring_use_indirect(struct virtqueue *, int);
static void	vq_ring_enqueue_indirect(struct virtqueue *, void *,
		    struct sglist *, int, int);
static int	vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
static int	vq_ring_must_notify_host(struct virtqueue *);
static void	vq_ring_notify_host(struct virtqueue *);
static void	vq_ring_free_chain(struct virtqueue *, uint16_t);

uint64_t
virtqueue_filter_features(uint64_t features)
{
	uint64_t mask;

	mask = (1 << VIRTIO_TRANSPORT_F_START) - 1;
	mask |= VIRTIO_RING_F_INDIRECT_DESC;
	mask |= VIRTIO_RING_F_EVENT_IDX;

	return (features & mask);
}
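
/*
 * virtqueue_filter_features() clears transport feature bits other
 * than the two ring features handled here. A sketch of how a
 * transport might apply it during negotiation (host_features is an
 * illustrative name for the device's advertised feature set):
 *
 *	features = virtqueue_filter_features(host_features);
 */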

int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
{
	struct virtqueue *vq;
	int error;

	*vqp = NULL;
	error = 0;

	if (size == 0) {
		device_printf(dev,
		    "virtqueue %d (%s) does not exist (size is zero)\n",
		    queue, info->vqai_name);
		return (ENODEV);
	} else if (!powerof2(size)) {
		device_printf(dev,
		    "virtqueue %d (%s) size is not a power of 2: %d\n",
		    queue, info->vqai_name, size);
		return (ENXIO);
	} else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
		device_printf(dev, "virtqueue %d (%s) requested too many "
		    "indirect descriptors: %d, max %d\n",
		    queue, info->vqai_name, info->vqai_maxindirsz,
		    VIRTIO_MAX_INDIRECT);
		return (EINVAL);
	}

	vq = malloc(sizeof(struct virtqueue) +
	    size * sizeof(struct vq_desc_extra), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (vq == NULL) {
		device_printf(dev, "cannot allocate virtqueue\n");
		return (ENOMEM);
	}

	vq->vq_dev = dev;
	strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
	vq->vq_queue_index = queue;
	vq->vq_alignment = align;
	vq->vq_nentries = size;
	vq->vq_free_cnt = size;
	vq->vq_intrhand = info->vqai_intr;
	vq->vq_intrhand_arg = info->vqai_intr_arg;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
		vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;

	if (info->vqai_maxindirsz > 1) {
		error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
		if (error)
			goto fail;
	}

	vq->vq_ring_size = round_page(vring_size(size, align));
	vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
	if (vq->vq_ring_mem == NULL) {
		device_printf(dev,
		    "cannot allocate memory for virtqueue ring\n");
		error = ENOMEM;
		goto fail;
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	*vqp = vq;

fail:
	if (error)
		virtqueue_free(vq);

	return (error);
}
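
/*
 * Device drivers normally do not call virtqueue_alloc() directly;
 * they describe each queue with a vq_alloc_info and let the
 * transport supply the index, size, and alignment. A sketch with
 * illustrative names (sc, mydev_rx_vq_intr, MYDEV_MAX_SEGS):
 *
 *	struct vq_alloc_info vq_info;
 *
 *	VQ_ALLOC_INFO_INIT(&vq_info, MYDEV_MAX_SEGS, mydev_rx_vq_intr,
 *	    sc, &sc->mydev_vq, "%s rx", device_get_nameunit(dev));
 *	error = virtio_alloc_virtqueues(dev, 0, 1, &vq_info);
 */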

static int
virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
{
	device_t dev;
	struct vq_desc_extra *dxp;
	int i, size;

	dev = vq->vq_dev;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
		/*
		 * Indirect descriptors requested by the driver but not
		 * negotiated. Return zero to keep the initialization
		 * going: we'll run fine without.
		 */
		if (bootverbose)
			device_printf(dev, "virtqueue %d (%s) requested "
			    "indirect descriptors but not negotiated\n",
			    vq->vq_queue_index, vq->vq_name);
		return (0);
	}

	size = indirect_size * sizeof(struct vring_desc);
	vq->vq_max_indirect_size = indirect_size;
	vq->vq_indirect_mem_size = size;
	vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		dxp->indirect = malloc(size, M_DEVBUF, M_NOWAIT);
		if (dxp->indirect == NULL) {
			device_printf(dev, "cannot allocate indirect list\n");
			return (ENOMEM);
		}

		dxp->indirect_paddr = vtophys(dxp->indirect);
		virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	return (0);
}

static void
virtqueue_free_indirect(struct virtqueue *vq)
{
	struct vq_desc_extra *dxp;
	int i;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		if (dxp->indirect == NULL)
			break;

		free(dxp->indirect, M_DEVBUF);
		dxp->indirect = NULL;
		dxp->indirect_paddr = 0;
	}

	vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
	vq->vq_indirect_mem_size = 0;
}

static void
virtqueue_init_indirect_list(struct virtqueue *vq,
    struct vring_desc *indirect)
{
	int i;

	bzero(indirect, vq->vq_indirect_mem_size);

	for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
		indirect[i].next = i + 1;
	indirect[i].next = VQ_RING_DESC_CHAIN_END;
}
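
/*
 * Each indirect list is kept pre-chained just like the main
 * descriptor table: entry i points at entry i + 1, terminated by
 * VQ_RING_DESC_CHAIN_END, so vq_ring_enqueue_segments() can walk
 * either table identically.
 */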

int
virtqueue_reinit(struct virtqueue *vq, uint16_t size)
{
	struct vq_desc_extra *dxp;
	int i;

	if (vq->vq_nentries != size) {
		device_printf(vq->vq_dev,
		    "%s: '%s' changed size; old=%hu, new=%hu\n",
		    __func__, vq->vq_name, vq->vq_nentries, size);
		return (EINVAL);
	}

	/* Warn if the virtqueue was not properly cleaned up. */
	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev,
		    "%s: warning '%s' virtqueue not empty, "
		    "leaking %d entries\n", __func__, vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	vq->vq_desc_head_idx = 0;
	vq->vq_used_cons_idx = 0;
	vq->vq_queued_cnt = 0;
	vq->vq_free_cnt = vq->vq_nentries;

	/* To be safe, reset all our allocated memory. */
	bzero(vq->vq_ring_mem, vq->vq_ring_size);
	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];
		dxp->cookie = NULL;
		dxp->ndescs = 0;
		if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
			virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	return (0);
}

void
virtqueue_free(struct virtqueue *vq)
{

	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
		    "leaking %d entries\n", vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
		virtqueue_free_indirect(vq);

	if (vq->vq_ring_mem != NULL) {
		contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
		vq->vq_ring_size = 0;
		vq->vq_ring_mem = NULL;
	}

	free(vq, M_DEVBUF);
}

vm_paddr_t
virtqueue_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring_mem));
}

int
virtqueue_size(struct virtqueue *vq)
{

	return (vq->vq_nentries);
}

int
virtqueue_nfree(struct virtqueue *vq)
{

	return (vq->vq_free_cnt);
}

int
virtqueue_empty(struct virtqueue *vq)
{

	return (vq->vq_nentries == vq->vq_free_cnt);
}

int
virtqueue_full(struct virtqueue *vq)
{

	return (vq->vq_free_cnt == 0);
}

void
virtqueue_notify(struct virtqueue *vq)
{

	/* Ensure updated avail->idx is visible to host. */
	mb();

	if (vq_ring_must_notify_host(vq))
		vq_ring_notify_host(vq);
	vq->vq_queued_cnt = 0;
}
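
/*
 * A typical driver batches several enqueues behind one notify; a
 * sketch with illustrative names (sc, next_request, nread, nwrite):
 *
 *	while ((req = next_request(sc)) != NULL) {
 *		if (virtqueue_enqueue(vq, req, sg, nread, nwrite) != 0)
 *			break;
 *	}
 *	virtqueue_notify(vq);
 */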

int
virtqueue_nused(struct virtqueue *vq)
{
	uint16_t used_idx, nused;

	used_idx = vq->vq_ring.used->idx;

	nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
	VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

	return (nused);
}

int
virtqueue_intr_filter(struct virtqueue *vq)
{

	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (0);

	virtqueue_disable_intr(vq);

	return (1);
}

void
virtqueue_intr(struct virtqueue *vq)
{

	vq->vq_intrhand(vq->vq_intrhand_arg);
}

int
virtqueue_enable_intr(struct virtqueue *vq)
{

	return (vq_ring_enable_interrupt(vq, 0));
}

int
virtqueue_postpone_intr(struct virtqueue *vq, vq_postpone_t hint)
{
	uint16_t ndesc, avail_idx;

	avail_idx = vq->vq_ring.avail->idx;
	ndesc = (uint16_t)(avail_idx - vq->vq_used_cons_idx);

	switch (hint) {
	case VQ_POSTPONE_SHORT:
		ndesc = ndesc / 4;
		break;
	case VQ_POSTPONE_LONG:
		ndesc = (ndesc * 3) / 4;
		break;
	case VQ_POSTPONE_EMPTIED:
		break;
	}

	return (vq_ring_enable_interrupt(vq, ndesc));
}
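
/*
 * For example, with 64 chains outstanding between avail->idx and
 * vq_used_cons_idx, VQ_POSTPONE_SHORT rearms the interrupt after 16
 * more entries are used, VQ_POSTPONE_LONG after 48, and
 * VQ_POSTPONE_EMPTIED after all 64.
 */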

/*
 * Note this is only considered a hint to the host.
 */
void
virtqueue_disable_intr(struct virtqueue *vq)
{

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx -
		    vq->vq_nentries - 1;
	} else
		vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

int
virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
    int readable, int writable)
{
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx, idx;

	needed = readable + writable;

	VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
	VQASSERT(vq, needed == sg->sg_nseg,
	    "segment count mismatch, %d, %d", needed, sg->sg_nseg);
	VQASSERT(vq,
	    needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
	    "too many segments to enqueue: %d, %d/%d", needed,
	    vq->vq_nentries, vq->vq_max_indirect_size);

	if (needed < 1)
		return (EINVAL);
	if (vq->vq_free_cnt == 0)
		return (ENOSPC);

	if (vq_ring_use_indirect(vq, needed)) {
		vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
		return (0);
	} else if (vq->vq_free_cnt < needed)
		return (EMSGSIZE);

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = needed;

	idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
	    sg, readable, writable);

	vq->vq_desc_head_idx = idx;
	vq->vq_free_cnt -= needed;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, idx);

	vq_ring_update_avail(vq, head_idx);

	return (0);
}
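
/*
 * A sketch of preparing the scatter/gather list for a request with
 * one readable header segment and one writable status segment
 * (req and its fields are illustrative):
 *
 *	struct sglist sg;
 *	struct sglist_seg segs[2];
 *
 *	sglist_init(&sg, 2, segs);
 *	sglist_append(&sg, &req->hdr, sizeof(req->hdr));
 *	sglist_append(&sg, &req->status, sizeof(req->status));
 *	error = virtqueue_enqueue(vq, req, &sg, 1, 1);
 */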

void *
virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
{
	struct vring_used_elem *uep;
	void *cookie;
	uint16_t used_idx, desc_idx;

	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (NULL);

	used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
	uep = &vq->vq_ring.used->ring[used_idx];

	rmb();
	desc_idx = (uint16_t) uep->id;
	if (len != NULL)
		*len = uep->len;

	vq_ring_free_chain(vq, desc_idx);

	cookie = vq->vq_descx[desc_idx].cookie;
	VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
	vq->vq_descx[desc_idx].cookie = NULL;

	return (cookie);
}
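
/*
 * Completed chains are typically reaped from the queue interrupt
 * handler; a sketch (mydev_complete is illustrative):
 *
 *	uint32_t len;
 *
 *	while ((req = virtqueue_dequeue(vq, &len)) != NULL)
 *		mydev_complete(req, len);
 */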

void *
virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
	void *cookie;

	while ((cookie = virtqueue_dequeue(vq, len)) == NULL)
		cpu_spinwait();

	return (cookie);
}

void *
virtqueue_drain(struct virtqueue *vq, int *last)
{
	void *cookie;
	int idx;

	cookie = NULL;
	idx = *last;

	while (idx < vq->vq_nentries && cookie == NULL) {
		if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
			vq->vq_descx[idx].cookie = NULL;
			/* Free chain to keep free count consistent. */
			vq_ring_free_chain(vq, idx);
		}
		idx++;
	}

	*last = idx;

	return (cookie);
}
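
/*
 * virtqueue_drain() is meant for teardown paths, where cookies still
 * owned by the queue must be reclaimed; a sketch (mydev_free is
 * illustrative):
 *
 *	int last = 0;
 *
 *	while ((req = virtqueue_drain(vq, &last)) != NULL)
 *		mydev_free(req);
 */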

void
virtqueue_dump(struct virtqueue *vq)
{

	if (vq == NULL)
		return;

	printf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
	    "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
	    "used.idx=%d; avail.flags=0x%x; used.flags=0x%x\n",
	    vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
	    virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
	    vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
	    vq->vq_ring.used->idx, vq->vq_ring.avail->flags,
	    vq->vq_ring.used->flags);
}

static void
vq_ring_init(struct virtqueue *vq)
{
	struct vring *vr;
	char *ring_mem;
	int i, size;

	ring_mem = vq->vq_ring_mem;
	size = vq->vq_nentries;
	vr = &vq->vq_ring;

	vring_init(vr, size, ring_mem, vq->vq_alignment);

	for (i = 0; i < size - 1; i++)
		vr->desc[i].next = i + 1;
	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
}

static void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 */
	avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
	vq->vq_ring.avail->ring[avail_idx] = desc_idx;

	wmb();
	vq->vq_ring.avail->idx++;

	/* Keep pending count until virtqueue_notify(). */
	vq->vq_queued_cnt++;
}

static uint16_t
vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
    uint16_t head_idx, struct sglist *sg, int readable, int writable)
{
	struct sglist_seg *seg;
	struct vring_desc *dp;
	int i, needed;
	uint16_t idx;

	needed = readable + writable;

	for (i = 0, idx = head_idx, seg = sg->sg_segs;
	     i < needed;
	     i++, idx = dp->next, seg++) {
		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
		    "premature end of free desc chain");

		dp = &desc[idx];
		dp->addr = seg->ss_paddr;
		dp->len = seg->ss_len;
		dp->flags = 0;

		if (i < needed - 1)
			dp->flags |= VRING_DESC_F_NEXT;
		if (i >= readable)
			dp->flags |= VRING_DESC_F_WRITE;
	}

	return (idx);
}

static int
vq_ring_use_indirect(struct virtqueue *vq, int needed)
{

	if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
		return (0);

	if (vq->vq_max_indirect_size < needed)
		return (0);

	if (needed < 2)
		return (0);

	return (1);
}
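
/*
 * Indirect descriptors only pay off for multi-segment requests: a
 * single segment occupies one ring slot either way, so chains
 * shorter than two segments always go through the ring directly.
 */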

static void
vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx;

	needed = readable + writable;
	VQASSERT(vq, needed <= vq->vq_max_indirect_size,
	    "enqueuing too many indirect descriptors");

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dp = &vq->vq_ring.desc[head_idx];
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = 1;

	dp->addr = dxp->indirect_paddr;
	dp->len = needed * sizeof(struct vring_desc);
	dp->flags = VRING_DESC_F_INDIRECT;

	vq_ring_enqueue_segments(vq, dxp->indirect, 0,
	    sg, readable, writable);

	vq->vq_desc_head_idx = dp->next;
	vq->vq_free_cnt--;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);

	vq_ring_update_avail(vq, head_idx);
}

static int
vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{

	/*
	 * Enable interrupts, making sure we get the latest index of
	 * what's already been consumed.
	 */
	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
		vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
	else
		vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;

	mb();

	/*
	 * Enough items may have already been consumed to meet our threshold
	 * since we last checked. Let our caller know so it processes the new
	 * entries.
	 */
	if (virtqueue_nused(vq) > ndesc)
		return (1);

	return (0);
}

static int
vq_ring_must_notify_host(struct virtqueue *vq)
{
	uint16_t new_idx, prev_idx, event_idx;

	if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
		new_idx = vq->vq_ring.avail->idx;
		prev_idx = new_idx - vq->vq_queued_cnt;
		event_idx = vring_avail_event(&vq->vq_ring);

		return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
	}

	return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
}
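
/*
 * vring_need_event() implements the standard event-index test:
 * notify only if the host's avail event index falls within the
 * window of entries posted since the last notify, i.e.
 *
 *	(uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - prev_idx)
 */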

static void
vq_ring_notify_host(struct virtqueue *vq)
{

	VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index);
}

static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;

	VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];

	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);

	vq->vq_free_cnt += dxp->ndescs;
	dxp->ndescs--;

	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
			dp = &vq->vq_ring.desc[dp->next];
			dxp->ndescs--;
		}
	}

	VQASSERT(vq, dxp->ndescs == 0,
	    "failed to free entire desc chain, remaining: %d", dxp->ndescs);

	/*
	 * We must append the existing free chain, if any, to the end of
	 * the newly freed chain. If the virtqueue was completely used,
	 * then head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	dp->next = vq->vq_desc_head_idx;
	vq->vq_desc_head_idx = desc_idx;
}
827