/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Implements the virtqueue interface as basically described
 * in the original VirtIO paper.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/virtio/virtqueue.c 268010 2014-06-29 00:37:59Z bryanv $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sglist.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/atomic.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/virtio_ring.h>

#include "virtio_bus_if.h"

struct virtqueue {
        device_t vq_dev;
        char vq_name[VIRTQUEUE_MAX_NAME_SZ];
        uint16_t vq_queue_index;
        uint16_t vq_nentries;
        uint32_t vq_flags;
#define VIRTQUEUE_FLAG_INDIRECT  0x0001
#define VIRTQUEUE_FLAG_EVENT_IDX 0x0002

        int vq_alignment;
        int vq_ring_size;
        void *vq_ring_mem;
        int vq_max_indirect_size;
        int vq_indirect_mem_size;
        virtqueue_intr_t *vq_intrhand;
        void *vq_intrhand_arg;

        struct vring vq_ring;
        uint16_t vq_free_cnt;
        uint16_t vq_queued_cnt;
        /*
         * Head of the free chain in the descriptor table. If
         * there are no free descriptors, this will be set to
         * VQ_RING_DESC_CHAIN_END.
         */
        uint16_t vq_desc_head_idx;
        /*
         * Last consumed descriptor in the used table,
         * trails vq_ring.used->idx.
         */
        uint16_t vq_used_cons_idx;

        struct vq_desc_extra {
                void *cookie;
                struct vring_desc *indirect;
                vm_paddr_t indirect_paddr;
                uint16_t ndescs;
        } vq_descx[0];
};

/*
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define VQ_RING_DESC_CHAIN_END 32768

#define VQASSERT(_vq, _exp, _msg, ...) \
    KASSERT((_exp),("%s: %s - "_msg, __func__, (_vq)->vq_name, \
        ##__VA_ARGS__))

#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx) \
    VQASSERT((_vq), (_idx) < (_vq)->vq_nentries, \
        "invalid ring index: %d, max: %d", (_idx), \
        (_vq)->vq_nentries)

#define VQ_RING_ASSERT_CHAIN_TERM(_vq) \
    VQASSERT((_vq), (_vq)->vq_desc_head_idx == \
        VQ_RING_DESC_CHAIN_END, "full ring terminated " \
        "incorrectly: head idx: %d", (_vq)->vq_desc_head_idx)

static int virtqueue_init_indirect(struct virtqueue *vq, int);
static void virtqueue_free_indirect(struct virtqueue *vq);
static void virtqueue_init_indirect_list(struct virtqueue *,
    struct vring_desc *);

static void vq_ring_init(struct virtqueue *);
static void vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t vq_ring_enqueue_segments(struct virtqueue *,
    struct vring_desc *, uint16_t, struct sglist *, int, int);
static int vq_ring_use_indirect(struct virtqueue *, int);
static void vq_ring_enqueue_indirect(struct virtqueue *, void *,
    struct sglist *, int, int);
static int vq_ring_enable_interrupt(struct virtqueue *, uint16_t);
static int vq_ring_must_notify_host(struct virtqueue *);
static void vq_ring_notify_host(struct virtqueue *);
static void vq_ring_free_chain(struct virtqueue *, uint16_t);

uint64_t
virtqueue_filter_features(uint64_t features)
{
        uint64_t mask;

        mask = (1 << VIRTIO_TRANSPORT_F_START) - 1;
        mask |= VIRTIO_RING_F_INDIRECT_DESC;
        mask |= VIRTIO_RING_F_EVENT_IDX;

        return (features & mask);
}
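
/*
 * Illustrative use (a sketch, not part of the original file): a virtio
 * transport typically intersects the host's and the child driver's feature
 * bits and then passes the result through virtqueue_filter_features(), so
 * that only device-class features plus the two ring features handled here
 * (indirect descriptors and event index) survive negotiation. The variable
 * names below are hypothetical.
 *
 *	features = host_features & child_features;
 *	features = virtqueue_filter_features(features);
 */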

int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
{
        struct virtqueue *vq;
        int error;

        *vqp = NULL;
        error = 0;

        if (size == 0) {
                device_printf(dev,
                    "virtqueue %d (%s) does not exist (size is zero)\n",
                    queue, info->vqai_name);
                return (ENODEV);
        } else if (!powerof2(size)) {
                device_printf(dev,
                    "virtqueue %d (%s) size is not a power of 2: %d\n",
                    queue, info->vqai_name, size);
                return (ENXIO);
        } else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
                device_printf(dev, "virtqueue %d (%s) requested too many "
                    "indirect descriptors: %d, max %d\n",
                    queue, info->vqai_name, info->vqai_maxindirsz,
                    VIRTIO_MAX_INDIRECT);
                return (EINVAL);
        }

        vq = malloc(sizeof(struct virtqueue) +
            size * sizeof(struct vq_desc_extra), M_DEVBUF, M_NOWAIT | M_ZERO);
        if (vq == NULL) {
                device_printf(dev, "cannot allocate virtqueue\n");
                return (ENOMEM);
        }

        vq->vq_dev = dev;
        strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
        vq->vq_queue_index = queue;
        vq->vq_alignment = align;
        vq->vq_nentries = size;
        vq->vq_free_cnt = size;
        vq->vq_intrhand = info->vqai_intr;
        vq->vq_intrhand_arg = info->vqai_intr_arg;

        if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
                vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;

        if (info->vqai_maxindirsz > 1) {
                error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
                if (error)
                        goto fail;
        }

        vq->vq_ring_size = round_page(vring_size(size, align));
        vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
            M_NOWAIT | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
        if (vq->vq_ring_mem == NULL) {
                device_printf(dev,
                    "cannot allocate memory for virtqueue ring\n");
                error = ENOMEM;
                goto fail;
        }

        vq_ring_init(vq);
        virtqueue_disable_intr(vq);

        *vqp = vq;

fail:
        if (error)
                virtqueue_free(vq);

        return (error);
}
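
/*
 * Illustrative use (a sketch, not part of the original file): the transport
 * driver fills in a struct vq_alloc_info and calls virtqueue_alloc() once it
 * has read the queue size from the device. Only the vqai_* fields referenced
 * above are shown; "mydev_vq_intr", "sc", and "queue_size" are hypothetical
 * driver names, and error handling is omitted.
 *
 *	struct vq_alloc_info info;
 *	struct virtqueue *vq;
 *	int error;
 *
 *	bzero(&info, sizeof(info));
 *	strlcpy(info.vqai_name, "mydev requestq", sizeof(info.vqai_name));
 *	info.vqai_maxindirsz = 0;		(no indirect descriptors)
 *	info.vqai_intr = mydev_vq_intr;
 *	info.vqai_intr_arg = sc;
 *
 *	error = virtqueue_alloc(dev, 0, queue_size, PAGE_SIZE,
 *	    BUS_SPACE_MAXADDR, &info, &vq);
 */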

static int
virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
{
        device_t dev;
        struct vq_desc_extra *dxp;
        int i, size;

        dev = vq->vq_dev;

        if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
                /*
                 * Indirect descriptors requested by the driver but not
                 * negotiated. Return zero to keep the initialization
                 * going: we'll run fine without.
                 */
                if (bootverbose)
                        device_printf(dev, "virtqueue %d (%s) requested "
                            "indirect descriptors but not negotiated\n",
                            vq->vq_queue_index, vq->vq_name);
                return (0);
        }

        size = indirect_size * sizeof(struct vring_desc);
        vq->vq_max_indirect_size = indirect_size;
        vq->vq_indirect_mem_size = size;
        vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;

        for (i = 0; i < vq->vq_nentries; i++) {
                dxp = &vq->vq_descx[i];

                dxp->indirect = malloc(size, M_DEVBUF, M_NOWAIT);
                if (dxp->indirect == NULL) {
                        device_printf(dev, "cannot allocate indirect list\n");
                        return (ENOMEM);
                }

                dxp->indirect_paddr = vtophys(dxp->indirect);
                virtqueue_init_indirect_list(vq, dxp->indirect);
        }

        return (0);
}

static void
virtqueue_free_indirect(struct virtqueue *vq)
{
        struct vq_desc_extra *dxp;
        int i;

        for (i = 0; i < vq->vq_nentries; i++) {
                dxp = &vq->vq_descx[i];

                if (dxp->indirect == NULL)
                        break;

                free(dxp->indirect, M_DEVBUF);
                dxp->indirect = NULL;
                dxp->indirect_paddr = 0;
        }

        vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
        vq->vq_indirect_mem_size = 0;
}

static void
virtqueue_init_indirect_list(struct virtqueue *vq,
    struct vring_desc *indirect)
{
        int i;

        bzero(indirect, vq->vq_indirect_mem_size);

        for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
                indirect[i].next = i + 1;
        indirect[i].next = VQ_RING_DESC_CHAIN_END;
}

int
virtqueue_reinit(struct virtqueue *vq, uint16_t size)
{
        struct vq_desc_extra *dxp;
        int i;

        if (vq->vq_nentries != size) {
                device_printf(vq->vq_dev,
                    "%s: '%s' changed size; old=%hu, new=%hu\n",
                    __func__, vq->vq_name, vq->vq_nentries, size);
                return (EINVAL);
        }

        /* Warn if the virtqueue was not properly cleaned up. */
        if (vq->vq_free_cnt != vq->vq_nentries) {
                device_printf(vq->vq_dev,
                    "%s: warning '%s' virtqueue not empty, "
                    "leaking %d entries\n", __func__, vq->vq_name,
                    vq->vq_nentries - vq->vq_free_cnt);
        }

        vq->vq_desc_head_idx = 0;
        vq->vq_used_cons_idx = 0;
        vq->vq_queued_cnt = 0;
        vq->vq_free_cnt = vq->vq_nentries;

        /* To be safe, reset all our allocated memory. */
        bzero(vq->vq_ring_mem, vq->vq_ring_size);
        for (i = 0; i < vq->vq_nentries; i++) {
                dxp = &vq->vq_descx[i];
                dxp->cookie = NULL;
                dxp->ndescs = 0;
                if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
                        virtqueue_init_indirect_list(vq, dxp->indirect);
        }

        vq_ring_init(vq);
        virtqueue_disable_intr(vq);

        return (0);
}

void
virtqueue_free(struct virtqueue *vq)
{

        if (vq->vq_free_cnt != vq->vq_nentries) {
                device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
                    "leaking %d entries\n", vq->vq_name,
                    vq->vq_nentries - vq->vq_free_cnt);
        }

        if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
                virtqueue_free_indirect(vq);

        if (vq->vq_ring_mem != NULL) {
                contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
                vq->vq_ring_size = 0;
                vq->vq_ring_mem = NULL;
        }

        free(vq, M_DEVBUF);
}

vm_paddr_t
virtqueue_paddr(struct virtqueue *vq)
{

        return (vtophys(vq->vq_ring_mem));
}

int
virtqueue_size(struct virtqueue *vq)
{

        return (vq->vq_nentries);
}

int
virtqueue_empty(struct virtqueue *vq)
{

        return (vq->vq_nentries == vq->vq_free_cnt);
}

int
virtqueue_full(struct virtqueue *vq)
{

        return (vq->vq_free_cnt == 0);
}

void
virtqueue_notify(struct virtqueue *vq)
{

        /* Ensure updated avail->idx is visible to host. */
        mb();

        if (vq_ring_must_notify_host(vq))
                vq_ring_notify_host(vq);
        vq->vq_queued_cnt = 0;
}
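
/*
 * Illustrative use (a sketch, not part of the original file): because
 * vq_ring_update_avail() only publishes descriptors and defers the expensive
 * host notification, a driver can enqueue a batch of buffers and kick the
 * host once. "mydev_encap" and "sc" are hypothetical driver names.
 *
 *	while (!virtqueue_full(vq) && mydev_encap(sc, vq) == 0)
 *		;
 *	virtqueue_notify(vq);
 */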

int
virtqueue_nused(struct virtqueue *vq)
{
        uint16_t used_idx, nused;

        used_idx = vq->vq_ring.used->idx;

        nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
        VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

        return (nused);
}

int
virtqueue_intr_filter(struct virtqueue *vq)
{

        if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
                return (0);

        virtqueue_disable_intr(vq);

        return (1);
}

void
virtqueue_intr(struct virtqueue *vq)
{

        vq->vq_intrhand(vq->vq_intrhand_arg);
}

int
virtqueue_enable_intr(struct virtqueue *vq)
{

        return (vq_ring_enable_interrupt(vq, 0));
}

int
virtqueue_postpone_intr(struct virtqueue *vq, vq_postpone_t hint)
{
        uint16_t ndesc, avail_idx;

        avail_idx = vq->vq_ring.avail->idx;
        ndesc = (uint16_t)(avail_idx - vq->vq_used_cons_idx);

        switch (hint) {
        case VQ_POSTPONE_SHORT:
                ndesc = ndesc / 4;
                break;
        case VQ_POSTPONE_LONG:
                ndesc = (ndesc * 3) / 4;
                break;
        case VQ_POSTPONE_EMPTIED:
                break;
        }

        return (vq_ring_enable_interrupt(vq, ndesc));
}
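
/*
 * Illustrative use (a sketch, not part of the original file): a transmit
 * completion path can ask for the next interrupt only after most of the
 * outstanding descriptors have been consumed. A non-zero return means the
 * threshold was already crossed, so the caller should process completions
 * now rather than wait for an interrupt. "mydev_txeof" and "sc" are
 * hypothetical.
 *
 *	if (virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG) != 0) {
 *		virtqueue_disable_intr(vq);
 *		mydev_txeof(sc, vq);
 *	}
 */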

/*
 * Note this is only considered a hint to the host.
 */
void
virtqueue_disable_intr(struct virtqueue *vq)
{

        if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
                vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx -
                    vq->vq_nentries - 1;
        } else
                vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

int
virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
    int readable, int writable)
{
        struct vq_desc_extra *dxp;
        int needed;
        uint16_t head_idx, idx;

        needed = readable + writable;

        VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
        VQASSERT(vq, needed == sg->sg_nseg,
            "segment count mismatch, %d, %d", needed, sg->sg_nseg);
        VQASSERT(vq,
            needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
            "too many segments to enqueue: %d, %d/%d", needed,
            vq->vq_nentries, vq->vq_max_indirect_size);

        if (needed < 1)
                return (EINVAL);
        if (vq->vq_free_cnt == 0)
                return (ENOSPC);

        if (vq_ring_use_indirect(vq, needed)) {
                vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
                return (0);
        } else if (vq->vq_free_cnt < needed)
                return (EMSGSIZE);

        head_idx = vq->vq_desc_head_idx;
        VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
        dxp = &vq->vq_descx[head_idx];

        VQASSERT(vq, dxp->cookie == NULL,
            "cookie already exists for index %d", head_idx);
        dxp->cookie = cookie;
        dxp->ndescs = needed;

        idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
            sg, readable, writable);

        vq->vq_desc_head_idx = idx;
        vq->vq_free_cnt -= needed;
        if (vq->vq_free_cnt == 0)
                VQ_RING_ASSERT_CHAIN_TERM(vq);
        else
                VQ_RING_ASSERT_VALID_IDX(vq, idx);

        vq_ring_update_avail(vq, head_idx);

        return (0);
}
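
/*
 * Illustrative use (a sketch, not part of the original file): the caller
 * builds a scatter/gather list whose first "readable" segments are read by
 * the host and whose remaining "writable" segments are written by the host,
 * then enqueues it with a per-request cookie. The "req" structure and its
 * fields are hypothetical; sglist_append() error handling is omitted.
 *
 *	struct sglist_seg segs[2];
 *	struct sglist sg;
 *	int error;
 *
 *	sglist_init(&sg, 2, segs);
 *	sglist_append(&sg, &req->hdr, sizeof(req->hdr));	(host reads)
 *	sglist_append(&sg, &req->status, sizeof(req->status));	(host writes)
 *
 *	error = virtqueue_enqueue(vq, req, &sg, 1, 1);
 *	if (error == 0)
 *		virtqueue_notify(vq);
 */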

void *
virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
{
        struct vring_used_elem *uep;
        void *cookie;
        uint16_t used_idx, desc_idx;

        if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
                return (NULL);

        used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
        uep = &vq->vq_ring.used->ring[used_idx];

        rmb();
        desc_idx = (uint16_t) uep->id;
        if (len != NULL)
                *len = uep->len;

        vq_ring_free_chain(vq, desc_idx);

        cookie = vq->vq_descx[desc_idx].cookie;
        VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
        vq->vq_descx[desc_idx].cookie = NULL;

        return (cookie);
}
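
/*
 * Illustrative use (a sketch, not part of the original file): a typical
 * interrupt handler drains the used ring, re-enables the interrupt, and
 * re-checks to close the race with buffers completed in between.
 * "mydev_complete" and "sc" are hypothetical.
 *
 *	void *cookie;
 *	uint32_t len;
 *
 *	again:
 *	while ((cookie = virtqueue_dequeue(vq, &len)) != NULL)
 *		mydev_complete(sc, cookie, len);
 *	if (virtqueue_enable_intr(vq) != 0) {
 *		virtqueue_disable_intr(vq);
 *		goto again;
 *	}
 */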

void *
virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
        void *cookie;

        while ((cookie = virtqueue_dequeue(vq, len)) == NULL)
                cpu_spinwait();

        return (cookie);
}

void *
virtqueue_drain(struct virtqueue *vq, int *last)
{
        void *cookie;
        int idx;

        cookie = NULL;
        idx = *last;

        while (idx < vq->vq_nentries && cookie == NULL) {
                if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
                        vq->vq_descx[idx].cookie = NULL;
                        /* Free chain to keep free count consistent. */
                        vq_ring_free_chain(vq, idx);
                }
                idx++;
        }

        *last = idx;

        return (cookie);
}
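
/*
 * Illustrative use (a sketch, not part of the original file): during detach
 * or reset, a driver reclaims every outstanding cookie so it can release the
 * associated buffers before the queue is freed or reinitialized. The "last"
 * cursor lets the scan resume across calls; "mydev_discard" and "sc" are
 * hypothetical.
 *
 *	void *cookie;
 *	int last;
 *
 *	last = 0;
 *	while ((cookie = virtqueue_drain(vq, &last)) != NULL)
 *		mydev_discard(sc, cookie);
 */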

void
virtqueue_dump(struct virtqueue *vq)
{

        if (vq == NULL)
                return;

        printf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
            "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
            "used.idx=%d; avail.flags=0x%x; used.flags=0x%x\n",
            vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
            virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
            vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
            vq->vq_ring.used->idx, vq->vq_ring.avail->flags,
            vq->vq_ring.used->flags);
}

static void
vq_ring_init(struct virtqueue *vq)
{
        struct vring *vr;
        char *ring_mem;
        int i, size;

        ring_mem = vq->vq_ring_mem;
        size = vq->vq_nentries;
        vr = &vq->vq_ring;

        vring_init(vr, size, ring_mem, vq->vq_alignment);

        for (i = 0; i < size - 1; i++)
                vr->desc[i].next = i + 1;
        vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
}

static void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
        uint16_t avail_idx;

        /*
         * Place the head of the descriptor chain into the next slot and make
         * it usable to the host. The chain is made available now rather than
         * deferring to virtqueue_notify() in the hopes that if the host is
         * currently running on another CPU, we can keep it processing the new
         * descriptor.
         */
        avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
        vq->vq_ring.avail->ring[avail_idx] = desc_idx;

        wmb();
        vq->vq_ring.avail->idx++;

        /* Keep pending count until virtqueue_notify(). */
        vq->vq_queued_cnt++;
}
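
/*
 * Worked example (not part of the original file): avail->idx is a
 * free-running 16-bit counter, so the ring slot is derived by masking with
 * the power-of-two queue size. With vq_nentries = 256 and avail->idx =
 * 0xFFFF, the head lands in slot 0xFFFF & 0xFF = 255 and the subsequent
 * increment wraps avail->idx to 0; the host tracks the same free-running
 * value, so no entries are lost across the wrap.
 */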

static uint16_t
vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
    uint16_t head_idx, struct sglist *sg, int readable, int writable)
{
        struct sglist_seg *seg;
        struct vring_desc *dp;
        int i, needed;
        uint16_t idx;

        needed = readable + writable;

        for (i = 0, idx = head_idx, seg = sg->sg_segs;
             i < needed;
             i++, idx = dp->next, seg++) {
                VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
                    "premature end of free desc chain");

                dp = &desc[idx];
                dp->addr = seg->ss_paddr;
                dp->len = seg->ss_len;
                dp->flags = 0;

                if (i < needed - 1)
                        dp->flags |= VRING_DESC_F_NEXT;
                if (i >= readable)
                        dp->flags |= VRING_DESC_F_WRITE;
        }

        return (idx);
}

static int
vq_ring_use_indirect(struct virtqueue *vq, int needed)
{

        if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
                return (0);

        if (vq->vq_max_indirect_size < needed)
                return (0);

        if (needed < 2)
                return (0);

        return (1);
}

static void
vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
    struct sglist *sg, int readable, int writable)
{
        struct vring_desc *dp;
        struct vq_desc_extra *dxp;
        int needed;
        uint16_t head_idx;

        needed = readable + writable;
        VQASSERT(vq, needed <= vq->vq_max_indirect_size,
            "enqueuing too many indirect descriptors");

        head_idx = vq->vq_desc_head_idx;
        VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
        dp = &vq->vq_ring.desc[head_idx];
        dxp = &vq->vq_descx[head_idx];

        VQASSERT(vq, dxp->cookie == NULL,
            "cookie already exists for index %d", head_idx);
        dxp->cookie = cookie;
        dxp->ndescs = 1;

        dp->addr = dxp->indirect_paddr;
        dp->len = needed * sizeof(struct vring_desc);
        dp->flags = VRING_DESC_F_INDIRECT;

        vq_ring_enqueue_segments(vq, dxp->indirect, 0,
            sg, readable, writable);

        vq->vq_desc_head_idx = dp->next;
        vq->vq_free_cnt--;
        if (vq->vq_free_cnt == 0)
                VQ_RING_ASSERT_CHAIN_TERM(vq);
        else
                VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);

        vq_ring_update_avail(vq, head_idx);
}

static int
vq_ring_enable_interrupt(struct virtqueue *vq, uint16_t ndesc)
{

        /*
         * Enable interrupts, making sure we get the latest index of
         * what's already been consumed.
         */
        if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
                vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
        else
                vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;

        mb();

        /*
         * Enough items may have already been consumed to meet our threshold
         * since we last checked. Let our caller know so it processes the new
         * entries.
         */
        if (virtqueue_nused(vq) > ndesc)
                return (1);

        return (0);
}
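
/*
 * Worked example (not part of the original file): with the event index
 * feature negotiated and vq_used_cons_idx = 100, calling
 * vq_ring_enable_interrupt(vq, 8) writes 108 into used_event, asking the
 * host to interrupt only once its used->idx moves past 108. If the host had
 * already published more than 8 new entries (virtqueue_nused(vq) > 8) by the
 * time the write became visible, the function returns 1 so the caller
 * processes them without waiting for an interrupt that may never arrive.
 */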

static int
vq_ring_must_notify_host(struct virtqueue *vq)
{
        uint16_t new_idx, prev_idx, event_idx;

        if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
                new_idx = vq->vq_ring.avail->idx;
                prev_idx = new_idx - vq->vq_queued_cnt;
                event_idx = vring_avail_event(&vq->vq_ring);

                return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
        }

        return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
}

static void
vq_ring_notify_host(struct virtqueue *vq)
{

        VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index);
}

static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
        struct vring_desc *dp;
        struct vq_desc_extra *dxp;

        VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
        dp = &vq->vq_ring.desc[desc_idx];
        dxp = &vq->vq_descx[desc_idx];

        if (vq->vq_free_cnt == 0)
                VQ_RING_ASSERT_CHAIN_TERM(vq);

        vq->vq_free_cnt += dxp->ndescs;
        dxp->ndescs--;

        if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
                while (dp->flags & VRING_DESC_F_NEXT) {
                        VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
                        dp = &vq->vq_ring.desc[dp->next];
                        dxp->ndescs--;
                }
        }

        VQASSERT(vq, dxp->ndescs == 0,
            "failed to free entire desc chain, remaining: %d", dxp->ndescs);

        /*
         * We must append the existing free chain, if any, to the end of
         * newly freed chain. If the virtqueue was completely used, then
         * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
         */
        dp->next = vq->vq_desc_head_idx;
        vq->vq_desc_head_idx = desc_idx;
}