1/*
2 * Copyright (c) 2014 ETH Zurich.
3 * All rights reserved.
4 *
5 * This file is distributed under the terms in the attached LICENSE file.
6 * If you do not find this file, copies can be found by writing to:
7 * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
8 */
9#include <string.h>
10#include <stdio.h>
11
12#include <barrelfish/barrelfish.h>
13#include <barrelfish/waitset.h>
14
15#include <virtio/virtio.h>
16#include <virtio/virtio_ring.h>
17#include <virtio/virtqueue.h>
18#include <virtio/virtqueue_host.h>
19#include <virtio/virtio_device.h>
20#include <virtio/virtio_host.h>
21
22#include "debug.h"
23
24#define IS_POW2(num) (((num) != 0) && (((num) & (~(num) + 1)) == (num)))
25
26#define VIRTQUEUE_FLAG_INDIRECT  0x0001
27#define VIRTQUEUE_FLAG_EVENT_IDX 0x0002
28#define VIRTQUEUE_FLAG_FREE_CAP  0x8000
29
30/**
31 * this data structure stores additional information to the descriptors
32 */
struct vring_mem_info
{
    struct capref cap;              ///< frame capability backing this range
    lpaddr_t cap_offset;            ///< offset into the capability where the range starts
    lpaddr_t guest_paddr;           ///< guest-physical base address of the range
    lvaddr_t vaddr;                 ///< host-virtual address this range is mapped at
    size_t size;                    ///< size of the range in bytes
    struct vring_mem_info *next;    ///< next range; list is kept sorted by guest_paddr
};
42
43/**
44 * this data structure represents a VirtIO queue. It contains additional
45 * information not stored with the vring structure
46 */
struct virtqueue_host
{
    /* device information */
    struct virtio_device *device;       ///< pointer to the virtio device
    uint16_t queue_index;               ///< index of this queue in the device
    char name[VIRTQUEUE_NAME_SIZE];     ///< name of the queue for debugging

    /* vring information */
    struct vring vring;                ///< vring data structure
    struct capref vring_cap;            ///< capability of the vring data structure
    lvaddr_t vring_vaddr;          ///< virtual address of the vring in memory
    lpaddr_t vring_paddr;          ///< physical address of the vring
    lvaddr_t vring_align;          ///< the alignment of the vring

    uint16_t desc_num;                  ///< number of descriptors of this vring
    uint16_t desc_num_max;              ///< maximum number of descriptors supported
    uint16_t desc_num_queued;           ///< number of queued used descriptors

    uint32_t flags;                     ///< VIRTQUEUE_FLAG_* bits

    uint16_t avail_tail;                ///< last available index we consumed
    uint16_t avail_head;                ///< cache of the available head index
    uint16_t used_head;                 ///< index into the used head

    virtq_work_handler_t worker_fn;     ///< callback when new work arrives
    void *worker_arg;                   ///< argument for the worker function

    struct virtio_host_buf *host_buffers;  ///< free list of host buffer wrappers
    struct vring_mem_info *mem;   ///< sorted list of guest memory ranges
#if 0
    /* indirect descriptors */
    uint16_t max_indirect;
    size_t indirect_size;
    struct vq_desc_extra {
        void *cookie; << virtual address?
        struct vring_desc *indirect;
        vm_paddr_t indirect_paddr;
        uint16_t ndescs;
    }vq_descx[0];
#endif
};
88
89
90static inline struct virtio_host_buf *vqh_host_buf_deq(struct virtio_host_buf **queue)
91{
92    if(*queue == NULL) {
93        return NULL;
94    }
95    struct virtio_host_buf *buf = *queue;
96    *queue = buf->next;
97
98    return buf;
99}
100static inline void vqh_host_buf_enq(struct virtio_host_buf **queue,
101                                     struct virtio_host_buf *buf)
102{
103    assert(buf);
104    buf->next = *queue;
105    *queue = buf;
106}
107static inline uint16_t vqh_host_buf_enq_chain(struct virtio_host_buf **queue,
108                                              struct virtio_host_buf *buf)
109{
110    assert(buf);
111    struct virtio_host_buf *last = buf;
112    uint16_t count = 1;
113    while(last->next) {
114        last = last->next;
115        count++;
116    }
117    last->next = *queue;
118    *queue = buf;
119    return count;
120}
121
122static inline struct virtio_host_buf *vqh_host_buf_alloc(struct virtqueue_host *vq)
123{
124    return vqh_host_buf_deq(&vq->host_buffers);
125}
126static inline uint16_t vqh_host_buf_free_chain(struct virtqueue_host *vq,
127                                           struct virtio_host_buf *buf)
128{
129    return vqh_host_buf_enq_chain(&vq->host_buffers, buf);
130}
131
#if 0

/* currently unused: returns a single host buffer to the queue's free list */
static inline void vqh_host_buf_free(struct virtqueue_host *vq,
                                     struct virtio_host_buf *buf)
{
    vqh_host_buf_enq(&vq->host_buffers, buf);
}
#endif
140
141
142
143static errval_t virtio_vq_host_add_mem_range(struct virtqueue_host *vq,
144                                      struct vring_mem_info *meminfo)
145{
146    if (vq->mem == NULL) {
147        vq->mem = meminfo;
148        return SYS_ERR_OK;
149    }
150
151    struct vring_mem_info *prev, *current = vq->mem;
152
153    if (meminfo->guest_paddr < vq->mem->guest_paddr) {
154        assert((meminfo->guest_paddr+meminfo->size) < vq->mem->guest_paddr);
155        meminfo->next = vq->mem;
156        vq->mem = meminfo;
157        return SYS_ERR_OK;
158    }
159
160    prev = vq->mem;
161    current = current->next;
162    while(current) {
163        if (meminfo->guest_paddr < current->guest_paddr) {
164            assert((meminfo->guest_paddr+meminfo->size) < current->guest_paddr);
165            meminfo->next = current;
166            prev->next = meminfo;
167        }
168        prev = current;
169        current = current->next;
170    }
171    return SYS_ERR_OK;
172}
173
174static lvaddr_t virtio_vq_host_guest2virt(struct virtqueue_host *vq,
175                                          lpaddr_t guest_phys)
176{
177    struct vring_mem_info *mi = vq->mem;
178    while(mi) {
179        if (mi->guest_paddr > guest_phys) {
180            return 0;
181        }
182        if (mi->guest_paddr <= guest_phys) {
183            if ((mi->guest_paddr + mi->size) > guest_phys) {
184                return mi->vaddr + (guest_phys - mi->guest_paddr);
185            }
186        }
187        mi = mi->next;
188    }
189
190    return 0;
191}
192
193/*
194 * ============================================================================
195 * Public Interface
196 * ============================================================================
197 */
198
199/*
200 * ----------------------------------------------------------------------------
201 *  Virtqueue Allocation / Deallocation
202 */
203
204/**
205 * \brief allocates and initiates a new virtqueue structure with no vring mem
206 *
207 * \param vq     pointer to the array of virtqueue pointers
208 * \param setup  pointer to the setup information
209 * \param vq_num the number of virtqueues structures to allocate
210 *
211 * \returns SYS_ERR_OK on success
212 */
213errval_t virtio_vq_host_alloc(struct virtqueue_host ***vq,
214                              struct virtqueue_setup *setup,
215                              uint16_t vq_num)
216{
217    if (vq == NULL) {
218        return VIRTIO_ERR_ARG_INVALID;
219    }
220
221    struct virtqueue_host **qa = calloc(vq_num, sizeof(void *));
222    if (qa == NULL) {
223        return LIB_ERR_MALLOC_FAIL;
224    }
225
226    struct virtqueue_host *queue = calloc(vq_num, sizeof(struct virtqueue_host));
227    if (queue == NULL) {
228        free(qa);
229        return LIB_ERR_MALLOC_FAIL;
230    }
231
232    // the first setup contains the device
233    struct virtio_device *vdev = setup->device;
234
235    for (uint32_t i = 0; i < vq_num; ++i) {
236        queue->desc_num_max = setup->vring_ndesc;
237        queue->device = vdev;
238        /*
239        queue->intr_arg = setup->intr_arg;
240        queue->intr_handler = setup->intr_handler;
241        */
242        queue->queue_index = i;
243        queue->vring_align = VIRTQUEUE_ALIGNMENT;
244        queue->worker_arg = setup->worker_arg;
245        queue->worker_fn = setup->worker_fn;
246        // queue->desc_ind_max = setup->max_indirect;
247        qa[i] = queue;
248        queue++;
249    }
250
251    *vq = qa;
252
253    return SYS_ERR_OK;
254}
255
256/**
257 * \brief allocates and initiates a new virtqueue structure
258 *
259 * \param vdev      the VirtIO device
260 * \param vring_cap capability to be used for the vring
261 * \param vq_id     id of the queue to initialize
262 * \param ndesc     the number of descriptors in this queue
263 * \param buf_bits  size of the buffers (0 for none)
264 *
265 * \returns SYS_ERR_OK on success
266 */
267errval_t virtio_vq_host_init_vring(struct virtio_device *vdev,
268                                   struct capref vring_cap,
269                                   uint16_t vq_id,
270                                   uint16_t ndesc,
271                                   uint8_t has_buffers)
272{
273    errval_t err;
274
275    struct frame_identity id;
276    err = frame_identify(vring_cap, &id);
277    if (err_is_fail(err)) {
278        VIRTIO_DEBUG_VQ("failed to identify vring cap.\n");
279        return err;
280    }
281
282    VIRTIO_DEBUG_VQ("Assigning vring [0x%016lx] to virtq %u %s buffers\n",
283                    id.base,
284                    vq_id,
285                    (has_buffers ? "with" : "w/o"));
286
287    struct virtqueue_host *vqh = virtio_device_get_host_virtq(vdev, vq_id);
288    if (vqh == NULL) {
289        return VIRTIO_ERR_QUEUE_INVALID;
290    }
291
292    void *vring_base;
293    err = vspace_map_one_frame_attr(&vring_base,
294                                    id.bytes,
295                                    vring_cap,
296                                    VIRTIO_VREGION_FLAGS_RING,
297                                    NULL,
298                                    NULL);
299    if (err_is_fail(err)) {
300        VIRTIO_DEBUG_VQ("failed to map vring cap.\n");
301        return err;
302    }
303
304    VIRTIO_DEBUG_VQ("initializing vring of size %u\n", ndesc);
305    vring_init(&vqh->vring, ndesc, vqh->vring_align, vring_base);
306
307    vqh->vring_cap = vring_cap;
308    vqh->desc_num = ndesc;
309    vqh->vring_vaddr = (lvaddr_t)vring_base;
310    vqh->vring_paddr = id.base;
311
312    vqh->host_buffers = calloc(ndesc, sizeof(struct virtio_host_buf));
313    struct virtio_host_buf *buf = vqh->host_buffers;
314    assert(vqh->host_buffers);
315    for (uint32_t i = 0; i < ndesc-1; ++i) {
316        buf->next = (buf + 1);
317        buf++;
318    }
319
320    if (has_buffers) {
321        lpaddr_t offset = vring_size(ndesc, vqh->vring_align);
322        offset = ROUND_UP(offset, BASE_PAGE_SIZE);
323        struct vring_mem_info *mi = calloc(1, sizeof(struct vring_mem_info));
324        assert(mi);
325        mi->cap = vring_cap;
326        mi->cap_offset =offset;
327        mi->guest_paddr = virtio_host_translate_host_addr(id.base) + offset;
328        mi->vaddr = (lvaddr_t)(vring_base) + offset;
329        mi->size = id.bytes;
330        virtio_vq_host_add_mem_range(vqh, mi);
331    }
332    return SYS_ERR_OK;
333}
334
335/**
336 * \brief allocates and initiates a new virtqueue structure
337 *
338 * \param setup  pointer to the setup information
339 * \param vq     pointer where to store the new virtqueue pointer
340 *
341 * \returns SYS_ERR_OK on success
342 */
343
#if 0
/* NOTE: disabled legacy allocation path, kept for reference only; it refers
 * to fields (intr_handler, intr_arg) no longer present in virtqueue_host */
/**
 * \brief allocates and initiates a new virtqueue structure
 *
 * \param setup     pointer to the setup information
 * \param vring_cap capability to be used for the vring
 * \param vq        pointer where to store the new virtqueue pointer
 *
 * \returns SYS_ERR_OK on success
 */
errval_t virtio_vq_host_alloc_with_caps(struct virtqueue_setup *setup,
                struct capref vring_cap,
                struct virtqueue_host **ret_vq)
{
    errval_t err;

    assert(ret_vq);

    if (setup->vring_ndesc == 0 || !IS_POW2(setup->vring_ndesc)) {
        VIRTIO_DEBUG_VQ("ERROR: invalid size: %u\n", setup->vring_ndesc);
        return VIRTIO_ERR_SIZE_INVALID;
    }

    if (setup->max_indirect > VIRTIO_RING_MAX_INDIRECT) {
        VIRTIO_DEBUG_VQ("ERROR: too many indirect descriptors requested: [%u / %u]\n",
                        setup->vring_ndesc,
                        VIRTIO_RING_MAX_INDIRECT);
        return VIRTIO_ERR_MAX_INDIRECT;
    }

    assert(!capref_is_null(vring_cap));

    struct frame_identity id;
    err = invoke_frame_identify(vring_cap, &id);
    if (err_is_fail(err)) {
        return err;
    }

    size_t vring_mem_size = vring_size(setup->vring_ndesc, setup->vring_align);
    vring_mem_size = ROUND_UP(vring_mem_size, BASE_PAGE_SIZE);

    if (vring_mem_size > id.bytes) {
        VIRTIO_DEBUG_VQ("ERROR: supplied cap was too small %lx, needed %lx\n",
                        (id.bytes),
                        (uint64_t )vring_mem_size);
        return VIRTIO_ERR_CAP_SIZE;
    }

    void *vring_addr;
    err = vspace_map_one_frame(&vring_addr, vring_mem_size, vring_cap, NULL, NULL);
    if (err_is_fail(err)) {
        return err;
    }

    struct virtqueue_host *vq = calloc(1,
                    sizeof(struct virtqueue_host) + (setup->vring_ndesc
                                    * sizeof(struct vring_desc_info)));
    if (vq == NULL) {
        vspace_unmap(vring_addr);
        return LIB_ERR_MALLOC_FAIL;
    }

    vq->device = setup->device;
    strncpy(vq->name, setup->name, sizeof(vq->name));
    vq->queue_index = setup->queue_id;
    vq->desc_num = setup->vring_ndesc;
    vq->vring_align = setup->vring_align;
    vq->vring_cap = vring_cap;
    vq->vring_paddr = id.base;
    vq->vring_vaddr = (lvaddr_t) vring_addr;

    vq->intr_handler = setup->intr_handler;
    vq->intr_arg = setup->intr_arg;

    if (virtio_device_has_feature(setup->device, VIRTIO_RING_F_EVENT_IDX)) {
        vq->flags |= (1 << VIRTQUEUE_FLAG_EVENT_IDX);
    }

    vring_init(&vq->vring, vq->desc_num, vq->vring_align, (void *) vq->vring_vaddr);

    if (ret_vq) {
        *ret_vq = vq;
    }

    return SYS_ERR_OK;
}
#endif
431
432/**
433 * \brief frees the resources of previously allocated virtqueues
434 *
435 * \param vq pointer to the virtqueue memory to be freed
436 *
437 * \returns SYS_ERR_OK on success
438 */
errval_t virtio_vq_host_free(struct virtqueue_host *vq)
{
    /* NYI: would need to unmap the vring, free host_buffers and the
     * registered memory ranges, and return the caps to their origin */
    assert(!"NYI: virtio_vq_host_free");

    return SYS_ERR_OK;
}
445
446/*
447 * ----------------------------------------------------------------------------
448 *  Virtqueue Getter Functions
449 */
450
451/**
452 * \brief Returns the physical address of the vring.
453 *
454 * \param vq pointer to the virtqueue structure
455 *
456 * \returns the physical address of the vring
457 */
458lpaddr_t virtio_vq_host_get_vring_paddr(struct virtqueue_host *vq)
459{
460    return vq->vring_paddr;
461}
462
463/**
464 * \brief returns the alignment of the vring
465 *
466 * \param the virtqueue to get the alignment from
467 *
468 * \returns vring alignment
469 */
470lvaddr_t virtio_vq_host_get_vring_align(struct virtqueue_host *vq)
471{
472    return vq->vring_align;
473}
474
475/**
476 * \brief Returns the frame capability of the vring
477 *
478 * \param vq        pointer to the virtqueue structure
479 * \param ret_cap   memory location where to store the capref
480 */
481void virtio_vq_host_get_vring_cap(struct virtqueue_host *vq,
482                                  struct capref *ret_cap)
483{
484    if (ret_cap) {
485        *ret_cap = vq->vring_cap;
486    }
487}
488
489/**
490 * \brief Returns the number of elements (number of descriptors)in the vring of
491 *        this virtqueue
492 *
493 * \param vq pointer to the virtqueue structure
494 *
495 * \returns number of elements in the vring
496 */
497uint16_t virtio_vq_host_get_num_desc(struct virtqueue_host *vq)
498{
499    return vq->desc_num;
500}
501
502/**
503 * \brief Returns the queue index of the virtqueue of the device
504 *
505 * \param vq pointer to the virtqueue structure
506 *
507 * \returns queue index
508 */
509uint16_t virtio_vq_host_get_queue_index(struct virtqueue_host *vq)
510{
511    return vq->queue_index;
512}
513
514/**
515 * \brief Checks if the virtqueue is empty
516 *
517 * \param vq pointer to the virtqueue structure
518 *
519 * \returns 0 the queue is not empty
520 *          1 the queue is empty
521 */
522bool virtio_vq_host_is_empty(struct virtqueue_host *vq)
523{
524    return (vq->vring.avail->idx == vq->avail_tail);
525}
526
527/**
528 * \brief Calculates the number of available descriptors in this queue
529 *
530 * \param vq pointer to the virtqueue structure
531 *
532 * \returns number of used descriptors
533 */
534uint16_t virtio_vq_host_get_num_avail(struct virtqueue_host *vq)
535{
536    uint16_t num_used;
537
538    num_used = vq->vring.avail->idx - vq->avail_tail;
539
540    /* sanity check */
541    assert(num_used <= vq->desc_num);
542
543    return num_used;
544}
545
546/*
547 * ----------------------------------------------------------------------------
548 *  Interrupt handling
549 */
550
551/**
552 * \brief sends an interrupt to the guest that an event has happened on the
553 *        queue
554 *
555 * \param vq virtqueue to send the interrupt on
556 *
557 * \returns SYS_ERR_OK on success
558 */
559errval_t virtio_vq_host_intr_send(struct virtqueue_host *vq)
560{
561    assert("NYI");
562    return SYS_ERR_OK;
563}
564
565/*
566 * We layout the vring structure in memory as follows:
567 *
568 * struct vring {
569 *      // The actual descriptors (16 bytes each)
570 *      struct vring_desc desc[num];
571 *
572 *      // A ring of available descriptor heads with free-running index.
573 *      uint16_t avail_flags;
574 *      uint16_t avail_idx;
575 *      uint16_t available[num];
576 *      uint16_t used_event_idx;
577 *
578 *      // Padding to the next align boundary.
579 *      char pad[];
580 *
581 *      // A ring of used descriptor heads with free-running index.
582 *      uint16_t used_flags;
583 *      uint16_t used_idx;
584 *      struct vring_used_elem used[num];
585 *      uint16_t avail_event_idx;
586 * };
587 */
588
589/**
590 * \brief Maps the given capability and initializes the vring on the memory
591 *        backed by the supplied capability
592 *
593 * \param vr    pointer to the vring structure to be initialized
594 * \param num   the number of elements in the ring
595 * \param align alignment constraints for the vring
596 * \param cap   frame capability used as backing memory for the structure
597 *
598 * \return SYS_ERR_OK on success
599 *         errno      on failure
600 */
601errval_t vring_init_from_cap(struct vring *vr,
602                             uint16_t num,
603                             uintptr_t align,
604                             struct capref cap)
605{
606    errval_t err;
607
608    /* num must be a power of two */
609    assert(((num != 0) && ((num & (~num + 1)) == num)));
610
611    size_t size = vring_size(num, align);
612
613    struct frame_identity id;
614    err = frame_identify(cap, &id);
615    if (err_is_fail(err)) {
616        return err_push(err, LIB_ERR_FRAME_IDENTIFY);
617    }
618
619    /* check if we have enough space in the given cap */
620    if (id.bytes < size) {
621        return SYS_ERR_INVALID_SIZE_BITS;
622    }
623
624    void *addr;
625    err = vspace_map_one_frame(&addr, id.bytes, cap, NULL, NULL);
626    if (err_is_fail(err)) {
627        return err_push(err, LIB_ERR_VSPACE_MAP);
628    }
629
630    vring_init(vr, num, align, addr);
631
632    return SYS_ERR_OK;
633}
634
635/**
636 * \brief frees the resources used by the vring structure
637 *
638 * \param vr the vring to be freed
639 *
640 * \return SYS_ERR_OK on success
641 *         errno      on failure
642 */
errval_t vring_free(struct vring *vr)
{
    errval_t err;

    /* vr->desc is the start of the mapped region (see layout comment above) */
    err = vspace_unmap(vr->desc);
    if (err_is_fail(err)) {
        return err;
    }

    /* TODO: the backing frame capability must be handed back to its origin */
    assert(!"NYI: returning the cap to the origin");
    return SYS_ERR_OK;
}
655
656/*
657 * ----------------------------------------------------------------------------
658 *  Queue Management
659 */
660
661/**
662 * \brief updates the used ring of the virtqueue by placing the descriptor
663 *        into the used ring.
664 *
665 * \param vq    the virtqueue to update
666 * \param idx   index of the new descriptor chain head
667 */
668static void virtqueue_update_used(struct virtqueue_host *vq,
669                                  uint16_t idx,
670                                  uint16_t length)
671{
672    uint16_t used_head = vq->vring.used->idx & (vq->desc_num - 1);
673
674    vq->vring.used->ring[used_head].length = length;
675    vq->vring.used->ring[used_head].id = idx;
676
677    /*
678     * wmb();
679     */
680
681    vq->vring.used->idx++;
682    vq->desc_num_queued++;
683}
684
685/**
686 * \brief Enqueues a new descriptor chain into the virtqueue
687 *
688 * \param vq     the virtqueue the descriptor chain gets enqueued in
689 * \param bl     list of buffers to enqueue into the virtqueue
690 * \param st     state associated with this descriptor chain
691 * \param num_wr number of writable descriptors
692 * \param num_rd number of readable descriptors
693 *
694 * \returns SYS_ERR_OK on success
695 *          VIRTIO_ERR_* on failure
696 */
697errval_t virtio_vq_host_desc_enqueue(struct virtqueue_host *vq,
698                                     struct virtio_host_buf *buf,
699                                     uint16_t idx)
700{
701
702
703    uint16_t count = vqh_host_buf_free_chain(vq, buf);
704
705    VIRTIO_DEBUG_VQ("Enqueue idx=%u, count=%u\n", idx, count);
706
707    /*
708     * TODO: check if we should use indirect descriptors or not
709     */
710
711    /* update free values */
712    vq->used_head++;
713
714    virtqueue_update_used(vq, idx, count);
715
716
717    return SYS_ERR_OK;
718}
719
720/**
721 * \brief dequeues a descriptor chain form the virtqueue
722 *
723 * \param vq     the virtqueue to dequeue descriptors from
724 *
725 * \returns SYS_ERR_OK when the dequeue is successful
726 *          VIRTIO_ERR_NO_DESC_AVAIL when there was no descriptor to dequeue
727 *          VIRTIO_ERR_* if there was an error
728 */
729errval_t virtio_vq_host_desc_dequeue(struct virtqueue_host *vq)
730{
731
732    uint16_t avail_idx;
733
734    struct vring_desc *desc;
735
736    /*
737     * check if there is a descriptor available
738     */
739    if (vq->avail_tail == vq->vring.avail->idx) {
740        return VIRTIO_ERR_NO_DESC_AVAIL;
741    }
742
743    debug_printf("Avail Tail = %u\n", vq->avail_tail);
744
745    avail_idx = vq->avail_tail++ & (vq->desc_num - 1);
746
747    uint16_t desc_idx = vq->vring.avail->ring[avail_idx];
748
749    desc = &vq->vring.desc[desc_idx];
750
751    struct virtio_host_buf *buf_chain = NULL;
752    struct virtio_host_buf *buf = NULL;
753
754    uint16_t count = 1;
755    while(desc->flags & VIRTIO_RING_DESC_F_NEXT) {
756        buf = vqh_host_buf_alloc(vq);
757        assert(buf);
758        buf->size = desc->length;
759        buf->flags = desc->flags;
760        buf->vaddr = virtio_vq_host_guest2virt(vq, desc->addr);
761        vqh_host_buf_enq(&buf_chain, buf);
762        desc = &vq->vring.desc[desc->next];
763        count++;
764    }
765
766    /* handle the last one */
767    buf = vqh_host_buf_alloc(vq);
768    assert(buf);
769    buf->size = desc->length;
770    buf->flags = desc->flags;
771    buf->vaddr = virtio_vq_host_guest2virt(vq, desc->addr);
772    vqh_host_buf_enq(&buf_chain, buf);
773
774    VIRTIO_DEBUG_VQ("Dequeuing element on the available [%u] ring: [%u, %u]\n",
775                    avail_idx, desc_idx, count);
776
777    /*
778     * TODO: read memory barrier
779     * rmb();
780     * */
781
782    if (vq->worker_fn) {
783        vq->worker_fn(vq, vq->worker_arg, buf, desc_idx);
784    } else {
785        virtio_vq_host_desc_enqueue(vq, buf_chain, desc_idx);
786    }
787
788
789    return SYS_ERR_OK;
790}
791
792/**
793 * \brief polls the virtqueue
794 *
795 * \param vq         the virtqueue array to dequeue descriptors from
796 * \param vq_num     the number of entries in the vq array
797 *
798 * \returns SYS_ERR_OK when the dequeue is successful
799 *          VIRTIO_ERR_* if there was an error
800 */
801errval_t virtio_vq_host_poll(struct virtqueue_host **vqh,
802                             uint16_t vq_num)
803{
804    errval_t err;
805
806    /* XXX: handle the case where the queues have not been allocated */
807    if (vq_num == 0) {
808        return SYS_ERR_OK;
809    }
810
811    assert(vqh);
812
813    for (uint32_t i = 0; i < vq_num; ++i) {
814        struct virtqueue_host *vq = vqh[i];
815        if (vq->vring_vaddr == 0) {
816            continue;
817        }
818        err = virtio_vq_host_desc_dequeue(vq);
819        if (err_is_fail(err) && err_no(err) != VIRTIO_ERR_NO_DESC_AVAIL) {
820            return err;
821        }
822    }
823
824    return SYS_ERR_OK;
825}
826