1/*
2 * Copyright (c) 2014 ETH Zurich.
3 * All rights reserved.
4 *
5 * This file is distributed under the terms in the attached LICENSE file.
6 * If you do not find this file, copies can be found by writing to:
7 * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
8 */
9
10#include <string.h>
11#include <barrelfish/barrelfish.h>
12
13#include <virtio/virtio.h>
14#include <virtio/virtqueue.h>
15#include <virtio/virtio_ring.h>
16#include <virtio/virtio_device.h>
17
18#ifdef __VIRTIO_HOST__
19#include <virtio/virtio_host.h>
20#endif
21
22#include <dev/virtio/virtio_mmio_dev.h>
23
24#include "device.h"
25#include "backends/virtio_mmio.h"
26#include "debug.h"
27
/* maximum number of polling iterations when busy-waiting on a register */
#define REG_WAIT_MAX 0xFFFF

/* if set, yield the thread between polls instead of busy-spinning */
#define REG_WAIT_USE_YIELD 1

/*
 * NOTE(review): call sites pass `&mmio_dev` (a pointer to the local
 * pointer) while these macros expand to `_dev->regs`; verify that the
 * expansion type-checks against the Mackerel accessors, which elsewhere
 * are called as `fn(&mmio_dev->regs)`.
 */

/**
 * \brief signals readiness by writing 0x1 through the given Mackerel
 *        ready-field write function
 *
 * \param _reg  Mackerel write function for the ready field
 * \param _dev  device whose register set is written
 */
#define REGISTER_SEND_READY(_reg,_dev)      \
    do {                                    \
        _reg(_dev->regs, 0x1);              \
    } while (0)
    /* BUGFIX: no trailing ';' after while(0) -- the old form expanded to
     * two statements and would break an unbraced if/else at a call site */

#if REG_WAIT_USE_YIELD
/**
 * \brief waits until the given register predicate reads non-zero,
 *        yielding the thread between polls
 */
#define REGISTER_WAIT_READY(_reg,_dev)      \
    do {                                    \
        while (!_reg(_dev->regs)) {         \
            thread_yield();                 \
        }                                   \
    } while (0)
#else
/**
 * \brief busy-waits (bounded by REG_WAIT_MAX iterations) until the
 *        register predicate reads non-zero; may time out silently
 */
#define REGISTER_WAIT_READY(_reg,_dev)      \
    do {                                    \
        uint32_t wait = REG_WAIT_MAX;       \
        while (!_reg(_dev->regs) && (--wait))     \
            ;                               \
    } while (0)
#endif
53/**
54 * \brief queries the current status flags of the VirtIO device
55 *
56 * \param dev        VirtIO device
57 * \param ret_status pointer to memory to store the return value
58 *
59 * \returns SYS_ERR_OK on success
60 */
61static errval_t device_get_status(struct virtio_device *dev,
62                                  uint32_t *ret_status)
63{
64    virtio_mmio_status_t status;
65
66    struct virtio_device_mmio *mmio_dev = (struct virtio_device_mmio *) dev;
67
68    status = virtio_mmio_status_rd(&mmio_dev->regs);
69
70    if (ret_status) {
71        *ret_status = status;
72    }
73
74    VIRTIO_DEBUG_DEV("getting mmio device status: %x\n", status);
75
76    return SYS_ERR_OK;
77}
78
79/**
80 * \brief   updates the device status field of the VirtIO device
81 *
82 * \param dev        device to set the status
83 * \param new_status the status to set the device to
84 *
85 * \returns SYS_ERR_OK on success
86 *          VIRTIO_ERR_DEVICE_STATUS if the status change does not make sense
87 */
88static errval_t device_set_status(struct virtio_device *dev,
89                                  uint8_t new_status)
90{
91    VIRTIO_DEBUG_DEV("setting mmio device status: %u\n", new_status);
92
93    struct virtio_device_mmio *mmio_dev = (struct virtio_device_mmio *) dev;
94
95    virtio_mmio_status_t status = virtio_mmio_status_default;
96
97    switch (new_status) {
98        case VIRTIO_DEVICE_STATUS_RESET:
99            virtio_mmio_reset_wr(&mmio_dev->regs, virtio_mmio_device_reset);
100            return SYS_ERR_OK;
101            break;
102        case VIRTIO_DEVICE_STATUS_FAILED:
103            status = virtio_mmio_status_failed_insert(status, 0x1);
104            break;
105        case VIRTIO_DEVICE_STATUS_ACKNOWLEDGE:
106            status = virtio_mmio_status_rd(&mmio_dev->regs);
107            if (status != 0x0) {
108                return VIRTIO_ERR_DEVICE_STATUS;
109            }
110            status = virtio_mmio_status_acknowledge_insert(status, 0x1);
111            break;
112        case VIRTIO_DEVICE_STATUS_DRIVER:
113            status = virtio_mmio_status_rd(&mmio_dev->regs);
114            if (!virtio_mmio_status_acknowledge_extract(status)) {
115                return VIRTIO_ERR_DEVICE_STATUS;
116            }
117            status = virtio_mmio_status_driver_insert(status, 0x1);
118            break;
119
120        case VIRTIO_DEVICE_STATUS_FEATURES_OK:
121            status = virtio_mmio_status_rd(&mmio_dev->regs);
122            if (!virtio_mmio_status_driver_extract(status)) {
123                return VIRTIO_ERR_DEVICE_STATUS;
124            }
125            status = virtio_mmio_status_features_ok_insert(status, 0x1);
126            break;
127
128        case VIRTIO_DEVICE_STATUS_DRIVER_OK:
129            status = virtio_mmio_status_rd(&mmio_dev->regs);
130            if (!virtio_mmio_status_features_ok_extract(status)) {
131                return VIRTIO_ERR_DEVICE_STATUS;
132            }
133            status = virtio_mmio_status_driver_ok_insert(status, 0x1);
134            break;
135    }
136
137    virtio_mmio_status_wr(&mmio_dev->regs, status);
138
139    return SYS_ERR_OK;
140}
141
142#ifndef __VIRTIO_HOST__
143
144/**
145 * \brief resets the VirtIO deivces
146 *
147 * \param dev the device to reset
148 *
149 * \returns SYS_ERR_OK on success
150 */
151static errval_t device_reset(struct virtio_device *dev)
152{
153    VIRTIO_DEBUG_DEV("resetting mmio device: %s\n", dev->dev_name);
154    /*
155     * TODO: is there some clean up needed ?
156     */
157    return device_set_status(dev, VIRTIO_DEVICE_STATUS_RESET);
158}
159
160/**
161 *
162 */
163static errval_t device_get_device_features(struct virtio_device *dev,
164                                           uint64_t *ret_features)
165{
166    struct virtio_device_mmio *mmio_dev = (struct virtio_device_mmio *) dev;
167
168    uint8_t selector = 0x0;
169    uint64_t features, tmp = 0;
170
171    if (virtio_mmio_dev_features_sel_selector_rdf(&mmio_dev->regs)) {
172        features = virtio_mmio_dev_features_features_rdf(&mmio_dev->regs);
173        features <<= 32;
174    } else {
175        features = virtio_mmio_dev_features_features_rdf(&mmio_dev->regs);
176        selector = 0x1;
177    }
178
179    virtio_mmio_dev_features_sel_selector_wrf(&mmio_dev->regs, selector);
180
181    REGISTER_WAIT_READY(virtio_mmio_dev_features_sel_ready_rdf, &mmio_dev);
182
183    tmp = virtio_mmio_dev_features_features_rdf(&mmio_dev->regs);
184    if (selector) {
185        features |= (tmp << 32);
186    } else {
187        features |= tmp;
188    }
189
190    virtio_mmio_dev_features_sel_ready_wrf(&mmio_dev->regs, 0);
191
192    return SYS_ERR_OK;
193}
194
195
196
/**
 * \brief writes the negotiated driver feature bits to the device using the
 *        two banked 32-bit driver-feature registers
 *
 * \param dev       VirtIO device
 * \param features  the 64-bit feature word to write
 *
 * \returns SYS_ERR_OK on success
 *
 * The transfer is a two-step handshake: write one 32-bit half, raise the
 * ready bit and wait until the host clears it, then flip the selector and
 * repeat with the other half.
 */
static errval_t device_set_driver_features(struct virtio_device *dev,
                                           uint64_t features)
{
    struct virtio_device_mmio *mmio_dev = (struct virtio_device_mmio *) dev;

    debug_printf("device_set_driver_features\n");

    uint8_t selector = 0x0;

    uint32_t reg_val;

    /* start with whichever half the selector currently points at; the
     * selector we will switch to afterwards is the opposite bank */
    if (virtio_mmio_driv_features_sel_selector_rdf(&mmio_dev->regs)) {
        reg_val = (uint32_t) (features >> 32);
    } else {
        reg_val = (uint32_t) (features);
        selector = 0x1;
    }

    virtio_mmio_driv_features_features_wrf(&mmio_dev->regs, reg_val);

    /* raise ready, then wait until the host consumed the value (the
     * leading '!' in the wait argument inverts the predicate inside the
     * macro, so this spins while the ready bit is still set) */
    REGISTER_SEND_READY(virtio_mmio_driv_features_sel_ready_wrf, &mmio_dev);
    REGISTER_WAIT_READY(!virtio_mmio_driv_features_sel_ready_rdf, &mmio_dev);

    /* switch to the other bank and transfer the remaining half */
    virtio_mmio_driv_features_sel_wr(&mmio_dev->regs, selector);

    if (selector) {
        reg_val = (uint32_t) (features >> 32);
    } else {
        reg_val = (uint32_t) (features);
    }

    virtio_mmio_driv_features_features_wrf(&mmio_dev->regs, reg_val);

    REGISTER_SEND_READY(virtio_mmio_driv_features_sel_ready_wrf, &mmio_dev);
    REGISTER_WAIT_READY(!virtio_mmio_driv_features_sel_ready_rdf, &mmio_dev);

    // clear the ready bit in the end
    virtio_mmio_driv_features_sel_ready_wrf(&mmio_dev->regs, 0x0);

    return SYS_ERR_OK;
}
238
239
/**
 * \brief registers a virtqueue with the device by programming the queue
 *        select, size and ring-address registers and marking it ready
 *
 * \param dev  VirtIO device
 * \param vq   initialized virtqueue to hand to the device
 *
 * \returns SYS_ERR_OK on success
 *          VIRTIO_ERR_QUEUE_ACTIVE  if the queue was already activated
 *          VIRTIO_ERR_QUEUE_INVALID if the device does not implement it
 */
static errval_t device_set_virtq(struct virtio_device *dev,
                                 struct virtqueue *vq)
{
    struct virtio_device_mmio *mmio_dev = (struct virtio_device_mmio *) dev;

    uint16_t queue_index = virtio_virtqueue_get_queue_index(vq);

    VIRTIO_DEBUG_TL("setting virtqueue [VQ(%u) @ %s]\n", queue_index, dev->dev_name);

    /* we need to change to the correct queue */
    if (virtio_mmio_queue_sel_selector_rdf(&mmio_dev->regs) != queue_index) {
        virtio_mmio_queue_sel_selector_wrf(&mmio_dev->regs, queue_index);
        REGISTER_WAIT_READY(virtio_mmio_queue_sel_ready_rdf, &mmio_dev);
    }

    /* TODO> wait till queue has been selected */

    if (virtio_mmio_queue_ready_ready_rdf(&mmio_dev->regs)) {
        /* queue has already been activated... */
        return VIRTIO_ERR_QUEUE_ACTIVE;
    }

    if (virtio_mmio_queue_max_size_rdf(&mmio_dev->regs) == 0x0) {
        /* the queue is not implemented */
        return VIRTIO_ERR_QUEUE_INVALID;
    }

    uint16_t size = virtio_virtqueue_get_num_desc(vq);
    virtio_mmio_queue_num_size_wrf(&mmio_dev->regs, size);

    lpaddr_t paddr = virtio_virtqueue_get_vring_paddr(vq);
    lpaddr_t align = virtio_virtqueue_get_vring_align(vq);

    /* the descriptor table sits at the start of the vring memory */
    virtio_mmio_queue_desc_hi_addr_wrf(&mmio_dev->regs, (uint32_t) (paddr >> 32));
    virtio_mmio_queue_desc_lo_addr_wrf(&mmio_dev->regs, (uint32_t) (paddr));

    paddr += size * sizeof(struct vring_desc);

    /* NOTE(review): the virtio 1.0 split-ring layout places the avail
     * ring directly after the descriptor table and the used ring after
     * the alignment boundary; here the 'used' registers are programmed
     * first and 'avail' after the aligned gap -- verify this matches the
     * ring layout produced by the virtqueue setup code. */
    virtio_mmio_queue_used_hi_addr_wrf(&mmio_dev->regs, (uint32_t) (paddr >> 32));
    virtio_mmio_queue_used_lo_addr_wrf(&mmio_dev->regs, (uint32_t) (paddr));

    /* flags + idx + ring[size] + event: the byte size of an avail ring */
    paddr += sizeof(uint16_t) * (2 + size + 1);
    /* round up to the ring alignment boundary */
    paddr = (paddr + align - 1) & ~(align - 1);

    virtio_mmio_queue_avail_hi_addr_wrf(&mmio_dev->regs, (uint32_t) (paddr >> 32));
    virtio_mmio_queue_avail_lo_addr_wrf(&mmio_dev->regs, (uint32_t) (paddr));

    /* signal the host that the queue is ready */
    virtio_mmio_queue_ready_ready_wrf(&mmio_dev->regs, 0x1);
    virtio_mmio_queue_ready_signal_wrf(&mmio_dev->regs, 0x1);

    /* wait until the host cleared the signal bit (note the '!' inverts
     * the wait predicate inside the macro) */
    REGISTER_WAIT_READY(!virtio_mmio_queue_ready_signal_rdf, &mmio_dev);

    return SYS_ERR_OK;
}
295
296
297static errval_t device_get_queue_num_max(struct virtio_device *dev,
298                                         uint16_t queue_index,
299                                         uint16_t *num_max)
300{
301    struct virtio_device_mmio *mmio_dev = (struct virtio_device_mmio *) dev;
302
303    virtio_mmio_queue_sel_selector_wrf(&mmio_dev->regs, queue_index);
304
305    /* TODO: wait till data ready */
306
307    REGISTER_WAIT_READY(virtio_mmio_queue_sel_ready_rdf, &mmio_dev);
308
309    uint16_t qmax = (uint16_t) virtio_mmio_queue_max_size_rdf(&mmio_dev->regs);
310
311    if (num_max) {
312        *num_max = qmax;
313    }
314
315    return SYS_ERR_OK;
316}
317
318static errval_t device_negotiate_features(struct virtio_device *dev,
319                                          uint64_t driver_features)
320{
321    uint64_t device_features = 0x0;
322    device_get_device_features(dev, &device_features);
323
324    driver_features &= device_features;
325
326    VIRTIO_DEBUG_TL("setting negotiated features to: 0x%lx\n", driver_features);
327
328    device_set_driver_features(dev, driver_features);
329
330    dev->features = driver_features;
331
332    return SYS_ERR_OK;
333}
334
335static errval_t device_notify_virtq(struct virtio_device *dev,
336                                    uint16_t vq_id)
337{
338    struct virtio_device_mmio *mmio_dev = (struct virtio_device_mmio *) dev;
339
340    virtio_mmio_queue_notify_index_wrf(&mmio_dev->regs, vq_id);
341
342    return SYS_ERR_OK;
343}
344
345#endif
346
/**
 * \brief reads the device configuration space and copies it into a local buffer
 *
 * \param vdev  virtio device
 * \param buf   pointer to the buffer to store the data
 * \param len   the length of the buffer
 *
 * \returns SYS_ERR_OK on success
 *          VIRTIO_ERR_SIZE_INVALID if the request exceeds the device region
 */
static errval_t device_config_read(struct virtio_device *vdev,
                                   void *buf,
                                   size_t len)
{
    struct virtio_device_mmio *mmio_dev = (struct virtio_device_mmio *) vdev;

    /* NOTE(review): this bound permits reads past the end of the mapped
     * region -- since the config space starts at virtio_mmio_config_offset
     * inside a dev_size region, the usable size looks like
     * (dev_size - config_offset), not (dev_size + config_offset);
     * verify against how dev_size is set by the caller. */
    if (len > (mmio_dev->dev_size + virtio_mmio_config_offset)) {
        return VIRTIO_ERR_SIZE_INVALID;
    }

    /* the config space lives at a fixed offset within the register page */
    uint8_t *config_space = ((uint8_t *) mmio_dev->dev_base)
                    + virtio_mmio_config_offset;

    memcpy(buf, config_space, len);

    return SYS_ERR_OK;
}
373
/**
 * \brief writes to the configuration space of a device
 *
 * \param dev     virtio device
 * \param config  pointer to the buffer with data to update
 * \param offset  byte offset into the configuration space
 * \param length  number of bytes to write
 *
 * \returns SYS_ERR_OK on success
 *          VIRTIO_ERR_SIZE_INVALID if the request exceeds the device region
 */
static errval_t device_config_write(struct virtio_device *dev,
                                    void *config,
                                    size_t offset,
                                    size_t length)
{
    struct virtio_device_mmio *mmio_dev = (struct virtio_device_mmio *) dev;

    /* NOTE(review): same suspicious bound as device_config_read -- the
     * usable config space looks like (dev_size - config_offset) bytes,
     * so this check may allow writes past the mapped region; verify. */
    if ((length + offset) > (mmio_dev->dev_size + virtio_mmio_config_offset)) {
        return VIRTIO_ERR_SIZE_INVALID;
    }

    /* destination lies at config-space base plus the caller's offset */
    size_t config_offset = virtio_mmio_config_offset + offset;

    uint8_t *config_space = ((uint8_t *) mmio_dev->dev_base) + config_offset;

    memcpy(config_space, config, length);

    return SYS_ERR_OK;
}
402
403
#ifdef __VIRTIO_HOST__

/* forward declaration of the host-side poll loop (defined below) */
static errval_t virtio_device_mmio_poll_host(struct virtio_device *host);

/* backend function table used when compiled as the VirtIO host side */
struct virtio_device_fn virtio_mmio_fn = {
    .set_status = device_set_status,
    .get_status = device_get_status,
    .get_config = device_config_read,
    .set_config = device_config_write,
    .poll = virtio_device_mmio_poll_host
};
#else
/* backend function table used when compiled as the guest-side driver */
struct virtio_device_fn virtio_mmio_fn = {
    .reset = device_reset,
    .set_status = device_set_status,
    .get_status = device_get_status,
    .negotiate_features = device_negotiate_features,
    .set_virtq = device_set_virtq,
    .get_queue_num_max = device_get_queue_num_max,
    .get_config = device_config_read,
    .set_config = device_config_write,
    .notify = device_notify_virtq
};
#endif
428
429/**
430 * \brief initializes and allocates a VirtIO device structure for the MMIO backend
431 */
432errval_t virtio_device_mmio_init(struct virtio_device **dev,
433                                 struct virtio_device_setup *info)
434{
435    struct virtio_device_mmio *mmio_dev;
436    errval_t err;
437    mmio_dev = calloc(1, sizeof(*mmio_dev));
438    if (mmio_dev == NULL) {
439        return LIB_ERR_MALLOC_FAIL;
440    }
441
442    virtio_mmio_initialize(&mmio_dev->regs,
443                           (mackerel_addr_t) (info->backend.args.mmio.dev_base));
444    mmio_dev->dev_base = info->backend.args.mmio.dev_base;
445    mmio_dev->dev_size = info->backend.args.mmio.dev_size;
446
447    /**
448     * 4.2.3.1.1 Driver Requirements: Device Initialization
449     * The driver MUST start the device initialization by reading and checking
450     * values from MagicValue and Version.
451     * If both values are valid, it MUST read DeviceID and if its value is zero
452     * (0x0) MUST abort initialization and MUST NOT access any other register.
453     */
454    if (virtio_mmio_magic_value_rd(&mmio_dev->regs) != virtio_mmio_magic_value) {
455        VIRTIO_DEBUG_DEV("Virtio Magic Value is invalid\n");
456        return VIRTIO_ERR_NOT_VIRTIO_DEVICE;
457    }
458
459    err = SYS_ERR_OK;
460    switch (virtio_mmio_version_rd(&mmio_dev->regs)) {
461        case virtio_mmio_version_legacy:
462            VIRTIO_DEBUG_DEV("Handling of legacy devices not supported.\n");
463            err = VIRTIO_ERR_VERSION_MISMATCH;
464            break;
465        case virtio_mmio_version_virtio10:
466            err = SYS_ERR_OK;
467            break;
468        default:
469            VIRTIO_DEBUG_DEV("Virtio version is invalid.\n");
470            err = VIRTIO_ERR_NOT_VIRTIO_DEVICE;
471            break;
472    }
473    if (err_is_fail(err)) {
474        return err;
475    }
476
477    virtio_mmio_deviceid_t devid = virtio_mmio_deviceid_rd(&mmio_dev->regs);
478    if (devid == 0) {
479        VIRTIO_DEBUG_DEV("Virtio device ID invalid.\n");
480        return VIRTIO_ERR_NOT_VIRTIO_DEVICE;
481    }
482
483    if (devid != info->dev_type) {
484        VIRTIO_DEBUG_DEV("VirtIO device id not as expected [%x, %x].\n",
485                         devid,
486                         info->dev_type);
487        return VIRTIO_ERR_DEVICE_TYPE;
488    }
489
490    mmio_dev->dev.backend = VIRTIO_DEVICE_BACKEND_MMIO;
491    mmio_dev->dev.f = &virtio_mmio_fn;
492    mmio_dev->dev.dev_type = virtio_mmio_deviceid_id_rdf(&mmio_dev->regs);
493
494    *dev = &mmio_dev->dev;
495
496    return SYS_ERR_OK;
497}
498
499#ifdef __VIRTIO_HOST__
500
501static errval_t handle_device_status_change(struct virtio_device_mmio *mmio_host,
502                                            uint8_t new_status)
503{
504    VIRTIO_DEBUG_TL("handle_device_status_change: [0x%x]\n", new_status);
505    mmio_host->dev_reg.status = new_status;
506    return SYS_ERR_OK;
507}
508
509static errval_t handle_dev_feature_sel_change(struct virtio_device_mmio *mmio_host,
510                                              uint8_t selector)
511{
512    VIRTIO_DEBUG_TL("handle_dev_feature_sel_change: [0x%x]\n", selector);
513    mmio_host->dev_reg.dev_feature_sel = selector;
514
515    if (selector) {
516        virtio_mmio_dev_features_wr(&mmio_host->regs,
517                                    (uint32_t) (mmio_host->dev.device_features >> 32));
518    } else {
519        virtio_mmio_dev_features_wr(&mmio_host->regs,
520                                    (uint32_t) mmio_host->dev.device_features);
521    }
522
523    virtio_mmio_dev_features_sel_ready_wrf(&mmio_host->regs, 0x1);
524
525    return SYS_ERR_OK;
526}
527
528static errval_t handle_driv_feature_change(struct virtio_device_mmio *mmio_host,
529                                           uint8_t selector,
530                                           uint32_t feature)
531{
532    VIRTIO_DEBUG_TL("handle_driv_feature_change: [0x%x] [0x%08x]\n",
533                    selector,
534                    feature);
535
536    mmio_host->dev_reg.driv_feature_sel = selector;
537    mmio_host->dev_reg.driv_features[selector] = feature;
538
539    virtio_mmio_driv_features_sel_ready_wrf(&mmio_host->regs, 0x0);
540
541    return SYS_ERR_OK;
542}
543
544static errval_t handle_queue_sel_change(struct virtio_device_mmio *mmio_host,
545                                        uint16_t selector)
546{
547    VIRTIO_DEBUG_TL("handle_queue_sel_change: [0x%x]\n", selector);
548
549    mmio_host->dev_reg.queue_sel = selector;
550
551    /*
552     * TODO: load the respective queue registers
553     */
554
555    virtio_mmio_queue_sel_ready_wrf(&mmio_host->regs, 0x0);
556
557    return SYS_ERR_OK;
558}
559
560static errval_t handle_queue_change(struct virtio_device_mmio *mmio_host,
561                                    uint16_t selector)
562{
563    VIRTIO_DEBUG_TL("handle_queue_change: [0x%x]\n", selector);
564
565    virtio_mmio_queue_ready_signal_wrf(&mmio_host->regs, 0x0);
566
567    return SYS_ERR_OK;
568}
569
570static errval_t handle_queue_notify(struct virtio_device_mmio *mmio_host,
571                                    uint16_t queue)
572{
573    VIRTIO_DEBUG_TL("handle_queue_notify: [0x%x]\n", queue);
574    if (mmio_host->dev.cb_h->notify) {
575        return mmio_host->dev.cb_h->notify(&mmio_host->dev, queue);
576    }
577    return SYS_ERR_OK;
578}
579
/**
 * \brief host-side poll loop: compares the emulated registers against
 *        shadow copies to detect guest writes and dispatches handlers
 *
 * \param host  the host-side VirtIO device to poll
 *
 * \returns result of the last triggered handler, or
 *          VIRTIO_ERR_DEVICE_IDLE if no register changed
 */
static errval_t virtio_device_mmio_poll_host(struct virtio_device *host)
{
    errval_t err = VIRTIO_ERR_DEVICE_IDLE;
    struct virtio_device_mmio *mmio_host = (struct virtio_device_mmio *) host;

    uint32_t reg_value, selector;

    /* status register: compare against the last value we observed */
    reg_value = virtio_mmio_status_rd(&mmio_host->regs);
    if (mmio_host->dev_reg.status != (uint8_t) reg_value) {
        err = handle_device_status_change(mmio_host, (uint8_t) reg_value);
    }

    /* device-feature bank selector written by the guest */
    selector = virtio_mmio_dev_features_sel_selector_rdf(&mmio_host->regs);
    if (mmio_host->dev_reg.dev_feature_sel != selector) {
        err = handle_dev_feature_sel_change(mmio_host, selector);
    }

    /* driver features: act only once the guest raised its ready bit */
    reg_value = virtio_mmio_driv_features_rd(&mmio_host->regs);
    selector = virtio_mmio_driv_features_sel_selector_rdf(&mmio_host->regs);
    if ((selector != mmio_host->dev_reg.driv_feature_sel) || (mmio_host->dev_reg
                    .driv_features[selector]
                                                              != reg_value)) {
        if (virtio_mmio_driv_features_sel_ready_rdf(&mmio_host->regs)) {
            err = handle_driv_feature_change(mmio_host,
                                             (uint8_t) selector,
                                             reg_value);
        }
    } else {
        // we have to ack the guest
        virtio_mmio_driv_features_sel_ready_wrf(&mmio_host->regs, 0x0);
    }

    /* NOTE(review): this driv_features read is dead -- reg_value is
     * overwritten below before it is used again */
    reg_value = virtio_mmio_driv_features_rd(&mmio_host->regs);
    selector = virtio_mmio_queue_sel_selector_rdf(&mmio_host->regs);
    if (selector != mmio_host->dev_reg.queue_sel) {
        err = handle_queue_sel_change(mmio_host, (uint16_t) selector);
    }

    if (virtio_mmio_queue_ready_signal_rdf(&mmio_host->regs)) {
        err = handle_queue_change(mmio_host, selector);
    }

    /* NOTE(review): the notify index register is read twice; a guest
     * write between the two reads would make the handler see a stale
     * value -- confirm whether one read should serve both uses.
     * 0xFFFF acts as the "no pending notification" sentinel. */
    reg_value = virtio_mmio_queue_notify_index_rdf(&mmio_host->regs);
    if (virtio_mmio_queue_notify_index_rdf(&mmio_host->regs) != 0xFFFF) {
        err = handle_queue_notify(mmio_host, (uint16_t)reg_value);
        virtio_mmio_queue_notify_index_wrf(&mmio_host->regs, 0xFFFF);
    }

    /* TODO: poll the queues */

    return err;
}
633
634/**
635 * \brief initializes a VirtIO device on the host side using the MMIO transpot
636 *
637 * \param dev   returns a pointer to the newly allocated device structure
638 * \param info  initialization parameters
639 *
640 * \returns SYS_ERR_OK on success
641 */
642errval_t virtio_device_mmio_init_host(struct virtio_device **host,
643                                      struct virtio_device_setup *setup)
644{
645    struct virtio_device_mmio *mmio_host;
646    errval_t err;
647
648    assert(host);
649
650    mmio_host = malloc(sizeof(*mmio_host));
651    if (mmio_host == NULL) {
652        return LIB_ERR_MALLOC_FAIL;
653    }
654
655    mmio_host->dev.vqh = calloc(setup->vq_num, sizeof(void *));
656    if (mmio_host->dev.vqh == NULL) {
657        free(mmio_host);
658        return LIB_ERR_MALLOC_FAIL;
659    }
660
661    /*
662     * TODO> Check for minimum MMIO devie size
663     */
664
665    if (capref_is_null(setup->backend.args.mmio.dev_cap)) {
666        VIRTIO_DEBUG_DEV("allocating a new device frame.\n");
667        setup->backend.args.mmio.dev_size += VIRTIO_MMIO_DEVICE_SIZE;
668        err = frame_alloc(&mmio_host->dev.dev_cap,
669                          setup->backend.args.mmio.dev_size,
670                          &setup->backend.args.mmio.dev_size);
671        if (err_is_fail(err)) {
672            free(mmio_host->dev.vqh);
673            free(mmio_host);
674            return err;
675        }
676    } else {
677        mmio_host->dev.dev_cap = setup->backend.args.mmio.dev_cap;
678    }
679
680    struct frame_identity id;
681    err = invoke_frame_identify(mmio_host->dev.dev_cap, &id);
682    if (err_is_fail(err)) {
683        VIRTIO_DEBUG_DEV("ERROR: could not identify the frame.");
684        return err;
685    }
686
687    assert(id.bytes > VIRTIO_MMIO_DEVICE_SIZE);
688
689    VIRTIO_DEBUG_DEV("Using frame [0x%016lx, 0x%lx] as device frame.\n",
690                     id.base,
691                     id.bytes);
692
693    if (setup->backend.args.mmio.dev_base == NULL) {
694        VIRTIO_DEBUG_DEV("mapping device frame.\n");
695        err = vspace_map_one_frame_attr(&setup->backend.args.mmio.dev_base,
696                                        setup->backend.args.mmio.dev_size,
697                                        mmio_host->dev.dev_cap,
698                                        VIRTIO_VREGION_FLAGS_DEVICE,
699                                        NULL,
700                                        NULL);
701        if (err_is_fail(err)) {
702            if (capref_is_null(setup->backend.args.mmio.dev_cap)) {
703                cap_destroy(mmio_host->dev.dev_cap);
704            }
705            free(mmio_host->dev.vqh);
706            free(mmio_host);
707            return err;
708        }
709    } else {
710        assert(setup->backend.args.mmio.dev_size > VIRTIO_MMIO_DEVICE_SIZE);
711    }
712
713    mmio_host->dev.device_features = setup->features;
714
715    for (uint32_t i = 0; i < setup->vq_num; ++i) {
716        /* todo: initialize virtqueues */
717        // mmio_host->dev.vqh[i].ndesc = setup->queue_num_max[i];
718    }
719
720    mmio_host->dev_base = setup->backend.args.mmio.dev_base;
721    mmio_host->dev_size = setup->backend.args.mmio.dev_size;
722
723    VIRTIO_DEBUG_DEV("initialize mmio registers to [%016lx].\n",
724                     (uintptr_t)mmio_host->dev_base);
725
726    virtio_mmio_initialize(&mmio_host->regs,
727                           (mackerel_addr_t) (mmio_host->dev_base));
728
729    /* initialize the device with values */
730    virtio_mmio_magic_value_wr(&mmio_host->regs, virtio_mmio_magic_value);
731    virtio_mmio_deviceid_id_wrf(&mmio_host->regs, setup->dev_type);
732    virtio_mmio_version_version_wrf(&mmio_host->regs, virtio_mmio_version_virtio10);
733
734    virtio_mmio_dev_features_sel_wr(&mmio_host->regs, 0x0);
735    virtio_mmio_driv_features_sel_wr(&mmio_host->regs, 0x0);
736    virtio_mmio_queue_sel_wr(&mmio_host->regs, 0x0);
737
738    virtio_mmio_dev_features_wr(&mmio_host->regs, (uint32_t) setup->features);
739    virtio_mmio_queue_max_wr(&mmio_host->regs, setup->vq_setup[0].vring_ndesc);
740
741    virtio_mmio_queue_notify_index_wrf(&mmio_host->regs, 0xFFFF);
742
743    mmio_host->dev_reg.status = 0x0;
744    mmio_host->dev_reg.driv_feature_sel = 0x0;
745    mmio_host->dev_reg.dev_feature_sel = 0x0;
746    mmio_host->dev_reg.queue_sel = 0x0;
747
748    mmio_host->dev.f = &virtio_mmio_fn;
749
750    mmio_host->dev.f->poll = virtio_device_mmio_poll_host;
751
752    *host = &mmio_host->dev;
753
754    return SYS_ERR_OK;
755
756}
757
758#endif
759
760