/*
 * Copyright 2019, Data61, CSIRO (ABN 41 687 119 230)
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#include <sel4vmmplatsupport/drivers/virtio_pci_emul.h>

#include "virtio_emul_helpers.h"

/* size of the bounce buffers used to shuttle frames between the guest's
 * virtqueues and the backend ethernet driver */
#define BUF_SIZE 2048

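/* per-device emulation state: the backend ethernet driver, the MAC address
 * advertised to the guest and the DMA manager used for bounce buffers */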
typedef struct ethif_virtio_emul_internal {
    struct eth_driver driver;
    uint8_t mac[6];
    ps_dma_man_t dma_man;
} ethif_internal_t;

typedef struct emul_tx_cookie {
    uint16_t desc_head;
    void *vaddr;
} emul_tx_cookie_t;

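/* allocate_rx_buf callback: hand the backend driver a pinned DMA buffer to
 * receive into, returning its physical address and stashing the virtual
 * address in the cookie so it can be unpinned and freed later */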
static uintptr_t emul_allocate_rx_buf(void *iface, size_t buf_size, void **cookie)
{
    virtio_emul_t *emul = (virtio_emul_t *)iface;
    ethif_internal_t *net = emul->internal;
    if (buf_size > BUF_SIZE) {
        return 0;
    }
    void *vaddr = ps_dma_alloc(&net->dma_man, BUF_SIZE, net->driver.dma_alignment, 1, PS_MEM_NORMAL);
    if (!vaddr) {
        return 0;
    }
    uintptr_t phys = ps_dma_pin(&net->dma_man, vaddr, BUF_SIZE);
    *cookie = vaddr;
    return phys;
}

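/* rx_complete callback: copy a received frame into the guest's next available
 * RX descriptor chain, prefixed by a zeroed virtio_net_hdr, then publish the
 * chain in the used ring and raise the device interrupt */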
static void emul_rx_complete(void *iface, unsigned int num_bufs, void **cookies, unsigned int *lens)
{
    virtio_emul_t *emul = (virtio_emul_t *)iface;
    ethif_internal_t *net = (ethif_internal_t *)emul->internal;
    vqueue_t *vq = &emul->virtq;
    int i;
    struct vring *vring = &vq->vring[RX_QUEUE];

    /* grab the next receive chain */
    struct virtio_net_hdr virtio_hdr;
    memset(&virtio_hdr, 0, sizeof(virtio_hdr));
    uint16_t guest_idx = ring_avail_idx(emul, vring);
    uint16_t idx = vq->last_idx[RX_QUEUE];
    if (idx != guest_idx) {
        /* total length of the written packet so far */
        size_t tot_written = 0;
        /* amount of the current descriptor written */
        size_t desc_written = 0;
        /* how much we have written of the current buffer */
        size_t buf_written = 0;
        /* the current buffer. -1 indicates the virtio net buffer */
        int current_buf = -1;
        uint16_t desc_head = ring_avail(emul, vring, idx);
        /* start walking the descriptors */
        struct vring_desc desc;
        uint16_t desc_idx = desc_head;
        do {
            desc = ring_desc(emul, vring, desc_idx);
            /* determine how much we can copy */
            uint32_t copy;
            void *buf_base = NULL;
            if (current_buf == -1) {
                copy = sizeof(struct virtio_net_hdr) - buf_written;
                buf_base = &virtio_hdr;
            } else {
                copy = lens[current_buf] - buf_written;
                buf_base = cookies[current_buf];
            }
            copy = MIN(copy, desc.len - desc_written);
            vm_guest_write_mem(emul->vm, buf_base + buf_written, (uintptr_t)desc.addr + desc_written, copy);
            /* update amounts */
            tot_written += copy;
            desc_written += copy;
            buf_written += copy;
            /* see what's gone over */
            if (desc_written == desc.len) {
                if (!(desc.flags & VRING_DESC_F_NEXT)) {
                    /* descriptor chain is too short to hold the whole packet.
                     * just truncate */
                    break;
                }
                desc_idx = desc.next;
                desc_written = 0;
            }
            if (current_buf == -1) {
                if (buf_written == sizeof(struct virtio_net_hdr)) {
                    current_buf++;
                    buf_written = 0;
                }
            } else {
                if (buf_written == lens[current_buf]) {
                    current_buf++;
                    buf_written = 0;
                }
            }
            /* cast: current_buf may legitimately still be -1 here, which must
             * not be promoted to unsigned by the comparison */
        } while (current_buf < (int)num_bufs);
        /* now put it in the used ring */
        struct vring_used_elem used_elem = {desc_head, tot_written};
        ring_used_add(emul, vring, used_elem);

        /* record that we've used this descriptor chain now */
        vq->last_idx[RX_QUEUE]++;
        /* notify the guest that there is something in its used ring */
        net->driver.i_fn.raw_handleIRQ(&net->driver, 0);
    }
    for (i = 0; i < num_bufs; i++) {
        ps_dma_unpin(&net->dma_man, cookies[i], BUF_SIZE);
        ps_dma_free(&net->dma_man, cookies[i], BUF_SIZE);
    }
}

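/* retire a completed transmit: release the bounce buffer, put the descriptor
 * chain in the TX used ring and raise the device interrupt */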
static void emul_tx_complete(void *iface, void *cookie)
{
    virtio_emul_t *emul = (virtio_emul_t *)iface;
    ethif_internal_t *net = emul->internal;
    emul_tx_cookie_t *tx_cookie = (emul_tx_cookie_t *)cookie;
    /* free the dma memory */
    ps_dma_unpin(&net->dma_man, tx_cookie->vaddr, BUF_SIZE);
    ps_dma_free(&net->dma_man, tx_cookie->vaddr, BUF_SIZE);
    /* put the descriptor chain into the used list */
    struct vring_used_elem used_elem = {tx_cookie->desc_head, 0};
    ring_used_add(emul, &emul->virtq.vring[TX_QUEUE], used_elem);
    free(tx_cookie);
    /* notify the guest that we have completed some of its buffers */
    net->driver.i_fn.raw_handleIRQ(&net->driver, 0);
}

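/* drain the guest's TX available ring: for each descriptor chain, strip the
 * leading virtio_net_hdr, gather the frame into a pinned bounce buffer and
 * hand it to the backend driver */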
static void emul_notify_tx(virtio_emul_t *emul)
{
    ethif_internal_t *net = (ethif_internal_t *)emul->internal;
    struct vring *vring = &emul->virtq.vring[TX_QUEUE];
    /* read the index */
    uint16_t guest_idx = ring_avail_idx(emul, vring);
    /* process what we can of the ring */
    uint16_t idx = emul->virtq.last_idx[TX_QUEUE];
    while (idx != guest_idx) {
        uint16_t desc_head;
        /* read the head of the descriptor chain */
        desc_head = ring_avail(emul, vring, idx);
        /* allocate a packet */
        void *vaddr = ps_dma_alloc(&net->dma_man, BUF_SIZE, net->driver.dma_alignment, 1, PS_MEM_NORMAL);
        if (!vaddr) {
            /* try again later */
            break;
        }
        uintptr_t phys = ps_dma_pin(&net->dma_man, vaddr, BUF_SIZE);
        assert(phys);
        /* length of the final packet to deliver */
        uint32_t len = 0;
        /* we want to skip the initial virtio header, as this should
         * not be sent to the actual ethernet driver. This records
         * how much we have skipped so far. */
        uint32_t skipped = 0;
        /* start walking the descriptors */
        struct vring_desc desc;
        uint16_t desc_idx = desc_head;
        do {
            desc = ring_desc(emul, vring, desc_idx);
            uint32_t skip = 0;
            /* if we haven't yet skipped the full virtio net header, work
             * out how much of this descriptor should be skipped */
            if (skipped < sizeof(struct virtio_net_hdr)) {
                skip = MIN(sizeof(struct virtio_net_hdr) - skipped, desc.len);
                skipped += skip;
            }
            /* truncate packets that are too large */
            uint32_t this_len = desc.len - skip;
            this_len = MIN(BUF_SIZE - len, this_len);
            vm_guest_read_mem(emul->vm, vaddr + len, (uintptr_t)desc.addr + skip, this_len);
            len += this_len;
            desc_idx = desc.next;
        } while (desc.flags & VRING_DESC_F_NEXT);
        /* ship it */
        emul_tx_cookie_t *cookie = calloc(1, sizeof(*cookie));
        assert(cookie);
        cookie->desc_head = desc_head;
        cookie->vaddr = vaddr;
        int result = net->driver.i_fn.raw_tx(&net->driver, 1, &phys, &len, cookie);
        switch (result) {
        case ETHIF_TX_COMPLETE:
            emul_tx_complete(emul, cookie);
            break;
        case ETHIF_TX_FAILED:
            ps_dma_unpin(&net->dma_man, vaddr, BUF_SIZE);
            ps_dma_free(&net->dma_man, vaddr, BUF_SIZE);
            free(cookie);
            break;
        default:
            /* ETHIF_TX_ENQUEUED: the driver holds the buffer and will finish
             * the send asynchronously through the tx_complete callback */
            break;
        }
        /* next */
        idx++;
    }
    /* update which parts of the ring we have processed */
    emul->virtq.last_idx[TX_QUEUE] = idx;
}

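/* tx_complete callback registered with the backend driver: finishing a send
 * may have freed ring space, so kick the TX path again */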
static void emul_tx_complete_external(void *iface, void *cookie)
{
    emul_tx_complete(iface, cookie);
    /* space may have cleared for additional transmits */
    emul_notify_tx(iface);
}

static struct raw_iface_callbacks emul_callbacks = {
    .tx_complete = emul_tx_complete_external,
    .rx_complete = emul_rx_complete,
    .allocate_rx_buf = emul_allocate_rx_buf
};

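/* guest queue-notify entry point: ignore kicks until the guest driver has
 * set DRIVER_OK in the status register */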
static int emul_notify(virtio_emul_t *emul)
{
    if (emul->virtq.status != VIRTIO_CONFIG_S_DRIVER_OK) {
        return -1;
    }
    emul_notify_tx(emul);
    return 0;
}

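/* legacy virtio-PCI I/O space reads. With MSI-X disabled the device-specific
 * configuration (struct virtio_net_config, which begins with the MAC) starts
 * at offset 0x14, so byte reads at 0x14..0x19 return the MAC address. */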
bool net_device_emul_io_in(struct virtio_emul *emul, unsigned int offset, unsigned int size, unsigned int *result)
{
    bool handled = false;
    switch (offset) {
    case VIRTIO_PCI_HOST_FEATURES:
        handled = true;
        assert(size == 4);
        /* net only: the sole feature we advertise is a valid MAC */
        *result = BIT(VIRTIO_NET_F_MAC);
        break;
    case 0x14 ... 0x19:
        assert(size == 1);
        *result = ((ethif_internal_t *)emul->internal)->mac[offset - 0x14];
        handled = true;
        break;
    }
    return handled;
}

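/* legacy virtio-PCI I/O space writes: the guest writes back the feature set
 * it will use, which can only be the MAC feature we offered */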
bool net_device_emul_io_out(struct virtio_emul *emul, unsigned int offset, unsigned int size, unsigned int value)
{
    bool handled = false;
    switch (offset) {
    case VIRTIO_PCI_GUEST_FEATURES:
        handled = true;
        assert(size == 4);
        /* net only: the guest must accept exactly the features we offered */
        assert(value == BIT(VIRTIO_NET_F_MAC));
        break;
    }
    return handled;
}

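/* initialise the net-specific half of a virtio device emulation: install the
 * I/O handlers and notify hook, bring up the backend ethernet driver and
 * record the MAC it reports. Returns the state to store in emul->internal,
 * or NULL on failure. */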
void *net_virtio_emul_init(virtio_emul_t *emul, ps_io_ops_t io_ops, ethif_driver_init driver, void *config)
{
    ethif_internal_t *internal = calloc(1, sizeof(*internal));
    if (!internal) {
        goto error;
    }
    /* notifications must go through emul_notify so that TX kicks are ignored
     * until the guest driver has set DRIVER_OK */
    emul->notify = emul_notify;
    emul->device_io_in = net_device_emul_io_in;
    emul->device_io_out = net_device_emul_io_out;
    internal->driver.cb_cookie = emul;
    internal->driver.i_cb = emul_callbacks;
    internal->dma_man = io_ops.dma_manager;
    int err = driver(&internal->driver, io_ops, config);
    if (err) {
        ZF_LOGE("Failed to initialize driver");
        goto error;
    }
    int mtu;
    internal->driver.i_fn.low_level_init(&internal->driver, internal->mac, &mtu);
    return (void *)internal;
error:
    if (emul) {
        free(emul);
    }
    if (internal) {
        free(internal);
    }
    return NULL;
}
