1/*
2 * Copyright 2017, Data61
3 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
4 * ABN 41 687 119 230.
5 *
6 * This software may be distributed and modified according to the terms of
7 * the GNU General Public License version 2. Note that NO WARRANTY is provided.
8 * See "LICENSE_GPLv2.txt" for details.
9 *
10 * @TAG(DATA61_GPL)
11 */
12
13#include <ethdrivers/virtio_pci.h>
14#include <assert.h>
15#include <ethdrivers/helpers.h>
16#include <ethdrivers/virtio/virtio_config.h>
17#include <ethdrivers/virtio/virtio_pci.h>
18#include <ethdrivers/virtio/virtio_ring.h>
19#include <ethdrivers/virtio/virtio_net.h>
20#include <string.h>
21
/* Mask of features we will use */
#define FEATURES_REQUIRED (BIT(VIRTIO_NET_F_MAC))

/* Size of each rx/tx packet data buffer requested from the upper layer */
#define BUF_SIZE 2048
/* Alignment for small DMA allocations (the shared virtio_net header) */
#define DMA_ALIGN 16

/* Legacy virtio-net virtqueue indices: queue 0 receives, queue 1 transmits */
#define RX_QUEUE 0
#define TX_QUEUE 1
30
/* Per-device driver state for a legacy virtio-net PCI device */
typedef struct virtio_dev {
    void *mmio_base;        /* mapped MMIO region (unused here: registers are accessed via I/O ports) */
    uint16_t io_base;       /* base of the device's PCI I/O port region */
    ps_io_port_ops_t ioops; /* I/O port access operations */
    /* R/T Descriptor Head represents the beginning of the block of
     * descriptors that are currently in use */
    unsigned int tdh;
    unsigned int rdh;
    /* R/T Descriptor Tail represents the next free slot to add
     * a descriptor */
    unsigned int tdt;
    unsigned int rdt;
    /* R/T Used Head represents the index in the used ring that
     * we last observed */
    uint16_t tuh;
    uint16_t ruh;
    /* descriptor rings */
    uintptr_t rx_ring_phys;   /* physical address of the rx vring (given to the device) */
    struct vring rx_ring;
    unsigned int rx_size;     /* number of descriptors in the rx ring */
    unsigned int rx_remain;   /* free rx descriptors remaining */
    void **rx_cookies;        /* cookie per rx head descriptor, returned on rx completion */
    uintptr_t tx_ring_phys;   /* physical address of the tx vring (given to the device) */
    struct vring tx_ring;
    unsigned int tx_size;     /* number of descriptors in the tx ring */
    unsigned int tx_remain;   /* free tx descriptors remaining */
    void **tx_cookies;        /* cookie per tx head descriptor, returned on tx completion */
    unsigned int *tx_lengths; /* number of data descriptors used by each queued tx packet */
    /* preallocated header. Since we do not actually use any features
     * in the header we put the same one before every send/receive packet */
    uintptr_t virtio_net_hdr_phys;
} virtio_dev_t;
63
64static uint8_t read_reg8(virtio_dev_t *dev, uint16_t port) {
65    uint32_t val;
66    ps_io_port_in(&dev->ioops, dev->io_base + port, 1, &val);
67    return (uint8_t)val;
68}
69
70static uint16_t read_reg16(virtio_dev_t *dev, uint16_t port) {
71    uint32_t val;
72    ps_io_port_in(&dev->ioops, dev->io_base + port, 2, &val);
73    return (uint16_t)val;
74}
75
76static uint32_t read_reg32(virtio_dev_t *dev, uint16_t port) {
77    uint32_t val;
78    ps_io_port_in(&dev->ioops, dev->io_base + port, 4, &val);
79    return val;
80}
81
82static void write_reg8(virtio_dev_t *dev, uint16_t port, uint8_t val) {
83    ps_io_port_out(&dev->ioops, dev->io_base + port, 1, val);
84}
85
86static void write_reg16(virtio_dev_t *dev, uint16_t port, uint16_t val) {
87    ps_io_port_out(&dev->ioops, dev->io_base + port, 2, val);
88}
89
90static void write_reg32(virtio_dev_t *dev, uint16_t port, uint32_t val) {
91    ps_io_port_out(&dev->ioops, dev->io_base + port, 4, val);
92}
93
94static void set_status(virtio_dev_t *dev, uint8_t status) {
95    write_reg8(dev, VIRTIO_PCI_STATUS, status);
96}
97
98static uint8_t get_status(virtio_dev_t *dev) {
99    return read_reg8(dev, VIRTIO_PCI_STATUS);
100}
101
102static void add_status(virtio_dev_t *dev, uint8_t status) {
103    write_reg8(dev, VIRTIO_PCI_STATUS, get_status(dev) | status);
104}
105
106static uint32_t get_features(virtio_dev_t *dev) {
107    return read_reg32(dev, VIRTIO_PCI_HOST_FEATURES);
108}
109
110static void set_features(virtio_dev_t *dev, uint32_t features) {
111    write_reg32(dev, VIRTIO_PCI_GUEST_FEATURES, features);
112}
113
114static void free_desc_ring(virtio_dev_t *dev, ps_dma_man_t *dma_man) {
115    if (dev->rx_ring.desc) {
116        dma_unpin_free(dma_man, (void*)dev->rx_ring.desc, vring_size(dev->rx_size, VIRTIO_PCI_VRING_ALIGN));
117        dev->rx_ring.desc = NULL;
118    }
119    if (dev->tx_ring.desc) {
120        dma_unpin_free(dma_man, (void*)dev->tx_ring.desc, vring_size(dev->tx_size, VIRTIO_PCI_VRING_ALIGN));
121        dev->tx_ring.desc = NULL;
122    }
123    if (dev->rx_cookies) {
124        free(dev->rx_cookies);
125        dev->rx_cookies = NULL;
126    }
127    if (dev->tx_cookies) {
128        free(dev->tx_cookies);
129        dev->tx_cookies = NULL;
130    }
131    if (dev->tx_lengths) {
132        free(dev->tx_lengths);
133        dev->tx_lengths = NULL;
134    }
135}
136
137static int initialize_desc_ring(virtio_dev_t *dev, ps_dma_man_t *dma_man) {
138    dma_addr_t rx_ring = dma_alloc_pin(dma_man, vring_size(dev->rx_size, VIRTIO_PCI_VRING_ALIGN), 1, VIRTIO_PCI_VRING_ALIGN);
139    if (!rx_ring.phys) {
140        LOG_ERROR("Failed to allocate rx_ring");
141        return -1;
142    }
143    memset(rx_ring.virt, 0, vring_size(dev->rx_size, VIRTIO_PCI_VRING_ALIGN));
144    vring_init(&dev->rx_ring, dev->rx_size, rx_ring.virt, VIRTIO_PCI_VRING_ALIGN);
145    dev->rx_ring_phys = rx_ring.phys;
146    dma_addr_t tx_ring = dma_alloc_pin(dma_man, vring_size(dev->tx_size, VIRTIO_PCI_VRING_ALIGN), 1, VIRTIO_PCI_VRING_ALIGN);
147    if (!tx_ring.phys) {
148        LOG_ERROR("Failed to allocate tx_ring");
149        free_desc_ring(dev, dma_man);
150        return -1;
151    }
152    memset(tx_ring.virt, 0, vring_size(dev->tx_size, VIRTIO_PCI_VRING_ALIGN));
153    vring_init(&dev->tx_ring, dev->tx_size, tx_ring.virt, VIRTIO_PCI_VRING_ALIGN);
154    dev->tx_ring_phys = tx_ring.phys;
155    dev->rx_cookies = malloc(sizeof(void*) * dev->rx_size);
156    dev->tx_cookies = malloc(sizeof(void*) * dev->tx_size);
157    dev->tx_lengths = malloc(sizeof(unsigned int) * dev->tx_size);
158    if (!dev->rx_cookies || !dev->tx_cookies || !dev->tx_lengths) {
159        LOG_ERROR("Failed to malloc");
160        free_desc_ring(dev, dma_man);
161        return -1;
162    }
163    /* Remaining needs to be 2 less than size as we cannot actually enqueue size many descriptors,
164     * since then the head and tail pointers would be equal, indicating empty. */
165    dev->rx_remain = dev->rx_size - 2;
166    dev->tx_remain = dev->tx_size - 2;
167
168    dev->tdh = dev->tdt = 0;
169    dev->rdh = dev->rdt = 0;
170    dev->tuh = dev->ruh = 0;
171
172    return 0;
173}
174
175static int initialize(virtio_dev_t *dev, ps_dma_man_t *dma_man) {
176    int err;
177    /* perform a reset */
178    set_status(dev, 0);
179    /* acknowledge to the host that we found it */
180    add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
181    /* read device features */
182    uint32_t features;
183    features = get_features(dev);
184    if ( (features & FEATURES_REQUIRED) != FEATURES_REQUIRED) {
185        LOG_ERROR("Required features 0x%x, have 0x%x", (unsigned int)FEATURES_REQUIRED, features);
186        return -1;
187    }
188    features &= FEATURES_REQUIRED;
189    /* write the features we will use */
190    set_features(dev, features);
191    /* determine the queue size */
192    write_reg16(dev, VIRTIO_PCI_QUEUE_SEL, RX_QUEUE);
193    dev->rx_size = read_reg16(dev, VIRTIO_PCI_QUEUE_NUM);
194    write_reg16(dev, VIRTIO_PCI_QUEUE_SEL, TX_QUEUE);
195    dev->tx_size = read_reg16(dev, VIRTIO_PCI_QUEUE_NUM);
196    /* create the rings */
197    err = initialize_desc_ring(dev, dma_man);
198    if (err) {
199        return -1;
200    }
201    /* write the virtqueue locations */
202    write_reg16(dev, VIRTIO_PCI_QUEUE_SEL, RX_QUEUE);
203    write_reg32(dev, VIRTIO_PCI_QUEUE_PFN, ((uintptr_t)dev->rx_ring_phys) / 4096);
204    write_reg16(dev, VIRTIO_PCI_QUEUE_SEL, TX_QUEUE);
205    write_reg32(dev, VIRTIO_PCI_QUEUE_PFN, ((uintptr_t)dev->tx_ring_phys) / 4096);
206    /* tell the driver everything is okay */
207    add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
208    return 0;
209}
210
211static void get_mac(virtio_dev_t *dev, uint8_t *mac) {
212    int i;
213    for (i = 0; i < 6; i++) {
214        mac[i] = read_reg8(dev, 0x14 + i);
215    }
216}
217
218static void low_level_init(struct eth_driver *driver, uint8_t *mac, int *mtu) {
219    virtio_dev_t *dev = (virtio_dev_t*)driver->eth_data;
220    get_mac(dev, mac);
221    *mtu = 1500;
222}
223
/* Debug state dump hook; intentionally a no-op for this driver. */
static void print_state(struct eth_driver *eth_driver) {
}
226
/* Reclaim tx descriptors the device has finished with and return the
 * associated buffers to the upper layer via the tx_complete callback.
 * Assumes the device completes packets in submission order (asserted). */
static void complete_tx(struct eth_driver *driver) {
    virtio_dev_t *dev = (virtio_dev_t*)driver->eth_data;
    /* consume used-ring entries we have not yet observed */
    while (dev->tuh != dev->tx_ring.used->idx) {
        uint16_t ring = dev->tuh % dev->tx_size;
        unsigned int UNUSED desc = dev->tx_ring.used->ring[ring].id;
        /* in-order completion: the used entry should name our current head */
        assert(desc == dev->tdh);
        void *cookie = dev->tx_cookies[dev->tdh];
        /* add 1 to the length we stored to account for the extra descriptor
         * we used for the virtio header */
        unsigned int used = dev->tx_lengths[dev->tdh] + 1;
        dev->tx_remain += used;
        dev->tdh = (dev->tdh + used) % dev->tx_size;
        dev->tuh++;
        /* give the buffer back */
        driver->i_cb.tx_complete(driver->cb_cookie, cookie);
    }
}
244
245static void fill_rx_bufs(struct eth_driver *driver) {
246    virtio_dev_t *dev = (virtio_dev_t*)driver->eth_data;
247    /* we need 2 free as we enqueue in pairs. One descriptor to hold the
248     * virtio header, another one for the actual buffer */
249    while (dev->rx_remain >= 2) {
250        /* request a buffer */
251        void *cookie;
252        uintptr_t phys = driver->i_cb.allocate_rx_buf(driver->cb_cookie, BUF_SIZE, &cookie);
253        if (!phys) {
254            break;
255        }
256        unsigned int next_rdt = (dev->rdt + 1) % dev->rx_size;
257        dev->rx_ring.desc[dev->rdt] = (struct vring_desc) {
258            .addr = dev->virtio_net_hdr_phys,
259            .len = sizeof(struct virtio_net_hdr),
260            .flags = VRING_DESC_F_NEXT | VRING_DESC_F_WRITE,
261            .next = next_rdt
262        };
263        dev->rx_cookies[dev->rdt] = cookie;
264        dev->rx_ring.desc[next_rdt] = (struct vring_desc) {
265            .addr = phys,
266            .len = BUF_SIZE,
267            .flags = VRING_DESC_F_WRITE,
268            .next = 0
269        };
270        dev->rx_ring.avail->ring[dev->rx_ring.avail->idx % dev->rx_size] = dev->rdt;
271        asm volatile("sfence" ::: "memory");
272        dev->rx_ring.avail->idx++;
273        asm volatile("sfence" ::: "memory");
274        write_reg16(dev, VIRTIO_PCI_QUEUE_NOTIFY, RX_QUEUE);
275        dev->rdt = (dev->rdt + 2) % dev->rx_size;
276        dev->rx_remain-=2;
277    }
278}
279
/* Hand received packets up to the network layer. Each completion covers a
 * header+data descriptor pair; the virtio header length is stripped before
 * reporting. Assumes in-order completion (asserted). */
static void complete_rx(struct eth_driver *driver) {
    virtio_dev_t *dev = (virtio_dev_t*)driver->eth_data;
    /* consume used-ring entries we have not yet observed */
    while (dev->ruh != dev->rx_ring.used->idx) {
        uint16_t ring = dev->ruh % dev->rx_size;
        unsigned int UNUSED desc = dev->rx_ring.used->ring[ring].id;
        /* in-order completion: the used entry should name our current head */
        assert(desc == dev->rdh);
        void *cookie = dev->rx_cookies[dev->rdh];
        /* subtract off length of the virtio header we received */
        unsigned int len = dev->rx_ring.used->ring[ring].len - sizeof(struct virtio_net_hdr);
        /* update rdh. remember we actually had two descriptors, one
         * is the header that we threw away, the other being the actual data */
        dev->rdh = (dev->rdh + 2) % dev->rx_size;
        dev->rx_remain += 2;
        dev->ruh++;
        /* Give the buffers back */
        driver->i_cb.rx_complete(driver->cb_cookie, 1, &cookie, &len);
    }
}
298
/* Enqueue a packet made up of num physically-contiguous chunks (phys/len
 * arrays) for transmission. A leading extra descriptor carries the shared
 * all-zero virtio_net header. cookie is returned via tx_complete when the
 * device finishes with the buffers. Returns ETHIF_TX_ENQUEUED on success or
 * ETHIF_TX_FAILED if there is no ring space even after reaping completions. */
static int raw_tx(struct eth_driver *driver, unsigned int num, uintptr_t *phys, unsigned int *len, void *cookie) {
    virtio_dev_t *dev = (virtio_dev_t*)driver->eth_data;
    /* we need to num + 1 free descriptors. The + 1 is for the virtio header */
    if (dev->tx_remain < num + 1) {
        /* try to free up space by reaping completed transmits */
        complete_tx(driver);
        if (dev->tx_remain < num + 1) {
            return ETHIF_TX_FAILED;
        }
    }
    /* install the header */
    dev->tx_ring.desc[dev->tdt] = (struct vring_desc) {
        .addr = dev->virtio_net_hdr_phys,
        .len = sizeof(struct virtio_net_hdr),
        .flags = VRING_DESC_F_NEXT,
        .next = (dev->tdt + 1) % dev->tx_size
    };
    /* now all the buffers */
    unsigned int i;
    for (i = 0; i < num; i++) {
        unsigned int desc = (dev->tdt + i + 1) % dev->tx_size;
        unsigned int next_desc = (desc + 1) % dev->tx_size;
        dev->tx_ring.desc[desc] = (struct vring_desc) {
            .addr = phys[i],
            .len = len[i],
            /* the last chunk terminates the descriptor chain */
            .flags = (i + 1 == num ? 0 : VRING_DESC_F_NEXT),
            .next = next_desc
        };
    }
    dev->tx_ring.avail->ring[dev->tx_ring.avail->idx% dev->tx_size] = dev->tdt;
    /* record what to hand back, keyed by the chain's head descriptor */
    dev->tx_cookies[dev->tdt] = cookie;
    dev->tx_lengths[dev->tdt] = num;
    /* ensure update to descriptors visible before updating the index */
    asm volatile("mfence" ::: "memory");
    dev->tdt = (dev->tdt + num + 1) % dev->tx_size;
    dev->tx_remain -= (num + 1);
    dev->tx_ring.avail->idx++;
    /* ensure index update visible before notifying */
    asm volatile("mfence" ::: "memory");
    write_reg16(dev, VIRTIO_PCI_QUEUE_NOTIFY, TX_QUEUE);
    return ETHIF_TX_ENQUEUED;
}
340
/* Poll entry point: reap any finished work on both rings, then top the rx
 * ring back up with fresh buffers. */
static void raw_poll(struct eth_driver *driver) {
    /* reap completed transmits */
    complete_tx(driver);
    /* deliver any received packets */
    complete_rx(driver);
    /* replenish the rx ring */
    fill_rx_bufs(driver);
}
346
347static void handle_irq(struct eth_driver *driver, int irq) {
348    virtio_dev_t *dev = (virtio_dev_t*)driver->eth_data;
349    /* read and throw away the ISR state. This will perform the ack */
350    read_reg8(dev, VIRTIO_PCI_ISR);
351    raw_poll(driver);
352}
/* Function table exported to the generic ethernet driver layer */
static struct raw_iface_funcs iface_fns = {
    .raw_handleIRQ = handle_irq,
    .print_state = print_state,
    .low_level_init = low_level_init,
    .raw_tx = raw_tx,
    .raw_poll = raw_poll
};
360
361int ethif_virtio_pci_init(struct eth_driver *eth_driver, ps_io_ops_t io_ops, void *config) {
362    int err;
363    ethif_virtio_pci_config_t *virtio_config = (ethif_virtio_pci_config_t*)config;
364    virtio_dev_t *dev = (virtio_dev_t*)malloc(sizeof(*dev));
365    if (!dev) {
366        return -1;
367    }
368
369    dev->mmio_base = virtio_config->mmio_base;
370    dev->io_base = virtio_config->io_base;
371    dev->ioops = io_ops.io_port_ops;
372
373    eth_driver->eth_data = dev;
374    eth_driver->dma_alignment = 16;
375    eth_driver->i_fn = iface_fns;
376
377    err = initialize(dev, &io_ops.dma_manager);
378    if (err) {
379        goto error;
380    }
381    dma_addr_t packet = dma_alloc_pin(&io_ops.dma_manager, sizeof(struct virtio_net_hdr), 1, DMA_ALIGN);
382    if (!packet.virt) {
383        goto error;
384    }
385    memset(packet.virt, 0, sizeof(struct virtio_net_hdr));
386    dev->virtio_net_hdr_phys = packet.phys;
387
388    fill_rx_bufs(eth_driver);
389
390    return 0;
391
392error:
393    set_status(dev, VIRTIO_CONFIG_S_FAILED);
394    free_desc_ring(dev, &io_ops.dma_manager);
395    free(dev);
396    return -1;
397}
398