1/**
2 * \file
3 * \brief DriverKit IOMMU Service
4 *
5 * Contains functions to request mappings from the IOMMU service
6 */
7/*
8 * Copyright (c) 2018, ETH Zurich.
9 * All rights reserved.
10 *
11 * This file is distributed under the terms in the attached LICENSE file.
12 * If you do not find this file, copies can be found by writing to:
13 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich.
14 * Attn: Systems Group.
15 */
16#include <stdlib.h>
17
18#include <barrelfish/barrelfish.h>
19#include <barrelfish/nameservice_client.h>
20#include <driverkit/iommu.h>
21#include <skb/skb.h>
22
23#include <hw_records.h>
24
25#include <if/iommu_defs.h>
26#include <if/iommu_rpcclient_defs.h>
27#include <if/pci_iommu_defs.h>
28
29
30
31#include "common.h"
32
33#include "../intel_vtd/intel_vtd.h"
34
35
36//#define IOMMU_SVC_DEBUG(x...) debug_printf("[iommu] [svc] " x)
37#define IOMMU_SVC_DEBUG(x...)
38
39
/* Per-type lists of the writable vnode capabilities this service has
 * retyped (see retype_request); looked up by vnode identity when a client
 * presents the read-only counterpart. */
static struct vnodest *vnodes_pml4;
static struct vnodest *vnodes_pdpt;
static struct vnodest *vnodes_pdir;
43
44
45
46/**
47 * @brief obtains the writable version of the cap
48 *
49 * @param in    the readonly cap
50 * @param out   the writable version
51 *
52 * @return SYS_ERR_OK on success, errval on failure
53 */
54static inline errval_t iommu_get_writable_vnode(struct vnode_identity id,
55                                                struct capref *out)
56{
57    struct vnodest *vn;
58    switch(id.type) {
59        case ObjType_VNode_x86_64_pml4 :
60            vn = vnodes_pml4;
61            break;
62        case ObjType_VNode_x86_64_pdpt :
63            vn = vnodes_pdpt;
64            break;
65        case ObjType_VNode_x86_64_pdir :
66            vn = vnodes_pdir;
67            break;
68        default:
69            return SYS_ERR_VNODE_TYPE;
70    }
71
72    while(vn) {
73        if (vn->id.type == id.type && vn->id.base == id.base) {
74            *out = vn->cap;
75            return SYS_ERR_OK;
76        }
77        vn = vn->next;
78    }
79
80    return SYS_ERR_CAP_NOT_FOUND;
81}
82
83static inline errval_t iommu_put_writable_vnode(struct vnode_identity id,
84                                                struct capref in)
85{
86    struct vnodest *vn = calloc(1, sizeof(*vn));
87    if (vn == NULL) {
88        return LIB_ERR_MALLOC_FAIL;
89    }
90
91    vn->cap = in;
92    vn->id = id;
93
94    switch(id.type) {
95        case ObjType_VNode_x86_64_pml4 :
96            vn->next = vnodes_pml4;
97            vnodes_pml4 = vn;
98            break;
99        case ObjType_VNode_x86_64_pdpt :
100            vn->next = vnodes_pdpt;
101            vnodes_pdpt = vn;
102            break;
103        case ObjType_VNode_x86_64_pdir :
104            vn->next = vnodes_pdir;
105            vnodes_pdir = vn;
106            break;
107        default:
108            return SYS_ERR_VNODE_TYPE;
109    }
110
111    return SYS_ERR_OK;
112}
113
114
115
116/*
117 * ===========================================================================
 * Receive Handlers of the service
119 * ===========================================================================
120 */
/**
 * @brief handles a getvmconfig RPC: replies with the IOMMU's root vnode
 *        type, its maximum page size (in bits) and the device's SKB node id
 *
 * @param ib    binding the request arrived on; ib->st holds the
 *              iommu_device this endpoint was created for
 */
static void getvmconfig_request(struct iommu_binding *ib)
{
    errval_t err = SYS_ERR_OK;
    /* default node id used when the SKB lookup below fails */
    int32_t nodeid = 1;

    IOMMU_SVC_DEBUG("%s\n", __FUNCTION__);

    struct iommu_device *idev = ib->st;
    assert(idev);
    assert(idev->iommu);

    /* ask the SKB for the enumeration id of this PCI bus/device/function */
    err = skb_execute_query(
        "node_enum(addr(%i,%i,%i), Enum), write(Enum).",
        idev->addr.pci.bus,
        idev->addr.pci.device,
        idev->addr.pci.function);

    if(err_is_fail(err)){
        DEBUG_SKB_ERR(err,"pci node id lookup");
    } else {
        /* on parse failure err carries the SKB error to the client below */
        err = skb_read_output("%"SCNi32, &nodeid);
    }

    /* first err argument reports the lookup outcome to the client;
     * the second err (return value) is the transmit status */
    err = ib->tx_vtbl.getvmconfig_response(ib, NOP_CONT, err,
                                           idev->iommu->root_vnode_type,
                                           idev->iommu->max_page_bits, nodeid);
    /* should not fail */
    assert(err_is_ok(err));
}
150
151
152static void setroot_request(struct iommu_binding *ib, struct capref src)
153{
154    errval_t err;
155
156    struct iommu_device *idev = ib->st;
157    assert(idev);
158    assert(idev->iommu);
159
160    IOMMU_SVC_DEBUG("%s [%u][%p] %u.%u.%u\n", __FUNCTION__, idev->iommu->id,
161                    idev->iommu, idev->addr.pci.bus, idev->addr.pci.device,
162                    idev->addr.pci.function);
163
164    struct vnode_identity id;
165    err = invoke_vnode_identify(src, &id);
166    if (err_is_fail(err)) {
167        USER_PANIC_ERR(err, "failed");
168    }
169    switch(id.type) {
170        case ObjType_VNode_x86_64_pml4 :
171            IOMMU_SVC_DEBUG("%s. PML4 @ 0x%lx as root vnode\n", __FUNCTION__, id.base);
172            break;
173        case ObjType_VNode_x86_64_pdpt :
174            IOMMU_SVC_DEBUG("%s. PDPT @ 0x%lx as root vnode\n", __FUNCTION__, id.base);
175            break;
176        case ObjType_VNode_x86_64_pdir :
177            IOMMU_SVC_DEBUG("%s. PDIR @ 0x%lx as root vnode\n", __FUNCTION__, id.base);
178            break;
179        case ObjType_VNode_VTd_ctxt_table :
180            IOMMU_SVC_DEBUG("%s. CTXT @ 0x%lx as root vnode\n", __FUNCTION__, id.base );
181            break;
182    }
183
184    if (idev->f.set_root) {
185        err = idev->f.set_root(idev, src);
186    } else {
187        err = IOMMU_ERR_NOT_SUPPORTED;
188    }
189
190    err = ib->tx_vtbl.setroot_response(ib, NOP_CONT, err);
191    /* should not fail */
192    assert(err_is_ok(err));
193}
194
195
/**
 * @brief send continuation: records that a message left the binding
 *
 * @param arg   pointer to the bool flag that is set to true
 */
static void sent_notification(void *arg)
{
    bool *done = arg;
    *done = true;
}
201
202static void  retype_request(struct iommu_binding *ib, struct capref src,
203                            uint8_t objtype)
204{
205    errval_t err;
206
207    struct capref retcap = NULL_CAP;
208    struct capref vnode = NULL_CAP;
209
210    IOMMU_SVC_DEBUG("%s\n", __FUNCTION__);
211
212
213    switch(objtype) {
214        case ObjType_VNode_x86_64_ptable :
215        case ObjType_VNode_x86_64_pdir :
216        case ObjType_VNode_x86_64_pdpt :
217        case ObjType_VNode_x86_64_pml4 :
218        case ObjType_VNode_x86_64_pml5 :
219
220            /* we should be the only one that has it */
221            err = cap_revoke(src);
222            if (err_is_fail(err)) {
223                err = err_push(err, LIB_ERR_CAP_DELETE_FAIL);
224                goto send_reply;
225            }
226
227            /* allocate slot to store the new cap */
228            err = slot_alloc(&vnode);
229            if (err_is_fail(err)) {
230                err = err_push(err, LIB_ERR_SLOT_ALLOC);
231                goto out_err;
232            }
233
234            /* allocate slot to store readonly version of the cap */
235            err = slot_alloc(&retcap);
236            if (err_is_fail(err)) {
237                err = err_push(err, LIB_ERR_SLOT_ALLOC);
238                goto out_err2;
239            }
240
241            /* retype it to a page table */
242            err = cap_retype(vnode, src, 0, objtype, vnode_objsize(objtype), 1);
243            if (err_is_fail(err)) {
244                err = err_push(err, LIB_ERR_CAP_RETYPE);
245                goto out_err2;
246            }
247
248            struct vnode_identity vid;
249            err = invoke_vnode_identify(vnode, &vid);
250            assert(err_is_ok(err)); /// should not fail
251
252        #define IOMMU_VNODE_CAPRIGHTS (CAPRIGHTS_READ | CAPRIGHTS_GRANT | CAPRIGHTS_IDENTIFY)
253
254            err = cap_mint(retcap, vnode, IOMMU_VNODE_CAPRIGHTS, 0x0);
255            if (err_is_fail(err)) {
256                goto out_err3;
257            }
258
259            err = iommu_put_writable_vnode(vid, vnode);
260            if(err_is_fail(err)) {
261                err = err_push(err, LIB_ERR_CAP_RETYPE);
262                goto out_err4;
263            }
264
265            /* delete the source cap */
266            cap_destroy(src);
267
268            break;
269        default:
270            err = SYS_ERR_VNODE_TYPE;
271
272    }
273
274    bool issent = false;
275    struct event_closure cont;
276    if (err_is_fail(err)) {
277        cont = NOP_CONT;
278    } else {
279        cont = MKCLOSURE(sent_notification, (void *)&issent);
280    }
281
282
283    send_reply:
284    err = ib->tx_vtbl.retype_response(ib, cont, err, retcap);
285    assert(err_is_ok(err)); /* should not fail */
286
287    while(!issent && err_is_ok(err)) {
288        event_dispatch(get_default_waitset());
289    }
290
291    cap_destroy(retcap);
292
293    return;
294
295    out_err4:
296    cap_destroy(retcap);
297    out_err3:
298    cap_destroy(vnode);
299    out_err2:
300    slot_free(retcap);
301    out_err:
302    slot_free(vnode);
303    retcap = src;
304    goto send_reply;
305}
306
307
308static void map_request(struct iommu_binding *ib, struct capref dst,
309                        struct capref src, uint16_t slot, uint64_t attr,
310                        uint64_t off, uint64_t pte_count)
311{
312    errval_t err;
313
314    struct iommu_device *dev = ib->st;
315    assert(dev);
316
317    IOMMU_SVC_DEBUG("%s [%u][%p] %u.%u.%u\n", __FUNCTION__, dev->iommu->id,
318                    dev->iommu, dev->addr.pci.bus, dev->addr.pci.device,
319                    dev->addr.pci.function);
320
321    if (dev->f.map == NULL) {
322        err = IOMMU_ERR_NOT_SUPPORTED;
323        goto out;
324    }
325
326    struct vnode_identity id;
327    err = invoke_vnode_identify(dst, &id);
328    if (err_is_fail(err)) {
329        goto out;
330    }
331
332    switch(id.type) {
333        case ObjType_VNode_x86_64_pml4 :
334            IOMMU_SVC_DEBUG("%s. PML4 @ 0x%lx slot [%u..%lu] flags[%lx]\n", __FUNCTION__, id.base, slot, slot+pte_count - 1, attr);
335            break;
336        case ObjType_VNode_x86_64_pdpt :
337            IOMMU_SVC_DEBUG("%s. PDPT @ 0x%lx slot [%u..%lu] flags[%lx]\n", __FUNCTION__, id.base, slot, slot+pte_count - 1, attr );
338            break;
339        case ObjType_VNode_x86_64_pdir :
340            IOMMU_SVC_DEBUG("%s. PDIR @ 0x%lx slot [%u..%lu] flags[%lx]\n", __FUNCTION__, id.base, slot, slot+pte_count - 1, attr);
341            break;
342        case ObjType_VNode_VTd_ctxt_table :
343            IOMMU_SVC_DEBUG("%s. CTXT @ 0x%lx slot [%u..%lu] flags[%lx]\n", __FUNCTION__, id.base, slot, slot+pte_count - 1, attr);
344            break;
345    }
346
347    struct capref vnode;
348    err = iommu_get_writable_vnode(id, &vnode);
349    if(err_is_fail(err)) {
350        goto out;
351    }
352
353    struct capref mapping;
354    err = slot_alloc(&mapping);
355    if (err_is_fail(err)) {
356        goto out;
357    }
358
359    err = dev->f.map(dev, vnode, src, slot, attr, off, pte_count, mapping);
360    if (err_is_fail(err)) {
361        DEBUG_ERR(err, "failed to map the frame\n");
362    }
363
364    out:
365    // delete the cap, we no longer need it
366    cap_destroy(dst);
367
368    err = ib->tx_vtbl.map_response(ib, NOP_CONT, err);
369    assert(err_is_ok(err)); /* should not fail */
370}
371
372
/**
 * @brief handles an unmap RPC — currently NOT implemented
 *
 * @param ib        binding the request arrived on; ib->st is the device
 * @param vnode_ro  readonly cap of the vnode to unmap from
 * @param slot      slot within the vnode
 *
 * NOTE(review): the response below always carries LIB_ERR_NOT_IMPLEMENTED,
 * regardless of the outcome of the vnode_unmap attempt; the service does
 * not yet track the mapping caps needed for a real unmap (see XXX below).
 */
static void unmap_request(struct iommu_binding *ib, struct capref vnode_ro,
                          uint16_t slot)
{
    errval_t err;

    struct iommu_device *dev = ib->st;
    assert(dev);

    IOMMU_SVC_DEBUG("%s %u.%u.%u\n", __FUNCTION__, dev->addr.pci.bus,
                    dev->addr.pci.device, dev->addr.pci.function);

    if (dev->f.unmap == NULL) {
        err = IOMMU_ERR_NOT_SUPPORTED;
        goto out;
    }

    /* identify the readonly cap to find our writable version */
    struct vnode_identity id;
    err = invoke_vnode_identify(vnode_ro, &id);
    if (err_is_fail(err)) {
        goto out;
    }

    switch(id.type) {
        case ObjType_VNode_x86_64_pml4 :
            IOMMU_SVC_DEBUG("%s. PML4 @ 0x%lx slot [%u]\n", __FUNCTION__, id.base, slot);
            break;
        case ObjType_VNode_x86_64_pdpt :
            IOMMU_SVC_DEBUG("%s. PDPT @ 0x%lx slot [%u]\n", __FUNCTION__, id.base, slot );
            break;
        case ObjType_VNode_x86_64_pdir :
            IOMMU_SVC_DEBUG("%s. PDIR @ 0x%lx slot [%u]\n", __FUNCTION__, id.base, slot);
            break;
        case ObjType_VNode_VTd_ctxt_table :
            IOMMU_SVC_DEBUG("%s. CTXT @ 0x%lx slot [%u]\n", __FUNCTION__, id.base, slot );
            break;
    }

    struct capref vnode;
    err = iommu_get_writable_vnode(id, &vnode);
    if(err_is_fail(err)) {
        goto out;
    }

    /// XXX: we need to get the mapping cap somehow
    err = vnode_unmap(vnode, NULL_CAP);

    out:
    // delete the cap, we no longer need it
    cap_destroy(vnode_ro);

    /* deliberately reports NOT_IMPLEMENTED (err is computed but unused) */
    err = ib->tx_vtbl.unmap_response(ib, NOP_CONT, LIB_ERR_NOT_IMPLEMENTED);
    assert(err_is_ok(err)); /* should not fail */
}
426
427static void modify_request(struct iommu_binding *ib, struct capref vnode_ro,
428                           uint64_t attr, uint16_t slot)
429{
430    errval_t err;
431    IOMMU_SVC_DEBUG("%s\n", __FUNCTION__);
432
433
434    struct vnode_identity id;
435    err = invoke_vnode_identify(vnode_ro, &id);
436    if (err_is_fail(err)) {
437        goto out;
438    }
439
440    switch(id.type) {
441        case ObjType_VNode_x86_64_pml4 :
442            IOMMU_SVC_DEBUG("%s. PML4 @ 0x%lx slot [%u]\n", __FUNCTION__, id.base, slot);
443            break;
444        case ObjType_VNode_x86_64_pdpt :
445            IOMMU_SVC_DEBUG("%s. PDPT @ 0x%lx slot [%u]\n", __FUNCTION__, id.base, slot );
446            break;
447        case ObjType_VNode_x86_64_pdir :
448            IOMMU_SVC_DEBUG("%s. PDIR @ 0x%lx slot [%u]\n", __FUNCTION__, id.base, slot);
449            break;
450        case ObjType_VNode_VTd_ctxt_table :
451            IOMMU_SVC_DEBUG("%s. CTXT @ 0x%lx slot [%u]\n", __FUNCTION__, id.base, slot );
452            break;
453    }
454
455    struct capref vnode;
456    err = iommu_get_writable_vnode(id, &vnode);
457    if(err_is_fail(err)) {
458        goto out;
459    }
460    err = invoke_vnode_modify_flags(vnode, slot, 1, attr);
461
462    out:
463    // delete the cap, we no longer need it
464    cap_destroy(vnode_ro);
465
466    err = ib->tx_vtbl.modify_response(ib, NOP_CONT, err);
467    assert(err_is_ok(err)); /* should not fail */
468}
469
470errval_t iommu_service_init(void)
471{
472    IOMMU_SVC_DEBUG("%s\n", __FUNCTION__);
473
474
475    return SYS_ERR_OK;
476}
477
/* receive handler table for IOMMU endpoints; each handler reads the
 * iommu_device it serves from the binding's st pointer */
static struct iommu_rx_vtbl rx_vtbl = {
        .getvmconfig_call = getvmconfig_request,
        .setroot_call = setroot_request,
        .retype_call = retype_request,
        .map_call = map_request,
        .unmap_call = unmap_request,
        .modify_call = modify_request,
};
486
487/**
488 * @brief creates a new endpoint for the IOMMU service
489 *
490 * @param ep    capref to create the EP in, slot must be allocated
491 * @param dev   the pointer to the iommu device for this endpoint
492 * @param type  what endpoint type to allcate
493 *
494 * @return SYS_ERR_OK on success, errval on failure
495 */
496errval_t iommu_service_new_endpoint(struct capref ep, struct iommu_device *dev,
497                                    idc_endpoint_t type)
498{
499    struct waitset *ws = get_default_waitset();
500    return iommu_create_endpoint(type, &rx_vtbl, dev, ws,
501                                 IDC_ENDPOINT_FLAGS_DEFAULT, &dev->binding, ep);
502}
503
504
505/*****************************************************************
506 * Iommu PCI connection interface
507 *****************************************************************/
508
509
510static void request_iommu_endpoint_handler(struct pci_iommu_binding *b, uint8_t type,
511                                           uint32_t segment, uint32_t bus,
512                                           uint32_t device, uint32_t function)
513{
514    errval_t err, out_err;
515
516    assert(b->st);
517    struct iommu *io = (struct iommu *)b->st;
518
519    IOMMU_SVC_DEBUG("%s [%u][%p] %u.%u.%u\n", __FUNCTION__, io->id, io, bus, device, function);
520
521    struct iommu_device* dev;
522    out_err = iommu_device_create_by_pci(io, segment, bus, device, function, &dev);
523    if (err_is_fail(out_err)) {
524        goto reply;
525    }
526
527    struct capref cap;
528    out_err = slot_alloc(&cap);
529    if (err_is_fail(out_err)) {
530        iommu_device_destroy(dev);
531        goto reply;
532    }
533
534    out_err = iommu_service_new_endpoint(cap, dev, type);
535    if (err_is_fail(out_err)) {
536        slot_free(cap);
537        iommu_device_destroy(dev);
538    }
539
540reply:
541    err = b->tx_vtbl.request_iommu_endpoint_response(b, NOP_CONT, cap, out_err);
542    assert(err_is_ok(err));
543}
544
/* receive handler table for the PCI-to-IOMMU control connection */
struct pci_iommu_rx_vtbl pci_iommu_rx_vtbl = {
    .request_iommu_endpoint_call = request_iommu_endpoint_handler
};
548
549errval_t iommu_request_endpoint(uint8_t type, struct capref* cap, struct iommu* iommu)
550{
551    struct pci_iommu_binding* b;
552    return pci_iommu_create_endpoint(type, &pci_iommu_rx_vtbl, iommu, get_default_waitset(),
553                                     IDC_ENDPOINT_FLAGS_DEFAULT, &b, *cap);
554}
555