/**
 * \file
 * \brief Unidirectional bulk data transfer via shared memory
 */

/*
 * Copyright (c) 2009, 2010, 2011, 2012, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */

#include <string.h>

#include <barrelfish/barrelfish.h>

#include <bulk_transfer/bulk_transfer.h>
#include "bulk_pool.h"
#include "bulk_buffer.h"

static void bulk_pool_generate_id(struct bulk_pool_id* id)
{
    /*
     * todo: get a system wide unique domain identifier
     *       get a domain local sequence id
     */

    static uint32_t local_id = 0;

    assert(id);

    id->machine = 0;
    //XXX: disp_get_domain_id() is core-local, but we don't want
    //to put the core ID into the machine part
    id->dom = (disp_get_core_id() << 16) | disp_get_domain_id();
    id->local = local_id++;
}

static struct bulk_pool_list *pool_list = NULL;

/**
 * returns a pointer to the pool with the given id
 *
 * @param   id  the id of the pool to look up
 *
 * @return  pointer to the pool, or NULL if the pool is not present in the domain
 */
struct bulk_pool *bulk_pool_domain_list_get(struct bulk_pool_id *id)
{
    struct bulk_pool_list *list = pool_list;
    while (list) {
        if (bulk_pool_cmp_id(&list->pool->id, id) == 0) {
            return list->pool;
        }
        list = list->next;
    }
    return NULL;
}
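
/*
 * Example (illustrative sketch only, not part of the library): the domain
 * list is typically consulted before inserting, e.g. when a pool description
 * arrives over a channel. "pool" is assumed to come from bulk_pool_alloc()
 * or from deserializing a remote pool description.
 *
 *     struct bulk_pool *known = bulk_pool_domain_list_get(&pool->id);
 *     if (known == NULL) {
 *         // not tracked in this domain yet; insertion keeps the list sorted by id
 *         errval_t err = bulk_pool_domain_list_insert(pool);
 *         if (err_is_fail(err)) {
 *             return err;
 *         }
 *     }
 */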

/**
 * inserts a pool into the domain global bulk pool list
 *
 * @param   pool    the pool to insert
 */
errval_t bulk_pool_domain_list_insert(struct bulk_pool *pool)
{
    struct bulk_pool_list *new_pool = malloc(sizeof(struct bulk_pool_list));
    if (!new_pool) {
        return BULK_TRANSFER_MEM;
    }

    new_pool->next = NULL;
    new_pool->pool = pool;

    if (pool_list == NULL) {
        pool_list = new_pool;
        return SYS_ERR_OK;
    }

    struct bulk_pool_list *list = pool_list;
    struct bulk_pool_list *prev = NULL;

    while (list) {
        switch (bulk_pool_cmp_id(&list->pool->id, &pool->id)) {
            case -1:
                /* the ID of the pool in the list is lower, check next */
                prev = list;
                list = list->next;
                continue;
            case 0:
                /* the pool is already in the list, nothing to do */
                free(new_pool);
                return SYS_ERR_OK;
            case 1:
                /* the ID of the pool in the list is bigger, insert before */
                if (prev) {
                    new_pool->next = prev->next;
                    prev->next = new_pool;
                } else {
                    new_pool->next = pool_list;
                    pool_list = new_pool;
                }
                return SYS_ERR_OK;
            default:
                break;
        }
    }

    /* insert at the end */
    prev->next = new_pool;

    return SYS_ERR_OK;
}


/**
 * removes a pool from the domain global bulk pool list
 *
 * @param pool      the pool to remove
 */
errval_t bulk_pool_domain_list_remove(struct bulk_pool *pool)
{
    struct bulk_pool_list *list = pool_list;
    struct bulk_pool_list *prev = NULL;

    while (list) {
        switch (bulk_pool_cmp_id(&list->pool->id, &pool->id)) {
            case -1:
                /* the ID of the pool in the list is lower, check next */
                break;
            case 0:
                /* we have a match, remove it */
                if (prev) {
                    prev->next = list->next;
                } else {
                    pool_list = list->next;
                }
                free(list);
                return SYS_ERR_OK;
            case 1:
                /* the list is sorted by ID, so the pool cannot be in it */
                return BULK_TRANSFER_POOL_NOT_ASSIGNED;
            default:
                break;
        }
        prev = list;
        list = list->next;
    }

    return BULK_TRANSFER_POOL_NOT_ASSIGNED;
}

/**
 * compares two bulk pool ids
 *
 * @return  -1  if id1 is less than id2
 *           0  if both ids are equal
 *           1  if id1 is bigger than id2
 */
int8_t bulk_pool_cmp_id(struct bulk_pool_id *id1, struct bulk_pool_id *id2)
{

    if (id1->machine < id2->machine) {
        return -1;
    }

    if (id1->machine > id2->machine) {
        return 1;
    }

    if (id1->dom < id2->dom) {
        return -1;
    }

    if (id1->dom > id2->dom) {
        return 1;
    }

    if (id1->local < id2->local) {
        return -1;
    }

    if (id1->local > id2->local) {
        return 1;
    }

    assert(id1->machine == id2->machine);
    assert(id1->dom == id2->dom);
    assert(id1->local == id2->local);

    return 0;
}
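
/*
 * Illustration (hypothetical id values, not part of the library): the
 * comparator orders ids lexicographically by (machine, dom, local), which is
 * what keeps the pool lists in this file sorted.
 *
 *     struct bulk_pool_id a = { .machine = 0, .dom = 7, .local = 1 };
 *     struct bulk_pool_id b = { .machine = 0, .dom = 7, .local = 2 };
 *
 *     assert(bulk_pool_cmp_id(&a, &b) == -1);  // a orders before b
 *     assert(bulk_pool_cmp_id(&b, &a) ==  1);
 *     assert(bulk_pool_cmp_id(&a, &a) ==  0);
 */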

/**
 * checks if a pool has already been assigned to that channel
 *
 * @param pool      the bulk pool to check for assignment
 * @param channel   the channel to check for assignment
 *
 * @return true:    the pool is assigned to this channel
 *         false:   the pool has not been assigned to the channel
 */
uint8_t bulk_pool_is_assigned(struct bulk_pool *pool,
                              struct bulk_channel *channel)
{
    assert(channel);

    struct bulk_pool_list *list = channel->pools;

    while (list) {
        switch (bulk_pool_cmp_id(&list->pool->id, &pool->id)) {
            case -1:
                /* the ID of the pool in the list is lower, check next */
                break;
            case 0:
                /* we have a match */
                return 1;
            case 1:
                /* the list is sorted, so our pool cannot be in it */
                return 0;
            default:
                break;
        }

        list = list->next;
    }

    return 0;
}

/**
 * gets a pointer to the pool assigned to this channel
 *
 * @param id        the id of the pool to look up
 * @param channel   the channel whose pool list to search
 *
 * @return pointer to the pool, or NULL if it is not assigned to the channel
 */
struct bulk_pool *bulk_pool_get(struct bulk_pool_id *id,
                                struct bulk_channel *channel)
{
    struct bulk_pool_list *list = channel->pools;
    while (list) {
        if (bulk_pool_cmp_id(&list->pool->id, id) == 0) {
            return list->pool;
        }
        list = list->next;
    }
    return NULL;
}

/**
 * adds a pool to a channel's pool list
 *
 * @param pool      the pool to assign to the channel
 * @param channel   the channel to assign the pool to
 */
errval_t bulk_pool_assign(struct bulk_pool *pool, struct bulk_channel *channel)
{
    assert(channel);
    struct bulk_pool_list *new_pool = malloc(sizeof(struct bulk_pool_list));
    if (!new_pool) {
        return BULK_TRANSFER_MEM;
    }

    struct bulk_pool_internal *pool_int = (struct bulk_pool_internal *) pool;

    new_pool->next = NULL;
    new_pool->pool = pool;

    if (!channel->pools) {
        channel->pools = new_pool;
        pool_int->refcnt++;
        return SYS_ERR_OK;
    }

    struct bulk_pool_list *list = channel->pools;
    struct bulk_pool_list *prev = NULL;

    while (list) {
        switch (bulk_pool_cmp_id(&list->pool->id, &pool->id)) {
            case -1:
                /* the ID of the pool in the list is lower, check next */
                prev = list;
                list = list->next;
                continue;
            case 0:
                /* the pool is already assigned to this channel */
                free(new_pool);
                return BULK_TRANSFER_POOL_ALREADY_ASSIGNED;
            case 1:
                /* the ID of the pool in the list is bigger, insert before */
                if (prev) {
                    new_pool->next = prev->next;
                    prev->next = new_pool;
                } else {
                    new_pool->next = channel->pools;
                    channel->pools = new_pool;
                }
                pool_int->refcnt++;
                return SYS_ERR_OK;
            default:
                break;
        }
    }

    /* insert at the end */
    prev->next = new_pool;
    pool_int->refcnt++;
    return SYS_ERR_OK;
}
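
/*
 * Example (sketch, assuming "channel" is a bound struct bulk_channel and
 * "pool" an already mapped pool): a pool is assigned at most once per
 * channel, so an already-assigned result is not necessarily fatal.
 *
 *     errval_t err = bulk_pool_assign(pool, channel);
 *     if (err_is_fail(err) && err_no(err) != BULK_TRANSFER_POOL_ALREADY_ASSIGNED) {
 *         return err;
 *     }
 *     assert(bulk_pool_is_assigned(pool, channel));
 */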

/**
 * removes the pool from the channel's pool list
 *
 * @param pool      the pool to remove
 * @param channel   the channel to remove the pool from
 */
errval_t bulk_pool_remove(struct bulk_pool *pool, struct bulk_channel *channel)
{
    assert(channel);

    struct bulk_pool_list *list = channel->pools;
    struct bulk_pool_list *prev = NULL;

    while (list) {
        switch (bulk_pool_cmp_id(&list->pool->id, &pool->id)) {
            case -1:
                /* the ID of the pool in the list is lower, check next */
                break;
            case 0:
                /* we have a match, remove it */
                if (prev) {
                    prev->next = list->next;
                } else {
                    channel->pools = list->next;
                }
                free(list);
                return SYS_ERR_OK;
            case 1:
                /* the list is sorted by ID, so the pool cannot be in it */
                return BULK_TRANSFER_POOL_NOT_ASSIGNED;
            default:
                break;
        }
        prev = list;
        list = list->next;
    }

    return BULK_TRANSFER_POOL_NOT_ASSIGNED;
}

/**
 * unmaps the entire pool and frees up the entire memory region of the pool.
 *
 * @param pool  the pool to unmap
 */
// XXX Are caps deleted?
errval_t bulk_pool_unmap(struct bulk_pool *pool)
{
    assert(pool);
    struct bulk_pool_internal *pool_int = (struct bulk_pool_internal *) pool;

    if (!pool_int->vregion) {
        /* there is no vregion associated with the pool, so it is not mapped */
        return SYS_ERR_OK;
    }

    errval_t err = SYS_ERR_OK;

    /* get the vspace / vregions / memobj pointers */
    struct vregion *vregion = pool_int->vregion;
    struct memobj *memobj = vregion_get_memobj(vregion);

    struct capref ret_cap;
    genvaddr_t ret_addr;

    struct bulk_buffer *buf;

    /* unfill and unmap the frames */
    for (int i = 0; i < pool->num_buffers; ++i) {
        genvaddr_t offset = i * pool->buffer_size;
        buf = pool->buffers[i];
        err = memobj->f.unfill(memobj, offset, &ret_cap, &ret_addr);
        if (err_is_fail(err)) {
            if (err == LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET) {
                break;
            }
            /* TODO: Error handling */
            return err;
        }
        cap_delete(buf->cap);
        buf->cap = NULL_CAP;
    }

    /* delete the pool cap and the cnode cap */
    cap_destroy(pool->pool_cap);
    cap_destroy(pool_int->cnode_cap);

    err = vregion_destroy(vregion);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VREGION_DESTROY);
    }

    return SYS_ERR_OK;
}

/**
 * Does the mapping of a pool depending on the trust level.
 * Reserves virtual memory, and allocates a memobj for the pool.
 * In the trusted case, the pool is backed with the pool cap and mapped.
 * In the nontrusted case, the pool cap is split into separate buffer caps and
 * mapped.
 * If there is no pool_cap, only the virtual memory is allocated.
 *
 * XXX : trust_uninitialized is currently treated like the trusted case,
 * which is probably not the best idea. should we treat it as an error or just
 * ignore it?
 *
 * @param pool  the pool to map
 */
errval_t bulk_pool_map(struct bulk_pool *pool)
{
    assert(pool);
    struct bulk_pool_internal *pool_int = (struct bulk_pool_internal *) pool;
    errval_t err;
    if (pool->base_address != 0) {
        /* the pool already has a base address and thus is already mapped */

        /* XXX: maybe return an already mapped error ? */
        return SYS_ERR_OK;
    }

    if (!bulk_buffer_check_size(pool->buffer_size)) {
        return BULK_TRANSFER_ALLOC_BUFFER_SIZE;
    }

    size_t pool_size = pool->buffer_size * pool->num_buffers;

    struct vspace *vspace = get_current_vspace();

    struct memobj_fixed *memobj_fixed = malloc(sizeof(struct memobj_fixed));
    if (!memobj_fixed) {
        return BULK_TRANSFER_MEM;
    }
    struct memobj *memobj = &(memobj_fixed->m);

    // Create a memobj
    err = memobj_create_fixed(memobj_fixed, pool_size, 0, pool->num_buffers,
                              pool->buffer_size);

    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_MEMOBJ_CREATE_ANON);
    }

    struct pmap *pmap = vspace_get_pmap(vspace);

    /* allocate some virtual address space */
    genvaddr_t address;
    err = pmap->f.determine_addr(pmap, memobj, 4096, &address);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_PMAP_DETERMINE_ADDR);
    }
    pool->base_address = vspace_genvaddr_to_lvaddr(address);

    /* we have the address range, now we have to associate a vregion with it */
    struct vregion *vregion = malloc(sizeof(struct vregion));
    if (!vregion) {
        return BULK_TRANSFER_MEM;
    }

    err = vregion_map_fixed(vregion, get_current_vspace(), memobj, 0, pool_size,
                            address, VREGION_FLAGS_READ_WRITE);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_MEMOBJ_MAP_REGION);
    }

    pool_int->vregion = vregion;

    if (pool->trust == BULK_TRUST_FULL
                    || pool->trust == BULK_TRUST_UNINITIALIZED) {
        if (capref_is_null(pool->pool_cap)) {
            return SYS_ERR_CAP_NOT_FOUND;
        }
        //XXX: treating uninitialized just like full trust does not sound like a
        //      good idea...

        /* start creating caps for each buffer */
        struct capref buf_cap = {
            .slot = 0 };

        err = cnode_create(&pool_int->cnode_cap, &buf_cap.cnode,
                           pool->num_buffers, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_CNODE_CREATE);
        }

        /* copy the pool cap for each buffer into the new cnode and set
         * appropriate offset */
        for (int i = 0; i < pool->num_buffers; ++i) {
            struct bulk_buffer *buf = pool->buffers[i];
            buf_cap.slot = i;
            size_t offset = (i * pool->buffer_size);

            err = cap_copy(buf_cap, pool->pool_cap);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_CAP_COPY);
            }

            err = bulk_buffer_assign_cap(buf, buf_cap, offset);
            assert(err_is_ok(err)); /* this should not fail */

            err = memobj->f.fill(memobj, offset, buf->cap,
                                 buf->cap_offset);
            if (err_is_fail(err)) {
                /* TODO: error handling */
                return err_push(err, LIB_ERR_MEMOBJ_FILL);
            }
            buf->address = (void *) vspace_genvaddr_to_lvaddr(address + offset);
            err = memobj->f.pagefault(memobj, vregion, offset, 0);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
            }

            buf->state = BULK_BUFFER_READ_WRITE;
        }
    } else if (pool->trust == BULK_TRUST_NONE && !capref_is_null(pool->pool_cap)) {
        /* start creating caps for each buffer */
        struct capref buf_cap = {
            .slot = 0 };

        err = cnode_create(&pool_int->cnode_cap, &buf_cap.cnode,
                           pool->num_buffers, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_CNODE_CREATE);
        }
        /* determine the size bits per buffer (only used for the sanity check
         * below; the retype itself takes the size in bytes) */
        size_t size_bits = 12;
        size_t buf_size = pool->buffer_size >> 12;
        while (buf_size >>= 1) {
            ++size_bits;
        }
        // XXX: trying to understand this; is size_bits == log2(pool->buffer_size)?
        // -SG, 2016-04-20
        assert(1UL << size_bits == pool->buffer_size);
        /* split the pool cap into one frame cap per buffer; the caps land in
         * consecutive slots of the new cnode, matching the loop below */
        err = cap_retype(buf_cap, pool->pool_cap, 0, ObjType_Frame,
                         pool->buffer_size, pool->num_buffers);
        assert(err_is_ok(err)); //TODO: handle error instead

        /* set the capref for each buffer into the new cnode and set
         * appropriate offset in memobj */
        for (int i = 0; i < pool->num_buffers; ++i) {
            struct bulk_buffer *buf = pool->buffers[i];
            buf_cap.slot = i;
            size_t offset = (i * pool->buffer_size);

            err = bulk_buffer_assign_cap(buf, buf_cap, 0);
            assert(err_is_ok(err)); /* this should not fail */

            err = memobj->f.fill(memobj, offset, buf->cap,
                                 buf->cap_offset);
            if (err_is_fail(err)) {
                /* TODO: error handling - delete all our new caps? */
                return err_push(err, LIB_ERR_MEMOBJ_FILL);
            }
            buf->address = (void *) vspace_genvaddr_to_lvaddr(address + offset);
            err = memobj->f.pagefault(memobj, vregion, offset, 0);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
            }

            buf->state = BULK_BUFFER_READ_WRITE;
        }
    }

    return SYS_ERR_OK;
}
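
/*
 * Example (sketch of a possible creator-side sequence; the frame allocation
 * and trust setup are assumptions about the surrounding channel code, not
 * something this file prescribes). "pool" is assumed to come from
 * bulk_pool_alloc().
 *
 *     size_t bytes = pool->num_buffers * pool->buffer_size;
 *     size_t retbytes;
 *
 *     // back the pool with a frame before mapping it
 *     errval_t err = frame_alloc(&pool->pool_cap, bytes, &retbytes);
 *     if (err_is_fail(err)) {
 *         return err;
 *     }
 *
 *     pool->trust = BULK_TRUST_FULL;  // or BULK_TRUST_NONE to split per-buffer caps
 *     err = bulk_pool_map(pool);
 *     if (err_is_fail(err)) {
 *         return err;
 *     }
 */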

/**
 * initializes the buffers for a pool, assuming the pool struct is allocated
 * and num_buffers / buffer_size are already filled in
 *
 * @param pool  pointer to a pool with the information
 */
errval_t bulk_pool_init_bufs(struct bulk_pool *pool)
{
    size_t buffer_count = pool->num_buffers;

    /* allocate memory for buffers */
    struct bulk_buffer *bufs = malloc(
                    buffer_count * sizeof(struct bulk_buffer));
    if (!bufs) {
        return BULK_TRANSFER_MEM;
    }
    memset(bufs, 0, buffer_count * sizeof(struct bulk_buffer));

    pool->buffers = malloc(buffer_count * sizeof(void *));
    if (!pool->buffers) {
        free(bufs);
        return BULK_TRANSFER_MEM;
    }

    for (int i = 0; i < buffer_count; ++i) {
        (bufs + i)->state = BULK_BUFFER_INVALID;
        (bufs + i)->pool = pool;
        (bufs + i)->bufferid = i;
        pool->buffers[i] = bufs + i;
    }

    return SYS_ERR_OK;
}

/**
 * allocates the data structures for the pool.
 *
 * @param   pool            storage for pointer to newly allocated pool
 * @param   buffer_count    the number of buffers in the pool
 * @param   buffer_size     the size of a single buffer
 * @param   id              pool id
 */
errval_t bulk_pool_alloc_with_id(struct bulk_pool **pool,
                                 size_t buffer_count,
                                 size_t buffer_size,
                                 struct bulk_pool_id id)
{
    errval_t err;
    struct bulk_pool_internal *pool_int;

    /* allocate memory for the pool struct */

    pool_int = malloc(sizeof(struct bulk_pool_internal));

    if (pool_int == NULL) {
        return BULK_TRANSFER_MEM;
    }

    memset(pool_int, 0, sizeof(struct bulk_pool_internal));

    pool_int->pool.id = id;

    pool_int->pool.buffer_size = buffer_size;
    pool_int->pool.num_buffers = buffer_count;
    pool_int->pool.trust = BULK_TRUST_UNINITIALIZED;

    err = bulk_pool_init_bufs(&pool_int->pool);
    if (err_is_fail(err)) {
        return err;
    }

    err = bulk_pool_domain_list_insert(&pool_int->pool);
    if (err_is_fail(err)) {
        return err;
    }

    *pool = &pool_int->pool;
    return SYS_ERR_OK;
}

/**
 * allocates the data structures for the pool with a newly generated id.
 *
 * @param   pool            storage for pointer to newly allocated pool
 * @param   buffer_count    the number of buffers in the pool
 * @param   buffer_size     the size of a single buffer
 */
errval_t bulk_pool_alloc(struct bulk_pool **pool,
                         size_t buffer_count,
                         size_t buffer_size)
{
    struct bulk_pool_id id;
    bulk_pool_generate_id(&id);
    return bulk_pool_alloc_with_id(pool, buffer_count, buffer_size, id);
}
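
/*
 * Example (minimal sketch): allocate a pool of 32 buffers of 4 KiB each. The
 * buffer size has to satisfy bulk_buffer_check_size() once the pool is
 * mapped, so a page-sized power of two is the usual choice.
 *
 *     struct bulk_pool *pool;
 *     errval_t err = bulk_pool_alloc(&pool, 32, 4096);
 *     if (err_is_fail(err)) {
 *         DEBUG_ERR(err, "allocating bulk pool");
 *         return err;
 *     }
 */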

/**
 * frees up the resources used by the pool
 *
 * @param pool  the pool to dealloc
 */
errval_t bulk_pool_dealloc(struct bulk_pool *pool)
{
    /* all buffer structs were allocated in a single malloc in
     * bulk_pool_init_bufs, so freeing the first one frees them all */
    free(pool->buffers[0]);
    free(pool->buffers);
    free(pool);

    return SYS_ERR_OK;
}
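
/*
 * Example (sketch): tearing a pool down mirrors setup. Whether the pool must
 * also be removed from channels or the domain list first depends on the
 * surrounding code; this shows only the local cleanup.
 *
 *     errval_t err = bulk_pool_unmap(pool);
 *     if (err_is_fail(err)) {
 *         return err;
 *     }
 *     err = bulk_pool_dealloc(pool);
 */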