Lines Matching refs:pool (only in /barrelfish-2018-10-04/lib/bulk_transfer/)

44  * returns a pointer to the pool with the given id
46 * @param id the id of the pool to look up
48 * @return NULL if the pool is not present in the domain
54 if (bulk_pool_cmp_id(&list->pool->id, id) == 0) {
55 return list->pool;
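
Only fragments of the lookup survive in the matches above; they imply a domain-global, id-sorted singly linked list of struct bulk_pool_list nodes. A minimal sketch of the walk, assuming the includes below, a module-static list head, a next field in the list node, and the function name bulk_pool_domain_list_get (none of these appear in the matched output; the later sketches reuse the same includes):

    #include <assert.h>
    #include <stdlib.h>
    #include <barrelfish/barrelfish.h>
    #include <bulk_transfer/bulk_transfer.h>

    /* assumed: domain-global head of the id-sorted pool list */
    static struct bulk_pool_list *pool_list = NULL;

    struct bulk_pool *bulk_pool_domain_list_get(struct bulk_pool_id *id)
    {
        for (struct bulk_pool_list *list = pool_list; list; list = list->next) {
            if (bulk_pool_cmp_id(&list->pool->id, id) == 0) {
                return list->pool;
            }
        }
        return NULL;    /* the pool is not present in the domain */
    }
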
63 * inserts a pool into the domain global bulk pool list
65 * @param pool the pool to insert
67 errval_t bulk_pool_domain_list_insert(struct bulk_pool *pool)
75 new_pool->pool = pool;
86 switch (bulk_pool_cmp_id(&list->pool->id, &pool->id)) {
88 /* the ID of the pool in the list is lower, check next */
97 /* the ID of the pool in the list is bigger, insert before */
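
The matched switch arms drive a sorted insert into the same list. A sketch under the same assumptions, with bulk_pool_cmp_id returning -1, 0 or 1 and both error codes being guesses at the real ones:

    errval_t bulk_pool_domain_list_insert(struct bulk_pool *pool)
    {
        struct bulk_pool_list *new_pool = malloc(sizeof(*new_pool));
        if (new_pool == NULL) {
            return BULK_TRANSFER_MEM;               /* assumed error code */
        }
        new_pool->pool = pool;
        new_pool->next = NULL;

        struct bulk_pool_list **prev = &pool_list;
        for (struct bulk_pool_list *list = pool_list; list; list = list->next) {
            int8_t c = bulk_pool_cmp_id(&list->pool->id, &pool->id);
            if (c < 0) {
                /* the ID of the pool in the list is lower, check next */
                prev = &list->next;
                continue;
            }
            if (c == 0) {
                /* the pool is already in the list */
                free(new_pool);
                return BULK_TRANSFER_POOL_INVALD;   /* assumed error code */
            }
            /* the ID of the pool in the list is bigger, insert before */
            break;
        }
        new_pool->next = *prev;
        *prev = new_pool;
        return SYS_ERR_OK;
    }

The removal routines below walk the list the same way but unlink and free the matching node instead of inserting.
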
119 * removes the pool from the domain global bulk pool list
121 * @param pool the pool to remove
123 errval_t bulk_pool_domain_list_remove(struct bulk_pool *pool)
129 switch (bulk_pool_cmp_id(&list->pool->id, &pool->id)) {
131 /* the ID of the pool in the list is lower, check next */
143 /* the ID of the pool in the list is bigger, pool not in the list */
156 * compares two bulk pool ids
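
Only the comparator's doc comment is matched, yet every list walk above switches on its result. A plausible field-by-field implementation, assuming struct bulk_pool_id consists of machine, dom and local fields and a -1/0/1 return convention:

    int8_t bulk_pool_cmp_id(struct bulk_pool_id *id1, struct bulk_pool_id *id2)
    {
        if (id1->machine != id2->machine) {
            return (id1->machine < id2->machine) ? -1 : 1;
        }
        if (id1->dom != id2->dom) {
            return (id1->dom < id2->dom) ? -1 : 1;
        }
        if (id1->local != id2->local) {
            return (id1->local < id2->local) ? -1 : 1;
        }
        return 0;   /* the ids are equal */
    }
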
197 * checks if a pool already has been assigned to that channel
199 * @param pool the bulk pool to check for assignment
202 * @return true: the pool is assigned to this channel
205 uint8_t bulk_pool_is_assigned(struct bulk_pool *pool,
213 switch (bulk_pool_cmp_id(&list->pool->id, &pool->id)) {
215 /* the ID of the pool in the list is lower, check next */
221 /* we have a lower id than the pool in the list */
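
Because the per-channel pool list is also kept sorted, the membership test can stop as soon as it passes the slot where the pool would sit, which is what the matched "we have a lower id" arm does. A sketch; the channel->pools field name is an assumption:

    uint8_t bulk_pool_is_assigned(struct bulk_pool *pool,
                                  struct bulk_channel *channel)
    {
        /* channel->pools is an assumed field name */
        for (struct bulk_pool_list *list = channel->pools; list;
             list = list->next) {
            int8_t c = bulk_pool_cmp_id(&list->pool->id, &pool->id);
            if (c < 0) {
                continue;   /* the ID of the pool in the list is lower */
            }
            if (c == 0) {
                return 1;   /* the pool is assigned to this channel */
            }
            return 0;       /* we have a lower id than the pool in the list */
        }
        return 0;           /* end of list reached: not assigned */
    }
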
234 * gets a pointer to the pool on this channel
236 * @param id the id of the pool to look up
244 if (bulk_pool_cmp_id(&list->pool->id, id) == 0) {
245 return list->pool;
253 * adds a pool to a channel's pool list
255 * @param pool the pool to assign to the channel
256 * @param channel the channel to assign the pool to
258 errval_t bulk_pool_assign(struct bulk_pool *pool, struct bulk_channel *channel)
266 struct bulk_pool_internal *pool_int = (struct bulk_pool_internal *) pool;
269 new_pool->pool = pool;
280 switch (bulk_pool_cmp_id(&list->pool->id, &pool->id)) {
282 /* the ID of the pool in the list is lower, check next */
290 /* the ID of the pool in the list is bigger, insert before */
312 * removes the pool from the channel's pool list
314 * @param pool the pool to remove
315 * @param channel the channel to remove the pool from
317 errval_t bulk_pool_remove(struct bulk_pool *pool, struct bulk_channel *channel)
325 switch (bulk_pool_cmp_id(&list->pool->id, &pool->id)) {
327 /* the ID of the pool in the list is lower, check next */
339 /* the ID of the pool in the list is bigger, pool not in the list */
352 * unmaps the pool and frees up its entire memory region.
354 * @param pool the pool to unmap
357 errval_t bulk_pool_unmap(struct bulk_pool *pool)
359 assert(pool);
360 struct bulk_pool_internal *pool_int = (struct bulk_pool_internal *) pool;
363 /* there is no vregion associated with the pool, so it is not mapped */
379 for (int i = 0; i < pool->num_buffers; ++i) {
380 genvaddr_t offset = i * pool->buffer_size;
381 buf = pool->buffers[i];
394 /* delete the pool cap and the cnode cap */
395 cap_destroy(pool->pool_cap);
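
The unmap fragments fix the teardown order: bail out when no vregion exists, walk the buffers, then delete the pool cap and the cnode cap. A condensed sketch; vregion, cnode_cap and the per-buffer cap are assumed field names, and the memobj cleanup around the matched loop is elided:

    errval_t bulk_pool_unmap(struct bulk_pool *pool)
    {
        assert(pool);
        struct bulk_pool_internal *pool_int = (struct bulk_pool_internal *) pool;

        if (pool_int->vregion == NULL) {        /* assumed field */
            /* no vregion associated with the pool, so it is not mapped */
            return SYS_ERR_OK;
        }

        /* remove the mapping before releasing the backing caps */
        vregion_destroy(pool_int->vregion);

        /* release each buffer's individual cap */
        for (int i = 0; i < pool->num_buffers; ++i) {
            struct bulk_buffer *buf = pool->buffers[i];
            cap_destroy(buf->cap);              /* assumed field */
        }

        /* delete the pool cap and the cnode cap */
        cap_destroy(pool->pool_cap);
        cap_destroy(pool_int->cnode_cap);       /* assumed field */

        pool->base_address = 0;     /* the pool now counts as unmapped */
        return SYS_ERR_OK;
    }
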
407 * Maps a pool, depending on the trust level.
408 * Reserves virtual memory, and allocates a memobj for the pool.
409 * In the trusted case, the pool is backed with the pool cap and mapped.
410 * In the nontrusted case, the pool cap is split into separate buffer caps and
418 * @param pool the pool to map
420 errval_t bulk_pool_map(struct bulk_pool *pool)
422 assert(pool);
423 struct bulk_pool_internal *pool_int = (struct bulk_pool_internal *) pool;
425 if (pool->base_address != 0) {
426 /* the pool already has a base address and thus is mapped */
432 if (!bulk_buffer_check_size(pool->buffer_size)) {
436 size_t pool_size = pool->buffer_size * pool->num_buffers;
447 err = memobj_create_fixed(memobj_fixed, pool_size, 0, pool->num_buffers,
448 pool->buffer_size);
462 pool->base_address = vspace_genvaddr_to_lvaddr(address);
478 if (pool->trust == BULK_TRUST_FULL
479 || pool->trust == BULK_TRUST_UNINITIALIZED) {
480 if (capref_is_null(pool->pool_cap)) {
491 pool->num_buffers, NULL);
496 /* copy the pool cap for each buffer into the new cnode and set
498 for (int i = 0; i < pool->num_buffers; ++i) {
499 struct bulk_buffer *buf = pool->buffers[i];
501 size_t offset = (i * pool->buffer_size);
503 err = cap_copy(buf_cap, pool->pool_cap);
525 } else if (pool->trust == BULK_TRUST_NONE && !capref_is_null(pool->pool_cap)) {
531 pool->num_buffers, NULL);
537 size_t buf_size = pool->buffer_size >> 12;
541 // XXX: trying to understand this; is size_bits == log2(pool->buffer_size)?
543 assert(1UL << size_bits == pool->buffer_size);
544 //split pool cap into smaller caps for each buffer
545 err = cap_retype(buf_cap, pool->pool_cap, 0, ObjType_Frame, pool->buffer_size, 1);
550 for (int i = 0; i < pool->num_buffers; ++i) {
551 struct bulk_buffer *buf = pool->buffers[i];
553 size_t offset = (i * pool->buffer_size);
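
Taken together, the map fragments show: a fixed memobj of pool_size bytes with one chunk per buffer, a vspace reservation whose address becomes pool->base_address, and cap handling that depends on the trust level. Trusted pools get one cap_copy of the whole pool cap per buffer, with the offset alone selecting the slice; untrusted pools have the pool cap retyped into individual Frame caps of exactly buffer_size bytes, so a peer can only ever be granted its own buffer. That also resolves the XXX above: the assert demands that pool->buffer_size be a power of two, i.e. size_bits == log2(pool->buffer_size), and the >> 12 hints that sizes are additionally counted in 4 KiB pages. A condensed sketch of just the cap setup; buf_caps is a hypothetical array standing for consecutive slots of the pool's cnode, and the single retype with count = num_buffers is one reading of the matched per-buffer code:

    static errval_t pool_setup_buffer_caps(struct bulk_pool *pool,
                                           struct capref *buf_caps)
    {
        errval_t err;

        if (pool->trust == BULK_TRUST_FULL
            || pool->trust == BULK_TRUST_UNINITIALIZED) {
            /* trusted: every buffer cap is a copy of the entire pool cap */
            for (int i = 0; i < pool->num_buffers; ++i) {
                err = cap_copy(buf_caps[i], pool->pool_cap);
                if (err_is_fail(err)) {
                    return err;
                }
            }
        } else if (pool->trust == BULK_TRUST_NONE
                   && !capref_is_null(pool->pool_cap)) {
            /* untrusted: split the pool cap into one Frame cap per buffer,
             * each covering exactly buffer_size bytes of the pool */
            err = cap_retype(buf_caps[0], pool->pool_cap, 0, ObjType_Frame,
                             pool->buffer_size, pool->num_buffers);
            if (err_is_fail(err)) {
                return err;
            }
        }
        return SYS_ERR_OK;
    }
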
578 * initializes the buffers for a pool, given that the pool struct is allocated and
581 * @param pool pointer to a pool with the information
583 errval_t bulk_pool_init_bufs(struct bulk_pool *pool)
585 size_t buffer_count = pool->num_buffers;
595 pool->buffers = malloc(buffer_count * sizeof(void *));
596 if (!pool->buffers) {
602 (bufs + i)->pool = pool;
604 pool->buffers[i] = bufs + i;
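
The matched assignments imply one contiguous allocation holding every bulk_buffer struct, plus a pointer table over it; that layout is what later lets bulk_pool_dealloc free the whole block through pool->buffers[0]. A sketch with the error code assumed and any per-buffer setup beyond the pool back-pointer elided:

    errval_t bulk_pool_init_bufs(struct bulk_pool *pool)
    {
        size_t buffer_count = pool->num_buffers;

        /* one contiguous block for all buffer structs */
        struct bulk_buffer *bufs = calloc(buffer_count,
                                          sizeof(struct bulk_buffer));
        if (!bufs) {
            return BULK_TRANSFER_MEM;   /* assumed error code */
        }

        /* pointer table indexed by buffer number */
        pool->buffers = malloc(buffer_count * sizeof(void *));
        if (!pool->buffers) {
            free(bufs);
            return BULK_TRANSFER_MEM;   /* assumed error code */
        }

        for (size_t i = 0; i < buffer_count; ++i) {
            (bufs + i)->pool = pool;
            pool->buffers[i] = bufs + i;
        }
        return SYS_ERR_OK;
    }
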
611 * allocates the data structures for the pool.
613 * @param pool storage for pointer to newly allocated pool
614 * @param buffer_count the number of buffers in the pool
616 * @param id pool id
618 errval_t bulk_pool_alloc_with_id(struct bulk_pool **pool,
626 /* allocate memory for the pool struct */
636 pool_int->pool.id = id;
638 pool_int->pool.buffer_size = buffer_size;
639 pool_int->pool.num_buffers = buffer_count;
640 pool_int->pool.trust = BULK_TRUST_UNINITIALIZED;
642 err = bulk_pool_init_bufs(&pool_int->pool);
647 bulk_pool_domain_list_insert(&pool_int->pool);
648 *pool = &pool_int->pool;
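
Stitching the matched assignments together gives the allocator's shape; the calloc of the internal struct and the error paths are assumptions:

    errval_t bulk_pool_alloc_with_id(struct bulk_pool **pool,
                                     size_t buffer_count,
                                     size_t buffer_size,
                                     struct bulk_pool_id id)
    {
        errval_t err;

        /* allocate memory for the pool struct */
        struct bulk_pool_internal *pool_int = calloc(1, sizeof(*pool_int));
        if (pool_int == NULL) {
            return BULK_TRANSFER_MEM;   /* assumed error code */
        }

        pool_int->pool.id = id;
        pool_int->pool.buffer_size = buffer_size;
        pool_int->pool.num_buffers = buffer_count;
        pool_int->pool.trust = BULK_TRUST_UNINITIALIZED;

        err = bulk_pool_init_bufs(&pool_int->pool);
        if (err_is_fail(err)) {
            free(pool_int);
            return err;
        }

        bulk_pool_domain_list_insert(&pool_int->pool);
        *pool = &pool_int->pool;
        return SYS_ERR_OK;
    }
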
653 * allocates the data structures for the pool with new id.
655 * @param pool storage for pointer to newly allocated pool
656 * @param buffer_count the number of buffers in the pool
659 errval_t bulk_pool_alloc(struct bulk_pool **pool,
665 return bulk_pool_alloc_with_id(pool, buffer_count, buffer_size, id);
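
Only the wrapper's tail call is matched, so the way a fresh id is built is entirely a guess. One hypothetical sketch, where machine is fixed, dom comes from the dispatcher, and local is a static counter; disp_get_domain_id() is a real libbarrelfish call, everything else here is assumed:

    errval_t bulk_pool_alloc(struct bulk_pool **pool,
                             size_t buffer_count,
                             size_t buffer_size)
    {
        static uint32_t local_id = 0;           /* assumed counter */
        struct bulk_pool_id id = {
            .machine = 0,                       /* assumed: single machine */
            .dom     = disp_get_domain_id(),
            .local   = local_id++,
        };
        return bulk_pool_alloc_with_id(pool, buffer_count, buffer_size, id);
    }
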
669 * frees up the resources needed by the pool
671 * @param pool the pool to dealloc
673 errval_t bulk_pool_dealloc(struct bulk_pool *pool)
676 free(pool->buffers[0]);
677 free(pool->buffers);
678 free(pool);
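
Note the freeing order: free(pool->buffers[0]) releases the contiguous block of buffer structs set up in bulk_pool_init_bufs, free(pool->buffers) releases the pointer table, and free(pool) releases the pool itself; the last step presumably works because the casts above suggest the public struct bulk_pool is the first member of struct bulk_pool_internal.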