/**
 * \file
 * \brief Slot allocator for a single cnode
 *
 * Allocators should be created with the worst-case slab requirement.
 * The worst case is half the number of slots in the cnode: the free
 * list is longest when every second slot is allocated, leaving
 * nslots / 2 separate free regions to track.
 */

/*
 * Copyright (c) 2010, ETH Zurich.
 * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
 */

#include <barrelfish/barrelfish.h>
#include <barrelfish/caddr.h>

static errval_t salloc(struct slot_allocator *ca, struct capref *ret)
{
    struct single_slot_allocator *sca = (struct single_slot_allocator*)ca;

    thread_mutex_lock(&ca->mutex);

    // Check for space under the lock, so a concurrent caller cannot drain
    // the last slot between the check and the list update below
    if (sca->a.space == 0) {
        thread_mutex_unlock(&ca->mutex);
        return LIB_ERR_SLOT_ALLOC_NO_SPACE;
    }

    // Slot to return
    ret->cnode = sca->cnode;
    ret->slot = sca->head->slot;

#if 0
    char buf[256];
    debug_print_capref(buf, 256, *ret);
    debug_printf("%p->salloc: ret = %.*s\n", ca, 256, buf);
#endif

    // Decrement space
    sca->head->space--;
    sca->head->slot++;
    sca->a.space--;

    // Free the head if its region is exhausted
    if (sca->head->space == 0) {
        struct cnode_meta *walk = sca->head;
        sca->head = walk->next;
        slab_free(&sca->slab, walk);
    }

    thread_mutex_unlock(&ca->mutex);
    return SYS_ERR_OK;
}

static errval_t free_slots(struct single_slot_allocator *sca, cslot_t slot,
                           cslot_t count, struct thread_mutex *mutex)
{
    thread_mutex_lock(mutex);

    errval_t err = SYS_ERR_OK;

    struct cnode_meta *walk = sca->head;
    struct cnode_meta *prev = NULL;

    // Entire cnode was allocated: start a new free list
    if (!sca->head) {
        sca->head = slab_alloc(&sca->slab);
        sca->head->slot = slot;
        sca->head->space = count;
        sca->head->next = NULL;
        goto finish;
    }

    // Freed range ends exactly at the head region: extend it downwards
    if (slot + count == sca->head->slot) {
        sca->head->slot = slot;
        sca->head->space += count;
        goto finish;
    }

    // Freed range lies before the head region: prepend a new region
    if (slot < sca->head->slot) {
        struct cnode_meta *new = slab_alloc(&sca->slab);
        new->slot = slot;
        new->space = count;
        new->next = sca->head;
        sca->head = new;
        goto finish;
    }

    while (walk != NULL) {
        // Freeing at the upper edge of walk: extend it upwards
        if (slot == walk->slot + walk->space) {
            walk->space += count;

            // Check if walk now reaches its successor; if so, merge them
            struct cnode_meta *next = walk->next;
            if (next && next->slot == walk->slot + walk->space) {
                walk->space += next->space;
                walk->next = next->next;
                slab_free(&sca->slab, next);
            }

            goto finish;
        }
        else if (slot < walk->slot + walk->space) {
            // Slot lies inside an already-free region: double free
            err = LIB_ERR_SLOT_UNALLOCATED;
            goto unlock;
        }

        // Freeing just before walk->next: extend it downwards
        if (walk->next && slot + count == walk->next->slot) {
            walk->next->slot = slot;
            walk->next->space += count;
            goto finish;
        }

        // Freeing after walk and before walk->next: insert a new region
        if (walk->next && slot < walk->next->slot) {
            struct cnode_meta *new = walk->next;
            walk->next = slab_alloc(&sca->slab);
            walk->next->slot = slot;
            walk->next->space = count;
            walk->next->next = new;
            goto finish;
        }
        prev = walk;
        walk = walk->next;
    }

    // Freeing past the end of the list: append a new region
    prev->next = slab_alloc(&sca->slab);
    prev->next->slot = slot;
    prev->next->space = count;
    prev->next->next = NULL;

finish:
    sca->a.space += count;
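
    // Every success path jumps to finish, which credits the freed slots
    // back to the allocator-wide count and then falls through to unlock,
    // so the mutex is released exactly once on every exit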
unlock:
    thread_mutex_unlock(mutex);
    return err;
}

static errval_t sfree(struct slot_allocator *ca, struct capref cap)
{
    struct single_slot_allocator *sca = (struct single_slot_allocator*)ca;
    if (!cnodecmp(cap.cnode, sca->cnode)) {
        return LIB_ERR_SLOT_ALLOC_WRONG_CNODE;
    }

    return free_slots(sca, cap.slot, 1, &ca->mutex);
}

cslot_t single_slot_alloc_freecount(struct single_slot_allocator *this)
{
    cslot_t freecount = 0;
    for (struct cnode_meta *walk = this->head; walk; walk = walk->next) {
        freecount += walk->space;
    }
    return freecount;
}

errval_t single_slot_alloc_resize(struct single_slot_allocator *this,
                                  cslot_t newslotcount)
{
    errval_t err;

    // Shrinking is not supported: report and treat as a no-op
    if (newslotcount <= this->a.nslots) {
        debug_printf("%s: newcount = %"PRIuCSLOT", currcount = %"PRIuCSLOT"\n",
                     __FUNCTION__, newslotcount, this->a.nslots);
        return SYS_ERR_OK;
    }

    cslot_t grow = newslotcount - this->a.nslots;

    // Refill the slab allocator backing the free-list metadata
    size_t bufgrow = SINGLE_SLOT_ALLOC_BUFLEN(grow);
    void *buf = malloc(bufgrow);
    if (!buf) {
        return LIB_ERR_MALLOC_FAIL;
    }
    slab_grow(&this->slab, buf, bufgrow);

    // Mark the new slots as free
    err = free_slots(this, this->a.nslots, grow, &this->a.mutex);
    if (err_is_fail(err)) {
        return err;
    }

    // Update generic metadata
    this->a.nslots = newslotcount;

    return SYS_ERR_OK;
}

errval_t single_slot_alloc_init_raw(struct single_slot_allocator *ret,
                                    struct capref cap, struct cnoderef cnode,
                                    cslot_t nslots, void *buf, size_t buflen)
{
    /* Generic part */
    ret->a.alloc = salloc;
    ret->a.free = sfree;
    ret->a.space = nslots;
    ret->a.nslots = nslots;
    thread_mutex_init(&ret->a.mutex);

    /* Specific part */
    ret->cap = cap;
    ret->cnode = cnode;

    slab_init(&ret->slab, sizeof(struct cnode_meta), NULL);
    if (buflen > 0) {
        // Warn about callers that do not provide enough buffer space
        #if !defined(NDEBUG)
        // On ARM, __builtin_return_address does not support arguments != 0
        #if !defined(__arm__) && !defined(__aarch64__)
        size_t buflen_proper = SINGLE_SLOT_ALLOC_BUFLEN(nslots);
        if (buflen != buflen_proper) {
            debug_printf("******* FIXME: %s buflen=%zu != buflen_proper=%zu\n"
                         "call stack: %p %p\n",
                         __FUNCTION__, buflen, buflen_proper,
                         __builtin_return_address(0),
                         __builtin_return_address(1));
        }
        #endif
        #endif
        slab_grow(&ret->slab, buf, buflen);
    }

    ret->head = slab_alloc(&ret->slab);
    assert(ret->head != NULL);
    ret->head->slot = 0;
    ret->head->space = nslots;
    ret->head->next = NULL;

    return SYS_ERR_OK;
}

errval_t single_slot_alloc_init(struct single_slot_allocator *ret,
                                cslot_t nslots, cslot_t *retslots)
{
    errval_t err;

    struct capref cap;
    struct cnoderef cnode;
    // cnode_create may round the slot count up; nslots is updated in place
    err = cnode_create(&cap, &cnode, nslots, &nslots);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_CNODE_CREATE);
    }
    size_t buflen = SINGLE_SLOT_ALLOC_BUFLEN(nslots);
    void *buf = malloc(buflen);
    if (!buf) {
        return LIB_ERR_MALLOC_FAIL;
    }

    err = single_slot_alloc_init_raw(ret, cap, cnode, nslots, buf, buflen);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT_RAW);
    }

    if (retslots) {
        *retslots = nslots;
    }
    return SYS_ERR_OK;
}
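
/*
 * Usage sketch (illustrative only, not part of this file): how a caller
 * might create a single-slot allocator and cycle one slot through it.
 * Kept under #if 0 like the debug block above; the 256-slot size is an
 * arbitrary example, and error handling is reduced to asserts.
 */
#if 0
static void single_slot_alloc_example(void)
{
    struct single_slot_allocator sa;
    cslot_t retslots = 0;

    // Create a cnode (possibly rounded up beyond 256 slots) and wrap it
    errval_t err = single_slot_alloc_init(&sa, 256, &retslots);
    assert(err_is_ok(err));
    assert(single_slot_alloc_freecount(&sa) == retslots);

    // Allocate a slot through the generic slot_allocator interface...
    struct capref slot;
    err = sa.a.alloc(&sa.a, &slot);
    assert(err_is_ok(err));

    // ...and return it, merging it back into the free list
    err = sa.a.free(&sa.a, slot);
    assert(err_is_ok(err));
    assert(single_slot_alloc_freecount(&sa) == retslots);
}
#endif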