/**
 * \file
 * \brief Slot allocator capable of allocating more than one slot at a time
 */

/*
 * Copyright (c) 2010, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#include <barrelfish/barrelfish.h>

/**
 * \brief Allocate slots out of a cnode
 *
 * \param alloc         Pointer to the metadata
 * \param nslots        Number of slots to allocate
 * \param ret           Pointer used to return the first slot of the allocated range
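 *
 * On success, the allocated slots are contiguous within one cnode and
 * \p ret refers to the first of them.
 *
 * Usage sketch (illustrative only; assumes an allocator \c alloc set up with
 * range_slot_alloc_init() and a caller-chosen count of 4 slots):
 * \code
 * struct capref first;
 * errval_t err = range_slot_alloc(&alloc, 4, &first);
 * if (err_is_ok(err)) {
 *     // slot i of the range is simply first.slot + i, in the same cnode
 *     struct capref third = first;
 *     third.slot += 2;
 * }
 * \endcode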
 */
errval_t range_slot_alloc(struct range_slot_allocator *alloc, cslot_t nslots,
                          struct capref *ret)
{
    assert(alloc);
    if (!alloc->is_head) {
        return LIB_ERR_RANGE_ALLOC_NOT_HEAD;
    }
    struct range_slot_allocator *head = alloc;
    thread_mutex_lock(&head->mutex);

    struct cnode_meta *prev = NULL, *walk = NULL;

    /* Look for large enough space in whole chain */
    while (alloc) {
        walk = alloc->meta;
        prev = NULL;
        while(walk != NULL) {
            if (walk->space >= nslots && walk->slot + nslots <= L2_CNODE_SLOTS) {
                break;
            }
            prev = walk;
            walk = walk->next;
        }

        /* Space found */
        if (walk != NULL) {
            break;
        }

        alloc = alloc->next;
    }

    if (alloc == NULL) {
        thread_mutex_unlock(&head->mutex);
        return LIB_ERR_SLOT_ALLOC_NO_SPACE;
    }

    /* Set the cap slot to return */
    ret->cnode = alloc->cnode;
    ret->slot  = walk->slot;

    /* Mark the allocated slots as used */
    walk->slot  += nslots;
    walk->space -= nslots;

    /* If no more space, free metadata */
    if (walk->space == 0) {
        if (prev != NULL) {
            prev->next = walk->next;
        } else {
            alloc->meta = walk->next;
        }
        slab_free(&alloc->slab, walk);
    }

    thread_mutex_unlock(&head->mutex);
    return SYS_ERR_OK;
}

/**
 * \brief Insert the slots after walk
 *
 * \param alloc           Meta data for the allocator
 * \param nslots          Number of slots to insert
 * \param slot            Slot number in the cnode to start inserting from
 * \param walk            Meta data of the #cnode_meta to insert after
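 *
 * Illustrative sketch (hypothetical values; walk must be the last node in the
 * free list):
 * \code
 * struct cnode_meta last = { .slot = 10, .space = 4, .next = NULL };
 * // the freed range starts right where 'last' ends (slot 14), so it coalesces
 * errval_t err = insert_after(alloc, 2, 14, &last);
 * // last.space is now 6 and no new metadata node was allocated
 * \endcode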
 */
static errval_t insert_after(struct range_slot_allocator *alloc, size_t nslots,
                             cslot_t slot, struct cnode_meta *walk)
{
    assert(walk->next == NULL);

    if (slot == (walk->slot + walk->space)) { /* Join */
        walk->space += nslots;
        return SYS_ERR_OK;
    }

    /* Create new meta data */
    walk->next = slab_alloc(&alloc->slab);
    if (walk->next == NULL) {
        return LIB_ERR_SLAB_ALLOC_FAIL;
    }
    walk->next->slot  = slot;
    walk->next->space = nslots;
    walk->next->next  = NULL;
    return SYS_ERR_OK;
}

/**
 * \brief Insert the slots before walk
 *
 * \param alloc           Meta data for the allocator
 * \param nslots          Number of slots to insert
 * \param slot            Slot number in the cnode to start inserting from
 * \param prev            Meta data of the #cnode_meta preceding walk, or NULL
 * \param walk            Meta data of the #cnode_meta to insert before
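 *
 * Illustrative sketch (hypothetical values): if prev covers slots [10..14)
 * and walk covers [20..24), freeing the 6 slots starting at 14 bridges the
 * gap, so both regions and the freed range collapse into prev:
 * \code
 * insert_before(alloc, 6, 14, prev, walk);
 * // prev->space is now 14 (slots [10..24)) and walk has been slab_free'd
 * \endcode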
 */
static errval_t insert_before(struct range_slot_allocator *alloc, size_t nslots,
                              cslot_t slot, struct cnode_meta *prev,
                              struct cnode_meta *walk)
{
    assert(walk != NULL);

    if ((prev != NULL) &&
        (slot == (prev->slot + prev->space)) &&
        ((slot + nslots) == walk->slot)) { /* Join with prev and walk */
        prev->space = prev->space + nslots + walk->space;
        prev->next = walk->next;
        slab_free(&alloc->slab, walk);
        return SYS_ERR_OK;
    }

    if ((slot + nslots) == walk->slot) { /* Join with walk */
        walk->slot = slot;
        walk->space += nslots;
        return SYS_ERR_OK;
    }

    if ((prev != NULL) &&
        (slot == (prev->slot + prev->space))) { /* Join with prev */
        prev->space += nslots;
        return SYS_ERR_OK;
    }

    /* Create new meta data */
    struct cnode_meta *new = slab_alloc(&alloc->slab);
    if (new == NULL) {
        return LIB_ERR_SLAB_ALLOC_FAIL;
    }
    new->slot  = slot;
    new->space = nslots;
    if (prev == NULL) {
        assert(alloc->meta == walk);
        alloc->meta = new;
    } else {
        prev->next = new;
    }
    new->next = walk;

    return SYS_ERR_OK;
}

/**
 * \brief Free slots belonging to the cnode
 *
 * \param alloc         Pointer to the metadata
 * \param cap           Slot to start freeing from
 * \param nslots        Number of slots to free
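 *
 * The freed range is merged back into the allocator's free list, joining
 * with adjacent free regions where possible.
 *
 * Usage sketch (illustrative only; frees the range obtained from a
 * hypothetical earlier range_slot_alloc() call of 4 slots starting at
 * \c first):
 * \code
 * errval_t err = range_slot_free(&alloc, first, 4);
 * if (err_is_fail(err)) {
 *     DEBUG_ERR(err, "range_slot_free failed");
 * }
 * \endcode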
 */
errval_t range_slot_free(struct range_slot_allocator *alloc, struct capref cap,
                         cslot_t nslots)
{
    if (!alloc->is_head) {
        return LIB_ERR_RANGE_ALLOC_NOT_HEAD;
    }

    errval_t err;
    struct range_slot_allocator *head = alloc;
    thread_mutex_lock(&head->mutex);

    // find right allocator
    while (alloc && !cnodecmp(cap.cnode, alloc->cnode)) {
        alloc = alloc->next;
    }
    if (!alloc) {
        thread_mutex_unlock(&head->mutex);
        return LIB_ERR_SLOT_ALLOC_WRONG_CNODE;
    }

    // alloc is now the right chain element
    struct cnode_meta *prev = NULL;
    struct cnode_meta *walk = alloc->meta;

    while(walk != NULL) {
        if ((cap.slot > walk->slot) && (walk->next == NULL)) {
            err = insert_after(alloc, nslots, cap.slot, walk);
            thread_mutex_unlock(&head->mutex);
            return err;
        }
        if (cap.slot < walk->slot) {
            err = insert_before(alloc, nslots, cap.slot, prev, walk);
            thread_mutex_unlock(&head->mutex);
            return err;
        }
        prev = walk;
        walk = walk->next;
    }

    assert(alloc->meta == NULL);
    alloc->meta = slab_alloc(&alloc->slab);
    if (alloc->meta == NULL) {
        thread_mutex_unlock(&head->mutex);
        return LIB_ERR_SLAB_ALLOC_FAIL;
    }
    alloc->meta->slot = cap.slot;
    alloc->meta->space = nslots;
    alloc->meta->next = NULL;
    thread_mutex_unlock(&head->mutex);
    return SYS_ERR_OK;
}

/**
 * \brief Constructor for a new instance of a single cnode cspace_allocator
 *
 * \param ret       Instance of the allocator created
 * \param nslots    Desired number of slots the cnode should have
 * \param retslots  Number of slots in the cnode actually created
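 *
 * Usage sketch (illustrative only):
 * \code
 * struct range_slot_allocator alloc;
 * cslot_t retslots;
 * errval_t err = range_slot_alloc_init(&alloc, L2_CNODE_SLOTS, &retslots);
 * if (err_is_fail(err)) {
 *     USER_PANIC_ERR(err, "range_slot_alloc_init failed");
 * }
 * // retslots == L2_CNODE_SLOTS; alloc is now ready for range_slot_alloc()
 * \endcode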
 */
errval_t range_slot_alloc_init(struct range_slot_allocator *ret,
                               cslot_t nslots, cslot_t *retslots)
{
    errval_t err;

    if (nslots != L2_CNODE_SLOTS) {
        debug_printf("WARNING: %s called with nslots=%"PRIuCSLOT"\n",
                __FUNCTION__, nslots);
        nslots = L2_CNODE_SLOTS;
    }

    /* Cap for the cnode */
    err = cnode_create_l2(&ret->cnode_cap, &ret->cnode);
    if (err_is_fail(err)) {
        return err;
    }

    if (retslots) {
        *retslots = L2_CNODE_SLOTS;
    }

    /* Memory for the slab allocator */
    void *buf = malloc(sizeof(struct cnode_meta) * nslots / 2);
    if (!buf) {
        return LIB_ERR_MALLOC_FAIL;
    }

    slab_init(&ret->slab, sizeof(struct cnode_meta), NULL);
    slab_grow(&ret->slab, buf, sizeof(struct cnode_meta) * nslots / 2);
    thread_mutex_init(&ret->mutex);

    /* Set the fields in the allocator instance */
    ret->meta = slab_alloc(&ret->slab);
    if (ret->meta == NULL) {
        return LIB_ERR_SLAB_ALLOC_FAIL;
    }
    ret->meta->slot = 0;
    ret->meta->space = nslots;
    ret->meta->next = NULL;

    // set is_head to true here; internal code can reset it by hand
    ret->is_head = true;

    return SYS_ERR_OK;
}

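/**
 * \brief Count the free slots left in the allocator
 *
 * Walks every allocator in the chain and sums up the space recorded in the
 * free-list metadata. Must be called on the head of the chain.
 *
 * \param alloc         Pointer to the head of the allocator chain
 *
 * \returns the number of free slots
 */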
size_t range_slot_alloc_freecount(struct range_slot_allocator *alloc)
{
    size_t count = 0;
    if (!alloc->is_head) {
        return LIB_ERR_RANGE_ALLOC_NOT_HEAD;
    }
    struct range_slot_allocator *head = alloc;
    thread_mutex_lock(&head->mutex);

    struct range_slot_allocator *alloc_w = alloc;

    while (alloc_w) {
        struct cnode_meta *walk = alloc_w->meta;
        while(walk != NULL) {
            count += walk->space;
            walk = walk->next;
        }
        alloc_w = alloc_w->next;
    }

    thread_mutex_unlock(&head->mutex);
    return count;
}

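/**
 * \brief Grow the allocator by chaining on a freshly created cnode
 *
 * Appends a new range_slot_allocator (backed by a new L2 cnode) to the end
 * of the chain, so that subsequent range_slot_alloc() calls have more slots
 * to hand out. Must be called on the head of the chain.
 *
 * \param alloc         Pointer to the head of the allocator chain
 * \param slots         Minimum number of slots the new cnode must provide
 *                      (at most #L2_CNODE_SLOTS)
 *
 * Usage sketch (illustrative only; refill when an allocation runs dry):
 * \code
 * struct capref first;
 * errval_t err = range_slot_alloc(&alloc, 4, &first);
 * if (err_no(err) == LIB_ERR_SLOT_ALLOC_NO_SPACE) {
 *     err = range_slot_alloc_refill(&alloc, 4);
 * }
 * \endcode
 */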
errval_t range_slot_alloc_refill(struct range_slot_allocator *alloc, cslot_t slots)
{
    if (!alloc->is_head) {
        return LIB_ERR_RANGE_ALLOC_NOT_HEAD;
    }

    struct range_slot_allocator *head = alloc;
    thread_mutex_lock(&head->mutex);
    // find last allocator in chain
    while(alloc->next) {
        alloc = alloc->next;
    }
    // allocate new instance
    alloc->next = malloc(sizeof(struct range_slot_allocator));
    assert(alloc->next);

    // initialize new instance
    struct range_slot_allocator *n = alloc->next;
    n->next = NULL;
    cslot_t retslots;
    errval_t err = range_slot_alloc_init(n, slots, &retslots);
    assert(err_is_ok(err));
    assert(retslots >= slots);

    n->is_head = false;

    thread_mutex_unlock(&head->mutex);
    return SYS_ERR_OK;
}