/**
 * \file
 * \brief Slot allocator for two level CSpace
 */

/*
 * Copyright (c) 2016, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
 */

#include <barrelfish/barrelfish.h>
#include <barrelfish/core_state.h>
#include "internal.h"
#include <stdlib.h>
/**
 * \brief Allocate a slot from the two-level allocator
 *
 * \param ca   Instance of the allocator
 * \param ret  Pointer to return the allocated slot
 *
 * Hands out a slot from one of the single slot allocators in the list and,
 * when the last free slot is taken, refills the allocator by creating a new
 * L2 CNode before returning.
 */
errval_t two_level_alloc(struct slot_allocator *ca, struct capref *ret)
{
    errval_t err = SYS_ERR_OK;
    struct multi_slot_allocator *mca = (struct multi_slot_allocator*)ca;

    thread_mutex_lock(&ca->mutex);
    assert(ca->space != 0);
    ca->space--;

    /* Try allocating from the list of single slot allocators */
    struct slot_allocator_list *walk = mca->head;
    while (walk != NULL) {
        err = walk->a.a.alloc(&walk->a.a, ret);
        if (err_no(err) != LIB_ERR_SLOT_ALLOC_NO_SPACE) {
            break;
        }
        walk = walk->next;
    }
    if (err_is_fail(err)) {
        thread_mutex_unlock(&ca->mutex);
        return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC);
    }

    /*
     * If no more slots are left, grow eagerly: the reserve is promoted to
     * the head of the list the moment the last free slot is handed out, so
     * the slot allocations needed to build the next reserve can still be
     * served by this allocator.
     */
    if (ca->space == 0) {
        ca->space = ca->nslots;
        /* Pull in the reserve */
        mca->reserve->next = mca->head;
        mca->head = mca->reserve;

        /* Setup a new reserve */
        // Cnode: in Root CN
        struct capref cap;
        struct cnoderef cnode;
        thread_mutex_unlock(&ca->mutex);
        // Do not call slot_alloc_root() here as we want control over refill.
        struct slot_alloc_state *state = get_slot_alloc_state();
        struct slot_allocator *rca = (struct slot_allocator *)(&state->rootca);
        // Need to refill when only one slot is left, otherwise it's too late
        size_t rootcn_free = single_slot_alloc_freecount(&state->rootca);
        if (rootcn_free == 1) {
            // resize root slot allocator (and rootcn)
            err = root_slot_allocator_refill(NULL, NULL);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_ROOTSA_RESIZE);
            }
        }
        err = rca->alloc(rca, &cap);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "allocating root cnode slot failed");
            return err_push(err, LIB_ERR_SLOT_ALLOC);
        }
        err = cnode_create_raw(cap, &cnode, ObjType_L2CNode, ca->nslots, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_CNODE_CREATE);
        }
        thread_mutex_lock(&ca->mutex);

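        /*
         * The list nodes and per-allocator metadata live in a slab. If the
         * slab is exhausted, it is grown through the MMU-aware vspace
         * region; that mapping may itself call slot_alloc, which is why the
         * mutex is dropped around vspace_mmu_aware_map() below.
         */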
        // Buffers
        void *buf = slab_alloc(&mca->slab);
        if (!buf) { /* Grow slab */
            // Allocate slot out of the list
            mca->a.space--;
            // ensure we have the right allocator
            vspace_mmu_aware_set_slot_alloc(&mca->mmu_state, &mca->head->a.a);

            thread_mutex_unlock(&ca->mutex); // following functions may call
                                             // slot_alloc
            void *slab_buf;
            size_t size;
            err = vspace_mmu_aware_map(&mca->mmu_state, mca->slab.blocksize,
                                       &slab_buf, &size);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_VSPACE_MMU_AWARE_MAP);
            }

            thread_mutex_lock(&ca->mutex);

            // Grow slab
            slab_grow(&mca->slab, slab_buf, size);

            // Try allocating again
            buf = slab_alloc(&mca->slab);
            if (!buf) {
                thread_mutex_unlock(&ca->mutex);
                // err is SYS_ERR_OK here, so return the slab failure directly
                return LIB_ERR_SLAB_ALLOC_FAIL;
            }
        }

        mca->reserve = buf;
        buf = (char *)buf + sizeof(struct slot_allocator_list);
        size_t bufsize = mca->slab.blocksize - sizeof(struct slot_allocator_list);

        // Allocator
        err = single_slot_alloc_init_raw(&mca->reserve->a, cap, cnode,
                                         mca->a.nslots, buf, bufsize);
        if (err_is_fail(err)) {
            thread_mutex_unlock(&ca->mutex);
            return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT_RAW);
        }
    }

    thread_mutex_unlock(&ca->mutex);
    return SYS_ERR_OK;
}

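/*
 * Usage sketch (illustrative, not part of the library): allocating and
 * freeing a slot through the generic slot_allocator interface, assuming
 * `mca` is a multi_slot_allocator set up with two_level_slot_alloc_init().
 *
 *   struct capref slot;
 *   errval_t err = mca->a.alloc(&mca->a, &slot);
 *   if (err_is_fail(err)) {
 *       USER_PANIC_ERR(err, "slot allocation failed");
 *   }
 *   // ... use the slot, e.g. as the destination of a cap_copy() ...
 *   err = mca->a.free(&mca->a, slot);
 */
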
/**
 * \brief Free an allocated slot
 *
 * \param ca  Instance of the allocator
 * \param cap The slot to free
 *
 * Walks the list of single slot allocators until one of them accepts the
 * slot. Returns LIB_ERR_SLOT_ALLOC_WRONG_CNODE if no allocator in the list
 * manages the slot's CNode.
 */
errval_t two_level_free(struct slot_allocator *ca, struct capref cap)
{
    errval_t err;
    thread_mutex_lock(&ca->mutex);
    struct multi_slot_allocator *mca = (struct multi_slot_allocator*)ca;
    struct slot_allocator_list *walk = mca->head;

    while (walk != NULL) {
        err = walk->a.a.free(&walk->a.a, cap);
        if (err_is_ok(err)) {
            mca->a.space++;
        }
        if (err_no(err) != LIB_ERR_SLOT_ALLOC_WRONG_CNODE) {
            thread_mutex_unlock(&ca->mutex);
            return err;
        }
        walk = walk->next;
    }

    thread_mutex_unlock(&ca->mutex);
    return LIB_ERR_SLOT_ALLOC_WRONG_CNODE;
}

/**
 * \brief Initializer that does not allocate any space
 *
 * #slot_alloc_init duplicates some of the code below;
 * modify it if making changes here.
 *
 * XXX: head_buf and reserve_buf each point to a separate buffer of size
 * bufsize bytes which can be used for backing storage. bufsize evidently
 * needs to be >= sizeof(struct cnode_meta) * nslots / 2. Don't ask me why! -AB
 */
errval_t two_level_slot_alloc_init_raw(struct multi_slot_allocator *ret,
                                       struct capref initial_cap,
                                       struct cnoderef initial_cnode,
                                       struct capref reserve_cap,
                                       struct cnoderef reserve_cnode,
                                       void *head_buf, void *reserve_buf,
                                       size_t bufsize)
{
    errval_t err;

    /* Generic part */
    ret->a.alloc = two_level_alloc;
    ret->a.free  = two_level_free;
    ret->a.space = L2_CNODE_SLOTS;
    ret->a.nslots = L2_CNODE_SLOTS;
    thread_mutex_init(&ret->a.mutex);

    // Top level is unused in the two-level allocator

    ret->head->next = NULL;
    ret->reserve->next = NULL;

    /* Head */
    err = single_slot_alloc_init_raw(&ret->head->a, initial_cap,
                                     initial_cnode, L2_CNODE_SLOTS, head_buf,
                                     bufsize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT);
    }

    /* Reserve */
    err = single_slot_alloc_init_raw(&ret->reserve->a, reserve_cap,
                                     reserve_cnode, L2_CNODE_SLOTS,
                                     reserve_buf, bufsize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT);
    }

    /* Slab */
    size_t allocation_unit = sizeof(struct slot_allocator_list) +
                             SINGLE_SLOT_ALLOC_BUFLEN(L2_CNODE_SLOTS);
    slab_init(&ret->slab, allocation_unit, NULL);

    return SYS_ERR_OK;
}

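/*
 * Sketch of a caller-managed setup (illustrative; assumes the caller has
 * already created two L2 CNodes, e.g. via cnode_create_l2(), and provides
 * static storage for the list nodes and backing buffers):
 *
 *   static struct multi_slot_allocator msa;
 *   static struct slot_allocator_list head, reserve;
 *   static char head_buf[SINGLE_SLOT_ALLOC_BUFLEN(L2_CNODE_SLOTS)];
 *   static char reserve_buf[SINGLE_SLOT_ALLOC_BUFLEN(L2_CNODE_SLOTS)];
 *
 *   msa.head = &head;
 *   msa.reserve = &reserve;
 *   errval_t err = two_level_slot_alloc_init_raw(&msa, head_cap, head_cnode,
 *                                                reserve_cap, reserve_cnode,
 *                                                head_buf, reserve_buf,
 *                                                sizeof(head_buf));
 */
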
/**
 * \brief Initializer that allocates its own backing memory
 *
 * Allocates the list nodes and backing buffers with malloc(), creates two
 * fresh L2 CNodes, and hands everything to #two_level_slot_alloc_init_raw.
 */
errval_t two_level_slot_alloc_init(struct multi_slot_allocator *ret)
{
    errval_t err;
    size_t bufsize = SINGLE_SLOT_ALLOC_BUFLEN(L2_CNODE_SLOTS); // XXX?

    ret->head = malloc(sizeof(struct slot_allocator_list));
    if (!ret->head) {
        return LIB_ERR_MALLOC_FAIL;
    }
    ret->head->next = NULL;
    void *head_buf = malloc(bufsize);
    if (!head_buf) {
        free(ret->head);
        return LIB_ERR_MALLOC_FAIL;
    }

    ret->reserve = malloc(sizeof(struct slot_allocator_list));
    if (!ret->reserve) {
        free(ret->head);
        free(head_buf);
        return LIB_ERR_MALLOC_FAIL;
    }
    void *reserve_buf = malloc(bufsize);
    if (!reserve_buf) {
        free(ret->head);
        free(head_buf);
        free(ret->reserve);
        return LIB_ERR_MALLOC_FAIL;
    }

    size_t allocation_unit = sizeof(struct slot_allocator_list) + bufsize;
    err = vspace_mmu_aware_init(&ret->mmu_state,
                                allocation_unit * L2_CNODE_SLOTS * 2);
    if (err_is_fail(err)) {
        free(ret->head);
        free(head_buf);
        free(ret->reserve);
        free(reserve_buf);
        return err_push(err, LIB_ERR_VSPACE_MMU_AWARE_INIT);
    }

    struct capref initial_cap, reserve_cap;
    struct cnoderef initial_cnode, reserve_cnode;
    err = cnode_create_l2(&initial_cap, &initial_cnode);
    if (err_is_fail(err)) {
        free(ret->head);
        free(head_buf);
        free(ret->reserve);
        free(reserve_buf);
        vregion_destroy(&ret->mmu_state.vregion);
        return err_push(err, LIB_ERR_CNODE_CREATE);
    }
    err = cnode_create_l2(&reserve_cap, &reserve_cnode);
    if (err_is_fail(err)) {
        free(ret->head);
        free(head_buf);
        free(ret->reserve);
        free(reserve_buf);
        vregion_destroy(&ret->mmu_state.vregion);
        cap_destroy(initial_cap);
        return err_push(err, LIB_ERR_CNODE_CREATE);
    }
    err = two_level_slot_alloc_init_raw(ret, initial_cap, initial_cnode,
                                        reserve_cap, reserve_cnode, head_buf,
                                        reserve_buf, bufsize);
    return err;
}

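/*
 * Minimal end-to-end sketch (illustrative): create a self-contained
 * two-level allocator and pull a slot out of it.
 *
 *   static struct multi_slot_allocator msa;
 *   errval_t err = two_level_slot_alloc_init(&msa);
 *   if (err_is_fail(err)) {
 *       USER_PANIC_ERR(err, "two_level_slot_alloc_init failed");
 *   }
 *   struct capref slot;
 *   err = msa.a.alloc(&msa.a, &slot);
 */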