/**
 * \file
 * \brief Simple slab allocator.
 *
 * This file implements a simple slab allocator. It allocates blocks of a fixed
 * size from a pool of contiguous memory regions ("slabs").
 */
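/*
 * Typical usage (a minimal sketch; "struct my_obj", "my_backing" and the
 * buffer size below are illustrative placeholders chosen by the caller,
 * not part of this interface; in practice the backing buffer should be
 * suitably aligned for a pointer):
 *
 *   static struct slab_allocator my_slabs;
 *   static char my_backing[4096];  // must hold a struct slab_head plus blocks
 *
 *   slab_init(&my_slabs, sizeof(struct my_obj), NULL);
 *   slab_grow(&my_slabs, my_backing, sizeof(my_backing));
 *
 *   struct my_obj *obj = slab_alloc(&my_slabs);
 *   // ... use obj ...
 *   slab_free(&my_slabs, obj);
 */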

/*
 * Copyright (c) 2008, 2009, 2010, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#include <barrelfish/barrelfish.h>
#include <barrelfish/slab.h>
#include <barrelfish/static_assert.h>

struct block_head {
    struct block_head *next;    ///< Pointer to next block in free list
};

STATIC_ASSERT_SIZEOF(struct block_head, SLAB_BLOCK_HDRSIZE);

/**
 * \brief Initialise a new slab allocator
 *
 * \param slabs Pointer to slab allocator instance, to be filled in
 * \param blocksize Size of blocks to be allocated by this allocator
 * \param refill_func Pointer to function to call when out of memory (or NULL)
 */
void slab_init(struct slab_allocator *slabs, size_t blocksize,
               slab_refill_func_t refill_func)
{
    slabs->slabs = NULL;
    slabs->blocksize = SLAB_REAL_BLOCKSIZE(blocksize);
    slabs->refill_func = refill_func;
}


/**
 * \brief Add memory (a new slab) to a slab allocator
 *
 * \param slabs Pointer to slab allocator instance
 * \param buf Pointer to start of memory region
 * \param buflen Size of memory region (in bytes)
 */
void slab_grow(struct slab_allocator *slabs, void *buf, size_t buflen)
{
    /* set up slab_head structure at top of buffer */
    assert(buflen > sizeof(struct slab_head));
    struct slab_head *head = buf;
    buflen -= sizeof(struct slab_head);
    buf = (char *)buf + sizeof(struct slab_head);

    /* calculate number of blocks in buffer */
    size_t blocksize = slabs->blocksize;
    assert(buflen / blocksize <= UINT32_MAX);
    head->free = head->total = buflen / blocksize;
    assert(head->total > 0);

    /* enqueue blocks in freelist */
    struct block_head *bh = head->blocks = buf;
    for (uint32_t i = head->total; i > 1; i--) {
        buf = (char *)buf + blocksize;
        bh->next = buf;
        bh = buf;
    }
    bh->next = NULL;

    /* enqueue slab in list of slabs */
    head->next = slabs->slabs;
    slabs->slabs = head;
}
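/*
 * Sizing sketch: from the code above, a buffer passed to slab_grow() yields
 * (buflen - sizeof(struct slab_head)) / blocksize blocks, where blocksize is
 * the rounded-up value stored by slab_init(). So to provision for roughly
 * nblocks objects, a caller can size the region as follows ("nblocks" and
 * "buf" are illustrative placeholders; "slabs" is the allocator initialised
 * by slab_init()):
 *
 *   size_t buflen = sizeof(struct slab_head) + nblocks * slabs->blocksize;
 *   // allocate or map a region of at least buflen bytes, then:
 *   slab_grow(slabs, buf, buflen);
 */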

/**
 * \brief Allocate a new block from the slab allocator
 *
 * \param slabs Pointer to slab allocator instance
 *
 * \returns Pointer to block on success, NULL on error (out of memory)
 */
void *slab_alloc(struct slab_allocator *slabs)
{
    errval_t err;
    /* find a slab with free blocks */
    struct slab_head *sh;
    for (sh = slabs->slabs; sh != NULL && sh->free == 0; sh = sh->next);

    if (sh == NULL) {
        /* out of memory. try refill function if we have one */
        if (!slabs->refill_func) {
            return NULL;
        } else {
            err = slabs->refill_func(slabs);
            if (err_is_fail(err)) {
                DEBUG_ERR(err, "slab refill_func failed");
                return NULL;
            }
            for (sh = slabs->slabs; sh != NULL && sh->free == 0; sh = sh->next);
            if (sh == NULL) {
                return NULL;
            }
        }
    }

    /* dequeue top block from freelist */
    struct block_head *bh = sh->blocks;
    assert(bh != NULL);
    sh->blocks = bh->next;
    sh->free--;

    return bh;
}
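/*
 * A caller-supplied refill function (the slab_refill_func_t passed to
 * slab_init()) takes the allocator and returns an errval_t; it is expected
 * to hand new memory to slab_grow() before returning. A minimal sketch,
 * assuming the caller keeps a statically reserved region ("my_reserve" and
 * "my_refill" are illustrative placeholders, and the exact error code to
 * return on exhaustion is left to the caller):
 *
 *   static char my_reserve[4096];
 *   static bool my_reserve_used;
 *
 *   static errval_t my_refill(struct slab_allocator *slabs)
 *   {
 *       if (my_reserve_used) {
 *           return LIB_ERR_SLAB_REFILL;  // placeholder: any suitable error
 *       }
 *       my_reserve_used = true;
 *       slab_grow(slabs, my_reserve, sizeof(my_reserve));
 *       return SYS_ERR_OK;
 *   }
 */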

/**
 * \brief Free a block to the slab allocator
 *
 * \param slabs Pointer to slab allocator instance
 * \param block Pointer to block previously returned by #slab_alloc
 */
void slab_free(struct slab_allocator *slabs, void *block)
{
    if (block == NULL) {
        return;
    }

    struct block_head *bh = (struct block_head *)block;

    /* find matching slab */
    struct slab_head *sh;
    size_t blocksize = slabs->blocksize;
    for (sh = slabs->slabs; sh != NULL; sh = sh->next) {
        /* check if block falls inside this slab */
        uintptr_t slab_limit = (uintptr_t)sh + sizeof(struct slab_head)
                               + blocksize * sh->total;
        if ((uintptr_t)bh > (uintptr_t)sh && (uintptr_t)bh < slab_limit) {
            break;
        }
    }
    assert(sh != NULL);

    /* re-enqueue in slab's free list */
    bh->next = sh->blocks;
    sh->blocks = bh;
    sh->free++;
    assert(sh->free <= sh->total);
}

/**
 * \brief Returns the count of free blocks in the allocator
 *
 * \param slabs Pointer to slab allocator instance
 *
 * \returns Free block count
 */
size_t slab_freecount(struct slab_allocator *slabs)
{
    size_t ret = 0;

    for (struct slab_head *sh = slabs->slabs; sh != NULL; sh = sh->next) {
        ret += sh->free;
    }

    return ret;
}

/**
 * \brief General-purpose slab refill
 *
 * Allocates and maps a number of memory pages and adds them to the slab
 * allocator.
 *
 * \param slabs Pointer to slab allocator instance
 * \param bytes (Minimum) amount of memory to map
 */
static errval_t slab_refill_pages(struct slab_allocator *slabs, size_t bytes)
{
    errval_t err;
    struct capref frame_cap;

    err = frame_alloc(&frame_cap, bytes, &bytes);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_CREATE);
    }

    void *buf;
    err = vspace_map_one_frame(&buf, bytes, frame_cap, NULL, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    slab_grow(slabs, buf, bytes);
    return SYS_ERR_OK;
}

/**
 * \brief General-purpose implementation of a slab allocate/refill function
 *
 * Allocates and maps a single page (FIXME: make configurable) and adds it
 * to the allocator.
 *
 * \param slabs Pointer to slab allocator instance
 */
errval_t slab_default_refill(struct slab_allocator *slabs)
{
    return slab_refill_pages(slabs, BASE_PAGE_SIZE);
}
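
/*
 * Example (sketch): hooking up the default refill function so the allocator
 * grows itself on demand; "struct my_obj" is an illustrative placeholder.
 *
 *   struct slab_allocator my_slabs;
 *   slab_init(&my_slabs, sizeof(struct my_obj), slab_default_refill);
 *
 *   // No explicit slab_grow() is needed here: the first slab_alloc() call
 *   // finds no free block, invokes slab_default_refill(), which maps one
 *   // page and passes it to slab_grow(); slab_alloc() then retries the
 *   // search for a slab with a free block.
 *   struct my_obj *obj = slab_alloc(&my_slabs);
 */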