/*
 * Copyright 2017, Data61
 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
 * ABN 41 687 119 230.
 *
 * This software may be distributed and modified according to the terms of
 * the BSD 2-Clause license. Note that NO WARRANTY is provided.
 * See "LICENSE_BSD2.txt" for details.
 *
 * @TAG(DATA61_BSD)
 */

#include <allocman/allocman.h>
#include <allocman/util.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <sel4/sel4.h>
#include <vka/capops.h>
#include <sel4utils/util.h>

static int _refill_watermark(allocman_t *alloc);

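/* An allocator's properties declare whether it may be re-entered: whether a
   call to alloc or free on a space is permitted while an alloc or free on
   that same space is already in progress. The depth counters passed to the
   predicates below track that recursion. */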
static inline int _can_alloc(struct allocman_properties properties, size_t alloc_depth, size_t free_depth)
{
    int in_alloc = alloc_depth > 0;
    int in_free = free_depth > 0;
    return (properties.alloc_can_alloc || !in_alloc) && (properties.free_can_alloc || !in_free);
}

static inline int _can_free(struct allocman_properties properties, size_t alloc_depth, size_t free_depth)
{
    int in_alloc = alloc_depth > 0;
    int in_free = free_depth > 0;
    return (properties.alloc_can_free || !in_alloc) && (properties.free_can_free || !in_free);
}

/* Signals that an operation is starting, and returns whether this is the
   root operation or a nested call */
static int _start_operation(allocman_t *alloc)
{
    int ret = !alloc->in_operation;
    alloc->in_operation = 1;
    return ret;
}

static inline void _end_operation(allocman_t *alloc, int root)
{
    alloc->in_operation = !root;
    /* Any time we end the root operation we need to make sure we have
       watermark resources */
    if (root) {
        _refill_watermark(alloc);
    }
}

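/* When a free arrives while the underlying allocator cannot safely run its
   free path (see _can_free), the item is queued here instead and drained
   later by _refill_watermark. If the queue is full the item is leaked. */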
static void allocman_mspace_queue_for_free(allocman_t *alloc, void *ptr, size_t bytes) {
    if (alloc->num_freed_mspace_chunks == alloc->desired_freed_mspace_chunks) {
        assert(!"Out of space to store freed objects. Leaking memory");
        return;
    }
    alloc->freed_mspace_chunks[alloc->num_freed_mspace_chunks] =
        (struct allocman_freed_mspace_chunk) {ptr, bytes};
    alloc->num_freed_mspace_chunks++;
}

static void allocman_cspace_queue_for_free(allocman_t *alloc, const cspacepath_t *path) {
    if (alloc->num_freed_slots == alloc->desired_freed_slots) {
        assert(!"Out of space to store freed objects. Leaking memory");
        return;
    }
    alloc->freed_slots[alloc->num_freed_slots] = *path;
    alloc->num_freed_slots++;
}

static void allocman_utspace_queue_for_free(allocman_t *alloc, seL4_Word cookie, size_t size_bits) {
    if (alloc->num_freed_utspace_chunks == alloc->desired_freed_utspace_chunks) {
        assert(!"Out of space to store freed objects. Leaking memory");
        return;
    }
    alloc->freed_utspace_chunks[alloc->num_freed_utspace_chunks] =
        (struct allocman_freed_utspace_chunk) {size_bits, cookie};
    alloc->num_freed_utspace_chunks++;
}

/* This macro prevents code duplication across the free functions. A macro is
 * used because the number of arguments taken by the underlying 'free' call
 * varies between spaces, which cannot easily be parameterized in a plain
 * function */
#define ALLOCMAN_FREE(alloc,space,...) do { \
    int root; \
    assert(alloc->have_##space); \
    if (!_can_free(alloc->space.properties, alloc->space##_alloc_depth, alloc->space##_free_depth)) { \
        allocman_##space##_queue_for_free(alloc, __VA_ARGS__); \
        return; \
    } \
    root = _start_operation(alloc); \
    alloc->space##_free_depth++; \
    alloc->space.free(alloc, alloc->space.space, __VA_ARGS__); \
    alloc->space##_free_depth--; \
    _end_operation(alloc, root); \
} while(0)
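
/* For illustration, ALLOCMAN_FREE(alloc, cspace, slot) expands to roughly:
 *
 *   int root;
 *   assert(alloc->have_cspace);
 *   if (!_can_free(alloc->cspace.properties, alloc->cspace_alloc_depth,
 *                  alloc->cspace_free_depth)) {
 *       allocman_cspace_queue_for_free(alloc, slot);
 *       return;
 *   }
 *   root = _start_operation(alloc);
 *   alloc->cspace_free_depth++;
 *   alloc->cspace.free(alloc, alloc->cspace.cspace, slot);
 *   alloc->cspace_free_depth--;
 *   _end_operation(alloc, root);
 */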

void allocman_cspace_free(allocman_t *alloc, const cspacepath_t *slot)
{
    ALLOCMAN_FREE(alloc, cspace, slot);
}

void allocman_mspace_free(allocman_t *alloc, void *ptr, size_t bytes)
{
    ALLOCMAN_FREE(alloc, mspace, ptr, bytes);
}

void allocman_utspace_free(allocman_t *alloc, seL4_Word cookie, size_t size_bits)
{
    ALLOCMAN_FREE(alloc, utspace, cookie, size_bits);
}

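/* The _try_watermark_* helpers attempt to service a request from the
   pre-allocated reserve (watermark) pools. Using them marks the allocator
   as having dipped into the watermark, so the pools are refilled when the
   current root operation ends. */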
static void *_try_watermark_mspace(allocman_t *alloc, size_t size, int *_error)
{
    size_t i;
    for (i = 0; i < alloc->num_mspace_chunks; i++) {
        if (alloc->mspace_chunk[i].size == size) {
            if (alloc->mspace_chunk_count[i] > 0) {
                void *ret = alloc->mspace_chunks[i][--alloc->mspace_chunk_count[i]];
                SET_ERROR(_error, 0);
                alloc->used_watermark = 1;
                return ret;
            }
        }
    }
    SET_ERROR(_error, 1);
    return NULL;
}

static int _try_watermark_cspace(allocman_t *alloc, cspacepath_t *slot)
{
    if (alloc->num_cspace_slots == 0) {
        return 1;
    }
    alloc->used_watermark = 1;
    *slot = alloc->cspace_slots[--alloc->num_cspace_slots];
    return 0;
}

static seL4_Word _try_watermark_utspace(allocman_t *alloc, size_t size_bits, seL4_Word type, const cspacepath_t *path, int *_error)
{
    size_t i;

    for (i = 0; i < alloc->num_utspace_chunks; i++) {
        if (alloc->utspace_chunk[i].size_bits == size_bits && alloc->utspace_chunk[i].type == type) {
            if (alloc->utspace_chunk_count[i] > 0) {
                struct allocman_utspace_allocation result = alloc->utspace_chunks[i][alloc->utspace_chunk_count[i] - 1];
                int error;
                /* Move the cap out of the watermark slot into the caller's
                   requested slot; the vacated watermark slot is handed back
                   to the cspace below. */
                error = vka_cnode_move(path, &result.slot);
                if (error != seL4_NoError) {
                    SET_ERROR(_error, 1);
                    return 0;
                }
                alloc->used_watermark = 1;
                alloc->utspace_chunk_count[i]--;
                allocman_cspace_free(alloc, &result.slot);
                SET_ERROR(_error, 0);
                return result.cookie;
            }
        }
    }
    SET_ERROR(_error, 1);
    return 0;
}

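/* The three _allocman_*_alloc functions below share the same pattern: if the
   installed allocator cannot be entered recursively, or its allocation
   fails, fall back to the watermark pool (when use_watermark permits). Note
   that the utspace case only falls back when no specific physical address
   was requested, as watermark objects were not reserved at any particular
   paddr. */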
static void *_allocman_mspace_alloc(allocman_t *alloc, size_t size, int *_error, int use_watermark)
{
    int root_op;
    void *ret;
    int error;
    /* see if we have an allocator installed yet */
    if (!alloc->have_mspace) {
        SET_ERROR(_error, 1);
        return NULL;
    }
    /* Check that we are permitted to mspace_alloc here */
    if (!_can_alloc(alloc->mspace.properties, alloc->mspace_alloc_depth, alloc->mspace_free_depth)) {
        if (use_watermark) {
            ret = _try_watermark_mspace(alloc, size, _error);
            if (!ret) {
                ZF_LOGI("Failed to fulfill recursive allocation from watermark, size %zu\n", size);
            }
            return ret;
        } else {
            SET_ERROR(_error, 1);
            return NULL;
        }
    }
    root_op = _start_operation(alloc);
    /* Attempt the allocation */
    alloc->mspace_alloc_depth++;
    ret = alloc->mspace.alloc(alloc, alloc->mspace.mspace, size, &error);
    alloc->mspace_alloc_depth--;
    if (!error) {
        _end_operation(alloc, root_op);
        SET_ERROR(_error, 0);
        return ret;
    }
    /* The allocation failed. Try the watermark pool instead; whatever the
       outcome, propagate it back up. */
    if (use_watermark) {
        ret = _try_watermark_mspace(alloc, size, _error);
        if (!ret) {
            ZF_LOGI("Regular mspace alloc failed, and watermark also failed, for size %zu\n", size);
        }
        _end_operation(alloc, root_op);
        return ret;
    } else {
        _end_operation(alloc, root_op);
        SET_ERROR(_error, 1);
        return NULL;
    }
}

static int _allocman_cspace_alloc(allocman_t *alloc, cspacepath_t *slot, int use_watermark)
{
    int root_op;
    int error;
    /* see if we have an allocator installed yet */
    if (!alloc->have_cspace) {
        return 1;
    }
    /* Check that we are permitted to cspace_alloc here */
    if (!_can_alloc(alloc->cspace.properties, alloc->cspace_alloc_depth, alloc->cspace_free_depth)) {
        if (use_watermark) {
            int ret = _try_watermark_cspace(alloc, slot);
            if (ret) {
                ZF_LOGI("Failed to allocate cslot from watermark\n");
            }
            return ret;
        } else {
            return 1;
        }
    }
    root_op = _start_operation(alloc);
    /* Attempt the allocation */
    alloc->cspace_alloc_depth++;
    error = alloc->cspace.alloc(alloc, alloc->cspace.cspace, slot);
    alloc->cspace_alloc_depth--;
    if (!error) {
        _end_operation(alloc, root_op);
        return 0;
    }
    /* The allocation failed. Try the watermark pool instead; whatever the
       outcome, propagate it back up. */
    if (use_watermark) {
        error = _try_watermark_cspace(alloc, slot);
        if (error) {
            ZF_LOGI("Regular cspace alloc failed, and watermark also failed\n");
        }
        _end_operation(alloc, root_op);
        return error;
    } else {
        _end_operation(alloc, root_op);
        return 1;
    }
}

static seL4_Word _allocman_utspace_alloc(allocman_t *alloc, size_t size_bits, seL4_Word type, const cspacepath_t *path, uintptr_t paddr, bool canBeDev, int *_error, int use_watermark)
{
    int root_op;
    int error;
    seL4_Word ret;
    /* see if we have an allocator installed yet */
    if (!alloc->have_utspace) {
        SET_ERROR(_error, 1);
        return 0;
    }
    /* Check that we are permitted to utspace_alloc here */
    if (!_can_alloc(alloc->utspace.properties, alloc->utspace_alloc_depth, alloc->utspace_free_depth)) {
        if (use_watermark && paddr == ALLOCMAN_NO_PADDR) {
            ret = _try_watermark_utspace(alloc, size_bits, type, path, _error);
            if (ret == 0) {
                ZF_LOGI("Failed to allocate utspace from watermark. size %zu type %ld\n", size_bits, (long)type);
            }
            return ret;
        } else {
            SET_ERROR(_error, 1);
            return 0;
        }
    }
    root_op = _start_operation(alloc);
    /* Attempt the allocation */
    alloc->utspace_alloc_depth++;
    ret = alloc->utspace.alloc(alloc, alloc->utspace.utspace, size_bits, type, path, paddr, canBeDev, &error);
    alloc->utspace_alloc_depth--;
    if (!error) {
        _end_operation(alloc, root_op);
        SET_ERROR(_error, error);
        return ret;
    }
    /* The allocation failed. Try the watermark pool instead; whatever the
       outcome, propagate it back up. */
    if (use_watermark && paddr == ALLOCMAN_NO_PADDR) {
        ret = _try_watermark_utspace(alloc, size_bits, type, path, _error);
        _end_operation(alloc, root_op);
        if (ret == 0) {
            ZF_LOGI("Regular utspace alloc failed, and watermark also failed, for size %zu type %ld\n", size_bits, (long)type);
        }
        return ret;
    } else {
        _end_operation(alloc, root_op);
        SET_ERROR(_error, 1);
        return 0;
    }
}

void *allocman_mspace_alloc(allocman_t *alloc, size_t size, int *_error)
{
    return _allocman_mspace_alloc(alloc, size, _error, 1);
}

int allocman_cspace_alloc(allocman_t *alloc, cspacepath_t *slot)
{
    return _allocman_cspace_alloc(alloc, slot, 1);
}

seL4_Word allocman_utspace_alloc_at(allocman_t *alloc, size_t size_bits, seL4_Word type, const cspacepath_t *path, uintptr_t paddr, bool canBeDev, int *_error)
{
    return _allocman_utspace_alloc(alloc, size_bits, type, path, paddr, canBeDev, _error, 1);
}
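
/* Illustrative usage sketch only (not part of this file's API). Assumes a
 * fully bootstrapped allocman with a cspace and utspace attached; the object
 * type constant is the libvka one and is just an example:
 *
 *   int error;
 *   cspacepath_t slot;
 *   error = allocman_cspace_alloc(alloc, &slot);
 *   if (!error) {
 *       seL4_Word cookie = allocman_utspace_alloc_at(alloc, seL4_PageBits,
 *               seL4_ARCH_4KPage, &slot, ALLOCMAN_NO_PADDR, false, &error);
 *       if (!error) {
 *           // ... use the object, then release it (cap deletion elided) ...
 *           allocman_utspace_free(alloc, cookie, seL4_PageBits);
 *       }
 *       allocman_cspace_free(alloc, &slot);
 *   }
 */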

static int _refill_watermark(allocman_t *alloc)
{
    int found_empty_pool;
    int did_allocation;
    size_t i;
    if (alloc->refilling_watermark || !alloc->used_watermark) {
        return 0;
    }
    alloc->refilling_watermark = 1;

    /* Run in a loop refilling our resources. We need a loop as refilling
       one resource may require another watermark resource to be used. It is
       up to the allocators to prove that this process results in a
       consistent increase in the watermark pool, and hence will terminate.
       We need to be very careful with re-entry in this loop, as our
       watermark resources may change any time we perform an allocation. We
       try to allocate evenly across all the resource types since typically
       we are only refilling a single object from each resource anyway, so
       the performance will be the same; if we are not, we are bootstrapping,
       and it is not clear that all allocation orders are equivalent in that
       case. */
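    /* Cap the number of refill passes as a backstop; note the loop also
       requires forward progress (did_allocation) on each pass to continue. */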
    int limit = 0;
    do {
        found_empty_pool = 0;
        did_allocation = 0;
        while (alloc->num_freed_slots > 0) {
            cspacepath_t slot = alloc->freed_slots[--alloc->num_freed_slots];
            allocman_cspace_free(alloc, &slot);
            /* a free is like an allocation in that we have made some progress */
            did_allocation = 1;
        }
        while (alloc->num_freed_mspace_chunks > 0) {
            struct allocman_freed_mspace_chunk chunk = alloc->freed_mspace_chunks[--alloc->num_freed_mspace_chunks];
            allocman_mspace_free(alloc, chunk.ptr, chunk.size);
            did_allocation = 1;
        }
        while (alloc->num_freed_utspace_chunks > 0) {
            struct allocman_freed_utspace_chunk chunk = alloc->freed_utspace_chunks[--alloc->num_freed_utspace_chunks];
            allocman_utspace_free(alloc, chunk.cookie, chunk.size_bits);
            did_allocation = 1;
        }
        if (alloc->num_cspace_slots < alloc->desired_cspace_slots) {
            int error;
            found_empty_pool = 1;
            cspacepath_t slot;
            error = _allocman_cspace_alloc(alloc, &slot, 0);
            if (!error) {
                alloc->cspace_slots[alloc->num_cspace_slots++] = slot;
                did_allocation = 1;
            }
        }
        for (i = 0; i < alloc->num_utspace_chunks; i++) {
            if (alloc->utspace_chunk_count[i] < alloc->utspace_chunk[i].count) {
                cspacepath_t slot;
                seL4_Word cookie;
                int error;
                /* First grab a slot */
                found_empty_pool = 1;
                error = allocman_cspace_alloc(alloc, &slot);
                if (!error) {
                    /* Now try to allocate */
                    cookie = _allocman_utspace_alloc(alloc, alloc->utspace_chunk[i].size_bits, alloc->utspace_chunk[i].type, &slot, ALLOCMAN_NO_PADDR, false, &error, 0);
                    if (!error) {
                        alloc->utspace_chunks[i][alloc->utspace_chunk_count[i]].cookie = cookie;
                        alloc->utspace_chunks[i][alloc->utspace_chunk_count[i]].slot = slot;
                        alloc->utspace_chunk_count[i]++;
                        did_allocation = 1;
                    } else {
                        /* Give the slot back */
                        allocman_cspace_free(alloc, &slot);
                    }
                }
            }
        }
        for (i = 0; i < alloc->num_mspace_chunks; i++) {
            if (alloc->mspace_chunk_count[i] < alloc->mspace_chunk[i].count) {
                void *result;
                int error;
                found_empty_pool = 1;
                result = _allocman_mspace_alloc(alloc, alloc->mspace_chunk[i].size, &error, 0);
                if (!error) {
                    alloc->mspace_chunks[i][alloc->mspace_chunk_count[i]++] = result;
                    did_allocation = 1;
                }
            }
        }
        limit++;
    } while (found_empty_pool && did_allocation && limit < 4);

    alloc->refilling_watermark = 0;
    if (!found_empty_pool) {
        alloc->used_watermark = 0;
    }
    return found_empty_pool;
}

int allocman_create(allocman_t *alloc, struct mspace_interface mspace) {
    /* zero out the struct */
    memset(alloc, 0, sizeof(allocman_t));

    alloc->mspace = mspace;
    alloc->have_mspace = 1;

    return 0;
}

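/* Typical bootstrap order: allocman_create() with an mspace, then
   allocman_attach_cspace()/allocman_attach_utspace(), then the
   allocman_configure_* calls to set up reserves, then
   allocman_fill_reserves(). allocman_fill_reserves() returns nonzero if it
   cannot guarantee that the reserves are completely full. */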
int allocman_fill_reserves(allocman_t *alloc) {
    int not_full;
    int root = _start_operation(alloc);
    /* force the reserves to be checked */
    alloc->used_watermark = 1;
    /* attempt to fill */
    not_full = _refill_watermark(alloc);
    _end_operation(alloc, root);
    return not_full;
}

#define ALLOCMAN_ATTACH(alloc, space, interface) do { \
    int root = _start_operation(alloc); \
    assert(root); \
    if (alloc->have_##space) { \
        /* an allocator of this type has already been attached, bail */ \
        LOG_ERROR("Allocator of type " #space " is already attached"); \
        return 1; \
    } \
    alloc->space = interface; \
    alloc->have_##space = 1; \
    _end_operation(alloc, root); \
    return 0; \
} while(0)

int allocman_attach_utspace(allocman_t *alloc, struct utspace_interface utspace) {
    ALLOCMAN_ATTACH(alloc, utspace, utspace);
}

int allocman_attach_cspace(allocman_t *alloc, struct cspace_interface cspace) {
    ALLOCMAN_ATTACH(alloc, cspace, cspace);
}

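/* Grow one of the bookkeeping arrays to 'num' entries, preserving its
   existing contents. Shrinking below the number of items currently stored
   is rejected. */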
static int resize_array(allocman_t *alloc, size_t num, void **array, size_t *size, size_t *count, size_t item_size) {
    int root = _start_operation(alloc);
    void *new_array;
    int error;

    assert(root);

    /* shrinking below the current contents is not supported */
    if (num < (*count)) {
        _end_operation(alloc, root);
        return -1;
    }

    /* allocate the new array */
    new_array = allocman_mspace_alloc(alloc, item_size * num, &error);
    if (error) {
        _end_operation(alloc, root);
        return error;
    }

    /* copy any existing items and free the old array, but avoid using a null array */
    if ((*array)) {
        memcpy(new_array, (*array), item_size * (*count));
        allocman_mspace_free(alloc, (*array), item_size * (*size));
    }

    /* switch the new array in */
    (*array) = new_array;
    (*size) = num;

    alloc->used_watermark = 1;
    _end_operation(alloc, root);
    return error;
}

static int resize_slots_array(allocman_t *alloc, size_t num, cspacepath_t **slots, size_t *size, size_t *count) {
    return resize_array(alloc, num, (void**)slots, size, count, sizeof(cspacepath_t));
}

int allocman_configure_cspace_reserve(allocman_t *alloc, size_t num) {
    return resize_slots_array(alloc, num, &alloc->cspace_slots, &alloc->desired_cspace_slots, &alloc->num_cspace_slots);
}

int allocman_configure_max_freed_slots(allocman_t *alloc, size_t num) {
    return resize_slots_array(alloc, num, &alloc->freed_slots, &alloc->desired_freed_slots, &alloc->num_freed_slots);
}

int allocman_configure_max_freed_memory_chunks(allocman_t *alloc, size_t num) {
    return resize_array(alloc, num, (void**)&alloc->freed_mspace_chunks, &alloc->desired_freed_mspace_chunks, &alloc->num_freed_mspace_chunks, sizeof(struct allocman_freed_mspace_chunk));
}

int allocman_configure_max_freed_untyped_chunks(allocman_t *alloc, size_t num) {
    return resize_array(alloc, num, (void**)&alloc->freed_utspace_chunks, &alloc->desired_freed_utspace_chunks, &alloc->num_freed_utspace_chunks, sizeof(struct allocman_freed_utspace_chunk));
}

int allocman_configure_utspace_reserve(allocman_t *alloc, struct allocman_utspace_chunk chunk) {
    int root = _start_operation(alloc);
    size_t i;
    struct allocman_utspace_chunk *new_chunk;
    size_t *new_counts;
    struct allocman_utspace_allocation **new_chunks;
    struct allocman_utspace_allocation *new_alloc;
    int error;
    /* ensure this chunk hasn't already been added. It would be nice to handle both
     * decreasing and increasing reservations, but there is no clear use case for it */
    for (i = 0; i < alloc->num_utspace_chunks; i++) {
        if (alloc->utspace_chunk[i].size_bits == chunk.size_bits && alloc->utspace_chunk[i].type == chunk.type) {
            return 1;
        }
    }
    /* tack this chunk on */
    new_chunk = allocman_mspace_alloc(alloc, sizeof(struct allocman_utspace_chunk) * (alloc->num_utspace_chunks + 1), &error);
    if (error) {
        return error;
    }
    new_counts = allocman_mspace_alloc(alloc, sizeof(size_t) * (alloc->num_utspace_chunks + 1), &error);
    if (error) {
        allocman_mspace_free(alloc, new_chunk, sizeof(struct allocman_utspace_chunk) * (alloc->num_utspace_chunks + 1));
        return error;
    }
    new_chunks = allocman_mspace_alloc(alloc, sizeof(struct allocman_utspace_allocation *) * (alloc->num_utspace_chunks + 1), &error);
    if (error) {
        allocman_mspace_free(alloc, new_chunk, sizeof(struct allocman_utspace_chunk) * (alloc->num_utspace_chunks + 1));
        allocman_mspace_free(alloc, new_counts, sizeof(size_t) * (alloc->num_utspace_chunks + 1));
        return error;
    }
    new_alloc = allocman_mspace_alloc(alloc, sizeof(struct allocman_utspace_allocation) * chunk.count, &error);
    if (error) {
        allocman_mspace_free(alloc, new_chunk, sizeof(struct allocman_utspace_chunk) * (alloc->num_utspace_chunks + 1));
        allocman_mspace_free(alloc, new_counts, sizeof(size_t) * (alloc->num_utspace_chunks + 1));
        allocman_mspace_free(alloc, new_chunks, sizeof(struct allocman_utspace_allocation *) * (alloc->num_utspace_chunks + 1));
        return error;
    }
    if (alloc->num_utspace_chunks > 0) {
        memcpy(new_chunk, alloc->utspace_chunk, sizeof(struct allocman_utspace_chunk) * alloc->num_utspace_chunks);
        memcpy(new_counts, alloc->utspace_chunk_count, sizeof(size_t) * alloc->num_utspace_chunks);
        memcpy(new_chunks, alloc->utspace_chunks, sizeof(struct allocman_utspace_allocation *) * alloc->num_utspace_chunks);
        allocman_mspace_free(alloc, alloc->utspace_chunk, sizeof(struct allocman_utspace_chunk) * alloc->num_utspace_chunks);
        allocman_mspace_free(alloc, alloc->utspace_chunk_count, sizeof(size_t) * alloc->num_utspace_chunks);
        allocman_mspace_free(alloc, alloc->utspace_chunks, sizeof(struct allocman_utspace_allocation *) * alloc->num_utspace_chunks);
    }
    new_chunk[alloc->num_utspace_chunks] = chunk;
    new_counts[alloc->num_utspace_chunks] = 0;
    new_chunks[alloc->num_utspace_chunks] = new_alloc;
    alloc->utspace_chunk = new_chunk;
    alloc->utspace_chunk_count = new_counts;
    alloc->utspace_chunks = new_chunks;
    alloc->num_utspace_chunks++;
    alloc->used_watermark = 1;
    _end_operation(alloc, root);
    return 0;
}

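/* As above, but for mspace chunk reserves. */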
int allocman_configure_mspace_reserve(allocman_t *alloc, struct allocman_mspace_chunk chunk) {
    int root = _start_operation(alloc);
    size_t i;
    struct allocman_mspace_chunk *new_chunk;
    size_t *new_counts;
    void ***new_chunks;
    void **new_alloc;
    int error;
    /* ensure this chunk hasn't already been added. It would be nice to handle both
     * decreasing and increasing reservations, but there is no clear use case for it */
    for (i = 0; i < alloc->num_mspace_chunks; i++) {
        if (alloc->mspace_chunk[i].size == chunk.size) {
            return 1;
        }
    }
    /* tack this chunk on */
    new_chunk = allocman_mspace_alloc(alloc, sizeof(struct allocman_mspace_chunk) * (alloc->num_mspace_chunks + 1), &error);
    if (error) {
        return error;
    }
    new_counts = allocman_mspace_alloc(alloc, sizeof(size_t) * (alloc->num_mspace_chunks + 1), &error);
    if (error) {
        allocman_mspace_free(alloc, new_chunk, sizeof(struct allocman_mspace_chunk) * (alloc->num_mspace_chunks + 1));
        return error;
    }
    new_chunks = allocman_mspace_alloc(alloc, sizeof(void **) * (alloc->num_mspace_chunks + 1), &error);
    if (error) {
        allocman_mspace_free(alloc, new_chunk, sizeof(struct allocman_mspace_chunk) * (alloc->num_mspace_chunks + 1));
        allocman_mspace_free(alloc, new_counts, sizeof(size_t) * (alloc->num_mspace_chunks + 1));
        return error;
    }
    new_alloc = allocman_mspace_alloc(alloc, sizeof(void *) * chunk.count, &error);
    if (error) {
        allocman_mspace_free(alloc, new_chunk, sizeof(struct allocman_mspace_chunk) * (alloc->num_mspace_chunks + 1));
        allocman_mspace_free(alloc, new_counts, sizeof(size_t) * (alloc->num_mspace_chunks + 1));
        allocman_mspace_free(alloc, new_chunks, sizeof(void **) * (alloc->num_mspace_chunks + 1));
        return error;
    }
    if (alloc->num_mspace_chunks > 0) {
        memcpy(new_chunk, alloc->mspace_chunk, sizeof(struct allocman_mspace_chunk) * alloc->num_mspace_chunks);
        memcpy(new_counts, alloc->mspace_chunk_count, sizeof(size_t) * alloc->num_mspace_chunks);
        memcpy(new_chunks, alloc->mspace_chunks, sizeof(void **) * alloc->num_mspace_chunks);
        allocman_mspace_free(alloc, alloc->mspace_chunk, sizeof(struct allocman_mspace_chunk) * alloc->num_mspace_chunks);
        allocman_mspace_free(alloc, alloc->mspace_chunk_count, sizeof(size_t) * alloc->num_mspace_chunks);
        allocman_mspace_free(alloc, alloc->mspace_chunks, sizeof(void **) * alloc->num_mspace_chunks);
    }
    new_chunk[alloc->num_mspace_chunks] = chunk;
    new_counts[alloc->num_mspace_chunks] = 0;
    new_chunks[alloc->num_mspace_chunks] = new_alloc;
    alloc->mspace_chunk = new_chunk;
    alloc->mspace_chunk_count = new_counts;
    alloc->mspace_chunks = new_chunks;
    alloc->num_mspace_chunks++;
    alloc->used_watermark = 1;
    _end_operation(alloc, root);
    return 0;
}

int allocman_add_untypeds_from_timer_objects(allocman_t *alloc, timer_objects_t *to) {
    int error = 0;
    for (size_t i = 0; i < to->nobjs; i++) {
        cspacepath_t path = allocman_cspace_make_path(alloc, to->objs[i].obj.cptr);
        error = allocman_utspace_add_uts(alloc, 1, &path, &to->objs[i].obj.size_bits,
                                         (uintptr_t *) &to->objs[i].region.base_addr,
                                         ALLOCMAN_UT_DEV);
        if (error) {
            ZF_LOGE("Failed to add ut to allocman");
            return error;
        }
    }
    return 0;
}