Searched refs:slabs (Results 1 - 11 of 11) sorted by relevance

/barrelfish-2018-10-04/include/barrelfish/
slab.h
26 typedef errval_t (*slab_refill_func_t)(struct slab_allocator *slabs);
37     struct slab_head *slabs;   ///< Pointer to list of slabs   [member of struct slab_allocator]
42 void slab_init(struct slab_allocator *slabs, size_t blocksize,
44 void slab_grow(struct slab_allocator *slabs, void *buf, size_t buflen);
45 void *slab_alloc(struct slab_allocator *slabs);
46 void slab_free(struct slab_allocator *slabs, void *block);
47 size_t slab_freecount(struct slab_allocator *slabs);
48 errval_t slab_default_refill(struct slab_allocator *slabs);
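
These declarations are the whole public interface of the Barrelfish slab allocator: a fixed block size chosen at slab_init() time, memory donated with slab_grow(), and blocks handed out and returned with slab_alloc()/slab_free(). A minimal usage sketch follows; the node type, buffer size, and the choice of a NULL refill callback (slab_default_refill could be passed instead) are illustrative assumptions, not taken from the tree.

    #include <barrelfish/slab.h>

    struct mynode {                          /* illustrative fixed-size object */
        struct mynode *next;
        int payload;
    };

    static struct slab_allocator node_slabs;
    static char node_buf[4096];              /* backing memory donated to the allocator */

    static void slab_usage_sketch(void)
    {
        /* The block size is rounded up internally (SLAB_REAL_BLOCKSIZE); a NULL
         * refill callback means slab_alloc() simply returns NULL once the pool
         * is empty. */
        slab_init(&node_slabs, sizeof(struct mynode), NULL);

        /* Donate a contiguous region for the allocator to carve into blocks. */
        slab_grow(&node_slabs, node_buf, sizeof(node_buf));

        struct mynode *n = slab_alloc(&node_slabs);
        if (n != NULL) {
            n->payload = 42;
            slab_free(&node_slabs, n);       /* block goes back on the free list */
        }
    }

slab_freecount() reports how many blocks remain, which is what the refill code in the mem_serv hits further down keys off.
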
/barrelfish-2018-10-04/lib/barrelfish/
slab.c
6  * size from a pool of contiguous memory regions ("slabs").
31 * \param slabs Pointer to slab allocator instance, to be filled-in
35 void slab_init(struct slab_allocator *slabs, size_t blocksize,   [argument]
38 slabs->slabs = NULL;
39 slabs->blocksize = SLAB_REAL_BLOCKSIZE(blocksize);
40 slabs->refill_func = refill_func;
47 * \param slabs Pointer to slab allocator instance
51 void slab_grow(struct slab_allocator *slabs, void *buf, size_t buflen)   [argument]
60 size_t blocksize = slabs
86 slab_alloc(struct slab_allocator *slabs)   [argument]
125 slab_free(struct slab_allocator *slabs, void *block)   [argument]
160 slab_freecount(struct slab_allocator *slabs)   [argument]
179 slab_refill_pages(struct slab_allocator *slabs, size_t bytes)   [argument]
207 slab_default_refill(struct slab_allocator *slabs)   [argument]
[all...]
threads.c
61 // XXX: mutex and spinlock protecting thread slabs in spanned domains
205 static errval_t refill_thread_slabs(struct slab_allocator *slabs)   [argument]
207 assert(slabs == &thread_slabs);
219 slab_grow(slabs, buf, size);
1252 "thread slabs\n");
/barrelfish-2018-10-04/usr/mem_serv_dist/
mem_serv.h
100 errval_t slab_refill(struct slab_allocator *slabs);
mem_serv.c
105 errval_t slab_refill(struct slab_allocator *slabs)   [argument]
110 while (slab_freecount(slabs) <= MINSPARENODES) {
111 // debug_printf("running low on free slabs: slabs=%ld\n",
112 // slab_freecount(&mm_percore.slabs));
130 slab_grow(slabs, buf, BASE_PAGE_SIZE * 8);
310 err = slab_refill(&mm_percore.slabs);
315 err = slab_refill(&mm_local.slabs);
480 slab_grow(&mm->slabs, nodebuf, nodebuf_size);
596 if (slab_freecount(&mm_percore.slabs) <
[all...]
steal.c
281 err = slab_refill(&mm_percore.slabs);
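
The mem_serv_dist slab_refill() above shows the reserve-keeping pattern: as long as slab_freecount() is at or below a spare-node threshold, allocate another chunk of memory (8 base pages at a time in mem_serv.c) and slab_grow() it into the allocator. A condensed sketch of that loop; the MINSPARENODES value and the frame_alloc_and_map() helper are assumptions standing in for the capability allocation and mapping done in the real code:

    #include <stdlib.h>
    #include <barrelfish/barrelfish.h>
    #include <barrelfish/slab.h>

    #define MINSPARENODES 10                 /* assumed reserve threshold */

    /* Hypothetical helper: mem_serv.c allocates and maps frame capabilities;
     * plain malloc() stands in here only to keep the sketch self-contained. */
    static errval_t frame_alloc_and_map(size_t bytes, void **buf)
    {
        *buf = malloc(bytes);
        return (*buf != NULL) ? SYS_ERR_OK : LIB_ERR_MALLOC_FAIL;
    }

    static errval_t slab_refill_sketch(struct slab_allocator *slabs)
    {
        /* Top the allocator up until the spare-node reserve is restored. */
        while (slab_freecount(slabs) <= MINSPARENODES) {
            void *buf;
            errval_t err = frame_alloc_and_map(BASE_PAGE_SIZE * 8, &buf);
            if (err_is_fail(err)) {
                return err;
            }
            slab_grow(slabs, buf, BASE_PAGE_SIZE * 8);
        }
        return SYS_ERR_OK;
    }

The single-node usr/mem_serv (next section) keeps the same reserve check but falls back on the library-provided slab_default_refill() once it runs low.
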
/barrelfish-2018-10-04/usr/mem_serv/
mem_serv.c
264 while (slab_freecount(&mm_ram.slabs) <= MINSPARENODES) {
276 slab_grow(&mm_ram.slabs, buf, BASE_PAGE_SIZE * 8);
417 slab_grow(&mm_ram.slabs, nodebuf, sizeof(nodebuf));
455 if (slab_freecount(&mm_ram.slabs) <= MINSPARENODES
458 slab_default_refill(&mm_ram.slabs); // may fail
/barrelfish-2018-10-04/include/mm/
mm.h
52     struct slab_allocator slabs;   ///< Slab allocator used for allocating nodes   [member of struct mm]
/barrelfish-2018-10-04/usr/init/
mem_alloc.c
123 slab_grow(&mymm.slabs, nodebuf, sizeof(nodebuf));
/barrelfish-2018-10-04/lib/mm/
mm.c
75 struct mmnode *node = slab_alloc(&mm->slabs);
518 slab_init(&mm->slabs, MM_NODE_SIZE(maxchildbits), slab_refill_func);
/barrelfish-2018-10-04/include/vm/
uma_core.c
484  * This is done if the number of slabs is larger than the hash size.
1271 * may end up going to the VM for slabs which we do not
2303 * Find a slab with some space. Prefer slabs that are partially
2426 * Search the available kegs for slabs. Be careful to hold the
3135 int slabs;   [local]
3143 slabs = items / keg->uk_ipers;
3144 if (slabs * keg->uk_ipers < items)
3145 slabs++;
3146 while (slabs > 0) {
3152 slabs
[all...]
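
The final hits come from the copy of FreeBSD's UMA allocator carried in the tree (uma_core.c), where "slabs" is a local variable rather than the Barrelfish slab API. The snippet at source lines 3143-3146 is a plain integer ceiling division: how many slabs are needed to hold a given number of items when each slab holds keg->uk_ipers of them. Restated standalone (names are illustrative, not FreeBSD's):

    /* Slabs needed to hold `items` items at `ipers` items per slab: divide,
     * then round up if a partial slab is left over (e.g. 100 items at 30 per
     * slab -> 4 slabs). */
    static int slabs_needed(int items, int ipers)
    {
        int slabs = items / ipers;
        if (slabs * ipers < items) {
            slabs++;
        }
        return slabs;
    }
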

Completed in 104 milliseconds