/**
 * \file
 * \brief Morecore implementation for malloc
 */

/*
 * Copyright (c) 2007, 2008, 2009, 2010, 2011, ETH Zurich.
 * Copyright (c) 2014, HP Labs.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */

#include <barrelfish/barrelfish.h>
#include <barrelfish/core_state.h>
#include <barrelfish/morecore.h>
#include <stdio.h>
#include <stdint.h>

/// Amount of virtual address space reserved for malloc, depending on 32/64-bit architecture
#if (UINTPTR_MAX == UINT64_MAX)
#       define HEAP_REGION (128UL * 1024UL * 1024 * 1024) /* 128GB */
#else
#       define HEAP_REGION (512UL * 1024 * 1024) /* 512MB */
#endif

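/*
 * Hooks used by the malloc implementation (defined elsewhere);
 * morecore_init() installs the morecore_alloc()/morecore_free()
 * functions below into them.
 */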
typedef void *(*morecore_alloc_func_t)(size_t bytes, size_t *retbytes);
extern morecore_alloc_func_t sys_morecore_alloc;

typedef void (*morecore_free_func_t)(void *base, size_t bytes);
extern morecore_free_func_t sys_morecore_free;

/**
 * \brief Allocate some memory for malloc to use
 *
 * This function keeps retrying with smaller and smaller frames until it
 * finds a set of frames that satisfies the request. retbytes can be smaller
 * than bytes if only a smaller memory region than requested could be
 * allocated.
 */
static void *morecore_alloc(size_t bytes, size_t *retbytes)
{
    errval_t err;
    struct morecore_state *state = get_morecore_state();

    struct ram_alloc_state *ram_alloc_state = get_ram_alloc_state();
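    /*
     * Once a full RAM allocator is available (i.e. not the early fixed
     * allocator), round the request up to whole large pages so the heap
     * grows in large-page-sized chunks.
     */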
    if (ram_alloc_state->ram_alloc_func != ram_alloc_fixed) {
        if (bytes < LARGE_PAGE_SIZE) {
            bytes = LARGE_PAGE_SIZE;
        }

        bytes = ROUND_UP(bytes, LARGE_PAGE_SIZE);
    }

    void *buf = NULL;
    size_t mapped = 0;
    size_t step = bytes;
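    /*
     * Map the requested amount in one or more chunks. `step` starts at the
     * full request and is halved whenever the frame allocator cannot satisfy
     * it; on success it is updated to the size actually mapped.
     */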
    while (mapped < bytes) {
        void *mid_buf = NULL;
        err = vspace_mmu_aware_map(&state->mmu_state, step, &mid_buf, &step);
        if (err_is_ok(err)) {
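            // Remember the start of the first mapped chunk; subsequent
            // chunks are mapped at consecutive addresses within the
            // vregion, so the returned region stays contiguous.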
            if (buf == NULL) {
                buf = mid_buf;
            }
            mapped += step;
        } else {
            /*
             * vspace_mmu_aware_map failed, probably because we asked for a
             * very large frame; try asking for a smaller one.
             */
            if (err_no(err) == LIB_ERR_FRAME_CREATE_MS_CONSTRAINTS) {
                if (step < BASE_PAGE_SIZE) {
                    // Return whatever we have allocated so far
                    break;
                }
                step /= 2;
                continue;
            } else {
                debug_err(__FILE__, __func__, __LINE__, err,
                          "vspace_mmu_aware_map fail");
                return NULL;
            }
        }
    }

    *retbytes = mapped;
    return buf;
}

static void morecore_free(void *base, size_t bytes)
{
    struct morecore_state *state = get_morecore_state();
    errval_t err = vspace_mmu_aware_unmap(&state->mmu_state,
                                          (lvaddr_t)base, bytes);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "vspace_mmu_aware_unmap");
    }
}

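/**
 * \brief Return the malloc free-list head stored in the morecore state
 *
 * Declared and defined here so that the malloc implementation can access
 * the `header_freep` field kept alongside the rest of the morecore state.
 */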
Header *get_malloc_freep(void);
Header *get_malloc_freep(void)
{
    return get_morecore_state()->header_freep;
}

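/**
 * \brief Initialise the morecore machinery for the given page size
 *
 * Sets up a vspace_mmu_aware region of HEAP_REGION bytes for the heap and
 * installs the allocation hooks used by malloc.
 */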
errval_t morecore_init(size_t pagesize)
{
    errval_t err;
    struct morecore_state *state = get_morecore_state();

    thread_mutex_init(&state->mutex);

    // set up mapping flags that match the requested page size
    vregion_flags_t morecore_flags = VREGION_FLAGS_READ_WRITE;
#if __x86_64__
    morecore_flags |= (pagesize == HUGE_PAGE_SIZE ? VREGION_FLAGS_HUGE : 0);
#endif
    morecore_flags |= (pagesize == LARGE_PAGE_SIZE ? VREGION_FLAGS_LARGE : 0);

    // Always align heap to a 4 gigabyte boundary
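    // (presumably so that large- and huge-page mappings within the heap
    // region stay naturally aligned)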
    const size_t heap_alignment = 4UL * 1024 * 1024 * 1024;
    err = vspace_mmu_aware_init_aligned(&state->mmu_state, NULL, HEAP_REGION,
                                        heap_alignment, morecore_flags);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MMU_AWARE_INIT);
    }

    /* overwrite alignment field in vspace_mmu_aware state */
    state->mmu_state.alignment = heap_alignment;

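    // Install our functions as the hooks through which malloc obtains and
    // releases heap memory.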
    sys_morecore_alloc = morecore_alloc;
    sys_morecore_free = morecore_free;

    return SYS_ERR_OK;
}

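/**
 * \brief Re-create the heap mapping from a single, larger frame
 *
 * Only does something if the heap is mapped with large or huge pages: the
 * currently mapped region is rounded up to the heap alignment, a frame of
 * that size is allocated, and vspace_mmu_aware_reset() rebuilds the mapping
 * from it.
 */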
errval_t morecore_reinit(void)
{
    errval_t err;
    struct morecore_state *state = get_morecore_state();
    if ((vregion_get_flags(&state->mmu_state.vregion)
            & (VREGION_FLAGS_HUGE|VREGION_FLAGS_LARGE)) == 0)
    {
        // No need to do anything if the heap is using base pages anyway
        return SYS_ERR_OK;
    }

    size_t mapoffset = state->mmu_state.mapoffset;
    size_t remapsize = ROUND_UP(mapoffset, state->mmu_state.alignment);
    if (remapsize <= mapoffset) {
        // don't need to do anything if we only recreate the exact same
        // mapping
        // XXX: do we need/want to recreate existing mappings with a larger
        // page size here? If so, what is the implication on early boot
        // domains that don't have access to mem_serv? -SG, 2015-04-30.
        return SYS_ERR_OK;
    }
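    // Allocate a frame covering the rounded-up region and let
    // vspace_mmu_aware_reset() replace the existing mappings with it.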
    struct capref frame;
    size_t retsize;
    err = frame_alloc(&frame, remapsize, &retsize);
    if (err_is_fail(err)) {
        return err;
    }
    return vspace_mmu_aware_reset(&state->mmu_state, frame, remapsize);
}