1/**
2 * \file
3 * \brief RAM allocator code (client-side)
4 */
5
6/*
7 * Copyright (c) 2007, 2008, 2009, 2010, 2011, ETH Zurich.
8 * All rights reserved.
9 *
10 * This file is distributed under the terms in the attached LICENSE file.
11 * If you do not find this file, copies can be found by writing to:
12 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
13 */
14
15#include <barrelfish/barrelfish.h>
16#include <barrelfish/core_state.h>
17
18#include <if/monitor_defs.h>
19#include <if/mem_defs.h>
20#ifdef ARRAKIS
21#include <if/hyper_defs.h>
22#endif
23
/**
 * \brief Remote (indirect through a channel) version of ram_alloc, used by
 *        most domains.
 *
 * Allocates a local cap slot, then performs an RPC to the memory server to
 * obtain a RAM cap of size 2^size_bits within [minbase, maxlimit].
 *
 * \param ret       Filled in with the location of the allocated cap
 * \param size_bits Amount of RAM to allocate, as a power of two
 * \param minbase   Minimum physical base address of the region
 * \param maxlimit  Maximum physical limit of the region
 *
 * \return SYS_ERR_OK on success; a slot-allocation, transport, or
 *         server-reported error otherwise.
 */
static errval_t ram_alloc_remote(struct capref *ret, uint8_t size_bits,
                                 uint64_t minbase, uint64_t maxlimit)
{
    struct ram_alloc_state *ram_alloc_state = get_ram_alloc_state();
    errval_t err, result;

    // XXX: the transport that ram_alloc uses will allocate slots,
    // which may cause slot_allocator to grow itself.
    // To grow itself, the slot_allocator needs to call ram_alloc.
    // However, ram_alloc has a mutex to protect the lower level transport code.
    // Therefore, we detect the situation when the slot_allocator
    // may grow itself and grow it before acquiring the lock.
    // Once this code becomes reentrant, this hack can be removed. -Akhi
    struct slot_alloc_state *sas = get_slot_alloc_state();
    struct slot_allocator *ca = (struct slot_allocator*)(&sas->defca);
    if (ca->space == 1) {
        // slot_alloc() might need to allocate memory: reset memory affinity to
        // the default value
        ram_set_affinity(0, 0);
        do {
                err = slot_alloc(ret);
                if (err_is_fail(err)) {
                    err = err_push(err, LIB_ERR_SLOT_ALLOC);
                    break;
                }
        } while (0);
        // restore the caller's requested affinity before reporting any error
        ram_set_affinity(minbase, maxlimit);
        if (err_is_fail(err)) {
            return err;
        }
    } else {
        err = slot_alloc(ret);
        if (err_is_fail(err)) {
            return err;
        }
    }

    assert(ret != NULL);

    // serialize access to the underlying transport (see hack above)
    thread_mutex_lock(&ram_alloc_state->ram_alloc_lock);

    struct mem_binding *b = get_mem_client();
    err = b->rpc_tx_vtbl.allocate(b, size_bits, minbase, maxlimit, &result, ret);

    thread_mutex_unlock(&ram_alloc_state->ram_alloc_lock);

    // Disabled debug path: under Arrakis this notified the hypervisor of the
    // new allocation so it could establish an EPT/NPT mapping.
#if 0
#ifdef ARRAKIS
    if (err_is_fail(err)) {
        return err;
    }
    struct frame_identity fi;
    errval_t err2 = frame_identify(*ret, &fi);
    if (err_is_fail(err2)){
    DEBUG_ERR(err2, "frame_identify in ram_alloc");
    }
    assert(err_is_ok(err2));
    debug_printf("[ARRADOMAIN] ram_alloc_remote: allocated %zu bytes at 0x%zx; notifing hypervisor\n",
            fi.bytes, fi.base);

    struct hyper_rpc_client *hc = get_hyper_rpc_client();
    err = hc->vtbl.npt_map(hc, *ret, &err2);
    assert(err_is_ok(err));
    if (err_is_fail(err2)) {
        DEBUG_ERR(err2, "while calling hypervisor for EPT mapping");
    }
    assert(err_is_ok(err2));
#endif
#endif

    // transport-level failure takes precedence over the server's reply
    if (err_is_fail(err)) {
        return err;
    }

    // result carries the server-side outcome of the allocation
    return result;
}
101
102
103void ram_set_affinity(uint64_t minbase, uint64_t maxlimit)
104{
105    struct ram_alloc_state *ram_alloc_state = get_ram_alloc_state();
106    ram_alloc_state->default_minbase = minbase;
107    ram_alloc_state->default_maxlimit = maxlimit;
108}
109
110void ram_get_affinity(uint64_t *minbase, uint64_t *maxlimit)
111{
112    struct ram_alloc_state *ram_alloc_state = get_ram_alloc_state();
113    *minbase  = ram_alloc_state->default_minbase;
114    *maxlimit = ram_alloc_state->default_maxlimit;
115}
116
117#define OBJSPERPAGE_CTE         (1 << (BASE_PAGE_BITS - OBJBITS_CTE))
118
119errval_t ram_alloc_fixed(struct capref *ret, uint8_t size_bits,
120                         uint64_t minbase, uint64_t maxlimit)
121{
122    struct ram_alloc_state *state = get_ram_alloc_state();
123
124    if (size_bits == BASE_PAGE_BITS) {
125        // XXX: Return error if check to see if out of slots
126        if (state->base_capnum >= L2_CNODE_SLOTS) {
127            debug_printf("%s: state->base_capnum = %d\n", __FUNCTION__,
128                    state->base_capnum);
129            return LIB_ERR_RAM_ALLOC_FIXED_EXHAUSTED;
130        }
131        assert(state->base_capnum < L2_CNODE_SLOTS);
132        ret->cnode = cnode_base;
133        ret->slot  = state->base_capnum++;
134#ifdef ARRAKIS
135        struct frame_identity fi;
136        errval_t err2 = frame_identify(*ret, &fi);
137        if (err_is_fail(err2)){
138            DEBUG_ERR(err2, "frame_identify in ram_alloc");
139        }
140        assert(err_is_ok(err2));
141        debug_printf("[ARRADOMAIN] ram_alloc_fixed: allocated %zu bytes at 0x%zx\n",
142                fi.bytes, fi.base);
143#endif
144        return SYS_ERR_OK;
145    } else {
146        return LIB_ERR_RAM_ALLOC_WRONG_SIZE;
147    }
148}
149
150#include <stdio.h>
151#include <string.h>
152
153/**
154 * \brief Allocates memory in the form of a RAM capability
155 *
156 * \param ret Pointer to capref struct, filled-in with allocated cap location
157 * \param size_bits Amount of RAM to allocate, as a power of two
158 *              slot used for the cap in #ret, if any
159 */
160errval_t ram_alloc(struct capref *ret, uint8_t size_bits)
161{
162    struct ram_alloc_state *ram_alloc_state = get_ram_alloc_state();
163    assert(ram_alloc_state->ram_alloc_func != NULL);
164    errval_t err = ram_alloc_state->
165        ram_alloc_func(ret, size_bits, ram_alloc_state->default_minbase,
166                       ram_alloc_state->default_maxlimit);
167#if 0
168    if(err_is_fail(err)) {
169      DEBUG_ERR(err, "failed to allocate 2^%" PRIu32 " Bytes of RAM",
170                size_bits);
171      printf("callstack: %p %p %p %p\n",
172	     __builtin_return_address(0),
173	     __builtin_return_address(1),
174	     __builtin_return_address(2),
175	     __builtin_return_address(3));
176    }
177#endif
178    return err;
179}
180
181errval_t ram_available(genpaddr_t *available, genpaddr_t *total)
182{
183    errval_t err;
184
185    struct mem_binding *mc = get_mem_client();
186
187    err = mc->rpc_tx_vtbl.available(mc, available, total);
188    if(err_is_fail(err)) {
189        USER_PANIC_ERR(err, "available");
190    }
191
192    return SYS_ERR_OK;
193}
194
195static void bind_continuation(void *st, errval_t err, struct mem_binding *b)
196{
197    struct ram_alloc_state *ram_alloc_state = st;
198    assert(st == get_ram_alloc_state());
199
200    if (err_is_ok(err)) {
201        mem_rpc_client_init(b);
202        set_mem_client(b);
203    }
204
205    ram_alloc_state->mem_connect_err = err;
206    ram_alloc_state->mem_connect_done = true;
207}
208
209static void get_mem_iref_reply(struct monitor_binding *mb, iref_t iref)
210{
211    struct ram_alloc_state *ram_alloc_state = get_ram_alloc_state();
212    assert(iref != 0);
213
214    errval_t err = mem_bind(iref, bind_continuation, ram_alloc_state,
215                            get_default_waitset(), IDC_BIND_FLAG_RPC_CAP_TRANSFER);
216    if (err_is_fail(err)) {
217        ram_alloc_state->mem_connect_err = err;
218        ram_alloc_state->mem_connect_done = true;
219    }
220}
221
222/**
223 * \brief Initialize the dispatcher specific state of ram_alloc
224 */
225void ram_alloc_init(void)
226{
227    /* Initialize the ram_alloc_state */
228    struct ram_alloc_state *ram_alloc_state = get_ram_alloc_state();
229    ram_alloc_state->mem_connect_done = false;
230    ram_alloc_state->mem_connect_err  = 0;
231    thread_mutex_init(&ram_alloc_state->ram_alloc_lock);
232    ram_alloc_state->ram_alloc_func   = NULL;
233    ram_alloc_state->default_minbase  = 0;
234    ram_alloc_state->default_maxlimit = 0;
235    ram_alloc_state->base_capnum      = 0;
236    ram_alloc_state->earlycn_capnum   = 0;
237}
238
239/**
240 * \brief Set ram_alloc to the default ram_alloc_remote or to a given function
241 *
242 * If local_allocator is NULL, it will be initialized to the default
243 * remote allocator.
244 */
245errval_t ram_alloc_set(ram_alloc_func_t local_allocator)
246{
247    errval_t err;
248    struct ram_alloc_state *ram_alloc_state = get_ram_alloc_state();
249
250    /* Special case */
251    if (local_allocator != NULL) {
252        ram_alloc_state->ram_alloc_func = local_allocator;
253        return SYS_ERR_OK;
254    }
255
256    /* fire off a request for the iref for mem_serv */
257    struct monitor_binding *mb = get_monitor_binding();
258    mb->rx_vtbl.get_mem_iref_reply = get_mem_iref_reply;
259    err = mb->tx_vtbl.get_mem_iref_request(mb, NOP_CONT);
260    if (err_is_fail(err)) {
261        return err_push(err, LIB_ERR_GET_MEM_IREF);
262    }
263
264    /* XXX: wait for connection to complete */
265    while (!ram_alloc_state->mem_connect_done) {
266        messages_wait_and_handle_next();
267    }
268
269    if (err_is_ok(ram_alloc_state->mem_connect_err)) {
270        ram_alloc_state->ram_alloc_func = ram_alloc_remote;
271    }
272    return ram_alloc_state->mem_connect_err;
273}
274