/**
 * \file
 * \brief RAM allocator code (client-side)
 */

/*
 * Copyright (c) 2007, 2008, 2009, 2010, 2011, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#include <barrelfish/barrelfish.h>
#include <barrelfish/core_state.h>

#include <if/monitor_defs.h>
#include <if/mem_defs.h>

/* remote (indirect through a channel) version of ram_alloc, for most domains */
static errval_t ram_alloc_remote(struct capref *ret, uint8_t size_bits,
                                 uint64_t minbase, uint64_t maxlimit)
{
    struct ram_alloc_state *ram_alloc_state = get_ram_alloc_state();
    errval_t err, result;

    // XXX: the transport that ram_alloc uses will allocate slots, which may
    // cause the slot_allocator to grow itself. To grow itself, the
    // slot_allocator needs to call ram_alloc. However, ram_alloc holds a mutex
    // to protect the lower-level transport code. Therefore, we detect the
    // situation in which the slot_allocator may grow itself and perform the
    // slot allocation before acquiring the lock, so that any growth happens
    // outside the mutex. Once this code becomes reentrant, this hack can be
    // removed. -Akhi
    struct slot_alloc_state *sas = get_slot_alloc_state();
    struct slot_allocator *ca = (struct slot_allocator*)(&sas->defca);
    if (ca->space == 1) {
        // slot_alloc() might need to allocate memory: reset memory affinity to
        // the default value
        ram_set_affinity(0, 0);
        do {
            err = slot_alloc(ret);
            if (err_is_fail(err)) {
                err = err_push(err, LIB_ERR_SLOT_ALLOC);
                break;
            }
        } while (0);
        ram_set_affinity(minbase, maxlimit);
        if (err_is_fail(err)) {
            return err;
        }
    } else {
        err = slot_alloc(ret);
        if (err_is_fail(err)) {
            return err;
        }
    }

    assert(ret != NULL);

    thread_mutex_lock(&ram_alloc_state->ram_alloc_lock);

    struct mem_binding *b = get_mem_client();
    err = b->rpc_tx_vtbl.allocate(b, size_bits, minbase, maxlimit, &result, ret);

    thread_mutex_unlock(&ram_alloc_state->ram_alloc_lock);

    if (err_is_fail(err)) {
        return err;
    }

    return result;
}

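/**
 * \brief Set the affinity (acceptable address range) for subsequent ram_alloc calls
 *
 * \param minbase  Minimum base address of acceptable RAM
 * \param maxlimit Maximum limit address of acceptable RAM
 *
 * Passing (0, 0) resets the affinity to the default (no constraint).
 */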
void ram_set_affinity(uint64_t minbase, uint64_t maxlimit)
{
    struct ram_alloc_state *ram_alloc_state = get_ram_alloc_state();
    ram_alloc_state->default_minbase = minbase;
    ram_alloc_state->default_maxlimit = maxlimit;
}

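/**
 * \brief Return the affinity range currently used by ram_alloc
 *
 * \param minbase  Filled in with the current minimum base address
 * \param maxlimit Filled in with the current maximum limit address
 */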
void ram_get_affinity(uint64_t *minbase, uint64_t *maxlimit)
{
    struct ram_alloc_state *ram_alloc_state = get_ram_alloc_state();
    *minbase  = ram_alloc_state->default_minbase;
    *maxlimit = ram_alloc_state->default_maxlimit;
}

#define OBJSPERPAGE_CTE         (1 << (BASE_PAGE_BITS - OBJBITS_CTE))

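/**
 * \brief Allocate a RAM cap slot from the fixed set in the base CNode
 *
 * Hands out the next unused slot in cnode_base. Only requests of exactly
 * BASE_PAGE_BITS can be served (minbase/maxlimit are ignored); returns
 * LIB_ERR_RAM_ALLOC_FIXED_EXHAUSTED once all L2_CNODE_SLOTS slots are used
 * and LIB_ERR_RAM_ALLOC_WRONG_SIZE for any other size.
 */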
errval_t ram_alloc_fixed(struct capref *ret, uint8_t size_bits,
                         uint64_t minbase, uint64_t maxlimit)
{
    struct ram_alloc_state *state = get_ram_alloc_state();

    if (size_bits == BASE_PAGE_BITS) {
        // XXX: check whether we have run out of slots and return an error if so
        if (state->base_capnum >= L2_CNODE_SLOTS) {
            debug_printf("%s: state->base_capnum = %d\n", __FUNCTION__,
                         state->base_capnum);
            return LIB_ERR_RAM_ALLOC_FIXED_EXHAUSTED;
        }
        assert(state->base_capnum < L2_CNODE_SLOTS);
        ret->cnode = cnode_base;
        ret->slot  = state->base_capnum++;
        return SYS_ERR_OK;
    } else {
        return LIB_ERR_RAM_ALLOC_WRONG_SIZE;
    }
}

#include <stdio.h>
#include <string.h>

/**
 * \brief Allocates memory in the form of a RAM capability
 *
 * \param ret Pointer to capref struct, filled in with the location of the
 *            allocated cap
 * \param size_bits Amount of RAM to allocate, as a power of two
 */
errval_t ram_alloc(struct capref *ret, uint8_t size_bits)
{
    struct ram_alloc_state *ram_alloc_state = get_ram_alloc_state();
    assert(ram_alloc_state->ram_alloc_func != NULL);
    errval_t err = ram_alloc_state->
        ram_alloc_func(ret, size_bits, ram_alloc_state->default_minbase,
                       ram_alloc_state->default_maxlimit);
#if 0
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to allocate 2^%" PRIu32 " Bytes of RAM",
                  size_bits);
        printf("callstack: %p %p %p %p\n",
               __builtin_return_address(0),
               __builtin_return_address(1),
               __builtin_return_address(2),
               __builtin_return_address(3));
    }
#endif
    return err;
}

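/*
 * Usage sketch (illustrative only, not part of the build): allocate one base
 * page of RAM through whichever allocator ram_alloc_set() installed, and
 * report a failure with the usual error machinery.
 *
 *   struct capref ram;
 *   errval_t err = ram_alloc(&ram, BASE_PAGE_BITS);
 *   if (err_is_fail(err)) {
 *       DEBUG_ERR(err, "ram_alloc of one base page failed");
 *   }
 */

/**
 * \brief Query the memory server for the amount of RAM it manages
 *
 * \param available Filled in with the amount of RAM currently available
 * \param total     Filled in with the total amount of RAM managed by the server
 *
 * Panics if the RPC to the memory server fails.
 */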
errval_t ram_available(genpaddr_t *available, genpaddr_t *total)
{
    errval_t err;

    struct mem_binding *mc = get_mem_client();

    err = mc->rpc_tx_vtbl.available(mc, available, total);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "available");
    }

    return SYS_ERR_OK;
}

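/* Bind continuation: called once the connection to the memory server is
 * established. On success, initialize the RPC client on the binding and
 * register it as this domain's mem client; in all cases, record the outcome
 * so that ram_alloc_set() can stop waiting. */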
static void bind_continuation(void *st, errval_t err, struct mem_binding *b)
{
    struct ram_alloc_state *ram_alloc_state = st;
    assert(st == get_ram_alloc_state());

    if (err_is_ok(err)) {
        mem_rpc_client_init(b);
        set_mem_client(b);
    }

    ram_alloc_state->mem_connect_err = err;
    ram_alloc_state->mem_connect_done = true;
}

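/* Handler for the monitor's get_mem_iref reply: start the bind to the memory
 * server. If the bind cannot even be initiated, record the error so that
 * ram_alloc_set() does not block forever. */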
static void get_mem_iref_reply(struct monitor_binding *mb, iref_t iref)
{
    struct ram_alloc_state *ram_alloc_state = get_ram_alloc_state();
    assert(iref != 0);

    errval_t err = mem_bind(iref, bind_continuation, ram_alloc_state,
                            get_default_waitset(), IDC_BIND_FLAG_RPC_CAP_TRANSFER);
    if (err_is_fail(err)) {
        ram_alloc_state->mem_connect_err = err;
        ram_alloc_state->mem_connect_done = true;
    }
}

/**
 * \brief Initialize the dispatcher-specific state of ram_alloc
 */
void ram_alloc_init(void)
{
    /* Initialize the ram_alloc_state */
    struct ram_alloc_state *ram_alloc_state = get_ram_alloc_state();
    ram_alloc_state->mem_connect_done = false;
    ram_alloc_state->mem_connect_err  = 0;
    thread_mutex_init(&ram_alloc_state->ram_alloc_lock);
    ram_alloc_state->ram_alloc_func   = NULL;
    ram_alloc_state->default_minbase  = 0;
    ram_alloc_state->default_maxlimit = 0;
    ram_alloc_state->base_capnum      = 0;
}

/**
 * \brief Set ram_alloc to use the given local allocator, or the default
 *        remote allocator
 *
 * If local_allocator is NULL, ram_alloc is bound to the memory server and
 * set to the default remote allocator (ram_alloc_remote).
 */
errval_t ram_alloc_set(ram_alloc_func_t local_allocator)
{
    errval_t err;
    struct ram_alloc_state *ram_alloc_state = get_ram_alloc_state();

    /* Special case: install the given local allocator directly */
    if (local_allocator != NULL) {
        ram_alloc_state->ram_alloc_func = local_allocator;
        return SYS_ERR_OK;
    }

    /* fire off a request for the iref for mem_serv */
    struct monitor_binding *mb = get_monitor_binding();
    mb->rx_vtbl.get_mem_iref_reply = get_mem_iref_reply;
    err = mb->tx_vtbl.get_mem_iref_request(mb, NOP_CONT);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_GET_MEM_IREF);
    }

    /* XXX: wait for connection to complete */
    while (!ram_alloc_state->mem_connect_done) {
        messages_wait_and_handle_next();
    }

    if (err_is_ok(ram_alloc_state->mem_connect_err)) {
        ram_alloc_state->ram_alloc_func = ram_alloc_remote;
    }
    return ram_alloc_state->mem_connect_err;
}
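/*
 * Usage sketch (illustrative only, not part of the build): a typical domain
 * installs the remote allocator during startup, while a domain that manages
 * its own RAM passes its own function. `my_early_ram_alloc` below is a
 * hypothetical function matching ram_alloc_func_t, not part of this library.
 *
 *   errval_t err = ram_alloc_set(NULL);        // bind to mem_serv, use ram_alloc_remote
 *   // ... or, for a self-sufficient domain:
 *   err = ram_alloc_set(my_early_ram_alloc);   // install a local allocator
 *   if (err_is_fail(err)) {
 *       USER_PANIC_ERR(err, "ram_alloc_set failed");
 *   }
 */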