/**
 * \file
 * \brief Distributed (percore) memory server: code specific to hybrid version
 */

/*
 * Copyright (c) 2007-2011, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>

#include <inttypes.h>
#include <barrelfish/barrelfish.h>
#include <barrelfish/dispatch.h>
#include <trace/trace.h>
#include <trace_definitions/trace_defs.h>
#include <barrelfish/monitor_client.h>
#include <barrelfish/spawn_client.h>
#include <barrelfish/nameservice_client.h>
#include <thc/thc.h>

#include <dist/barrier.h>

#include <if/mem_defs.h>
#include <if/monitor_defs.h>

#include "mem_serv.h"
#include "steal.h"

/// State for a pending reply.
// Because we send only one kind of message to a client, and an RPC
// interface allows at most one outstanding message per binding, this is
// quite simple.
struct pending_reply {
    struct mem_binding *b;
    struct capref *acap, cap;
    memsize_t mem_avail, mem_total;
    errval_t err;
};

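/// Continuation run once an allocate response has been sent: deletes the
/// local copy of the cap (if any) that was transferred to the client and
/// frees the heap-allocated capref.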
static void allocate_response_done(void *arg)
{
    struct capref *cap = arg;

    if (!capref_is_null(*cap)) {
        errval_t err = cap_delete(*cap);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "cap_delete after send. This memory will leak.");
        }
    }

    free(cap);
}

// The various send retry functions

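// Each retry function resends a reply that previously failed with
// FLOUNDER_ERR_TX_BUSY. On success the pending state is freed; if the
// channel is still busy, the function re-registers itself on the default
// waitset; on any other error the failure is logged and the pending
// reply is dropped.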
static void retry_allocate_reply(void *arg)
{
    struct pending_reply *r = arg;
    assert(r != NULL);
    struct mem_binding *b = r->b;
    errval_t err;

    err = b->tx_vtbl.allocate_response(b, MKCONT(allocate_response_done, r->acap),
                                       r->err, *r->acap);
    if (err_is_ok(err)) {
        b->st = NULL;
        free(r);
    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        err = b->register_send(b, get_default_waitset(),
                               MKCONT(retry_allocate_reply, r));
    }

    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to reply to memory request");
        allocate_response_done(r->acap);
        free(r);
    }
}

static void retry_steal_reply(void *arg)
{
    struct pending_reply *r = arg;
    assert(r != NULL);
    struct mem_binding *b = r->b;
    errval_t err;

    err = b->tx_vtbl.steal_response(b, NOP_CONT, r->err, r->cap);
    if (err_is_ok(err)) {
        b->st = NULL;
        free(r);
    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        err = b->register_send(b, get_default_waitset(),
                               MKCONT(retry_steal_reply, r));
    }

    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to reply to steal request");
        free(r);
    }
}

static void retry_available_reply(void *arg)
{
    struct pending_reply *r = arg;
    assert(r != NULL);
    struct mem_binding *b = r->b;
    errval_t err;

    err = b->tx_vtbl.available_response(b, NOP_CONT, r->mem_avail, r->mem_total);
    if (err_is_ok(err)) {
        b->st = NULL;
        free(r);
    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        err = b->register_send(b, get_default_waitset(),
                               MKCONT(retry_available_reply, r));
    }

    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to reply to mem_available request");
        free(r);
    }
}

static void retry_free_reply(void *arg)
{
    struct pending_reply *r = arg;
    assert(r != NULL);
    struct mem_binding *b = r->b;
    errval_t err;

    err = b->tx_vtbl.free_monitor_response(b, NOP_CONT, r->err);
    if (err_is_ok(err)) {
        b->st = NULL;
        free(r);
    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        err = b->register_send(b, get_default_waitset(),
                               MKCONT(retry_free_reply, r));
    }

    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to reply to free request");
        free(r);
    }
}

// The various request handler functions

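/// Handle a free request: return the given cap's memory to the local
/// allocator and reply with the result, queuing a retry if the channel
/// is busy.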
static void percore_free_handler(struct mem_binding *b,
                                 struct capref ramcap,
                                 genpaddr_t base, uint8_t bits)
{
    errval_t ret;

    /* printf("%d: percore_free_handler, base = %" PRIxGENPADDR ", bits = %u\n", */
    /*        disp_get_core_id(), base, bits); */

    ret = percore_free_handler_common(ramcap, base, bits);

    errval_t err;
    err = b->tx_vtbl.free_monitor_response(b, NOP_CONT, ret);
    if (err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            struct pending_reply *r = malloc(sizeof(struct pending_reply));
            assert(r != NULL);
            r->b = b;
            r->err = ret;
            err = b->register_send(b, get_default_waitset(),
                                   MKCONT(retry_free_reply, r));
            assert(err_is_ok(err));
        } else {
            DEBUG_ERR(err, "failed to reply to free request");
        }
    }
}

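/// Handle a mem_available request: reply with the amount of memory
/// currently available locally and the total managed by this server.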
static void mem_available_handler(struct mem_binding *b)
{
    memsize_t mem_available;
    mem_available = mem_available_handler_common();

    errval_t err;
    err = b->tx_vtbl.available_response(b, NOP_CONT, mem_available, mem_total);
    if (err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            struct pending_reply *r = malloc(sizeof(struct pending_reply));
            assert(r != NULL);
            r->b = b;
            r->mem_avail = mem_available;
            r->mem_total = mem_total;
            err = b->register_send(b, get_default_waitset(),
                                   MKCONT(retry_available_reply, r));
            assert(err_is_ok(err));
        } else {
            DEBUG_ERR(err, "failed to reply to mem_available request");
        }
    }
}

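/// Handle a steal request from another mem_serv: try to allocate the
/// requested amount of RAM locally and send the resulting cap back to
/// the requester.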
static void percore_steal_handler(struct mem_binding *b,
                                  uint8_t bits,
                                  genpaddr_t minbase, genpaddr_t maxlimit)
{
    errval_t ret;
    struct capref cap;
    ret = percore_steal_handler_common(bits, minbase, maxlimit, &cap);

    errval_t err;
    err = b->tx_vtbl.steal_response(b, NOP_CONT, ret, cap);
    if (err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            struct pending_reply *r = malloc(sizeof(struct pending_reply));
            assert(r != NULL);
            r->b = b;
            r->err = ret;
            r->cap = cap;
            err = b->register_send(b, get_default_waitset(),
                                   MKCONT(retry_steal_reply, r));
            assert(err_is_ok(err));
        } else {
            DEBUG_ERR(err, "failed to reply to steal request");
        }
    }

    trace_event(TRACE_SUBSYS_MEMSERV, TRACE_EVENT_MEMSERV_PERCORE_ALLOC_COMPLETE, 0);
}

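/// Handle a client allocation request: allocate RAM of the given size
/// within the given range and send the cap back. The capref is
/// heap-allocated so that allocate_response_done() can delete and free
/// it once the reply has actually been sent.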
static void percore_allocate_handler(struct mem_binding *b,
                                     uint8_t bits,
                                     genpaddr_t minbase, genpaddr_t maxlimit)
{
    errval_t ret;
    struct capref *cap = malloc(sizeof(struct capref));
    assert(cap != NULL);
    ret = percore_allocate_handler_common(bits, minbase, maxlimit, cap);

    errval_t err;
    err = b->tx_vtbl.allocate_response(b, MKCONT(allocate_response_done, cap),
                                       ret, *cap);
    if (err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            struct pending_reply *r = malloc(sizeof(struct pending_reply));
            assert(r != NULL);
            r->b = b;
            r->err = ret;
            r->acap = cap;
            err = b->register_send(b, get_default_waitset(),
                                   MKCONT(retry_allocate_reply, r));
            assert(err_is_ok(err));
        } else {
            DEBUG_ERR(err, "failed to reply to memory request");
            allocate_response_done(cap);
        }
    }

    trace_event(TRACE_SUBSYS_MEMSERV, TRACE_EVENT_MEMSERV_PERCORE_ALLOC_COMPLETE, 0);
}

// Various startup procedures

static bool memserv_exported = false;
static iref_t myiref;
struct mem_binding *monitor_mem_binding = NULL;

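/// Called once the mem interface has been exported: registers the iref
/// with the monitor and flags the export as complete.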
static void percore_export_callback(void *st, errval_t err, iref_t iref)
{
    assert(err_is_ok(err));

    assert(iref != 0);
    iref_t percore_mem_serv_iref = iref;

    struct monitor_binding *mb = get_monitor_binding();
    err = mb->tx_vtbl.set_mem_iref_request(mb, NOP_CONT, percore_mem_serv_iref);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "registering iref with monitor");
    }

    myiref = iref;
    memserv_exported = true;
}

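/// Receive handler table for the percore mem interface.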
static struct mem_rx_vtbl percore_rx_vtbl = {
    .allocate_call = percore_allocate_handler,
    .available_call = mem_available_handler,
    .free_monitor_call = percore_free_handler,
    .steal_call = percore_steal_handler,
};

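/// Called on each new connection. The first binding to arrive after the
/// export has completed is assumed to be the monitor's, so remember it.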
static errval_t percore_connect_callback(void *st, struct mem_binding *b)
{
    // Remember monitor's binding to this mem_serv
    if (memserv_exported && monitor_mem_binding == NULL) {
        monitor_mem_binding = b;
    }

    b->rx_vtbl = percore_rx_vtbl;
    return SYS_ERR_OK;
}

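/**
 * \brief Set up and run a percore memory server.
 *
 * Initialises the local allocator with its share of RAM, exports the
 * mem interface, points the local spawnd at this server, registers with
 * the nameservice, and then hands control over to the dispatcher.
 */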
errval_t percore_mem_serv(coreid_t core, coreid_t *cores,
                          int len_cores, memsize_t ram)
{
    errval_t err;

    struct waitset *ws = get_default_waitset();

    // Init the memory allocator
    err = initialize_percore_mem_serv(core, cores, len_cores, ram);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "initializing percore mem_serv");
        return err;
    }

    err = mem_export(NULL, percore_export_callback, percore_connect_callback,
                     ws, IDC_EXPORT_FLAGS_DEFAULT);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "exporting percore mem interface");
        return err;
    }

    while (!memserv_exported) {
        messages_wait_and_handle_next();
    }

    // The following has to happen outside the export callback, since it
    // may perform a bind and invoke an RPC.

    // Explicitly tell spawnd to use us
    err = set_local_spawnd_memserv(core);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "setting spawnd's local memserv interface");
        return err;
    }

    // This has to be done after spawnd's local memserv has been set,
    // so it cannot go in the export callback either.
    char service_name[NAME_LEN];
    snprintf(service_name, NAME_LEN, "%s.%d", MEMSERV_DIST, core);
    err = nameservice_register(service_name, myiref);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "nameservice_register failed");
    }

    // Let the master know we are ready
    err = nsb_register_n(core, MEMSERV_DIST);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "nsb_register_n failed");
    }

    // Hand control to the main dispatch loop; this should not return
    THCFinish();

    assert(!"Should never return");
    abort();
}