1/** \file
2 *  \brief Monitor's connection with the dispatchers on the same core for
3 *  blocking rpc calls.
4 */
5
6/*
7 * Copyright (c) 2009, 2010, 2011, ETH Zurich.
8 * All rights reserved.
9 *
10 * This file is distributed under the terms in the attached LICENSE file.
11 * If you do not find this file, copies can be found by writing to:
12 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
13 */
14
15
16#include "monitor.h"
17#include <barrelfish/monitor_client.h>
18#include <barrelfish_kpi/platform.h>
19#include "capops.h"
20
21// workaround inlining bug with gcc 4.4.1 shipped with ubuntu 9.10 and 4.4.3 in Debian
22#if defined(__i386__) && defined(__GNUC__) \
23    && __GNUC__ == 4 && __GNUC_MINOR__ == 4 && __GNUC_PATCHLEVEL__ <= 3
24#define SAFEINLINE __attribute__((noinline))
25#else
26#define SAFEINLINE
27#endif
28
29static void retype_reply_status(errval_t status, void *st)
30{
31    struct monitor_blocking_binding *b = (struct monitor_blocking_binding*)st;
32    errval_t err = b->tx_vtbl.remote_cap_retype_response(b, NOP_CONT, status);
33    assert(err_is_ok(err));
34}
35
36static void remote_cap_retype(struct monitor_blocking_binding *b,
37                              struct capref src_root, struct capref dest_root,
38                              capaddr_t src, uint64_t offset, uint64_t new_type,
39                              uint64_t objsize, uint64_t count, capaddr_t to,
40                              capaddr_t slot, int32_t to_level)
41{
42    if (capref_is_null(dest_root)) {
43        dest_root = src_root;
44    }
45    capops_retype(new_type, objsize, count, dest_root, to, to_level,
46                  slot, src_root, src, 2, offset, retype_reply_status, (void*)b);
47}
48
49static void delete_reply_status(errval_t status, void *st)
50{
51    DEBUG_CAPOPS("sending cap_delete reply msg: %s\n", err_getstring(status));
52    struct monitor_blocking_binding *b = (struct monitor_blocking_binding*)st;
53    errval_t err = b->tx_vtbl.remote_cap_delete_response(b, NOP_CONT, status);
54    assert(err_is_ok(err));
55}
56
57static void remote_cap_delete(struct monitor_blocking_binding *b,
58                              struct capref croot, capaddr_t src, uint8_t level)
59{
60    struct domcapref cap = { .croot = croot, .cptr = src, .level = level };
61    capops_delete(cap, delete_reply_status, (void*)b);
62}
63
64static void revoke_reply_status(errval_t status, void *st)
65{
66    struct monitor_blocking_binding *b = (struct monitor_blocking_binding*)st;
67    errval_t err = b->tx_vtbl.remote_cap_revoke_response(b, NOP_CONT, status);
68    assert(err_is_ok(err));
69}
70
71static void remote_cap_revoke(struct monitor_blocking_binding *b,
72                              struct capref croot, capaddr_t src, uint8_t level)
73{
74    struct domcapref cap = { .croot = croot, .cptr = src, .level = level };
75    capops_revoke(cap, revoke_reply_status, (void*)b);
76}
77
78static void rsrc_manifest(struct monitor_blocking_binding *b,
79                          struct capref dispcap, const char *str)
80{
81    errval_t err, err2;
82    rsrcid_t id;
83
84    err = rsrc_new(&id);
85    if(err_is_fail(err)) {
86        goto out;
87    }
88    err = rsrc_join(id, dispcap, b);
89    if(err_is_fail(err)) {
90        // TODO: Cleanup
91        goto out;
92    }
93    err = rsrc_submit_manifest(id, (CONST_CAST)str);
94
95 out:
96    err2 = b->tx_vtbl.rsrc_manifest_response(b, NOP_CONT, id, err);
97    assert(err_is_ok(err2));
98}
99
100static void rsrc_phase(struct monitor_blocking_binding *b,
101                       rsrcid_t id, uint32_t phase)
102{
103    errval_t err;
104
105    err = rsrc_set_phase(id, phase);
106    assert(err_is_ok(err));
107
108    err = b->tx_vtbl.rsrc_phase_response(b, NOP_CONT);
109    assert(err_is_ok(err));
110}
111
/**
 * \brief Handle a rsrc_join request from a dispatcher.
 *
 * \param b       binding the request arrived on (also used for the reply)
 * \param id      resource domain to join
 * \param dispcap dispatcher capability of the joining dispatcher
 *
 * NOTE(review): a reply is only sent here on failure. On success no response
 * is generated in this function — presumably rsrc_join() arranges for the
 * reply to be sent later by the rsrc subsystem; confirm against rsrc_join's
 * implementation before changing this.
 */
static void rpc_rsrc_join(struct monitor_blocking_binding *b,
                          rsrcid_t id, struct capref dispcap)
{
    errval_t err, err2;

    err = rsrc_join(id, dispcap, b);

    if(err_is_fail(err)) {
        // immediate failure: report it to the caller ourselves
        err2 = b->tx_vtbl.rsrc_join_response(b, NOP_CONT, err);
        assert(err_is_ok(err2));
    }
}
124
125static void alloc_monitor_ep(struct monitor_blocking_binding *b)
126{
127    struct capref retcap = NULL_CAP;
128    errval_t err, reterr = SYS_ERR_OK;
129
130    struct monitor_lmp_binding *lmpb =
131        malloc(sizeof(struct monitor_lmp_binding));
132    assert(lmpb != NULL);
133
134    // setup our end of the binding
135    err = monitor_client_lmp_accept(lmpb, get_default_waitset(),
136                                    DEFAULT_LMP_BUF_WORDS);
137    if (err_is_fail(err)) {
138        free(lmpb);
139        reterr = err_push(err, LIB_ERR_MONITOR_CLIENT_ACCEPT);
140        goto out;
141    }
142
143    retcap = lmpb->chan.local_cap;
144    monitor_server_init(&lmpb->b);
145
146out:
147    err = b->tx_vtbl.alloc_monitor_ep_response(b, NOP_CONT, reterr, retcap);
148    if (err_is_fail(err)) {
149        USER_PANIC_ERR(err, "failed to send alloc_monitor_ep_reply");
150    }
151}
152
/// State carried across the asynchronous cap delete started by cap_identify().
struct cap_identify_del_st {
    struct monitor_blocking_binding *b;  // binding to send the reply on
    struct capref cap;                   // our copy of the cap; slot freed afterwards
    union capability_caprep_u u;         // identify result; union gives access to
                                         // both capability and caprep views
    errval_t reterr;                     // result of monitor_cap_identify()
};
159
/**
 * \brief Continuation run after the cap copy made for identification is deleted.
 *
 * Frees the cap slot, sends the cap_identify reply and releases the state.
 * Error protocol: `msg` labels the failing step; any failing `err` at the
 * cleanup label panics with that label.
 *
 * \param status result of the delete (CAP_NOT_FOUND is tolerated: the cap
 *               may already be gone)
 * \param st     struct cap_identify_del_st, freed here in all cases
 */
static void cap_identify_delete_result_handler(errval_t status, void *st)
{
    errval_t err;
    char *msg = NULL;
    if (err_is_fail(status) && err_no(status) != SYS_ERR_CAP_NOT_FOUND) {
        msg = "caps_delete failed";
        err = status;
        goto cleanup;
    }

    struct cap_identify_del_st *idst = st;

    // free slot
    err = slot_free(idst->cap);
    if (err_is_fail(err)) {
        msg = "slot_free failed";
        goto cleanup;
    }

    // send cap_identify reply; msg only matters if the send fails
    err = idst->b->tx_vtbl.cap_identify_response(idst->b, NOP_CONT, idst->reterr,
            idst->u.caprepb);
    msg = "reply failed";

cleanup:
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, msg ? msg : "unknown reason?!");
    }
    free(st);
}
190
191static void cap_identify(struct monitor_blocking_binding *b,
192                         struct capref cap)
193{
194    // allocate delete state
195    struct cap_identify_del_st *st = malloc(sizeof(*st));
196    if (!st) {
197        USER_PANIC("malloc in cap_identify");
198    }
199    st->b = b;
200    st->cap = cap;
201    st->reterr = monitor_cap_identify(cap, &st->u.cap);
202
203    /* We always need to do the delete properly here as the cap might be
204     * locked or in a delete already, furthermore if the function is called
205     * from the monitor through it's self-client binding we still create a
206     * copy of the capability, and need to cleanup our copy */
207    struct domcapref dcap = get_cap_domref(cap);
208
209    capops_delete(dcap, cap_identify_delete_result_handler, st);
210}
211
212#define ARM_IRQ_MAX 256
213
214static void arm_irq_handle_call(struct monitor_blocking_binding *b,
215        struct capref ep, uint32_t irq)
216{
217
218    errval_t err = 1;
219
220    if (irq <= ARM_IRQ_MAX) {
221        err = invoke_irqtable_set(cap_irq, irq, ep);
222    }
223
224    errval_t err2 = b->tx_vtbl.arm_irq_handle_response(b, NOP_CONT, err);
225    assert(err_is_ok(err2));
226}
227
228static void irq_handle_call(struct monitor_blocking_binding *b, struct capref ep)
229{
230    /* allocate a new slot in the IRQ table */
231    int vec;
232    errval_t err, err2;
233    err = invoke_irqtable_alloc_vector(cap_irq, &vec);
234    if (err_is_fail(err)) {
235        err = err_push(err, MON_ERR_INVOKE_IRQ_ALLOCATE);
236        err2 = b->tx_vtbl.irq_handle_response(b, NOP_CONT, err, 0);
237    }
238    // we got a vector
239
240    /* set it and reply */
241    err = invoke_irqtable_set(cap_irq, vec, ep);
242    if (err_is_fail(err)) {
243        err = err_push(err, MON_ERR_INVOKE_IRQ_SET);
244    }
245    err2 = b->tx_vtbl.irq_handle_response(b, NOP_CONT, err, vec);
246    assert(err_is_ok(err2));
247}
248
249static void get_arch_core_id(struct monitor_blocking_binding *b)
250{
251    static uintptr_t arch_id = -1;
252    errval_t err;
253//    printf("%s:%s:%d: \n", __FILE__, __FUNCTION__, __LINE__);
254
255    if (arch_id == -1) {
256        err = invoke_monitor_get_arch_id(&arch_id);
257        assert(err_is_ok(err));
258        assert(arch_id != -1);
259    }
260
261    err = b->tx_vtbl.get_arch_core_id_response(b, NOP_CONT, arch_id);
262    assert(err_is_ok(err));
263}
264
/// Deferred cap_set_remote reply, queued when the send channel is busy.
struct pending_reply {
    struct monitor_blocking_binding *b;  // binding to retry the send on
    errval_t err;                        // status to deliver to the client
};
269
270static void retry_reply(void *arg)
271{
272    struct pending_reply *r = arg;
273    assert(r != NULL);
274    struct monitor_blocking_binding *b = r->b;
275    errval_t err;
276
277    err = b->tx_vtbl.cap_set_remote_response(b, NOP_CONT, r->err);
278    if (err_is_ok(err)) {
279        free(r);
280    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
281        err = b->register_send(b, get_default_waitset(), MKCONT(retry_reply, r));
282        assert(err_is_ok(err));
283    } else {
284        DEBUG_ERR(err, "failed to reply to memory request");
285    }
286}
287
/**
 * \brief Mark a capability as having remote copies.
 *
 * Sets the RRELS_COPY_BIT on the cap's remote relations and replies with the
 * result, deferring the reply via retry_reply() if the channel is busy.
 *
 * NOTE(review): the `remote` parameter is ignored — the copy bit is always
 * set regardless of its value. Confirm whether clients ever pass false and
 * expect the bit to be cleared.
 */
static void cap_set_remote(struct monitor_blocking_binding *b,
                           struct capref cap, bool remote)
{
    errval_t err, reterr;

    reterr = monitor_remote_relations(cap, RRELS_COPY_BIT, RRELS_COPY_BIT, NULL);

    err = b->tx_vtbl.cap_set_remote_response(b, NOP_CONT, reterr);
    if(err_is_fail(err)) {
        if(err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            // channel busy: stash the result and retry when there is space
            struct pending_reply *r = malloc(sizeof(struct pending_reply));
            assert(r != NULL);
            r->b = b;
            r->err = reterr;
            err = b->register_send(b, get_default_waitset(), MKCONT(retry_reply, r));
            assert(err_is_ok(err));
        } else {
            USER_PANIC_ERR(err, "cap_set_remote_response");
        }
    }
}
309
310/* ----------------------- BOOTINFO REQUEST CODE START ---------------------- */
311
312static void get_phyaddr_cap(struct monitor_blocking_binding *b)
313{
314    // XXX: We should not just hand out this cap to everyone
315    // who requests it. There is currently no way to determine
316    // if the client is a valid recipient
317    errval_t err;
318
319    struct capref src = {
320        .cnode = cnode_root,
321        .slot  = ROOTCN_SLOT_PACN
322    };
323
324    err = b->tx_vtbl.get_phyaddr_cap_response(b, NOP_CONT, src,
325            SYS_ERR_OK);
326    if (err_is_fail(err)) {
327        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
328            err = b->register_send(b, get_default_waitset(),
329                                   MKCONT((void (*)(void *))get_phyaddr_cap, b));
330            if (err_is_fail(err)) {
331                USER_PANIC_ERR(err, "register_send failed");
332            }
333        }
334
335        USER_PANIC_ERR(err, "sending get_phyaddr_cap_response failed");
336    }
337}
338
339static void get_io_cap(struct monitor_blocking_binding *b)
340{
341    // XXX: We should not just hand out this cap to everyone
342    // who requests it. There is currently no way to determine
343    // if the client is a valid recipient
344    errval_t err;
345    struct capref src = {
346        .cnode = cnode_task,
347        .slot  = TASKCN_SLOT_IO
348    };
349
350    err = b->tx_vtbl.get_io_cap_response(b, NOP_CONT, src,
351            SYS_ERR_OK);
352    if (err_is_fail(err)) {
353        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
354            err = b->register_send(b, get_default_waitset(),
355                                   MKCONT((void (*)(void *))get_io_cap, b));
356            if (err_is_fail(err)) {
357                USER_PANIC_ERR(err, "register_send failed");
358            }
359        }
360
361        USER_PANIC_ERR(err, "sending get_io_cap_response failed");
362    }
363}
364
365static void get_irq_dest_cap(struct monitor_blocking_binding *b)
366{
367    errval_t err;
368    //TODO get real cap
369
370    struct capref dest_cap;
371    slot_alloc(&dest_cap);
372    err = invoke_irqtable_alloc_dest_cap(cap_irq, dest_cap);
373    if(err_is_fail(err)){
374        DEBUG_ERR(err,"x");
375        USER_PANIC_ERR(err, "could not allocate dest cap!");
376    }
377
378
379    err = b->tx_vtbl.get_irq_dest_cap_response(b, NOP_CONT, dest_cap,
380            SYS_ERR_OK);
381    if (err_is_fail(err)) {
382        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
383            err = b->register_send(b, get_default_waitset(),
384                                   MKCONT((void (*)(void *))get_io_cap, b));
385            if (err_is_fail(err)) {
386                USER_PANIC_ERR(err, "register_send failed");
387            }
388        }
389
390        USER_PANIC_ERR(err, "sending get_io_cap_response failed");
391    }
392}
393
394
395static void get_bootinfo(struct monitor_blocking_binding *b)
396{
397    errval_t err;
398
399    struct capref frame = {
400        .cnode = cnode_task,
401        .slot  = TASKCN_SLOT_BOOTINFO
402    };
403
404    struct frame_identity id = { .base = 0, .bytes = 0 };
405    err = invoke_frame_identify(frame, &id);
406    assert(err_is_ok(err));
407
408    err = b->tx_vtbl.get_bootinfo_response(b, NOP_CONT, SYS_ERR_OK, frame,
409                                           id.bytes);
410    if (err_is_fail(err)) {
411        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
412            err = b->register_send(b, get_default_waitset(),
413                                   MKCONT((void (*)(void *))get_bootinfo, b));
414            if (err_is_fail(err)) {
415                USER_PANIC_ERR(err, "register_send failed");
416            }
417        }
418
419        USER_PANIC_ERR(err, "sending get_bootinfo_response failed");
420    }
421}
422
423/* ----------------------- BOOTINFO REQUEST CODE END ----------------------- */
424
425static void get_ipi_cap(struct monitor_blocking_binding *b)
426{
427    errval_t err;
428
429    // XXX: We should not just hand out this cap to everyone
430    // who requests it. There is currently no way to determine
431    // if the client is a valid recipient
432
433    err = b->tx_vtbl.get_ipi_cap_response(b, NOP_CONT, cap_ipi);
434    assert(err_is_ok(err));
435}
436
// XXX: these look suspicious in combination with distops!
/**
 * \brief Give a kernel control block (KCB) to a destination core.
 *
 * Three paths, each of which sends exactly one reply on failure:
 *  - cap identify fails: error reply immediately;
 *  - destination is this core: add the KCB via direct syscall and reply;
 *  - otherwise: forward the KCB's caprep to the destination's monitor; the
 *    reply to \p b is sent later when that monitor answers (binding stashed
 *    in ib->st).
 */
static void forward_kcb_request(struct monitor_blocking_binding *b,
                                coreid_t destination, struct capref kcb)
{
    printf("%s:%s:%d: forward_kcb_request in monitor\n",
           __FILE__, __FUNCTION__, __LINE__);

    errval_t err = SYS_ERR_OK;

    // resolve the capref to a raw capability so we can inspect/forward it
    struct capability kcb_cap;
    err = monitor_cap_identify(kcb, &kcb_cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "monitor_cap_identify failed");
        err = b->tx_vtbl.forward_kcb_request_response(b, NOP_CONT, err);
        assert(err_is_ok(err));
        return;
    }

    if (destination == my_core_id) {
        // local case: no inter-monitor hop needed, invoke the kernel directly
        uintptr_t kcb_base = (uintptr_t)kcb_cap.u.kernelcontrolblock.kcb;
        printf("%s:%s:%d: Invoke syscall directly, destination==my_core_id; kcb_base = 0x%"PRIxPTR"\n",
               __FILE__, __FUNCTION__, __LINE__, kcb_base);
        err = invoke_monitor_add_kcb(kcb_base);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "invoke_montitor_add_kcb failed.");
        }

        err = b->tx_vtbl.forward_kcb_request_response(b, NOP_CONT, err);
        assert(err_is_ok(err));
        return;
    }

    // remote case: look up the binding to the destination core's monitor
    struct intermon_binding *ib;
    err = intermon_binding_get(destination, &ib);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "intermon_binding_get failed");
        err = b->tx_vtbl.forward_kcb_request_response(b, NOP_CONT, err);
        assert(err_is_ok(err));
        return;
    }

    // serialize the capability for transfer over the intermon channel
    intermon_caprep_t kcb_rep;
    capability_to_caprep(&kcb_cap, &kcb_rep);

    // remember our client's binding so the eventual answer can be routed back
    ib->st = b;
    err = ib->tx_vtbl.give_kcb_request(ib, NOP_CONT, kcb_rep);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "give_kcb send failed");
        err = b->tx_vtbl.forward_kcb_request_response(b, NOP_CONT, err);
        assert(err_is_ok(err));
        return;
    }
}
490
/**
 * \brief Ask the monitor on another core to remove a KCB.
 *
 * On identify/binding failure an error reply is sent immediately; otherwise
 * the request is forwarded and the reply to \p b is sent when the remote
 * monitor answers (our binding is stashed in the intermon state).
 */
static void forward_kcb_rm_request(struct monitor_blocking_binding *b,
                                   coreid_t destination, struct capref kcb)
{
    errval_t err = SYS_ERR_OK;

    // can't move ourselves
    assert(destination != my_core_id);

    // resolve the capref so we can extract the KCB's kernel address
    struct capability kcb_cap;
    err = monitor_cap_identify(kcb, &kcb_cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "monitor_cap_identify failed");
        err = b->tx_vtbl.forward_kcb_request_response(b, NOP_CONT, err);
        assert(err_is_ok(err));
        return;
    }

    struct intermon_binding *ib;
    err = intermon_binding_get(destination, &ib);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "intermon_binding_get failed");
        err = b->tx_vtbl.forward_kcb_request_response(b, NOP_CONT, err);
        assert(err_is_ok(err));
        return;
    }
    uintptr_t kcb_base = (uintptr_t )kcb_cap.u.kernelcontrolblock.kcb;

    // send request to other monitor
    // remember monitor binding to send answer
    struct intermon_state *ist = (struct intermon_state*)ib->st;
    ist->originating_client = (struct monitor_binding*)b; //XXX: HACK
    err = ib->tx_vtbl.forward_kcb_rm_request(ib, NOP_CONT, kcb_base);
    assert(err_is_ok(err));
}
525
526static void get_global_paddr(struct monitor_blocking_binding *b)
527{
528    genpaddr_t global = 0;
529    errval_t err;
530    err = invoke_get_global_paddr(cap_kernel, &global);
531    if (err_is_fail(err)) {
532        DEBUG_ERR(err, "get_global_paddr invocation");
533    }
534
535    err = b->tx_vtbl.get_global_paddr_response(b, NOP_CONT, global);
536    if (err_is_fail(err)) {
537        USER_PANIC_ERR(err, "sending global paddr failed.");
538    }
539}
540
541static void get_platform(struct monitor_blocking_binding *b)
542{
543    struct platform_info pi;
544    errval_t err;
545    err = invoke_get_platform_info((uintptr_t)&pi);
546    if (err_is_fail(err)) {
547        DEBUG_ERR(err, "get_platform_info invocation");
548    }
549
550    err = b->tx_vtbl.get_platform_response(b, NOP_CONT, pi.arch, pi.platform);
551    if (err_is_fail(err)) {
552        USER_PANIC_ERR(err, "sending platform info failed.");
553    }
554}
555
556static void get_platform_arch(struct monitor_blocking_binding *b)
557{
558    errval_t err;
559    size_t struct_size;
560
561    struct platform_info *pi= malloc(sizeof(struct platform_info));
562    if(!pi) USER_PANIC("Failed to allocate platform info struct.\n");
563
564    err = invoke_get_platform_info((uintptr_t)pi);
565    if (err_is_fail(err)) {
566        DEBUG_ERR(err, "get_platform_info invocation");
567    }
568
569    switch(pi->arch) {
570        case PI_ARCH_ARMV7A:
571            struct_size= sizeof(struct arch_info_armv7);
572            break;
573        default:
574            struct_size= 0;
575    }
576    assert(struct_size < PI_ARCH_INFO_SIZE);
577
578    err = b->tx_vtbl.get_platform_arch_response(b, MKCONT(free,pi),
579            (uint8_t *)&pi->arch_info, struct_size);
580    if (err_is_fail(err)) {
581        USER_PANIC_ERR(err, "sending platform info failed.");
582    }
583}
584
585/*------------------------- Initialization functions -------------------------*/
586
/// Receive-handler table wired into every new blocking-RPC binding
/// (see connect_callback()).
static struct monitor_blocking_rx_vtbl rx_vtbl = {
    .get_bootinfo_call = get_bootinfo,
    .get_phyaddr_cap_call = get_phyaddr_cap,
    .get_io_cap_call = get_io_cap,
    .get_irq_dest_cap_call = get_irq_dest_cap,

    .remote_cap_retype_call  = remote_cap_retype,
    .remote_cap_delete_call  = remote_cap_delete,
    .remote_cap_revoke_call  = remote_cap_revoke,

    .rsrc_manifest_call      = rsrc_manifest,
    .rsrc_join_call          = rpc_rsrc_join,
    .rsrc_phase_call         = rsrc_phase,

    .alloc_monitor_ep_call   = alloc_monitor_ep,
    .cap_identify_call       = cap_identify,
    .irq_handle_call         = irq_handle_call,
    .arm_irq_handle_call     = arm_irq_handle_call,
    .get_arch_core_id_call   = get_arch_core_id,

    .cap_set_remote_call     = cap_set_remote,
    .get_ipi_cap_call = get_ipi_cap,

    .forward_kcb_request_call = forward_kcb_request,

    .forward_kcb_rm_request_call = forward_kcb_rm_request,

    .get_global_paddr_call = get_global_paddr,

    .get_platform_call = get_platform,
    .get_platform_arch_call = get_platform_arch,
};
619
/// Export callback: record the service iref once the export has succeeded.
static void export_callback(void *st, errval_t err, iref_t iref)
{
    assert(err_is_ok(err));
    set_monitor_rpc_iref(iref);
}
625
/// Connect callback: install the receive handlers on each new client binding.
static errval_t connect_callback(void *st, struct monitor_blocking_binding *b)
{
    b->rx_vtbl = rx_vtbl;

    // TODO: set error handler
    return SYS_ERR_OK;
}
633
/**
 * \brief Export the monitor's blocking-RPC service.
 *
 * \return SYS_ERR_OK on success, or the error from idc_export_service().
 */
errval_t monitor_rpc_init(void)
{
    // static: the export state must outlive this function (and the
    // initializer can legally take &e only because e has static storage)
    static struct monitor_blocking_export e = {
        .connect_cb = connect_callback,
        .common = {
            .export_callback = export_callback,
            .flags = IDC_EXPORT_FLAGS_DEFAULT,
            .connect_cb_st = &e,
            .lmp_connect_callback = monitor_blocking_lmp_connect_handler,
        }
    };

    // waitset is only known at run time, so it cannot go in the initializer
    e.waitset = get_default_waitset();

    return idc_export_service(&e.common);
}
650