1/**
2 * \file
3 * \brief Process management service.
4 */
5
6/*
7 * Copyright (c) 2017, ETH Zurich.
8 * All rights reserved.
9 *
10 * This file is distributed under the terms in the attached LICENSE file.
11 * If you do not find this file, copies can be found by writing to:
12 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
13 */
14
15#include <barrelfish/barrelfish.h>
16#include <barrelfish/nameservice_client.h>
17#include <barrelfish/spawn_client.h>
18#include <if/monitor_defs.h>
19#include <if/proc_mgmt_defs.h>
20#include <if/spawn_defs.h>
21
22#include "domain.h"
23#include "internal.h"
24#include "pending_clients.h"
25#include "spawnd_state.h"
26
27/**
28 * \brief Handler for message add_spawnd, for the local monitor binding.
29 */
30static void add_spawnd_handler(struct proc_mgmt_binding *b, coreid_t core_id,
31                               iref_t iref)
32{
33    if (spawnd_state_exists(core_id)) {
34        DEBUG_ERR(PROC_MGMT_ERR_SPAWND_EXISTS, "spawnd_state_exists");
35        return;
36    }
37
38    // Bind with the spawnd.
39    struct spawn_binding *spawnb;
40    errval_t err = spawn_bind_iref(iref, &spawnb);
41    if (err_is_fail(err)) {
42        DEBUG_ERR(err, "spawn_bind_iref");
43        return;
44    }
45
46    err = spawnd_state_alloc(core_id, spawnb);
47    if (err_is_fail(err)) {
48        DEBUG_ERR(err, "spawnd_state_alloc");
49    }
50
51    debug_printf("Process manager bound with spawnd.%u on iref %u\n", core_id,
52            iref);
53}
54
55/**
56 * \brief Handler for message add_spawnd, for non-monitor bindings.
57 */
/**
 * \brief Handler for message add_spawnd, for non-monitor bindings.
 *
 * Only the local monitor is allowed to announce new spawnds; calls arriving
 * over ordinary client bindings are deliberately ignored (no reply is sent).
 */
static void add_spawnd_handler_non_monitor(struct proc_mgmt_binding *b,
                                           coreid_t core_id, iref_t iref)
{
    // debug_printf("Ignoring add_spawnd call: %s\n",
    //              err_getstring(PROC_MGMT_ERR_NOT_MONITOR));
}
64
65static bool cleanup_request_sender(struct msg_queue_elem *m);
66
67/**
68 * General-purpose handler for replies from spawnd.
69 */
/**
 * General-purpose handler for replies from spawnd.
 *
 * Dequeues the pending client that is at the head of the receive queue for
 * the replying spawnd binding (replies are matched to requests purely by
 * FIFO order) and finishes the corresponding operation:
 *  - Spawn/SpawnWithCaps: register the new domain locally, then forward the
 *    (possibly updated) error and the domain cap to the original client.
 *  - Span: acknowledge the span to the client.
 *  - Kill/Exit: account for one spawnd having stopped its dispatchers; once
 *    all have, notify waiters and fan out cleanup requests to every spawnd
 *    that hosts a dispatcher of the domain.
 *  - Cleanup: account for one spawnd having released resources; once all
 *    have, mark the domain fully cleaned.
 *
 * \param b         spawnd binding the reply arrived on; b->st is the
 *                  corresponding struct spawnd_state.
 * \param spawn_err status reported by spawnd for the completed request.
 */
static void spawn_reply_handler(struct spawn_binding *b, errval_t spawn_err)
{
    // Head of the spawnd's receive queue is the client this reply belongs to.
    struct pending_client *cl =
            (struct pending_client*) spawnd_state_dequeue_recv(b->st);

    struct pending_spawn *spawn = NULL;
    struct pending_span *span = NULL;
    struct pending_kill_cleanup *kc = NULL;

    struct domain_entry *entry;

    errval_t err, resp_err;

    switch (cl->type) {
        case ClientType_Spawn:
        case ClientType_SpawnWithCaps:
            spawn = (struct pending_spawn*) cl->st;
            err = spawn_err;
            // Only register the domain locally if spawnd succeeded; a local
            // registration failure overrides the (OK) spawnd status.
            if (err_is_ok(spawn_err)) {
                err = domain_spawn(spawn->cap_node, spawn->core_id, spawn->argvbuf,
                                   spawn->argvbytes);
            }
            if (cl->type == ClientType_Spawn) {
                resp_err = cl->b->tx_vtbl.spawn_response(cl->b, NOP_CONT,
                        err, spawn->cap_node->domain_cap);
            } else {
                resp_err = cl->b->tx_vtbl.spawn_with_caps_response(cl->b,
                        NOP_CONT, err, spawn->cap_node->domain_cap);
            }

            free(spawn);
            break;

        case ClientType_Span:
            span = (struct pending_span*) cl->st;
            entry = span->entry;
            // Only acknowledge while the domain is still running; a domain
            // killed mid-span gets no span_response.
            if (entry->status == DOMAIN_STATUS_RUNNING) {
                resp_err = cl->b->tx_vtbl.span_response(cl->b, NOP_CONT,
                                                        spawn_err);
            }

            free(span);
            break;

        case ClientType_Cleanup:
            kc = (struct pending_kill_cleanup*) cl->st;
            entry = kc->entry;

            assert(entry->num_spawnds_resources > 0);
            assert(entry->status != DOMAIN_STATUS_CLEANED);

            // One more spawnd has released this domain's resources.
            --entry->num_spawnds_resources;
            if (entry->num_spawnds_resources == 0) {
                entry->status = DOMAIN_STATUS_CLEANED;

                // At this point, the domain exists in state CLEANED for history
                // reasons. For instance, if some other domain issues a wait
                // call for this one, the process manager can return the exit
                // status directly. At some point, however, we might want to
                // just clean up the domain entry and recycle the domain cap.
            }

            free(kc);
            break;

        case ClientType_Kill:
        case ClientType_Exit:
            kc = (struct pending_kill_cleanup*) cl->st;
            entry = kc->entry;

            assert(entry->num_spawnds_running > 0);
            assert(entry->status != DOMAIN_STATUS_STOPPED);

            // One more spawnd has stopped this domain's dispatchers.
            --entry->num_spawnds_running;

            if (entry->num_spawnds_running == 0) {
                entry->status = DOMAIN_STATUS_STOPPED;

                // Only an external kill gets an explicit response; for exit
                // the requesting domain is already gone.
                if (cl->type == ClientType_Kill) {
                    entry->exit_status = EXIT_STATUS_KILLED;
                    resp_err = cl->b->tx_vtbl.kill_response(cl->b, NOP_CONT,
                                                            spawn_err);
                }

                // Wake up every domain blocked in wait() on this one.
                struct domain_waiter *waiter = entry->waiters;
                while (waiter != NULL) {
                    waiter->b->tx_vtbl.wait_response(waiter->b, NOP_CONT,
                                                     SYS_ERR_OK,
                                                     entry->exit_status);
                    struct domain_waiter *tmp = waiter;
                    waiter = waiter->next;
                    free(tmp);
                }

                // Fan out cleanup requests to all spawnds that host a
                // dispatcher of this domain.
                for (coreid_t i = 0; i < MAX_COREID; ++i) {
                    if (entry->spawnds[i] == NULL) {
                        continue;
                    }

                    struct spawn_binding *spb = entry->spawnds[i]->b;

                    struct pending_kill_cleanup *cleanup =
                            (struct pending_kill_cleanup*) malloc(
                                    sizeof(struct pending_kill_cleanup));
                    cleanup->b = spb;
                    cleanup->domain_cap = kc->domain_cap;
                    cleanup->entry = entry;

                    struct pending_client *cleanup_cl =
                            (struct pending_client*) malloc(
                                    sizeof(struct pending_client));
                    cleanup_cl->b = cl->b;
                    cleanup_cl->type = ClientType_Cleanup;
                    cleanup_cl->st = cleanup;

                    struct msg_queue_elem *msg = (struct msg_queue_elem*) malloc(
                            sizeof(struct msg_queue_elem));
                    msg->st = cleanup_cl;
                    msg->cont = cleanup_request_sender;

                    err = spawnd_state_enqueue_send(entry->spawnds[i], msg);

                    if (err_is_fail(err)) {
                        DEBUG_ERR(err, "enqueuing cleanup request");
                        free(cleanup);
                        free(cleanup_cl);
                        free(msg);
                    }
                }
            }

            free(kc);
            break;

        default:
            USER_PANIC("Unknown client type in spawn_reply_handler: %u\n",
                       cl->type);
    }

    free(cl);
}
211
212/**
213 * \brief Handler for sending spawn requests.
214 */
215static bool spawn_request_sender(struct msg_queue_elem *m)
216{
217    struct pending_client *cl = (struct pending_client*) m->st;
218    struct pending_spawn *spawn = (struct pending_spawn*) cl->st;
219    spawn->b->rx_vtbl.spawn_reply = spawn_reply_handler;
220
221    errval_t err;
222    bool with_caps = !(capref_is_null(spawn->inheritcn_cap) &&
223                       capref_is_null(spawn->argcn_cap));
224    if (with_caps) {
225        err = spawn->b->tx_vtbl.spawn_with_caps_request(spawn->b, NOP_CONT,
226                                                        cap_procmng,
227                                                        spawn->cap_node->domain_cap,
228                                                        spawn->path,
229                                                        spawn->argvbuf,
230                                                        spawn->argvbytes,
231                                                        spawn->envbuf,
232                                                        spawn->envbytes,
233                                                        spawn->inheritcn_cap,
234                                                        spawn->argcn_cap,
235                                                        spawn->flags);
236    } else {
237        err = spawn->b->tx_vtbl.spawn_request(spawn->b, NOP_CONT, cap_procmng,
238                                              spawn->cap_node->domain_cap,
239                                              spawn->path, spawn->argvbuf,
240                                              spawn->argvbytes, spawn->envbuf,
241                                              spawn->envbytes, spawn->flags);
242    }
243
244    if (err_is_fail(err)) {
245        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
246            return false;
247        } else {
248            USER_PANIC_ERR(err, "sending spawn request");
249        }
250    }
251
252    free(m);
253
254    return true;
255}
256
257/**
258 * \brief Handler for sending span requests.
259 */
260static bool span_request_sender(struct msg_queue_elem *m)
261{
262    struct pending_client *cl = (struct pending_client*) m->st;
263    struct pending_span *span = (struct pending_span*) cl->st;
264
265    errval_t err;
266    span->b->rx_vtbl.spawn_reply = spawn_reply_handler;
267    err = span->b->tx_vtbl.span_request(span->b, NOP_CONT, cap_procmng,
268                                        span->domain_cap, span->vroot,
269                                        span->dispframe);
270
271    if (err_is_fail(err)) {
272        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
273            return false;
274        } else {
275            USER_PANIC_ERR(err, "sending span request");
276        }
277    }
278
279    free(m);
280
281    return true;
282}
283
284/**
285 * \brief Handler for sending kill requests.
286 */
287static bool kill_request_sender(struct msg_queue_elem *m)
288{
289    struct pending_client *cl = (struct pending_client*) m->st;
290    struct pending_kill_cleanup *kill = (struct pending_kill_cleanup*) cl->st;
291
292    errval_t err;
293    kill->b->rx_vtbl.spawn_reply = spawn_reply_handler;
294    err = kill->b->tx_vtbl.kill_request(kill->b, NOP_CONT, cap_procmng,
295                                        kill->domain_cap);
296
297    if (err_is_fail(err)) {
298        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
299            return false;
300        } else {
301            USER_PANIC_ERR(err, "sending kill request");
302        }
303    }
304
305    free(m);
306
307    return true;
308}
309
310/**
311 * \brief Handler for sending cleanup requests.
312 */
313static bool cleanup_request_sender(struct msg_queue_elem *m)
314{
315    struct pending_client *cl = (struct pending_client*) m->st;
316    struct pending_kill_cleanup *cleanup = (struct pending_kill_cleanup*) cl->st;
317
318    errval_t err;
319    cleanup->b->rx_vtbl.spawn_reply = spawn_reply_handler;
320    err = cleanup->b->tx_vtbl.cleanup_request(cleanup->b, NOP_CONT,
321                                              cap_procmng,
322                                              cleanup->domain_cap);
323
324    if (err_is_fail(err)) {
325        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
326            return false;
327        } else {
328            USER_PANIC_ERR(err, "sending cleanup request");
329        }
330    }
331
332    free(m);
333
334    return true;
335}
336
337/**
338 * \brief Common bits of the spawn and spawn_with_caps handlers.
339 */
340static errval_t spawn_handler_common(struct proc_mgmt_binding *b,
341                                     enum ClientType type,
342                                     coreid_t core_id, const char *path,
343                                     const char *argvbuf, size_t argvbytes,
344                                     const char *envbuf, size_t envbytes,
345                                     struct capref inheritcn_cap,
346                                     struct capref argcn_cap, uint8_t flags)
347{
348    if (!spawnd_state_exists(core_id)) {
349        // XXX fixes race condition for between proc_mgmt and spawnd for
350        // now, but is a problem when spawnd on a certain core is not started
351        // because the cpu driver on that core is not started
352        while(!spawnd_state_exists(core_id)) {
353            event_dispatch(get_default_waitset());
354        }
355        //return PROC_MGMT_ERR_INVALID_SPAWND;
356    }
357
358    struct spawnd_state *spawnd = spawnd_state_get(core_id);
359    assert(spawnd != NULL);
360    struct spawn_binding *cl = spawnd->b;
361    assert(cl != NULL);
362
363    errval_t err;
364    if (domain_should_refill_caps()) {
365        err = domain_prealloc_caps();
366        if (err_is_fail(err)) {
367            return err_push(err, PROC_MGMT_ERR_CREATE_DOMAIN_CAP);
368        }
369    }
370
371    struct domain_cap_node *cap_node = next_cap_node();
372
373    struct pending_spawn *spawn = (struct pending_spawn*) malloc(
374            sizeof(struct pending_spawn));
375    spawn->cap_node = cap_node;
376    // spawn->domain_cap = domain_cap;
377    spawn->b = cl;
378    spawn->core_id = core_id;
379    spawn->path = path;
380    spawn->argvbuf = argvbuf;
381    spawn->argvbytes = argvbytes;
382    spawn->envbuf = envbuf;
383    spawn->envbytes = envbytes;
384    spawn->inheritcn_cap = inheritcn_cap;
385    spawn->argcn_cap = argcn_cap;
386    spawn->flags = flags;
387
388    struct pending_client *spawn_cl = (struct pending_client*) malloc(
389            sizeof(struct pending_client));
390    spawn_cl->b = b;
391    spawn_cl->type = type;
392    spawn_cl->st = spawn;
393
394    struct msg_queue_elem *msg = (struct msg_queue_elem*) malloc(
395            sizeof(struct msg_queue_elem));
396    msg->st = spawn_cl;
397    msg->cont = spawn_request_sender;
398
399    err = spawnd_state_enqueue_send(spawnd, msg);
400    if (err_is_fail(err)) {
401        DEBUG_ERR(err, "enqueuing spawn request");
402        free(spawn);
403        free(spawn_cl);
404        free(msg);
405    }
406
407    return SYS_ERR_OK;
408}
409
410/**
411 * \brief Handler for rpc spawn.
412 */
413static void spawn_handler(struct proc_mgmt_binding *b, coreid_t core_id,
414                          const char *path, const char *argvbuf,
415                          size_t argvbytes, const char *envbuf, size_t envbytes,
416                          uint8_t flags)
417{
418    errval_t err, resp_err;
419    err = spawn_handler_common(b, ClientType_Spawn, core_id, path, argvbuf,
420                               argvbytes, envbuf, envbytes, NULL_CAP, NULL_CAP,
421                               flags);
422
423    if (err_is_fail(err)) {
424        resp_err = b->tx_vtbl.spawn_response(b, NOP_CONT, err, NULL_CAP);
425        if (err_is_fail(resp_err)) {
426            DEBUG_ERR(resp_err, "failed to send spawn_response");
427        }
428    }
429}
430
431/**
432 * \brief Handler for rpc spawn_with_caps.
433 */
434static void spawn_with_caps_handler(struct proc_mgmt_binding *b,
435                                    coreid_t core_id, const char *path,
436                                    const char *argvbuf, size_t argvbytes,
437                                    const char *envbuf, size_t envbytes,
438                                    struct capref inheritcn_cap,
439                                    struct capref argcn_cap, uint8_t flags)
440{
441    errval_t err, resp_err;
442    err = spawn_handler_common(b, ClientType_SpawnWithCaps, core_id, path,
443                               argvbuf, argvbytes, envbuf, envbytes,
444                               inheritcn_cap, argcn_cap, flags);
445    if (err_is_ok(err)) {
446        // Will respond to client when we get the reply from spawnd.
447        return;
448    }
449
450    resp_err = b->tx_vtbl.spawn_with_caps_response(b, NOP_CONT, err,
451                                                            NULL_CAP);
452    if (err_is_fail(resp_err)) {
453        DEBUG_ERR(resp_err, "failed to send spawn_with_caps_response");
454    }
455}
456
457/**
458 * \brief Handler for rpc span.
459 */
460static void span_handler(struct proc_mgmt_binding *b, struct capref domain_cap,
461                         coreid_t core_id, struct capref vroot,
462                         struct capref dispframe)
463{
464    errval_t err, resp_err;
465    struct domain_entry *entry = NULL;
466    err = domain_get_by_cap(domain_cap, &entry);
467    if (err_is_fail(err)) {
468        goto respond_with_err;
469    }
470
471    assert(entry != NULL);
472    if (entry->status != DOMAIN_STATUS_RUNNING) {
473        err = PROC_MGMT_ERR_DOMAIN_NOT_RUNNING;
474        goto respond_with_err;
475    }
476
477    if (entry->spawnds[core_id] != NULL) {
478        // TODO(razvan): Maybe we want to allow the same domain to span multiple
479        // dispatchers onto the same core?
480        err = PROC_MGMT_ERR_ALREADY_SPANNED;
481        goto respond_with_err;
482    }
483
484    if (!spawnd_state_exists(core_id)) {
485        err = PROC_MGMT_ERR_INVALID_SPAWND;
486        goto respond_with_err;
487    }
488
489    struct spawnd_state *spawnd = spawnd_state_get(core_id);
490    assert(spawnd != NULL);
491    struct spawn_binding *cl = spawnd->b;
492    assert(cl != NULL);
493
494    struct pending_span *span = (struct pending_span*) malloc(
495            sizeof(struct pending_span));
496    span->domain_cap = domain_cap;
497    span->entry = entry;
498    span->b = cl;
499    span->core_id = core_id;
500    span->vroot = vroot;
501    span->dispframe = dispframe;
502
503    struct pending_client *span_cl = (struct pending_client*) malloc(
504            sizeof(struct pending_client));
505    span_cl->b = b;
506    span_cl->type = ClientType_Span;
507    span_cl->st = span;
508
509    struct msg_queue_elem *msg = (struct msg_queue_elem*) malloc(
510            sizeof(struct msg_queue_elem));
511    msg->st = span_cl;
512    msg->cont = span_request_sender;
513
514    err = spawnd_state_enqueue_send(spawnd, msg);
515
516    if (err_is_fail(err)) {
517        DEBUG_ERR(err, "enqueuing span request");
518        free(span);
519        free(span_cl);
520        free(msg);
521    }
522
523respond_with_err:
524    resp_err = b->tx_vtbl.span_response(b, NOP_CONT, err);
525    if (err_is_fail(resp_err)) {
526        DEBUG_ERR(resp_err, "failed to send span_response");
527    }
528}
529
530/**
531 * \brief Common bits of the kill and exit handlers.
532 */
533static errval_t kill_handler_common(struct proc_mgmt_binding *b,
534                                    struct capref domain_cap,
535                                    enum ClientType type,
536                                    uint8_t exit_status)
537{
538    struct domain_entry *entry;
539    errval_t err = domain_get_by_cap(domain_cap, &entry);
540    if (err_is_fail(err)) {
541        return err;
542    }
543
544    entry->exit_status = exit_status;
545    domain_stop_pending(entry);
546
547    for (coreid_t i = 0; i < MAX_COREID; ++i) {
548        if (entry->spawnds[i] == NULL) {
549            continue;
550        }
551
552        struct spawn_binding *spb = entry->spawnds[i]->b;
553
554        struct pending_kill_cleanup *cmd = (struct pending_kill_cleanup*) malloc(
555                sizeof(struct pending_kill_cleanup));
556        cmd->domain_cap = domain_cap;
557        cmd->entry = entry;
558        cmd->b = spb;
559
560        struct pending_client *cl = (struct pending_client*) malloc(
561                sizeof(struct pending_client));
562        cl->b = b;
563        cl->type = type;
564        cl->st = cmd;
565
566        struct msg_queue_elem *msg = (struct msg_queue_elem*) malloc(
567                sizeof(struct msg_queue_elem));
568        msg->st = cl;
569        msg->cont = kill_request_sender;
570
571        err = spawnd_state_enqueue_send(entry->spawnds[i], msg);
572        if (err_is_fail(err)) {
573            DEBUG_ERR(err, "enqueuing kill request");
574            free(cmd);
575            free(cl);
576            free(msg);
577        }
578    }
579
580    return SYS_ERR_OK;
581}
582
583/**
584 * \brief Handler for rpc kill.
585 */
586static void kill_handler(struct proc_mgmt_binding *b,
587                         struct capref victim_domain_cap)
588{
589    errval_t err = kill_handler_common(b, victim_domain_cap, ClientType_Kill,
590                                       EXIT_STATUS_KILLED);
591    if (err_is_fail(err)) {
592        errval_t resp_err = b->tx_vtbl.kill_response(b, NOP_CONT, err);
593        if (err_is_fail(resp_err)) {
594            DEBUG_ERR(resp_err, "failed to send kill_response");
595        }
596    }
597}
598
599/**
600 * \brief Handler for message exit.
601 */
602static void exit_handler(struct proc_mgmt_binding *b, struct capref domain_cap,
603                         uint8_t exit_status)
604{
605    errval_t err = kill_handler_common(b, domain_cap, ClientType_Exit,
606                                       exit_status);
607    if (err_is_fail(err)) {
608        DEBUG_ERR(err, "processing exit_handler for requesting domain, exit "
609                  "code %u", exit_status);
610    }
611    // Error or not, there's no client to respond to anymore.
612}
613
614/**
615 * \brief Handler for rpc wait.
616 */
617static void wait_handler(struct proc_mgmt_binding *b, struct capref domain_cap, bool nohang)
618{
619    errval_t err, resp_err;
620    struct domain_entry *entry;
621    err = domain_get_by_cap(domain_cap, &entry);
622    if (err_is_fail(err)) {
623        goto respond;
624    }
625
626    if (entry->status == DOMAIN_STATUS_STOPPED) {
627        // Domain has already been stopped, so just reply with exit status.
628        goto respond;
629    }
630
631    if (nohang) {
632        entry->exit_status = -1;
633        goto respond;
634    }
635
636    struct domain_waiter *waiter = (struct domain_waiter*) malloc(
637            sizeof(struct domain_waiter));
638    waiter->b = b;
639    waiter->next = entry->waiters;
640    entry->waiters = waiter;
641    // Will respond when domain is stopped.
642    return;
643
644respond:
645    resp_err = b->tx_vtbl.wait_response(b, NOP_CONT, err, entry->exit_status);
646    if (err_is_fail(resp_err)) {
647        DEBUG_ERR(resp_err, "failed to send wait_response");
648    }
649}
650
651/**
652 * \brief Handler for rpc get_domainlist.
653 */
654static void get_domainlist_handler(struct proc_mgmt_binding *b)
655{
656    errval_t resp_err;
657    size_t len;
658    domainid_t* domains;
659
660    domain_get_all_ids(&domains, &len);
661
662    // 4096 hardcoded limit in flounder interface
663    assert(sizeof(domainid_t)/sizeof(uint8_t)*len < 4096);
664
665    resp_err = b->tx_vtbl.get_domainlist_response(b, NOP_CONT, (uint8_t*) domains,
666                                                  sizeof(domainid_t)/sizeof(uint8_t)*len);
667    if (err_is_fail(resp_err)) {
668        DEBUG_ERR(resp_err, "failed to send wait_response");
669    }
670}
671
672/**
673 * \brief Handler for rpc get_status.
674 */
675static void get_status_handler(struct proc_mgmt_binding *b, domainid_t domain)
676{
677    errval_t err;
678    struct domain_entry* entry;
679    proc_mgmt_ps_entry_t pse;
680    memset(&pse, 0, sizeof(pse));
681
682    err = domain_get_by_id(domain, &entry);
683    if (err_is_fail(err)) {
684        err = b->tx_vtbl.get_status_response(b, NOP_CONT, pse, NULL, 0,
685                                             err);
686        if(err_is_fail(err)) {
687            DEBUG_ERR(err, "status_response");
688        }
689    }
690
691    pse.status = entry->status;
692
693    err = b->tx_vtbl.get_status_response(b, NOP_CONT, pse, entry->argbuf, entry->argbytes,
694                                         SYS_ERR_OK);
695    if(err_is_fail(err)) {
696        DEBUG_ERR(err, "status_response");
697    }
698}
699
// Receive vtable installed on the privileged LMP binding with the local
// monitor; it is the only binding allowed to announce new spawnds.
static struct proc_mgmt_rx_vtbl monitor_vtbl = {
    .add_spawnd           = add_spawnd_handler,
    .spawn_call           = spawn_handler,
    .spawn_with_caps_call = spawn_with_caps_handler,
    .span_call            = span_handler,
    .kill_call            = kill_handler,
    .exit_call            = exit_handler,
    .wait_call            = wait_handler
};
709
// Receive vtable for ordinary client bindings: add_spawnd is ignored, and
// the query operations (get_domainlist/get_status) are additionally exposed.
static struct proc_mgmt_rx_vtbl non_monitor_vtbl = {
    .add_spawnd           = add_spawnd_handler_non_monitor,
    .spawn_call           = spawn_handler,
    .spawn_with_caps_call = spawn_with_caps_handler,
    .span_call            = span_handler,
    .kill_call            = kill_handler,
    .exit_call            = exit_handler,
    .wait_call            = wait_handler,
    .get_domainlist_call  = get_domainlist_handler,
    .get_status_call      = get_status_handler
};
721
722/**
723 * \brief Allocates a special LMP endpoint for authenticating with the monitor.
724 */
725static errval_t alloc_ep_for_monitor(struct capref *ep)
726{
727    struct proc_mgmt_lmp_binding *lmpb =
728        malloc(sizeof(struct proc_mgmt_lmp_binding));
729    assert(lmpb != NULL);
730
731    // setup our end of the binding
732    errval_t err = proc_mgmt_client_lmp_accept(lmpb, get_default_waitset(),
733                                               DEFAULT_LMP_BUF_WORDS);
734    if (err_is_fail(err)) {
735        free(lmpb);
736        return err_push(err, LIB_ERR_PROC_MGMT_CLIENT_ACCEPT);
737    }
738
739    *ep = lmpb->chan.local_cap;
740    lmpb->b.rx_vtbl = monitor_vtbl;
741
742    return SYS_ERR_OK;
743}
744
745static void export_cb(void *st, errval_t err, iref_t iref)
746{
747    if (err_is_fail(err)) {
748        USER_PANIC_ERR(err, "export failed");
749    }
750
751    // Allocate an endpoint for the local monitor, who will use it to inform
752    // us about new spawnd irefs on behalf of other monitors.
753    struct capref ep;
754    err = alloc_ep_for_monitor(&ep);
755    if (err_is_fail(err)) {
756        USER_PANIC_ERR(err, "failed to allocate LMP EP for local monitor");
757    }
758
759    // Send the endpoint to the monitor, so it can finish the handshake.
760    struct monitor_binding *mb = get_monitor_binding();
761    err = mb->tx_vtbl.set_proc_mgmt_ep_request(mb, NOP_CONT, ep);
762    if (err_is_fail(err)) {
763        USER_PANIC_ERR(err, "failed to send set_proc_mgmt_ep_request to "
764                       "monitor");
765    }
766
767    // Also register this iref with the name service, for arbitrary client
768    // domains to use for spawn-related ops.
769    err = nameservice_register(SERVICE_BASENAME, iref);
770    if (err_is_fail(err)) {
771        USER_PANIC_ERR(err, "nameservice_register failed");
772    }
773}
774
775static errval_t connect_cb(void *st, struct proc_mgmt_binding *b)
776{
777    b->rx_vtbl = non_monitor_vtbl;
778    return SYS_ERR_OK;
779}
780
781errval_t start_service(void)
782{
783    errval_t err = domain_prealloc_caps();
784    if (err_is_fail(err)) {
785        USER_PANIC_ERR(err_push(err, PROC_MGMT_ERR_CREATE_DOMAIN_CAP),
786                       "domain_prealloc_caps in start_service");
787    }
788
789    return proc_mgmt_export(NULL, export_cb, connect_cb, get_default_waitset(),
790            IDC_EXPORT_FLAGS_DEFAULT);
791}
792