/**
 * \file
 * \brief x86_64 kernel bootup code.
 */

/*
 * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */

#include <kernel.h>
#include <string.h>
#include <paging_kernel_arch.h>
#include <elf/elf.h>
#include <kernel_multiboot.h>
#include <irq.h>
#include <init.h>
#include <barrelfish_kpi/cpu.h>
#include <exec.h>
#include <getopt/getopt.h>
#include <dispatch.h>
#include <barrelfish_kpi/init.h>
#include <arch/x86/apic.h>
#include <barrelfish_kpi/paging_arch.h>
#include <barrelfish_kpi/syscalls.h>
#include <target/x86/barrelfish_kpi/coredata_target.h>
#include <kputchar.h>
#include <startup.h>
#include <arch/x86/startup_x86.h>
#include <arch/x86/start_aps.h>
#include <barrelfish/systime.h>

/// Quick way to find the base address of a cnode capability
#define CNODE(cte)     get_address(&(cte)->cap)

/**
 * Page-table entry counts and flags needed to map init's boot pages.
 */
#define INIT_PDPT_SIZE          X86_64_PDPT_ENTRIES(X86_64_INIT_SPACE_LIMIT)
#define INIT_PDIR_SIZE          X86_64_PDIR_ENTRIES(X86_64_INIT_SPACE_LIMIT)
#define INIT_PTABLE_SIZE        X86_64_PTABLE_ENTRIES(X86_64_INIT_SPACE_LIMIT)
#define INIT_PAGE_BITMAP        X86_64_PTABLE_PRESENT

/// Pointer to bootinfo structure for init
static struct bootinfo *bootinfo = (struct bootinfo *)BOOTINFO_BASE;

struct spawn_state spawn_state;

/**
 * Page map level 4 table for init user address space.
 */
static union x86_64_pdir_entry *init_pml4; //[PTABLE_SIZE]

/**
 * Page directory pointer table for init user address space.
 */
static union x86_64_pdir_entry *init_pdpt; //[INIT_PDPT_SIZE][PTABLE_SIZE]

/**
 * Page directory for init user address space.
 */
static union x86_64_pdir_entry *init_pdir; //[INIT_PDPT_SIZE][INIT_PDIR_SIZE][PTABLE_SIZE]

/**
 * Page tables for init user address space.
 */
static union x86_64_ptable_entry *init_ptable; //[INIT_PDPT_SIZE][INIT_PDIR_SIZE][INIT_PTABLE_SIZE][PTABLE_SIZE]

/**
 * \brief Convert ELF flags to page flags.
 *
 * \param flags ELF64 program segment flags.
 *
 * \return Page flags.
 *
 * Not all combinations may be supported by an architecture.
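 *
 * For example, a read-execute segment (PF_R | PF_X) yields
 * PTABLE_USER_SUPERVISOR with the execute-disable bit left clear, while a
 * read-write data segment (PF_R | PF_W) additionally sets
 * PTABLE_READ_WRITE and PTABLE_EXECUTE_DISABLE.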
 */
static uint64_t paging_elf_to_page_flags(uint32_t flags)
{
    uint64_t pageflags = 0;

    pageflags |= flags & PF_R ? PTABLE_USER_SUPERVISOR : 0;
    pageflags |= flags & PF_W ? PTABLE_READ_WRITE : 0;
    pageflags |= flags & PF_X ? 0 : PTABLE_EXECUTE_DISABLE;

    return pageflags;
}

/**
 * \brief Map init user-space memory.
 *
 * This function maps pages of the init user-space module. It expects the
 * virtual base address 'vbase' of a program segment of the init executable,
 * its size 'size' and its ELF64 access control flags. It maps pages into a
 * contiguous region of physical memory starting at 'base'. To allocate
 * physical memory frames as you go instead, use startup_alloc_init().
 *
 * \param vbase Virtual base address of program segment.
 * \param base  Physical base address of program segment.
 * \param size  Size of program segment in bytes.
 * \param flags ELF64 access control flags of program segment.
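 *
 * A minimal usage sketch (illustrative only), assuming a hypothetical
 * ELF64 program header 'ph' and physical load address 'seg_base':
 * \code
 * err = startup_map_init(ph->p_vaddr, seg_base, ph->p_memsz, ph->p_flags);
 * \endcode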
 */
errval_t startup_map_init(lvaddr_t vbase, lpaddr_t base, size_t size,
                          uint32_t flags)
{
    lvaddr_t vaddr;

    paging_align(&vbase, &base, &size, BASE_PAGE_SIZE);

    assert(vbase + size - X86_64_INIT_VBASE < X86_64_INIT_SPACE_LIMIT);

    // Map pages
    for(vaddr = vbase; vaddr < vbase + size;
        vaddr += BASE_PAGE_SIZE, base += BASE_PAGE_SIZE) {
        lvaddr_t baddr = vaddr - X86_64_INIT_VBASE;
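        // init_ptable is one flat array spanning the whole init VSpace;
        // the four per-level indices are flattened as
        // ((pml4 * 512 + pdpt) * 512 + pdir) * 512 + ptable,
        // with 512 == X86_64_PTABLE_SIZE: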
        union x86_64_ptable_entry *ptable_base = &init_ptable[
                    X86_64_PML4_BASE(baddr) * X86_64_PTABLE_SIZE *
                    X86_64_PTABLE_SIZE * X86_64_PTABLE_SIZE +
                    X86_64_PDPT_BASE(baddr) * X86_64_PTABLE_SIZE *
                    X86_64_PTABLE_SIZE + X86_64_PDIR_BASE(baddr) *
                    X86_64_PTABLE_SIZE + X86_64_PTABLE_BASE(vaddr)];

        debug(SUBSYS_PAGING, "Mapping 4K page: vaddr = 0x%lx, base = 0x%lx, "
              "PML4_BASE = %lu, PDPT_BASE = %lu, PDIR_BASE = %lu, "
              "PTABLE_BASE = %lu -- ", vaddr, base, X86_64_PML4_BASE(baddr),
              X86_64_PDPT_BASE(baddr), X86_64_PDIR_BASE(baddr),
              X86_64_PTABLE_BASE(baddr));

        if(!X86_64_IS_PRESENT(ptable_base)) {
            debug(SUBSYS_PAGING, "mapped!\n");
            paging_x86_64_map(ptable_base, base,
                              INIT_PAGE_BITMAP | paging_elf_to_page_flags(flags));
        } else {
            debug(SUBSYS_PAGING, "already mapped!\n");
        }
    }

    return SYS_ERR_OK;
}

/// Create physical address range or RAM caps for unused physical memory
static void create_phys_caps(lpaddr_t init_alloc_addr)
{
    errval_t err;

    // map first meg of RAM, which contains lots of crazy BIOS tables
    err = create_caps_to_cnode(0, X86_64_START_KERNEL_PHYS,
                               RegionType_PlatformData, &spawn_state, bootinfo);
    assert(err_is_ok(err));

    char *mmap_addr = MBADDR_ASSTRING(glbl_core_data->mmap_addr);
    lpaddr_t last_end_addr = 0;

    char *clean_mmap_addr;
    uint32_t clean_mmap_length;
    cleanup_bios_regions(mmap_addr, &clean_mmap_addr, &clean_mmap_length);

    for(char *m = clean_mmap_addr; m < clean_mmap_addr + clean_mmap_length;) {
        struct multiboot_mmap *mmap = (struct multiboot_mmap * SAFE)TC(m);

        debug(SUBSYS_STARTUP, "MMAP %lx--%lx Type %u\n",
              mmap->base_addr, mmap->base_addr + mmap->length,
              mmap->type);

        if (last_end_addr >= init_alloc_addr
            && mmap->base_addr > last_end_addr) {
            /* we have a gap between regions. add this as a physaddr range */
            debug(SUBSYS_STARTUP, "physical address range %lx--%lx\n",
                  last_end_addr, mmap->base_addr);

            err = create_caps_to_cnode(last_end_addr,
                                       mmap->base_addr - last_end_addr,
                                       RegionType_PhyAddr, &spawn_state, bootinfo);
            assert(err_is_ok(err));
        }

        if (mmap->type == MULTIBOOT_MEM_TYPE_RAM) {
            genpaddr_t base_addr = mmap->base_addr;
            genpaddr_t end_addr = base_addr + mmap->length;

            // only map the rest of RAM which is greater than init_alloc_addr
            if (end_addr > local_phys_to_gen_phys(init_alloc_addr)) {
                if (base_addr < local_phys_to_gen_phys(init_alloc_addr)) {
                    base_addr = local_phys_to_gen_phys(init_alloc_addr);
                }
                debug(SUBSYS_STARTUP, "RAM %lx--%lx\n", base_addr, end_addr);
                err = create_caps_to_cnode(base_addr, end_addr - base_addr,
                                           RegionType_Empty, &spawn_state, bootinfo);
                if(err_is_fail(err)) {
                    printk(LOG_WARN, "Skipping RAM %lx--%lx...\n", base_addr, end_addr);
                }
                /* assert(err_is_ok(err)); */
            }
        } else if (mmap->base_addr > local_phys_to_gen_phys(init_alloc_addr)) {
            /* XXX: The multiboot spec just says that mapping types other than
             * RAM are "reserved", but GRUB always maps the ACPI tables as type
             * 3, and things like the IOAPIC tend to show up as type 2 or 4,
             * so we map all these regions as platform data
             */
            debug(SUBSYS_STARTUP, "platform %lx--%lx\n", mmap->base_addr,
                  mmap->base_addr + mmap->length);
            assert(mmap->base_addr > local_phys_to_gen_phys(init_alloc_addr));
            err = create_caps_to_cnode(mmap->base_addr, mmap->length,
                                       RegionType_PlatformData, &spawn_state, bootinfo);
            assert(err_is_ok(err));
        }

        last_end_addr = mmap->base_addr + mmap->length;
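        // a multiboot mmap entry's size field does not count the size
        // field itself, hence the extra 4 bytes when advancing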
        m += mmap->size + 4;
    }

    assert(last_end_addr != 0);

    if (last_end_addr < X86_64_PADDR_SPACE_SIZE) {
        /*
         * XXX: with the new machines and the Xeon Phi we need to extend
         *      this range to the full 48bit physical address range
         *      - 2014-05-02, RA
         */
        size_t size = X86_64_PADDR_SPACE_SIZE - last_end_addr;

        debug(SUBSYS_STARTUP, "end physical address range %lx--%lx\n",
              last_end_addr, last_end_addr + size);
        err = create_caps_to_cnode(last_end_addr, size,
                                   RegionType_PhyAddr, &spawn_state, bootinfo);
        assert(err_is_ok(err));
    }
}

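/// Size of the kernel image, rounded up to the next base-page boundary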
#define NEEDED_KERNEL_SPACE \
    ((SIZE_KERNEL_IMAGE & 0xfff) == 0 ? \
    SIZE_KERNEL_IMAGE : \
    (SIZE_KERNEL_IMAGE & 0xfffffffffffff000) + 0x1000)

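/// Number of cte (capability table entry) objects that fit in one base page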
#define OBJSPERPAGE_CTE         (1UL << (BASE_PAGE_BITS - OBJBITS_CTE))

static void init_page_tables(struct spawn_state *st, alloc_phys_func alloc_phys)
{
    /* Allocate memory for init's page tables */
    init_pml4 = (void *)local_phys_to_mem(
                alloc_phys(X86_64_PTABLE_SIZE * sizeof(union x86_64_pdir_entry)));
    init_pdpt = (void *)local_phys_to_mem(
                alloc_phys(X86_64_PTABLE_SIZE * INIT_PDPT_SIZE
                           * sizeof(union x86_64_pdir_entry)));
    init_pdir = (void *)local_phys_to_mem(
                alloc_phys(X86_64_PTABLE_SIZE * INIT_PDPT_SIZE * INIT_PDIR_SIZE
                           * sizeof(union x86_64_pdir_entry)));
    init_ptable = (void *)local_phys_to_mem(
                alloc_phys(X86_64_PTABLE_SIZE * INIT_PDPT_SIZE * INIT_PDIR_SIZE
                           * INIT_PTABLE_SIZE * sizeof(union x86_64_ptable_entry)));
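    // Each level is one physically contiguous, flat array of 4K tables,
    // sized to cover init's VSpace up to X86_64_INIT_SPACE_LIMIT; the
    // flattened index arithmetic below and in startup_map_init() relies
    // on this layout.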

    /* Page table setup */
    /* Initialize init page tables */
    for(size_t i = 0; i < INIT_PDPT_SIZE; i++) {
        paging_x86_64_clear_pdir(&init_pdpt[i]);
        for(size_t j = 0; j < INIT_PDIR_SIZE; j++) {
            paging_x86_64_clear_pdir(&init_pdir[i * PTABLE_SIZE + j]);
            for(size_t k = 0; k < INIT_PTABLE_SIZE; k++) {
                paging_x86_64_clear_ptable(
                &init_ptable[i * PTABLE_SIZE * PTABLE_SIZE + j * PTABLE_SIZE + k]);
            }
        }
    }
    /* Map pagetables into pageCN */
    int pagecn_pagemap = 0;
    // Map PML4 (slot 0 in pagecn)
    caps_create_new(ObjType_VNode_x86_64_pml4, mem_to_local_phys((lvaddr_t)init_pml4),
                    BASE_PAGE_SIZE, 0, my_core_id,
                    caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
    // Map PDPT into successive slots in pagecn
    for(size_t i = 0; i < INIT_PDPT_SIZE; i++) {
        caps_create_new(ObjType_VNode_x86_64_pdpt,
                        mem_to_local_phys((lvaddr_t)init_pdpt) + i * BASE_PAGE_SIZE,
                        BASE_PAGE_SIZE, 0, my_core_id,
                        caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
    }
    // Map PDIR into successive slots in pagecn
    for(size_t i = 0; i < INIT_PDIR_SIZE; i++) {
        caps_create_new(ObjType_VNode_x86_64_pdir,
                        mem_to_local_phys((lvaddr_t)init_pdir) + i * BASE_PAGE_SIZE,
                        BASE_PAGE_SIZE, 0, my_core_id,
                        caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
    }
    // Map page tables into successive slots in pagecn
    for(size_t i = 0; i < INIT_PTABLE_SIZE; i++) {
        caps_create_new(ObjType_VNode_x86_64_ptable,
                        mem_to_local_phys((lvaddr_t)init_ptable) + i * BASE_PAGE_SIZE,
                        BASE_PAGE_SIZE, 0, my_core_id,
                        caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
    }
    // Connect all page tables to page directories.
    // init's memory manager expects page tables within the pagecn to
    // already be connected to the corresponding directories. To avoid
    // unnecessary special cases, we connect them here.
    for(lvaddr_t vaddr = X86_64_INIT_VBASE; vaddr < X86_64_INIT_SPACE_LIMIT;
        vaddr += BASE_PAGE_SIZE) {
        lvaddr_t baddr = vaddr - X86_64_INIT_VBASE;
        union x86_64_pdir_entry *pml4_base, *pdpt_base, *pdir_base;
        union x86_64_ptable_entry *ptable_base;
        pml4_base = &init_pml4[X86_64_PML4_BASE(vaddr)];
        pdpt_base = &init_pdpt[X86_64_PML4_BASE(baddr) * X86_64_PTABLE_SIZE +
                               X86_64_PDPT_BASE(vaddr)];
        pdir_base = &init_pdir[X86_64_PML4_BASE(baddr) * X86_64_PTABLE_SIZE *
                               X86_64_PTABLE_SIZE
                               + X86_64_PDPT_BASE(baddr) * X86_64_PTABLE_SIZE
                               + X86_64_PDIR_BASE(vaddr)];
        ptable_base = &init_ptable[X86_64_PML4_BASE(baddr) * X86_64_PTABLE_SIZE *
                                   X86_64_PTABLE_SIZE * X86_64_PTABLE_SIZE +
                                   X86_64_PDPT_BASE(baddr) * X86_64_PTABLE_SIZE *
                                   X86_64_PTABLE_SIZE + X86_64_PDIR_BASE(baddr) *
                                   X86_64_PTABLE_SIZE + X86_64_PTABLE_BASE(vaddr)];

        paging_x86_64_map_table(pml4_base, mem_to_local_phys((lvaddr_t)pdpt_base));
        paging_x86_64_map_table(pdpt_base, mem_to_local_phys((lvaddr_t)pdir_base));
        paging_x86_64_map_table(pdir_base, mem_to_local_phys((lvaddr_t)ptable_base));
    }

    /* Initialize and switch to init's PML4 */
    paging_x86_64_make_good_pml4(mem_to_local_phys((lvaddr_t)init_pml4));
    paging_x86_64_context_switch(mem_to_local_phys((lvaddr_t)init_pml4));

    /***** VSpace available now *****/
}

static struct dcb *
spawn_init_common(struct spawn_state *st, const char *name, int argc,
                  const char *argv[], lpaddr_t bootinfo_phys,
                  alloc_phys_func alloc_phys,
                  alloc_phys_aligned_func alloc_phys_aligned)
{
    errval_t err;

    /* Perform arch-independent spawn */
    lvaddr_t paramaddr;
    struct dcb *init_dcb = spawn_module(st, name, argc, argv, bootinfo_phys,
                                        ARGS_BASE, alloc_phys,
                                        alloc_phys_aligned, &paramaddr);

    /* Init page tables */
    init_page_tables(st, alloc_phys);

    /* Map cmdline args R/W into VSpace at ARGS_BASE */
    paging_x86_64_map_table(&init_pml4[X86_64_PML4_BASE(ARGS_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdpt));
    paging_x86_64_map_table(&init_pdpt[X86_64_PDPT_BASE(ARGS_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdir));
    paging_x86_64_map_table(&init_pdir[X86_64_PDIR_BASE(ARGS_BASE)],
                            mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < ARGS_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_64_map(&init_ptable[X86_64_PTABLE_BASE(ARGS_BASE) + i],
                          st->args_page + i * BASE_PAGE_SIZE,
                          INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R|PF_W));
    }

    /* Map dispatcher frame R/W into VSpace */
    paging_x86_64_map_table(&init_pml4[X86_64_PML4_BASE(DISPATCHER_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdpt));
    paging_x86_64_map_table(&init_pdpt[X86_64_PDPT_BASE(DISPATCHER_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdir));
    paging_x86_64_map_table(&init_pdir[X86_64_PDIR_BASE(DISPATCHER_BASE)],
                            mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < DISPATCHER_FRAME_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_64_map(&init_ptable[X86_64_PTABLE_BASE(DISPATCHER_BASE) + i],
                          mem_to_local_phys(init_dcb->disp) + i * BASE_PAGE_SIZE,
                          INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R|PF_W));
    }

    struct dispatcher_shared_generic *init_disp =
        get_dispatcher_shared_generic(init_dcb->disp);
    struct dispatcher_shared_x86_64 *init_disp_x86_64 =
        get_dispatcher_shared_x86_64(init_dcb->disp);

    registers_set_param(&init_disp_x86_64->enabled_save_area, paramaddr);

    // Map IO cap in task cnode
    struct cte *iocap = caps_locate_slot(CNODE(st->taskcn), TASKCN_SLOT_IO);
    err = caps_create_new(ObjType_IO, 0, 0, 0, my_core_id, iocap);
    assert(err_is_ok(err));

    /* Set fields in DCB */
    // Set Vspace
    init_dcb->vspace = mem_to_local_phys((lvaddr_t)init_pml4);

    // init dispatcher
    init_disp->disabled = true;
    strncpy(init_disp->name, argv[0], DISP_NAME_LEN);

    /* tell init the vspace addr of its dispatcher */
    init_disp->udisp = DISPATCHER_BASE;

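    // rdi holds the first integer argument in the SysV AMD64 calling
    // convention, so init starts with its dispatcher address as its
    // first argument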
    init_disp_x86_64->disabled_save_area.rdi = DISPATCHER_BASE;
    init_disp_x86_64->disabled_save_area.fs = 0;
    init_disp_x86_64->disabled_save_area.gs = 0;
    init_disp_x86_64->disabled_save_area.eflags = USER_EFLAGS;
    init_disp_x86_64->disabled_save_area.fxsave_area.fcw = 0x037f; // x87 default: all exceptions masked
    init_disp_x86_64->disabled_save_area.fxsave_area.mxcsr = 0x00001f80; // SSE default: all exceptions masked

    // Set up systime frequency
    init_disp->systime_frequency = systime_frequency;

    return init_dcb;
}

struct dcb *spawn_bsp_init(const char *name)
{
    errval_t err;

    /* Only the first core can run this code */
    assert(apic_is_bsp());

    /* Allocate bootinfo */
    lpaddr_t bootinfo_phys = bsp_alloc_phys(BOOTINFO_SIZE);
    memset((void *)local_phys_to_mem(bootinfo_phys), 0, BOOTINFO_SIZE);

    /* Construct cmdline args */
    char bootinfochar[16];
    snprintf(bootinfochar, sizeof(bootinfochar), "%lu", BOOTINFO_BASE);
    const char *argv[] = { "init", bootinfochar };

    struct dcb *init_dcb = spawn_init_common(&spawn_state, name,
                                             ARRAY_LENGTH(argv), argv,
                                             bootinfo_phys, bsp_alloc_phys,
                                             bsp_alloc_phys_aligned);

    /* Map bootinfo R/W into VSpace at vaddr BOOTINFO_BASE */
    paging_x86_64_map_table(&init_pml4[0], mem_to_local_phys((lvaddr_t)init_pdpt));
    paging_x86_64_map_table(&init_pdpt[0], mem_to_local_phys((lvaddr_t)init_pdir));
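    // Note: slot 1 is hard-coded here; it corresponds to
    // X86_64_PDIR_BASE(BOOTINFO_BASE), the 2MB region holding BOOTINFO_BASE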
    paging_x86_64_map_table(&init_pdir[1], mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < BOOTINFO_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_64_map(&init_ptable[i], bootinfo_phys + i * BASE_PAGE_SIZE,
                   INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R|PF_W));
    }

    /* Load init ELF64 binary from multiboot */
    struct multiboot_modinfo *module = multiboot_find_module(name);
    if (module == NULL) {
        panic("Could not find init module!");
    }
    lvaddr_t init_ep;
    err = elf_load(EM_X86_64, startup_alloc_init, &spawn_state,
                   local_phys_to_mem(module->mod_start),
                   MULTIBOOT_MODULE_SIZE(*module), &init_ep);
    if (err_is_fail(err)) {
        //err_print_calltrace(err);
        panic("ELF load of init module failed!");
    }

    struct dispatcher_shared_x86_64 *init_disp_x86_64 =
        get_dispatcher_shared_x86_64(init_dcb->disp);
    init_disp_x86_64->disabled_save_area.rip = init_ep;

    /* Create caps for init to use */
    create_module_caps(&spawn_state);
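    // a zero-sized allocation returns the BSP bump allocator's current
    // watermark, i.e. the end of all early allocations made so far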
    lpaddr_t init_alloc_end = bsp_alloc_phys(0);
    create_phys_caps(init_alloc_end);

    /* Fill bootinfo struct */
    bootinfo->mem_spawn_core = NEEDED_KERNEL_SPACE; // Size of kernel

    return init_dcb;
}

struct dcb *spawn_app_init(struct x86_core_data *core_data, const char *name)
{
    errval_t err;

    /* Construct cmdline args */
    // Core id of the core that booted this core
    char coreidchar[16];
    snprintf(coreidchar, sizeof(coreidchar), "%d", core_data->src_core_id);

    // IPI channel id of core that booted this core
    char chanidchar[30];
    snprintf(chanidchar, sizeof(chanidchar), "chanid=%d", core_data->chan_id);

    // Arch id of the core that booted this core
    char archidchar[30];
    snprintf(archidchar, sizeof(archidchar), "archid=%" PRIuHWID,
             core_data->src_arch_id);

    const char *argv[] = { name, coreidchar, chanidchar, archidchar };

    struct dcb *init_dcb = spawn_init_common(&spawn_state, name,
                                             ARRAY_LENGTH(argv), argv,
                                             0, app_alloc_phys,
                                             app_alloc_phys_aligned);

    // Urpc frame cap
    struct cte *urpc_frame_cte = caps_locate_slot(CNODE(spawn_state.taskcn),
                                                  TASKCN_SLOT_MON_URPC);
    // use fact that cap is foreign to avoid zeroing it
    assert(core_data->src_core_id != my_core_id);
    err = caps_create_new(ObjType_Frame, core_data->urpc_frame_base,
                          1UL << core_data->urpc_frame_bits,
                          1UL << core_data->urpc_frame_bits, core_data->src_core_id,
                          urpc_frame_cte);
    assert(err_is_ok(err));
    lpaddr_t urpc_ptr = gen_phys_to_local_phys(urpc_frame_cte->cap.u.frame.base);

    /* Map urpc frame at MON_URPC_BASE */
    paging_x86_64_map_table(&init_pml4[X86_64_PML4_BASE(MON_URPC_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdpt));
    paging_x86_64_map_table(&init_pdpt[X86_64_PDPT_BASE(MON_URPC_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdir));
    paging_x86_64_map_table(&init_pdir[X86_64_PDIR_BASE(MON_URPC_BASE)],
                            mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < MON_URPC_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_64_map(&init_ptable[X86_64_PTABLE_BASE(MON_URPC_BASE) + i],
                          urpc_ptr + i * BASE_PAGE_SIZE,
                          INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R | PF_W));
    }

    // elf load the domain
    genvaddr_t entry_point;
    err = elf_load(EM_X86_64, startup_alloc_init, &spawn_state,
                   local_phys_to_mem(core_data->monitor_binary),
                   core_data->monitor_binary_size, &entry_point);
    if (err_is_fail(err)) {
        //err_print_calltrace(err);
        panic("ELF load of monitor module failed!");
    }

    struct dispatcher_shared_x86_64 *init_disp_x86_64 =
        get_dispatcher_shared_x86_64(init_dcb->disp);
    init_disp_x86_64->disabled_save_area.rip = entry_point;

    return init_dcb;
}