/**
 * \file
 * \brief x86_64 kernel bootup code.
 */

/*
 * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#include <kernel.h>
#include <string.h>
#include <paging_kernel_arch.h>
#include <elf/elf.h>
#include <kernel_multiboot.h>
#include <irq.h>
#include <init.h>
#include <barrelfish_kpi/cpu.h>
#include <exec.h>
#include <getopt/getopt.h>
#include <dispatch.h>
#include <barrelfish_kpi/init.h>
#include <arch/x86/apic.h>
#include <barrelfish_kpi/paging_arch.h>
#include <barrelfish_kpi/syscalls.h>
#include <target/x86/barrelfish_kpi/coredata_target.h>
#include <kputchar.h>
#include <startup.h>
#include <arch/x86/startup_x86.h>
#include <arch/x86/start_aps.h>
#include <barrelfish/systime.h>

#include <xeon_phi.h>
#include <xeon_phi/xeon_phi.h>

/// Quick way to find the base address of a cnode capability
#define CNODE(cte)     get_address(&(cte)->cap)

/**
 * init's needed boot pages.
 */
#define INIT_PDPT_SIZE          X86_64_PDPT_ENTRIES(K1OM_INIT_SPACE_LIMIT)
#define INIT_PDIR_SIZE          X86_64_PDIR_ENTRIES(K1OM_INIT_SPACE_LIMIT)
#define INIT_PTABLE_SIZE        X86_64_PTABLE_ENTRIES(K1OM_INIT_SPACE_LIMIT)
#define INIT_PAGE_BITMAP        X86_64_PTABLE_PRESENT

/// Pointer to bootinfo structure for init
static struct bootinfo *bootinfo = (struct bootinfo *)BOOTINFO_BASE;

struct spawn_state spawn_state;

/**
 * Page map level 4 table for init user address space.
 */
static union x86_64_pdir_entry *init_pml4; //[PTABLE_SIZE]

/**
 * Page directory pointer table for init user address space.
 */
static union x86_64_pdir_entry *init_pdpt; //[INIT_PDPT_SIZE][PTABLE_SIZE]

/**
 * Page directory for init user address space.
 */
static union x86_64_pdir_entry *init_pdir; //[INIT_PDPT_SIZE][INIT_PDIR_SIZE][PTABLE_SIZE]

/**
 * Page tables for init user address space.
 */
static union x86_64_ptable_entry *init_ptable; //[INIT_PDPT_SIZE][INIT_PDIR_SIZE][INIT_PTABLE_SIZE][PTABLE_SIZE]

/**
 * \brief Convert ELF flags to page flags.
 *
 * \param flags ELF64 program segment flags.
 *
 * \return Page flags.
 *
 * Not all combinations may be supported by an architecture.
 */
static uint64_t paging_elf_to_page_flags(uint32_t flags)
{
    uint64_t pageflags = 0;

    pageflags |= flags & PF_R ? PTABLE_USER_SUPERVISOR : 0;
    pageflags |= flags & PF_W ? PTABLE_READ_WRITE : 0;
    pageflags |= flags & PF_X ? 0 : PTABLE_EXECUTE_DISABLE;

    return pageflags;
}
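
/*
 * Illustrative example (not exercised verbatim in this file): a read+execute
 * text segment (PF_R|PF_X) maps to PTABLE_USER_SUPERVISOR with execution
 * permitted, whereas a read+write data segment (PF_R|PF_W) yields
 * PTABLE_USER_SUPERVISOR | PTABLE_READ_WRITE | PTABLE_EXECUTE_DISABLE,
 * since the NX bit is set whenever PF_X is absent:
 *
 *   uint64_t text_flags = paging_elf_to_page_flags(PF_R | PF_X);
 *   uint64_t data_flags = paging_elf_to_page_flags(PF_R | PF_W);
 */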

/**
 * \brief Map init user-space memory.
 *
 * This function maps pages of the init user-space module. It expects
 * the virtual base address 'vbase' of a program segment of the init
 * executable, its size 'size' and its ELF64 access control flags. It maps
 * pages to the sequential area of physical memory starting at 'base'. To
 * allocate physical memory frames as you go, use startup_alloc_init()
 * instead.
 *
 * \param vbase Virtual base address of program segment.
 * \param base  Physical base address of program segment.
 * \param size  Size of program segment in bytes.
 * \param flags ELF64 access control flags of program segment.
 */
errval_t startup_map_init(lvaddr_t vbase, lpaddr_t base, size_t size,
                          uint32_t flags)
{
    lvaddr_t vaddr;

    paging_align(&vbase, &base, &size, BASE_PAGE_SIZE);

    assert(vbase + size - K1OM_INIT_VBASE < K1OM_INIT_SPACE_LIMIT);

    // Map pages
    for(vaddr = vbase; vaddr < vbase + size;
        vaddr += BASE_PAGE_SIZE, base += BASE_PAGE_SIZE) {
        lvaddr_t baddr = vaddr - K1OM_INIT_VBASE;
        union x86_64_ptable_entry *ptable_base = &init_ptable[
                    X86_64_PML4_BASE(baddr) * X86_64_PTABLE_SIZE *
                    X86_64_PTABLE_SIZE * X86_64_PTABLE_SIZE +
                    X86_64_PDPT_BASE(baddr) * X86_64_PTABLE_SIZE *
                    X86_64_PTABLE_SIZE + X86_64_PDIR_BASE(baddr) *
                    X86_64_PTABLE_SIZE + X86_64_PTABLE_BASE(vaddr)];

        debug(SUBSYS_PAGING, "Mapping 4K page: vaddr = 0x%lx, base = 0x%lx, "
              "PML4_BASE = %lu, PDPT_BASE = %lu, PDIR_BASE = %lu, "
              "PTABLE_BASE = %lu -- ", vaddr, base, X86_64_PML4_BASE(baddr),
              X86_64_PDPT_BASE(baddr), X86_64_PDIR_BASE(baddr),
              X86_64_PTABLE_BASE(baddr));

        if(!X86_64_IS_PRESENT(ptable_base)) {
            debug(SUBSYS_PAGING, "mapped!\n");
            paging_x86_64_map(ptable_base, base,
                              INIT_PAGE_BITMAP | paging_elf_to_page_flags(flags));
        } else {
            debug(SUBSYS_PAGING, "already mapped!\n");
        }
    }

    return SYS_ERR_OK;
}
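
/*
 * Minimal usage sketch, with illustrative values only: mapping an 8 KiB
 * read/write segment whose virtual base 'seg_vbase' lies inside init's
 * address space onto two physical pages starting at 'seg_pbase' (both
 * names hypothetical):
 *
 *   errval_t err = startup_map_init(seg_vbase, seg_pbase,
 *                                   2 * BASE_PAGE_SIZE, PF_R | PF_W);
 *   assert(err_is_ok(err));
 *
 * startup_alloc_init() (passed to elf_load() below) is the
 * allocate-as-you-go path mentioned in the comment above.
 */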

/// Create physical address range or RAM caps to unused physical memory
static void create_phys_caps(lpaddr_t init_alloc_addr)
{
    errval_t err;

    // map the first megabyte of RAM, which contains assorted BIOS tables
    err = create_caps_to_cnode(0, K1OM_START_KERNEL_PHYS,
                               RegionType_PlatformData, &spawn_state, bootinfo);
    assert(err_is_ok(err));

    char *mmap_addr = MBADDR_ASSTRING(glbl_core_data->mmap_addr);
    lpaddr_t last_end_addr = 0;

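    /*
     * Sort the multiboot memory map by ascending base address using a
     * simple bubble sort: the gap-detection logic below relies on walking
     * the regions in address order so that last_end_addr grows
     * monotonically, and the map is not guaranteed to arrive sorted. Note
     * that the sort assumes fixed-size entries of
     * sizeof(struct multiboot_mmap), while the walk below advances by each
     * entry's self-reported size field.
     */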
    struct multiboot_mmap *mbi_mmaps = (struct multiboot_mmap *)mmap_addr;
    bool swapped;
    do {
        swapped = false;
        for (uint32_t i = 1; i < glbl_core_data->mmap_length / sizeof(struct multiboot_mmap); ++i) {
            if (mbi_mmaps[i-1].base_addr > mbi_mmaps[i].base_addr) {
                struct multiboot_mmap tmp = mbi_mmaps[i-1];
                mbi_mmaps[i-1] = mbi_mmaps[i];
                mbi_mmaps[i] = tmp;
                swapped = true;
            }
        }
    } while(swapped);

    for(char *m = mmap_addr; m < mmap_addr + glbl_core_data->mmap_length;) {
        struct multiboot_mmap *mmap = (struct multiboot_mmap * SAFE)TC(m);

        debug(SUBSYS_STARTUP, "MMAP %lx--%lx Type %u\n",
              mmap->base_addr, mmap->base_addr + mmap->length,
              mmap->type);

        if (last_end_addr >= init_alloc_addr
            && mmap->base_addr > last_end_addr) {
            /* we have a gap between regions. add this as a physaddr range */
            debug(SUBSYS_STARTUP, "physical address range %lx--%lx\n",
                  last_end_addr, mmap->base_addr);

            err = create_caps_to_cnode(last_end_addr,
                                       mmap->base_addr - last_end_addr,
                                       RegionType_PhyAddr, &spawn_state, bootinfo);
            assert(err_is_ok(err));
        }

        if (mmap->type == MULTIBOOT_MEM_TYPE_RAM) {
            genpaddr_t base_addr = mmap->base_addr;
            genpaddr_t end_addr = base_addr + mmap->length;

            // only map the rest of RAM which is greater than init_alloc_addr
            if (end_addr > local_phys_to_gen_phys(init_alloc_addr)) {
                if (base_addr < local_phys_to_gen_phys(init_alloc_addr)) {
                    base_addr = local_phys_to_gen_phys(init_alloc_addr);
                }
                debug(SUBSYS_STARTUP, "RAM %lx--%lx\n", base_addr, end_addr);
                err = create_caps_to_cnode(base_addr, end_addr - base_addr,
                                           RegionType_Empty, &spawn_state, bootinfo);
                if (err_no(err) == SYS_ERR_SLOTS_IN_USE) {
                    printk(LOG_WARN, "not able to create RAM caps for all physical memory in the system, CNode full\n");
                }
                assert(err_is_ok(err));
            }
        } else if (mmap->base_addr > local_phys_to_gen_phys(init_alloc_addr)) {
            /* XXX: The multiboot spec just says that mapping types other than
             * RAM are "reserved", but GRUB always maps the ACPI tables as type
             * 3, and things like the IOAPIC tend to show up as type 2 or 4,
             * so we map all these regions as platform data
             */
            debug(SUBSYS_STARTUP, "platform %lx--%lx\n", mmap->base_addr,
                  mmap->base_addr + mmap->length);
            assert(mmap->base_addr > local_phys_to_gen_phys(init_alloc_addr));
            err = create_caps_to_cnode(mmap->base_addr, mmap->length,
                                       RegionType_PlatformData, &spawn_state, bootinfo);
            assert(err_is_ok(err));
        }

        last_end_addr = mmap->base_addr + mmap->length;
        m += mmap->size;
    }

    assert(last_end_addr != 0);
}

#define NEEDED_KERNEL_SPACE \
    ((SIZE_KERNEL_IMAGE & 0xfff) == 0 ? \
    SIZE_KERNEL_IMAGE : \
    (SIZE_KERNEL_IMAGE & 0xfffffffffffff000) + 0x1000)
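
/*
 * Worked example of the rounding above: SIZE_KERNEL_IMAGE = 0x12345 has
 * nonzero low bits, so NEEDED_KERNEL_SPACE evaluates to
 * (0x12345 & 0xfffffffffffff000) + 0x1000 = 0x13000, i.e. the image size
 * rounded up to the next 4 KiB page; an already page-aligned size such as
 * 0x13000 is returned unchanged.
 */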

#define OBJSPERPAGE_CTE         (1UL << (BASE_PAGE_BITS - OBJBITS_CTE))

static void init_page_tables(struct spawn_state *st, alloc_phys_func alloc_phys)
{
    /* Allocate memory for init's page tables */
    init_pml4 = (void *)local_phys_to_mem(
                alloc_phys(X86_64_PTABLE_SIZE * sizeof(union x86_64_pdir_entry)));
    init_pdpt = (void *)local_phys_to_mem(
                alloc_phys(X86_64_PTABLE_SIZE * INIT_PDPT_SIZE
                           * sizeof(union x86_64_pdir_entry)));
    init_pdir = (void *)local_phys_to_mem(
                alloc_phys(X86_64_PTABLE_SIZE * INIT_PDPT_SIZE * INIT_PDIR_SIZE
                           * sizeof(union x86_64_pdir_entry)));
    init_ptable = (void *)local_phys_to_mem(
                alloc_phys(X86_64_PTABLE_SIZE * INIT_PDPT_SIZE * INIT_PDIR_SIZE
                           * INIT_PTABLE_SIZE * sizeof(union x86_64_ptable_entry)));

    /* Initialize init page tables */
    for(size_t i = 0; i < INIT_PDPT_SIZE; i++) {
        paging_x86_64_clear_pdir(&init_pdpt[i]);
        for(size_t j = 0; j < INIT_PDIR_SIZE; j++) {
            paging_x86_64_clear_pdir(&init_pdir[i * PTABLE_SIZE + j]);
            for(size_t k = 0; k < INIT_PTABLE_SIZE; k++) {
                paging_x86_64_clear_ptable(
                &init_ptable[i * PTABLE_SIZE * PTABLE_SIZE + j * PTABLE_SIZE + k]);
            }
        }
    }
    /* Map page tables into pageCN */
    int     pagecn_pagemap = 0;
    // Map PML4 (slot 0 in pagecn)
    caps_create_new(ObjType_VNode_x86_64_pml4, mem_to_local_phys((lvaddr_t)init_pml4),
                    BASE_PAGE_SIZE, 0, my_core_id,
                    caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
    // Map PDPT into successive slots in pagecn
    for(size_t i = 0; i < INIT_PDPT_SIZE; i++) {
        caps_create_new(ObjType_VNode_x86_64_pdpt,
                        mem_to_local_phys((lvaddr_t)init_pdpt) + i * BASE_PAGE_SIZE,
                        BASE_PAGE_SIZE, 0, my_core_id,
                        caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
    }
    // Map PDIR into successive slots in pagecn
    for(size_t i = 0; i < INIT_PDIR_SIZE; i++) {
        caps_create_new(ObjType_VNode_x86_64_pdir,
                        mem_to_local_phys((lvaddr_t)init_pdir) + i * BASE_PAGE_SIZE,
                        BASE_PAGE_SIZE, 0, my_core_id,
                        caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
    }
    // Map page tables into successive slots in pagecn
    for(size_t i = 0; i < INIT_PTABLE_SIZE; i++) {
        caps_create_new(ObjType_VNode_x86_64_ptable,
                        mem_to_local_phys((lvaddr_t)init_ptable) + i * BASE_PAGE_SIZE,
                        BASE_PAGE_SIZE, 0, my_core_id,
                        caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
    }
    // Connect all page tables to page directories.
    // init's memory manager expects page tables within the pagecn to
    // already be connected to the corresponding directories. To avoid
    // unnecessary special cases, we connect them here.
    for(lvaddr_t vaddr = K1OM_INIT_VBASE; vaddr < K1OM_INIT_SPACE_LIMIT;
        vaddr += BASE_PAGE_SIZE) {
        lvaddr_t baddr = vaddr - K1OM_INIT_VBASE;
        union x86_64_pdir_entry *pml4_base, *pdpt_base, *pdir_base;
        union x86_64_ptable_entry *ptable_base;
        pml4_base = &init_pml4[X86_64_PML4_BASE(vaddr)];
        pdpt_base = &init_pdpt[X86_64_PML4_BASE(baddr) * X86_64_PTABLE_SIZE +
                               X86_64_PDPT_BASE(vaddr)];
        pdir_base = &init_pdir[X86_64_PML4_BASE(baddr) * X86_64_PTABLE_SIZE *
                               X86_64_PTABLE_SIZE
                               + X86_64_PDPT_BASE(baddr) * X86_64_PTABLE_SIZE
                               + X86_64_PDIR_BASE(vaddr)];
        ptable_base = &init_ptable[X86_64_PML4_BASE(baddr) * X86_64_PTABLE_SIZE *
                                   X86_64_PTABLE_SIZE * X86_64_PTABLE_SIZE +
                                   X86_64_PDPT_BASE(baddr) * X86_64_PTABLE_SIZE *
                                   X86_64_PTABLE_SIZE + X86_64_PDIR_BASE(baddr) *
                                   X86_64_PTABLE_SIZE + X86_64_PTABLE_BASE(vaddr)];

        paging_x86_64_map_table(pml4_base, mem_to_local_phys((lvaddr_t)pdpt_base));
        paging_x86_64_map_table(pdpt_base, mem_to_local_phys((lvaddr_t)pdir_base));
        paging_x86_64_map_table(pdir_base, mem_to_local_phys((lvaddr_t)ptable_base));
    }

    /* Initialize and switch to init's PML4 */
    paging_x86_64_make_good_pml4(mem_to_local_phys((lvaddr_t)init_pml4));
    paging_x86_64_context_switch(mem_to_local_phys((lvaddr_t)init_pml4));

    /***** VSpace available now *****/
}
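
/*
 * A worked example of the flattened index arithmetic used above and in
 * startup_map_init(), with an illustrative offset: for
 * vaddr = K1OM_INIT_VBASE + 0x201000, baddr = 0x201000, so
 * X86_64_PML4_BASE(baddr) = 0, X86_64_PDPT_BASE(baddr) = 0,
 * X86_64_PDIR_BASE(baddr) = 1 and X86_64_PTABLE_BASE(baddr) = 1, giving
 * flat index 1 * X86_64_PTABLE_SIZE + 1 = 513 into init_ptable. The
 * within-table index uses vaddr while the table-selection indices use
 * baddr; the two conventions agree as long as K1OM_INIT_VBASE is suitably
 * aligned (2 MiB for the page-table level).
 */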

static struct dcb *spawn_init_common(struct spawn_state *st, const char *name,
                                     int argc, const char *argv[],
                                     lpaddr_t bootinfo_phys,
                                     alloc_phys_func alloc_phys,
                                     alloc_phys_aligned_func alloc_phys_aligned)
{
    errval_t err;

    /* Perform arch-independent spawn */
    lvaddr_t paramaddr;
    struct dcb *init_dcb = spawn_module(st, name, argc, argv, bootinfo_phys,
                                        ARGS_BASE, alloc_phys, alloc_phys_aligned,
                                        &paramaddr);

    /* Init page tables */
    init_page_tables(st, alloc_phys);

    /* Map cmdline args R/W into VSpace at ARGS_BASE */
    paging_x86_64_map_table(&init_pml4[X86_64_PML4_BASE(ARGS_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdpt));
    paging_x86_64_map_table(&init_pdpt[X86_64_PDPT_BASE(ARGS_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdir));
    paging_x86_64_map_table(&init_pdir[X86_64_PDIR_BASE(ARGS_BASE)],
                            mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < ARGS_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_64_map(&init_ptable[X86_64_PTABLE_BASE(ARGS_BASE) + i],
                          st->args_page + i * BASE_PAGE_SIZE,
                          INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R|PF_W));
    }

    /* Map dispatcher frame R/W into VSpace */
    paging_x86_64_map_table(&init_pml4[X86_64_PML4_BASE(DISPATCHER_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdpt));
    paging_x86_64_map_table(&init_pdpt[X86_64_PDPT_BASE(DISPATCHER_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdir));
    paging_x86_64_map_table(&init_pdir[X86_64_PDIR_BASE(DISPATCHER_BASE)],
                            mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < (1 << DISPATCHER_FRAME_BITS) / BASE_PAGE_SIZE; i++) {
        paging_x86_64_map(&init_ptable[X86_64_PTABLE_BASE(DISPATCHER_BASE) + i],
                          mem_to_local_phys(init_dcb->disp) + i * BASE_PAGE_SIZE,
                          INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R|PF_W));
    }

    struct dispatcher_shared_generic *init_disp =
        get_dispatcher_shared_generic(init_dcb->disp);
    struct dispatcher_shared_x86_64 *init_disp_x86_64 =
        get_dispatcher_shared_x86_64(init_dcb->disp);

    registers_set_param(&init_disp_x86_64->enabled_save_area, paramaddr);

    // Map system memory cap in task cnode
    struct cte *iocap = caps_locate_slot(CNODE(st->taskcn), TASKCN_SLOT_SYSMEM);
    err = caps_create_new(ObjType_DevFrame, XEON_PHI_SYSMEM_BASE,
                          1UL << XEON_PHI_SYSMEM_SIZE_BITS,
                          1UL << XEON_PHI_SYSMEM_SIZE_BITS,
                          my_core_id, iocap);
    assert(err_is_ok(err));

    /*
     * XXX: there is no IO on the Xeon Phi; we use this slot to put in the
     *      capability to the host memory, as this can be seen as IO
     */
    struct cte *mmiocap = caps_locate_slot(CNODE(st->taskcn), TASKCN_SLOT_IO);
    err = caps_create_new(ObjType_DevFrame, XEON_PHI_SBOX_BASE,
                          1UL << XEON_PHI_SBOX_SIZE_BITS,
                          1UL << XEON_PHI_SBOX_SIZE_BITS,
                          my_core_id, mmiocap);
    assert(err_is_ok(err));

    struct cte *coreboot = caps_locate_slot(CNODE(st->taskcn), TASKCN_SLOT_COREBOOT);
    // XXX: make the 64k below a named constant
    err = caps_create_new(ObjType_DevFrame, 0, 65536, 65536, my_core_id, coreboot);
    assert(err_is_ok(err));

    /* Set fields in DCB */
    // Set VSpace
    init_dcb->vspace = mem_to_local_phys((lvaddr_t)init_pml4);

    // init dispatcher
    init_disp->disabled = true;
    strncpy(init_disp->name, argv[0], DISP_NAME_LEN);

    /* tell init the vspace addr of its dispatcher */
    init_disp->udisp = DISPATCHER_BASE;

    init_disp->xeon_phi_id = glbl_core_data->xeon_phi_id;

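    /*
     * Initial register state for init's disabled save area. When the kernel
     * first dispatches init, execution starts at the entry point stored in
     * rip by the callers below (spawn_bsp_init()/spawn_app_init()), with the
     * dispatcher's user-space address passed in rdi as the first argument
     * per the System V AMD64 calling convention.
     */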
    init_disp_x86_64->disabled_save_area.rdi = DISPATCHER_BASE;
    init_disp_x86_64->disabled_save_area.fs = 0;
    init_disp_x86_64->disabled_save_area.gs = 0;
    init_disp_x86_64->disabled_save_area.eflags = USER_EFLAGS;
    init_disp_x86_64->disabled_save_area.fxsave_area.fcw = 0x037f; // x87 default
    init_disp_x86_64->disabled_save_area.fxsave_area.mxcsr = 0x00200000;

    // Set up systime frequency
    init_disp->systime_frequency = systime_frequency;

    return init_dcb;
}

struct dcb *spawn_bsp_init(const char *name)
{
    errval_t err;

    /* Only the first core can run this code */
    assert(apic_is_bsp());

    /* Allocate bootinfo */
    lpaddr_t bootinfo_phys = bsp_alloc_phys(BOOTINFO_SIZE);
    memset((void *)local_phys_to_mem(bootinfo_phys), 0, BOOTINFO_SIZE);

    /* Construct cmdline args */
    char bootinfochar[16];
    snprintf(bootinfochar, sizeof(bootinfochar), "%lu", BOOTINFO_BASE);
    const char *argv[] = { "init", bootinfochar };

    struct dcb *init_dcb = spawn_init_common(&spawn_state, name,
                                             ARRAY_LENGTH(argv), argv,
                                             bootinfo_phys, bsp_alloc_phys,
                                             bsp_alloc_phys_aligned);

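    /*
     * Note that the indices below are hard-coded: init_pml4[0] and
     * init_pdpt[0] assume BOOTINFO_BASE lies in the first PML4/PDPT slot,
     * and init_pdir[1] assumes it falls in the second 2 MiB region
     * (0x200000--0x3fffff) of the address space.
     */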
    /* Map bootinfo R/W into VSpace at vaddr BOOTINFO_BASE */
    paging_x86_64_map_table(&init_pml4[0], mem_to_local_phys((lvaddr_t)init_pdpt));
    paging_x86_64_map_table(&init_pdpt[0], mem_to_local_phys((lvaddr_t)init_pdir));
    paging_x86_64_map_table(&init_pdir[1], mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < BOOTINFO_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_64_map(&init_ptable[i], bootinfo_phys + i * BASE_PAGE_SIZE,
                   INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R|PF_W));
    }

    /* Load init ELF64 binary from multiboot */
    struct multiboot_modinfo *module = multiboot_find_module(name);
    if (module == NULL) {
        panic("Could not find init module!");
    }
    lvaddr_t init_ep;
    err = elf_load(EM_K1OM, startup_alloc_init, &spawn_state,
                   local_phys_to_mem(module->mod_start),
                   MULTIBOOT_MODULE_SIZE(*module), &init_ep);
    if (err_is_fail(err)) {
        //err_print_calltrace(err);
        panic("ELF load of init module failed!");
    }

    struct dispatcher_shared_x86_64 *init_disp_x86_64 =
        get_dispatcher_shared_x86_64(init_dcb->disp);
    init_disp_x86_64->disabled_save_area.rip = init_ep;

    /* Create caps for init to use */
    create_module_caps(&spawn_state);
    lpaddr_t init_alloc_end = bsp_alloc_phys(0);
    create_phys_caps(init_alloc_end);

    /* Fill bootinfo struct */
    bootinfo->mem_spawn_core = NEEDED_KERNEL_SPACE; // Size of kernel

    bootinfo->host_msg = glbl_core_data->bp->msg_base;
    bootinfo->host_msg_bits = glbl_core_data->bp->msg_size_bits;

    return init_dcb;
}

struct dcb *spawn_app_init(struct x86_core_data *core_data, const char *name)
{
    errval_t err;

    /* Construct cmdline args */
    // Core id of the core that booted this core
    char coreidchar[16];
    snprintf(coreidchar, sizeof(coreidchar), "%d", core_data->src_core_id);

    // IPI channel id of core that booted this core
    char chanidchar[30];
    snprintf(chanidchar, sizeof(chanidchar), "chanid=%d", core_data->chan_id);

    // Arch id of the core that booted this core
    char archidchar[30];
    snprintf(archidchar, sizeof(archidchar), "archid=%" PRIuHWID,
             core_data->src_arch_id);

    const char *argv[] = { name, coreidchar, chanidchar, archidchar };

    struct dcb *init_dcb = spawn_init_common(&spawn_state, name,
                                             ARRAY_LENGTH(argv), argv,
                                             0, app_alloc_phys,
                                             app_alloc_phys_aligned);

    // URPC frame cap
    struct cte *urpc_frame_cte = caps_locate_slot(CNODE(spawn_state.taskcn),
                                                  TASKCN_SLOT_MON_URPC);
    // use the fact that the cap is foreign to avoid zeroing it
    assert(core_data->src_core_id != my_core_id);
    err = caps_create_new(ObjType_Frame, core_data->urpc_frame_base,
                          1UL << core_data->urpc_frame_bits,
                          1UL << core_data->urpc_frame_bits, core_data->src_core_id,
                          urpc_frame_cte);
    assert(err_is_ok(err));
    lpaddr_t urpc_ptr = gen_phys_to_local_phys(urpc_frame_cte->cap.u.frame.base);

    /* Map URPC frame at MON_URPC_BASE */
    paging_x86_64_map_table(&init_pml4[X86_64_PML4_BASE(MON_URPC_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdpt));
    paging_x86_64_map_table(&init_pdpt[X86_64_PDPT_BASE(MON_URPC_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdir));
    paging_x86_64_map_table(&init_pdir[X86_64_PDIR_BASE(MON_URPC_BASE)],
                            mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < MON_URPC_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_64_map(&init_ptable[X86_64_PTABLE_BASE(MON_URPC_BASE) + i],
                          urpc_ptr + i * BASE_PAGE_SIZE,
                          INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R | PF_W));
    }

    /* Load the monitor binary */
    genvaddr_t entry_point;
    err = elf_load(EM_K1OM, startup_alloc_init, &spawn_state,
                   local_phys_to_mem(core_data->monitor_binary),
                   core_data->monitor_binary_size, &entry_point);
    if (err_is_fail(err)) {
        //err_print_calltrace(err);
        panic("ELF load of monitor binary failed!");
    }

    struct dispatcher_shared_x86_64 *init_disp_x86_64 =
        get_dispatcher_shared_x86_64(init_dcb->disp);
    init_disp_x86_64->disabled_save_area.rip = entry_point;

    return init_dcb;
}