1/*
2 * Copyright (c) 2009, 2010 ETH Zurich.
3 * All rights reserved.
4 *
5 * This file is distributed under the terms in the attached LICENSE file.
6 * If you do not find this file, copies can be found by writing to:
7 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
8 */
9
10#include <kernel.h>
11#include <bitmacros.h>
12#include <dispatch.h>
13#include <string.h>
14#include <stdio.h>
15
16#include <barrelfish_kpi/init.h>
17#include <barrelfish_kpi/syscalls.h>
18#include <elf/elf.h>
19
20#include <arch/arm/platform.h>
21#include <paging_kernel_arch.h>
22#include <exceptions.h>
23#include <cpiobin.h>
24#include <init.h>
25#include <barrelfish_kpi/paging_arch.h>
26#include <barrelfish_kpi/arm_core_data.h>
27#include <kernel_multiboot.h>
28#include <offsets.h>
29#include <startup_arch.h>
30#include <global.h>
31#include <kcb.h>
32#include <arch/arm/startup_arm.h>
33
34#define CNODE(cte)              get_address(&cte->cap)
35
36#define STARTUP_PROGRESS()      debug(SUBSYS_STARTUP, "%s:%d\n",          \
37                                      __FUNCTION__, __LINE__);
38
39#define BSP_INIT_MODULE_NAME    BF_BINARY_PREFIX "armv7/sbin/init"
40#define APP_INIT_MODULE_NAME    BF_BINARY_PREFIX "armv7/sbin/monitor"
41
42#define MSG(format, ...) printk( LOG_NOTE, "ARMv7-A: "format, ## __VA_ARGS__ )
43
44//static phys_mmap_t* g_phys_mmap;        // Physical memory map
45static union arm_l1_entry * init_l1;              // L1 page table for init
46static union arm_l2_entry * init_l2;              // L2 page tables for init
47
48static struct spawn_state spawn_state;
49
50/// Pointer to bootinfo structure for init
51struct bootinfo* bootinfo = (struct bootinfo*)INIT_BOOTINFO_VBASE;
52
53/* There is only one copy of the global locks, which is allocated alongside
54 * the BSP kernel.  All kernels have their pointers set to the BSP copy. */
55struct global *global= NULL;
56
57/**
58 * Map frames into init process address space. Init has a contiguous set of
59 * l2 entries so this is straightforward.
60 *
61 * @param l2_table      pointer to init's L2 table.
62 * @param l2_base       virtual address represented by first L2 table entry
63 * @param va_base       virtual address to map.
64 * @param pa_base       physical address to associate with virtual address.
65 * @param bytes         number of bytes to map.
66 * @param l2_flags      ARM L2 small page flags for mapped pages.
67 */
68static void
69spawn_init_map(union arm_l2_entry* l2_table,
70               lvaddr_t   l2_base,
71               lvaddr_t   va_base,
72               lpaddr_t   pa_base,
73               size_t     bytes,
74               uintptr_t  l2_flags)
75{
76    assert(va_base >= l2_base);
77    assert(0 == (va_base & (BASE_PAGE_SIZE - 1)));
78    assert(0 == (pa_base & (BASE_PAGE_SIZE - 1)));
79    assert(0 == (bytes & (BASE_PAGE_SIZE - 1)));
80
81    int bi = (va_base - l2_base) / BASE_PAGE_SIZE;
82    int li = bi + bytes / BASE_PAGE_SIZE;
83
84    while (bi < li) {
85        paging_set_l2_entry((uintptr_t *)&l2_table[bi], pa_base, l2_flags);
86        pa_base += BASE_PAGE_SIZE;
87        bi++;
88    }
89}
90
91static uint32_t elf_to_l2_flags(uint32_t eflags)
92{
93    switch (eflags & (PF_W|PF_R)) {
94    case PF_W|PF_R:
95        return (ARM_L2_SMALL_USR_RW |
96                ARM_L2_SMALL_CACHEABLE |
97                ARM_L2_SMALL_BUFFERABLE);
98    case PF_R:
99        return (ARM_L2_SMALL_USR_RO |
100                ARM_L2_SMALL_CACHEABLE |
101                ARM_L2_SMALL_BUFFERABLE);
102    default:
103        panic("Unknown ELF flags combination.");
104    }
105}
106
/* State threaded through elf_load() to startup_alloc_init(): identifies the
 * L2 page tables backing init's virtual address space. */
struct startup_l2_info
{
    union arm_l2_entry* l2_table;   // init's (contiguous) L2 page tables
    lvaddr_t   l2_base;             // VA represented by the first L2 entry
};
112
113static errval_t
114startup_alloc_init(
115    void*      state,
116    genvaddr_t gvbase,
117    size_t     bytes,
118    uint32_t   flags,
119    void**     ret
120    )
121{
122    const struct startup_l2_info* s2i = (const struct startup_l2_info*)state;
123
124    lvaddr_t sv = ROUND_DOWN((lvaddr_t)gvbase, BASE_PAGE_SIZE);
125    size_t   off = (lvaddr_t)gvbase - sv;
126    lvaddr_t lv = ROUND_UP((lvaddr_t)gvbase + bytes, BASE_PAGE_SIZE);
127    lpaddr_t pa;
128
129    //STARTUP_PROGRESS();
130    if(cpu_is_bsp()) {
131        pa = bsp_alloc_phys_aligned((lv - sv), BASE_PAGE_SIZE);
132    } else {
133        pa = app_alloc_phys_aligned((lv - sv), BASE_PAGE_SIZE);
134    }
135    if (lv > sv && (pa != 0)) {
136        spawn_init_map(s2i->l2_table, s2i->l2_base, sv,
137                       pa, lv - sv, elf_to_l2_flags(flags));
138        *ret = (void*)(local_phys_to_mem(pa) + off);
139    } else {
140        *ret = 0;
141    }
142    return SYS_ERR_OK;
143}
144
145static void
146load_init_image(
147    struct startup_l2_info* l2i,
148    const char *name,
149    genvaddr_t* init_ep,
150    genvaddr_t* got_base
151    )
152{
153    lvaddr_t elf_base;
154    size_t elf_bytes;
155    errval_t err;
156
157
158    *init_ep = *got_base = 0;
159
160    /* Load init ELF32 binary */
161    struct multiboot_modinfo *module = multiboot_find_module(name);
162    if (module == NULL) {
163        panic("Could not find init module!");
164    }
165
166    elf_base =  local_phys_to_mem(module->mod_start);
167    elf_bytes = MULTIBOOT_MODULE_SIZE(*module);
168
169    debug(SUBSYS_STARTUP, "load_init_image %p %08x\n", elf_base, elf_bytes);
170    //printf("load_init_image %p %08x\n", elf_base, elf_bytes);
171
172    err = elf_load(EM_ARM, startup_alloc_init, l2i,
173            elf_base, elf_bytes, init_ep);
174    if (err_is_fail(err)) {
175        //err_print_calltrace(err);
176        panic("ELF load of " BSP_INIT_MODULE_NAME " failed!\n");
177    }
178
179    // TODO: Fix application linkage so that it's non-PIC.
180    struct Elf32_Shdr* got_shdr =
181        elf32_find_section_header_name((lvaddr_t)elf_base, elf_bytes, ".got");
182    if (got_shdr) {
183        *got_base = got_shdr->sh_addr;
184    }
185}
186
187/// Setup the module cnode, which contains frame caps to all multiboot modules
void create_module_caps(struct spawn_state *st)
{
    struct multiboot_info *mb=
        (struct multiboot_info *)core_data->multiboot_header;
    errval_t err;

    /* Create caps for multiboot modules */
    struct multiboot_modinfo *module =
        (struct multiboot_modinfo *)local_phys_to_mem(mb->mods_addr);

    // Allocate strings area (a single page; this path runs on the BSP —
    // see the cpu_is_bsp() assert in spawn_bsp_init, the only caller chain)
    lpaddr_t mmstrings_phys = bsp_alloc_phys(BASE_PAGE_SIZE);
    lvaddr_t mmstrings_base = local_phys_to_mem(mmstrings_phys);
    lvaddr_t mmstrings = mmstrings_base;   // bump pointer within that page

    // create cap for strings area in first slot of modulecn
    assert(st->modulecn_slot == 0);
    err = caps_create_new(ObjType_Frame, mmstrings_phys, BASE_PAGE_SIZE,
                          BASE_PAGE_SIZE, my_core_id,
                          caps_locate_slot(CNODE(st->modulecn),
                                           st->modulecn_slot++));
    assert(err_is_ok(err));

    /* Walk over multiboot modules, creating frame caps */
    for (int i = 0; i < mb->mods_count; i++) {
        struct multiboot_modinfo *m = &module[i];

        // Set memory regions within bootinfo
        struct mem_region *region =
            &bootinfo->regions[bootinfo->regions_length++];

        genpaddr_t remain = MULTIBOOT_MODULE_SIZE(*m);
        genpaddr_t base_addr = local_phys_to_gen_phys(m->mod_start);

        region->mr_type = RegionType_Module;
        region->mr_base = base_addr;
        region->mrmod_slot = st->modulecn_slot;  // first slot containing caps
        region->mrmod_size = remain;  // size of image _in bytes_
        region->mrmod_data = mmstrings - mmstrings_base; // offset of string in area

        // round up to page size for caps
        remain = ROUND_UP(remain, BASE_PAGE_SIZE);
        assert((base_addr & BASE_PAGE_MASK) == 0);
        assert((remain & BASE_PAGE_MASK) == 0);

        assert(st->modulecn_slot < cnode_get_slots(&st->modulecn->cap));
        // create as DevFrame cap to avoid zeroing memory contents
        err = caps_create_new(ObjType_DevFrame, base_addr, remain,
                              remain, my_core_id,
                              caps_locate_slot(CNODE(st->modulecn),
                                               st->modulecn_slot++));
        assert(err_is_ok(err));

        // Copy multiboot module string to mmstrings area; the unbounded
        // strcpy is backstopped only by the assert below (overflow of the
        // single strings page is fatal, not handled)
        strcpy((char *)mmstrings, MBADDR_ASSTRING(m->string));
        mmstrings += strlen(MBADDR_ASSTRING(m->string)) + 1;
        assert(mmstrings < mmstrings_base + BASE_PAGE_SIZE);
    }
}
247
248/* Create physical address range or RAM caps to unused physical memory.
249   init_alloc_addr is the last address allocated for the init process, plus
250   one. */
static void create_phys_caps(lpaddr_t init_alloc_addr)
{
    struct multiboot_info *mb=
        (struct multiboot_info *)core_data->multiboot_header;
    errval_t err;

    /* Walk multiboot MMAP structure, and create appropriate caps for memory.
       This function assumes that the memory map is sorted by base address,
       and contains no overlaps.  We also assume that the kernel, and init,
       have been allocated at the beginning of the first RAM region, and thus
       that init_alloc_addr represents the lowest unallocated RAM address. */
    genpaddr_t last_end_addr= 0;
    genpaddr_t first_free_byte= local_phys_to_gen_phys(init_alloc_addr);
    debug(SUBSYS_STARTUP, "First free byte is PA:0x%"PRIxGENPADDR".\n",
                          first_free_byte);

    /* NOTE(review): 'i' counts iterations while mmap_vaddr advances by
     * mmap->size bytes per entry.  This terminates correctly only if
     * mb->mmap_length is the number of entries; the multiboot spec defines
     * mmap_length as a byte count — confirm against the boot driver that
     * builds this structure. */
    lvaddr_t mmap_vaddr= local_phys_to_mem((lpaddr_t)mb->mmap_addr);
    for(uint32_t i= 0; i < mb->mmap_length; i++) {
        struct multiboot_mmap *mmap = (struct multiboot_mmap *)mmap_vaddr;

        genpaddr_t base_addr = mmap->base_addr;
        genpaddr_t end_addr  = base_addr + (mmap->length - 1);

        debug(SUBSYS_STARTUP, "MMAP PA:0x%"PRIxGENPADDR"-0x%"
                              PRIxGENPADDR" type %"PRIu32"\n",
                              base_addr, end_addr, mmap->type);

        switch(mmap->type) {
            case MULTIBOOT_MEM_TYPE_RAM:
                /* Only map RAM which is greater than init_alloc_addr. */
                if (end_addr >= first_free_byte) {
                    // Clip the region so we never hand out RAM already used
                    // by the kernel/init images
                    if(base_addr < first_free_byte)
                        base_addr= first_free_byte;
                    debug(SUBSYS_STARTUP, "RAM PA:0x%"PRIxGENPADDR"-0x%"
                                          PRIxGENPADDR"\n",
                                          base_addr, end_addr);

                    assert(end_addr >= base_addr);
                    err= create_caps_to_cnode(base_addr,
                            (end_addr - base_addr) + 1,
                            RegionType_Empty, &spawn_state, bootinfo);
                    assert(err_is_ok(err));
                }
                break;

            case MULTIBOOT_MEM_TYPE_DEVICE:
                /* Device memory will be handled explicitly later. */
                break;

            default:
                // Regions straddling first_free_byte are silently skipped
                // here (only wholly-above regions get caps)
                if (mmap->base_addr >= first_free_byte) {
                    /* XXX: The multiboot spec just says that mapping types
                     * other than RAM are "reserved", but GRUB always maps the
                     * ACPI tables as type 3, and things like the IOAPIC tend
                     * to show up as type 2 or 4, so we map all these regions
                     * as platform data.  */
                    debug(SUBSYS_STARTUP, "Platform data PA:0x%"PRIxGENPADDR
                                          "-0x%"PRIxGENPADDR"\n", base_addr,
                                          end_addr);
                    assert(base_addr >= first_free_byte);
                    err = create_caps_to_cnode(base_addr, mmap->length,
                            RegionType_PlatformData, &spawn_state, bootinfo);
                    assert(err_is_ok(err));
                }
        }

        // Track the highest address seen, purely for the sanity check below
        last_end_addr= end_addr;
        mmap_vaddr+= mmap->size;
    }

    // Assert that we have some physical address space
    assert(last_end_addr != 0);
}
324
325/*
326 * \brief Initialzie page tables
327 *
328 * This includes setting up page tables for the init process.
329 */
static void init_page_tables(void)
{
    // Create page table for init, zeroed so all entries start invalid.
    // The BSP and APP cores draw from different physical allocators.
    if(cpu_is_bsp()) {
        init_l1 =  (union arm_l1_entry *)local_phys_to_mem(bsp_alloc_phys_aligned(INIT_L1_BYTES, ARM_L1_ALIGN));
        memset(init_l1, 0, INIT_L1_BYTES);

        init_l2 = (union arm_l2_entry *)local_phys_to_mem(bsp_alloc_phys_aligned(INIT_L2_BYTES, ARM_L2_ALIGN));
        memset(init_l2, 0, INIT_L2_BYTES);
    } else {
        init_l1 =  (union arm_l1_entry *)local_phys_to_mem(app_alloc_phys_aligned(INIT_L1_BYTES, ARM_L1_ALIGN));
        memset(init_l1, 0, INIT_L1_BYTES);

        init_l2 = (union arm_l2_entry *)local_phys_to_mem(app_alloc_phys_aligned(INIT_L2_BYTES, ARM_L2_ALIGN));
        memset(init_l2, 0, INIT_L2_BYTES);
    }

    MSG("init_page_tables done: init_l1=%p init_l2=%p\n",
            init_l1, init_l2);

    /* Map pagetables into page CN */
    int pagecn_pagemap = 0;

    /*
     * ARM has:
     *
     * L1 has 4096 entries (16KB).
     * L2 Coarse has 256 entries (256 * 4B = 1KB).
     *
     * CPU driver currently fakes having 1024 entries in L1 and
     * L2 with 1024 entries by treating a page as 4 consecutive
     * L2 tables and mapping this as a unit in L1.
     */
    // Slot 0 of the page CN: the L1 table itself.
    // NOTE(review): the return value of caps_create_new is not checked
    // here, unlike the other call sites in this file — confirm intended.
    caps_create_new(ObjType_VNode_ARM_l1,
                    mem_to_local_phys((lvaddr_t)init_l1),
                    vnode_objsize(ObjType_VNode_ARM_l1), 0, my_core_id,
                    caps_locate_slot(CNODE(spawn_state.pagecn),
                        pagecn_pagemap++)
                    );

    //STARTUP_PROGRESS();

    // Map L2 into successive slots in pagecn
    size_t i;
    for (i = 0; i < INIT_L2_BYTES / ARM_L2_TABLE_BYTES; i++) {
        size_t objsize_vnode = vnode_objsize(ObjType_VNode_ARM_l2);
        assert(objsize_vnode == ARM_L2_TABLE_BYTES);
        caps_create_new(
                        ObjType_VNode_ARM_l2,
                        mem_to_local_phys((lvaddr_t)init_l2) + i*objsize_vnode,
                        objsize_vnode, 0, my_core_id,
                        caps_locate_slot(CNODE(spawn_state.pagecn),
                            pagecn_pagemap++)
                        );
    }

    /*
     * Initialize init page tables - this just wires the L1
     * entries through to the corresponding L2 entries.
     */
    STATIC_ASSERT(0 == (INIT_VBASE % ARM_L1_SECTION_BYTES), "");
    for (lvaddr_t vaddr = INIT_VBASE;
         vaddr < INIT_SPACE_LIMIT;
         vaddr += ARM_L1_SECTION_BYTES) {
        // One L1 section maps one L2 table; L2 tables are laid out
        // contiguously from init_l2, so the offset is section * table size.
        uintptr_t section = (vaddr - INIT_VBASE) / ARM_L1_SECTION_BYTES;
        uintptr_t l2_off = section * ARM_L2_TABLE_BYTES;
        lpaddr_t paddr = mem_to_local_phys((lvaddr_t)init_l2) + l2_off;
        paging_map_user_pages_l1((lvaddr_t)init_l1, vaddr, paddr);
    }

    MSG("Calling paging_context_switch with address = %"PRIxLVADDR"\n",
           mem_to_local_phys((lvaddr_t) init_l1));
    // Activate init's L1 table as the current translation base
    paging_context_switch(mem_to_local_phys((lvaddr_t)init_l1));
}
404
405/* Locate the first device region below 4GB listed in the multiboot memory
406 * map, and truncate it to fit. */
407static void
408first_device_region(lpaddr_t *base, lpaddr_t *length) {
409    struct multiboot_info *mb=
410        (struct multiboot_info *)core_data->multiboot_header;
411
412    lvaddr_t mmap_vaddr= local_phys_to_mem((lpaddr_t)mb->mmap_addr);
413    for(uint32_t i= 0; i < mb->mmap_length; i++) {
414        struct multiboot_mmap *mmap= (struct multiboot_mmap *)mmap_vaddr;
415
416        if(mmap->type == MULTIBOOT_MEM_TYPE_DEVICE) {
417            uint64_t base64=   mmap->base_addr;
418            uint64_t length64= mmap->length;
419
420            if(base64 > (uint64_t)UINT32_MAX) {
421                MSG("device region %"PRIu32" lies above 4GB.\n", i);
422            }
423            else if(base64 + (length64 - 1) > (uint64_t)UINT32_MAX) {
424                MSG("device region %"PRIu32" extends beyond 4GB, "
425                    "truncating it.\n", i);
426                length64= ((uint64_t)UINT32_MAX - base64) + 1;
427            }
428
429            *base=   (lpaddr_t)base64;
430            *length= (lpaddr_t)length64;
431            return;
432        }
433
434        mmap_vaddr+= mmap->size;
435    }
436
437    panic("No device regions specified in multiboot memory map.\n");
438}
439
440static struct dcb *
441spawn_init_common(const char *name, int argc, const char *argv[],
442                  lpaddr_t bootinfo_phys, alloc_phys_func alloc_phys,
443                  alloc_phys_aligned_func alloc_phys_aligned)
444{
445    MSG("spawn_init_common %s\n", name);
446
447    lvaddr_t paramaddr;
448    struct dcb *init_dcb = spawn_module(&spawn_state, name,
449                                        argc, argv,
450                                        bootinfo_phys, INIT_ARGS_VBASE,
451                                        alloc_phys, alloc_phys_aligned,
452                                        &paramaddr);
453
454    init_page_tables();
455
456    MSG("about to call mem_to_local_phys with lvaddr=%"PRIxLVADDR"\n",
457           init_l1);
458
459    init_dcb->vspace = mem_to_local_phys((lvaddr_t)init_l1);
460
461    spawn_init_map(init_l2, INIT_VBASE, INIT_ARGS_VBASE,
462                   spawn_state.args_page, ARGS_SIZE, INIT_PERM_RW);
463
464
465    // Map dispatcher
466    spawn_init_map(init_l2, INIT_VBASE, INIT_DISPATCHER_VBASE,
467                   mem_to_local_phys(init_dcb->disp), DISPATCHER_FRAME_SIZE,
468                   INIT_PERM_RW);
469
470    /* Locate the memory-mapped device region. */
471    lpaddr_t device_base, device_length;
472    first_device_region(&device_base, &device_length);
473    MSG("Using device region at PA:0x%"PRIx32"-0x%"PRIx32"\n",
474            device_base, device_base + (device_length - 1));
475    if((1UL << log2ceil(device_length)) != device_length) {
476        panic("Device region isn't a power of two in size.\n");
477    }
478
479    /*
480     * We create the capability to the devices at this stage and store it
481     * in the TASKCN_SLOT_IO, where on x86 the IO capability is stored for
482     * device access on PCI.
483     *
484     * PCI is not available on our existing ARMv7 platforms, but this may be a
485     * problem in future.
486     */
487    struct cte *iocap=
488        caps_locate_slot(CNODE(spawn_state.taskcn), TASKCN_SLOT_IO);
489    errval_t err=
490        caps_create_new(ObjType_DevFrame, device_base, device_length,
491                        device_length, my_core_id, iocap);
492    assert(err_is_ok(err));
493
494    struct dispatcher_shared_generic *disp
495        = get_dispatcher_shared_generic(init_dcb->disp);
496    struct dispatcher_shared_arm *disp_arm
497        = get_dispatcher_shared_arm(init_dcb->disp);
498
499    /* Initialize dispatcher */
500    disp->disabled = true;
501    strncpy(disp->name, argv[0], DISP_NAME_LEN);
502
503    /* tell init the vspace addr of its dispatcher */
504    disp->udisp = INIT_DISPATCHER_VBASE;
505
506    /* Write the context ID for init - see arch/arm/dispatch.c. */
507    cp15_write_contextidr(((uint32_t)init_dcb) & ~MASK(8));
508
509    disp_arm->enabled_save_area.named.r0   = paramaddr;
510    disp_arm->enabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
511    arch_set_thread_register(INIT_DISPATCHER_VBASE);
512
513    MSG("spawn_init_common: starting from=%"PRIxLVADDR"\n");
514
515    dump_dispatcher(disp);
516
517    return init_dcb;
518}
519
520
521struct dcb *
522spawn_bsp_init(const char *name)
523{
524    MSG("spawn_bsp_init\n");
525
526    /* Only the first core can run this code */
527    assert(cpu_is_bsp());
528
529    /* Allocate bootinfo */
530    lpaddr_t bootinfo_phys = bsp_alloc_phys_aligned(BOOTINFO_SIZE, BASE_PAGE_SIZE);
531    memset((void *)local_phys_to_mem(bootinfo_phys), 0, BOOTINFO_SIZE);
532
533    /* Construct cmdline args */
534    char bootinfochar[16];
535    snprintf(bootinfochar, sizeof(bootinfochar), "%u", INIT_BOOTINFO_VBASE);
536    const char *argv[] = { "init", bootinfochar };
537    int argc = 2;
538
539    struct dcb *init_dcb =
540        spawn_init_common(name, argc, argv, bootinfo_phys,
541                          bsp_alloc_phys, bsp_alloc_phys_aligned);
542
543    // Map bootinfo
544    spawn_init_map(init_l2, INIT_VBASE, INIT_BOOTINFO_VBASE,
545                   bootinfo_phys, BOOTINFO_SIZE, INIT_PERM_RW);
546
547    struct startup_l2_info l2_info = { init_l2, INIT_VBASE };
548
549    genvaddr_t init_ep, got_base;
550    load_init_image(&l2_info, BSP_INIT_MODULE_NAME, &init_ep, &got_base);
551
552    struct dispatcher_shared_arm *disp_arm =
553        get_dispatcher_shared_arm(init_dcb->disp);
554    disp_arm->enabled_save_area.named.r9   = got_base;
555    disp_arm->got_base = got_base;
556
557    disp_arm->disabled_save_area.named.pc   = init_ep;
558    disp_arm->disabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
559    disp_arm->disabled_save_area.named.r9   = got_base;
560
561    /* Create caps for init to use */
562    create_module_caps(&spawn_state);
563    lpaddr_t init_alloc_end = bsp_alloc_phys(0); // XXX
564    create_phys_caps(init_alloc_end);
565
566    /* Fill bootinfo struct */
567    //bootinfo->mem_spawn_core = KERNEL_IMAGE_SIZE; // Size of kernel
568
569    return init_dcb;
570}
571
/* Spawn the monitor ("app init") on a non-BSP core: common spawn setup,
 * URPC frame cap creation and mapping, then ELF-load of the monitor image
 * passed in via the core_data structure. */
struct dcb *spawn_app_init(struct arm_core_data *new_core_data, const char *name)
{
    errval_t err;

    /* Construct cmdline args */
    // Core id of the core that booted this core
    char coreidchar[10];
    snprintf(coreidchar, sizeof(coreidchar), "%d", core_data->src_core_id);

    // IPI channel id of core that booted this core
    char chanidchar[30];
    snprintf(chanidchar, sizeof(chanidchar), "chanid=%"PRIu32, core_data->chan_id);

    // Arch id of the core that booted this core
    char archidchar[30];
    snprintf(archidchar, sizeof(archidchar), "archid=%d",
             core_data->src_arch_id);

    // 5 slots but only 4 initializers: the 5th element is implicitly NULL
    // per C aggregate-initialization rules
    const char *argv[5] = { name, coreidchar, chanidchar, archidchar };
    int argc = 4;

    struct dcb *init_dcb=
        spawn_init_common(name, argc, argv, 0, app_alloc_phys, app_alloc_phys_aligned);

    // Urpc frame cap
    struct cte *urpc_frame_cte =
        caps_locate_slot(CNODE(spawn_state.taskcn), TASKCN_SLOT_MON_URPC);
    // XXX: Create as devframe so the memory is not zeroed out
    err = caps_create_new(ObjType_DevFrame,
                          core_data->urpc_frame_base,
                          core_data->urpc_frame_size,
                          core_data->urpc_frame_size,
                          my_core_id,
                          urpc_frame_cte);
    assert(err_is_ok(err));
    // Retype in place to a normal Frame now that creation skipped zeroing
    urpc_frame_cte->cap.type = ObjType_Frame;
    lpaddr_t urpc_ptr = gen_phys_to_local_phys(urpc_frame_cte->cap.u.frame.base);

    /* Map urpc frame at MON_URPC_BASE */
    spawn_init_map(init_l2, INIT_VBASE, MON_URPC_VBASE, urpc_ptr, MON_URPC_SIZE,
                           INIT_PERM_RW);

    struct startup_l2_info l2_info = { init_l2, INIT_VBASE };

    // elf load the domain
    lvaddr_t monitor_binary=
        local_phys_to_mem(core_data->monitor_module.mod_start);
    // NOTE(review): the +1 assumes mod_end is inclusive; the multiboot spec
    // defines mod_end as exclusive — confirm the boot driver's convention
    size_t monitor_binary_size=
        core_data->monitor_module.mod_end -
        core_data->monitor_module.mod_start + 1;
    genvaddr_t entry_point, got_base=0;
    err = elf_load(EM_ARM, startup_alloc_init, &l2_info,
                monitor_binary, monitor_binary_size, &entry_point);
    if (err_is_fail(err)) {
        //err_print_calltrace(err);
        panic("ELF load of init module failed!");
    }

    // TODO: Fix application linkage so that it's non-PIC.
    struct Elf32_Shdr* got_shdr =
        elf32_find_section_header_name(monitor_binary, monitor_binary_size,
                                       ".got");
    if (got_shdr) {
        got_base = got_shdr->sh_addr;
    }

    // Publish GOT pointer (r9 in both save areas) and entry point / CPSR
    struct dispatcher_shared_arm *disp_arm =
        get_dispatcher_shared_arm(init_dcb->disp);
    disp_arm->enabled_save_area.named.r9   = got_base;
    disp_arm->got_base = got_base;

    disp_arm->disabled_save_area.named.pc   = entry_point;
    disp_arm->disabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
    disp_arm->disabled_save_area.named.r9   = got_base;
    arch_set_thread_register(INIT_DISPATCHER_VBASE);

    return init_dcb;
}
650
/* Kernel startup entry point after low-level init: spawn init (BSP) or the
 * monitor (APP cores) and dispatch it.  Does not return. */
void arm_kernel_startup(void)
{
    MSG("arm_kernel_startup entered \n");
    struct dcb *init_dcb;

    if (cpu_is_bsp()) {
        MSG("Doing BSP related bootup \n");

        // Physical memory below the end of the multiboot data / kernel
        // image is considered allocated
        struct multiboot_info *mb=
            (struct multiboot_info *)core_data->multiboot_header;
        size_t max_addr = max(multiboot_end_addr(mb),
                              (uintptr_t)&kernel_final_byte);

        /* Initialize the location to allocate phys memory from */
        bsp_init_alloc_addr = mem_to_local_phys(max_addr);

        /* Initial KCB was allocated by the boot driver. */
        assert(kcb_current);

        // Bring up init
        init_dcb = spawn_bsp_init(BSP_INIT_MODULE_NAME);
    } else {
        MSG("Doing non-BSP related bootup \n");

        // The boot driver handed us a physical KCB pointer; convert it to
        // a kernel-window virtual address
        kcb_current = (struct kcb *)
            local_phys_to_mem((lpaddr_t) kcb_current);

        /* Initialize the allocator with the information passed to us */
        app_alloc_phys_start = core_data->memory_base_start;
        app_alloc_phys_end   = app_alloc_phys_start + core_data->memory_bytes;

        init_dcb = spawn_app_init(core_data, APP_INIT_MODULE_NAME);

        // uint32_t irq = platform_get_active_irq();
        // platform_acknowledge_irq(irq);
    }

    /* XXX - this really shouldn't be necessary. */
    // MSG("Trying to enable interrupts\n");
    // __asm volatile ("CPSIE aif");
    // MSG("Done enabling interrupts\n");

    /* printf("HOLD BOOTUP - SPINNING\n"); */
    /* while (1); */
    /* printf("THIS SHOULD NOT HAPPEN\n"); */

    // Should not return
    MSG("Calling dispatch from arm_kernel_startup, start address is=%"PRIxLVADDR"\n",
           get_dispatcher_shared_arm(init_dcb->disp)->enabled_save_area.named.r0);
    dispatch(init_dcb);
    panic("Error spawning init!");

}
704