1/*
2 * Copyright (c) 2009, 2010 ETH Zurich.
3 * All rights reserved.
4 *
5 * This file is distributed under the terms in the attached LICENSE file.
6 * If you do not find this file, copies can be found by writing to:
7 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
8 */
9
10#include <kernel.h>
11#include <bitmacros.h>
12#include <dispatch.h>
13#include <string.h>
14#include <stdio.h>
15
16#include <barrelfish_kpi/init.h>
17#include <barrelfish_kpi/syscalls.h>
18#include <elf/elf.h>
19
20#include <platform.h>
21#include <paging_kernel_arch.h>
22#include <exceptions.h>
23#include <cpiobin.h>
24#include <init.h>
25#include <barrelfish_kpi/paging_arch.h>
26#include <barrelfish_kpi/arm_core_data.h>
27#include <kernel_multiboot.h>
28#include <offsets.h>
29#include <startup_arch.h>
30#include <global.h>
31#include <kcb.h>
32#include <gic.h>
33#include <arch/arm/startup_arm.h>
34
35#define CNODE(cte)              get_address(&cte->cap)
36
37#define STARTUP_PROGRESS()      debug(SUBSYS_STARTUP, "%s:%d\n",          \
38                                      __FUNCTION__, __LINE__);
39
40#define BSP_INIT_MODULE_NAME    BF_BINARY_PREFIX "armv7/sbin/init"
41#define APP_INIT_MODULE_NAME    BF_BINARY_PREFIX "armv7/sbin/monitor"
42
43#define MSG(format, ...) printk( LOG_NOTE, "ARMv7-A: "format, ## __VA_ARGS__ )
44
45//static phys_mmap_t* g_phys_mmap;        // Physical memory map
46static union arm_l1_entry * init_l1;              // L1 page table for init
47static union arm_l2_entry * init_l2;              // L2 page tables for init
48
49static struct spawn_state spawn_state;
50
51/// Pointer to bootinfo structure for init
52struct bootinfo* bootinfo = (struct bootinfo*)INIT_BOOTINFO_VBASE;
53
54/* There is only one copy of the global locks, which is allocated alongside
55 * the BSP kernel.  All kernels have their pointers set to the BSP copy. */
56struct global *global= NULL;
57
58/**
59 * Map frames into init process address space. Init has a contiguous set of
60 * l2 entries so this is straightforward.
61 *
62 * @param l2_table      pointer to init's L2 table.
63 * @param l2_base       virtual address represented by first L2 table entry
64 * @param va_base       virtual address to map.
65 * @param pa_base       physical address to associate with virtual address.
66 * @param bytes         number of bytes to map.
67 * @param l2_flags      ARM L2 small page flags for mapped pages.
68 */
69static void
70spawn_init_map(union arm_l2_entry* l2_table,
71               lvaddr_t   l2_base,
72               lvaddr_t   va_base,
73               lpaddr_t   pa_base,
74               size_t     bytes,
75               uintptr_t  l2_flags)
76{
77    assert(va_base >= l2_base);
78    assert(0 == (va_base & (BASE_PAGE_SIZE - 1)));
79    assert(0 == (pa_base & (BASE_PAGE_SIZE - 1)));
80    assert(0 == (bytes & (BASE_PAGE_SIZE - 1)));
81
82    int bi = (va_base - l2_base) / BASE_PAGE_SIZE;
83    int li = bi + bytes / BASE_PAGE_SIZE;
84
85    while (bi < li) {
86        paging_set_l2_entry((uintptr_t *)&l2_table[bi], pa_base, l2_flags);
87        pa_base += BASE_PAGE_SIZE;
88        bi++;
89    }
90}
91
92static uint32_t elf_to_l2_flags(uint32_t eflags)
93{
94    switch (eflags & (PF_W|PF_R)) {
95    case PF_W|PF_R:
96        return (ARM_L2_SMALL_USR_RW |
97                ARM_L2_SMALL_CACHEABLE |
98                ARM_L2_SMALL_BUFFERABLE);
99    case PF_R:
100        return (ARM_L2_SMALL_USR_RO |
101                ARM_L2_SMALL_CACHEABLE |
102                ARM_L2_SMALL_BUFFERABLE);
103    default:
104        panic("Unknown ELF flags combination.");
105    }
106}
107
/// State handed to the ELF-loader allocation callback (startup_alloc_init):
/// identifies init's L2 tables and the virtual address they start mapping.
struct startup_l2_info
{
    union arm_l2_entry* l2_table;   ///< init's L2 page tables (kernel VA)
    lvaddr_t   l2_base;             ///< VA covered by the first L2 entry
};
113
114static errval_t
115startup_alloc_init(
116    void*      state,
117    genvaddr_t gvbase,
118    size_t     bytes,
119    uint32_t   flags,
120    void**     ret
121    )
122{
123    const struct startup_l2_info* s2i = (const struct startup_l2_info*)state;
124
125    lvaddr_t sv = ROUND_DOWN((lvaddr_t)gvbase, BASE_PAGE_SIZE);
126    size_t   off = (lvaddr_t)gvbase - sv;
127    lvaddr_t lv = ROUND_UP((lvaddr_t)gvbase + bytes, BASE_PAGE_SIZE);
128    lpaddr_t pa;
129
130    //STARTUP_PROGRESS();
131    if(cpu_is_bsp()) {
132        pa = bsp_alloc_phys_aligned((lv - sv), BASE_PAGE_SIZE);
133    } else {
134        pa = app_alloc_phys_aligned((lv - sv), BASE_PAGE_SIZE);
135    }
136    if (lv > sv && (pa != 0)) {
137        spawn_init_map(s2i->l2_table, s2i->l2_base, sv,
138                       pa, lv - sv, elf_to_l2_flags(flags));
139        *ret = (void*)(local_phys_to_mem(pa) + off);
140    } else {
141        *ret = 0;
142    }
143    return SYS_ERR_OK;
144}
145
146static void
147load_init_image(
148    struct startup_l2_info* l2i,
149    const char *name,
150    genvaddr_t* init_ep,
151    genvaddr_t* got_base
152    )
153{
154    lvaddr_t elf_base;
155    size_t elf_bytes;
156    errval_t err;
157
158
159    *init_ep = *got_base = 0;
160
161    /* Load init ELF32 binary */
162    struct multiboot_modinfo *module = multiboot_find_module(name);
163    if (module == NULL) {
164        panic("Could not find init module!");
165    }
166
167    elf_base =  local_phys_to_mem(module->mod_start);
168    elf_bytes = MULTIBOOT_MODULE_SIZE(*module);
169
170    debug(SUBSYS_STARTUP, "load_init_image %p %08x\n", elf_base, elf_bytes);
171    //printf("load_init_image %p %08x\n", elf_base, elf_bytes);
172
173    err = elf_load(EM_ARM, startup_alloc_init, l2i,
174            elf_base, elf_bytes, init_ep);
175    if (err_is_fail(err)) {
176        //err_print_calltrace(err);
177        panic("ELF load of " BSP_INIT_MODULE_NAME " failed!\n");
178    }
179
180    // TODO: Fix application linkage so that it's non-PIC.
181    struct Elf32_Shdr* got_shdr =
182        elf32_find_section_header_name((lvaddr_t)elf_base, elf_bytes, ".got");
183    if (got_shdr) {
184        *got_base = got_shdr->sh_addr;
185    }
186}
187
/// Setup the module cnode, which contains frame caps to all multiboot modules.
/// Also fills in one bootinfo mem_region per module and records each module's
/// command-line string in a dedicated strings page (slot 0 of modulecn).
void create_module_caps(struct spawn_state *st)
{
    struct multiboot_info *mb=
        (struct multiboot_info *)core_data->multiboot_header;
    errval_t err;

    /* Create caps for multiboot modules */
    struct multiboot_modinfo *module =
        (struct multiboot_modinfo *)local_phys_to_mem(mb->mods_addr);

    // Allocate strings area (one page; mmstrings is the append cursor)
    lpaddr_t mmstrings_phys = bsp_alloc_phys(BASE_PAGE_SIZE);
    lvaddr_t mmstrings_base = local_phys_to_mem(mmstrings_phys);
    lvaddr_t mmstrings = mmstrings_base;

    // create cap for strings area in first slot of modulecn
    assert(st->modulecn_slot == 0);
    err = caps_create_new(ObjType_Frame, mmstrings_phys, BASE_PAGE_SIZE,
                          BASE_PAGE_SIZE, my_core_id,
                          caps_locate_slot(CNODE(st->modulecn),
                                           st->modulecn_slot++));
    assert(err_is_ok(err));

    /* Walk over multiboot modules, creating frame caps */
    for (int i = 0; i < mb->mods_count; i++) {
        struct multiboot_modinfo *m = &module[i];

        // Set memory regions within bootinfo
        struct mem_region *region =
            &bootinfo->regions[bootinfo->regions_length++];

        genpaddr_t remain = MULTIBOOT_MODULE_SIZE(*m);
        genpaddr_t base_addr = local_phys_to_gen_phys(m->mod_start);

        region->mr_type = RegionType_Module;
        region->mr_base = base_addr;
        region->mrmod_slot = st->modulecn_slot;  // first slot containing caps
        region->mrmod_size = remain;  // size of image _in bytes_
        region->mrmod_data = mmstrings - mmstrings_base; // offset of string in area

        // round up to page size for caps
        remain = ROUND_UP(remain, BASE_PAGE_SIZE);
        assert((base_addr & BASE_PAGE_MASK) == 0);
        assert((remain & BASE_PAGE_MASK) == 0);

        assert(st->modulecn_slot < cnode_get_slots(&st->modulecn->cap));
        // create as DevFrame cap to avoid zeroing memory contents
        err = caps_create_new(ObjType_DevFrame, base_addr, remain,
                              remain, my_core_id,
                              caps_locate_slot(CNODE(st->modulecn),
                                               st->modulecn_slot++));
        assert(err_is_ok(err));

        // Copy multiboot module string to mmstrings area
        // NOTE(review): the page bound is asserted only AFTER the copy; a
        // module string long enough to cross the page would overflow first.
        // Assumes boot-driver-supplied strings are trusted — TODO confirm.
        strcpy((char *)mmstrings, MBADDR_ASSTRING(m->string));
        mmstrings += strlen(MBADDR_ASSTRING(m->string)) + 1;
        assert(mmstrings < mmstrings_base + BASE_PAGE_SIZE);
    }
}
248
/* Create physical-address-range or RAM capabilities covering the unused
   physical memory.  init_alloc_addr is the last address allocated for the
   init process, plus one. */
static void create_phys_caps(lpaddr_t init_alloc_addr)
{
    struct multiboot_info *mb=
        (struct multiboot_info *)core_data->multiboot_header;
    errval_t err;

    /* Walk multiboot MMAP structure, and create appropriate caps for memory.
       This function assumes that the memory map is sorted by base address,
       and contains no overlaps.  We also assume that the kernel, and init,
       have been allocated at the beginning of the first RAM region, and thus
       that init_alloc_addr represents the lowest unallocated RAM address. */
    genpaddr_t last_end_addr= 0;
    genpaddr_t first_free_byte= local_phys_to_gen_phys(init_alloc_addr);
    debug(SUBSYS_STARTUP, "First free byte is PA:0x%"PRIxGENPADDR".\n",
                          first_free_byte);

    /* NOTE(review): `i` counts map ENTRIES while mmap_vaddr advances by
     * mmap->size bytes per entry.  The multiboot spec defines mmap_length
     * in BYTES; this loop only terminates correctly if the boot driver
     * fills mmap_length with an entry count — TODO confirm that convention
     * (first_device_region below relies on the same assumption). */
    lvaddr_t mmap_vaddr= local_phys_to_mem((lpaddr_t)mb->mmap_addr);
    for(uint32_t i= 0; i < mb->mmap_length; i++) {
        struct multiboot_mmap *mmap = (struct multiboot_mmap *)mmap_vaddr;

        genpaddr_t base_addr = mmap->base_addr;
        genpaddr_t end_addr  = base_addr + (mmap->length - 1);  // inclusive

        debug(SUBSYS_STARTUP, "MMAP PA:0x%"PRIxGENPADDR"-0x%"
                              PRIxGENPADDR" type %"PRIu32"\n",
                              base_addr, end_addr, mmap->type);

        switch(mmap->type) {
            case MULTIBOOT_MEM_TYPE_RAM:
                /* Only map RAM which is greater than init_alloc_addr. */
                if (end_addr >= first_free_byte) {
                    // Clip the region so caps never cover allocated memory.
                    if(base_addr < first_free_byte)
                        base_addr= first_free_byte;
                    debug(SUBSYS_STARTUP, "RAM PA:0x%"PRIxGENPADDR"-0x%"
                                          PRIxGENPADDR"\n",
                                          base_addr, end_addr);

                    assert(end_addr >= base_addr);
                    err= create_caps_to_cnode(base_addr,
                            (end_addr - base_addr) + 1,
                            RegionType_Empty, &spawn_state, bootinfo);
                    assert(err_is_ok(err));
                }
                break;

            case MULTIBOOT_MEM_TYPE_DEVICE:
                /* Device memory will be handled explicitly later. */
                break;

            default:
                if (mmap->base_addr >= first_free_byte) {
                    /* XXX: The multiboot spec just says that mapping types
                     * other than RAM are "reserved", but GRUB always maps the
                     * ACPI tables as type 3, and things like the IOAPIC tend
                     * to show up as type 2 or 4, so we map all these regions
                     * as platform data.  */
                    debug(SUBSYS_STARTUP, "Platform data PA:0x%"PRIxGENPADDR
                                          "-0x%"PRIxGENPADDR"\n", base_addr,
                                          end_addr);
                    assert(base_addr >= first_free_byte);
                    err = create_caps_to_cnode(base_addr, mmap->length,
                            RegionType_PlatformData, &spawn_state, bootinfo);
                    assert(err_is_ok(err));
                }
        }

        // Advance by the entry's own size field (entries are variable-sized).
        last_end_addr= end_addr;
        mmap_vaddr+= mmap->size;
    }

    // Assert that we have some physical address space
    assert(last_end_addr != 0);
}
325
/*
 * \brief Initialize page tables
 *
 * This includes setting up page tables for the init process.
 */
331static void init_page_tables(void)
332{
333    // Create page table for init
334    if(cpu_is_bsp()) {
335        init_l1 =  (union arm_l1_entry *)local_phys_to_mem(bsp_alloc_phys_aligned(INIT_L1_BYTES, ARM_L1_ALIGN));
336        memset(init_l1, 0, INIT_L1_BYTES);
337
338        init_l2 = (union arm_l2_entry *)local_phys_to_mem(bsp_alloc_phys_aligned(INIT_L2_BYTES, ARM_L2_ALIGN));
339        memset(init_l2, 0, INIT_L2_BYTES);
340    } else {
341        init_l1 =  (union arm_l1_entry *)local_phys_to_mem(app_alloc_phys_aligned(INIT_L1_BYTES, ARM_L1_ALIGN));
342        memset(init_l1, 0, INIT_L1_BYTES);
343
344        init_l2 = (union arm_l2_entry *)local_phys_to_mem(app_alloc_phys_aligned(INIT_L2_BYTES, ARM_L2_ALIGN));
345        memset(init_l2, 0, INIT_L2_BYTES);
346    }
347
348    MSG("init_page_tables done: init_l1=%p init_l2=%p\n",
349            init_l1, init_l2);
350
351    /* Map pagetables into page CN */
352    int pagecn_pagemap = 0;
353
354    /*
355     * ARM has:
356     *
357     * L1 has 4096 entries (16KB).
358     * L2 Coarse has 256 entries (256 * 4B = 1KB).
359     *
360     * CPU driver currently fakes having 1024 entries in L1 and
361     * L2 with 1024 entries by treating a page as 4 consecutive
362     * L2 tables and mapping this as a unit in L1.
363     */
364    caps_create_new(ObjType_VNode_ARM_l1,
365                    mem_to_local_phys((lvaddr_t)init_l1),
366                    vnode_objsize(ObjType_VNode_ARM_l1), 0, my_core_id,
367                    caps_locate_slot(CNODE(spawn_state.pagecn),
368                        pagecn_pagemap++)
369                    );
370
371    //STARTUP_PROGRESS();
372
373    // Map L2 into successive slots in pagecn
374    size_t i;
375    for (i = 0; i < INIT_L2_BYTES / ARM_L2_TABLE_BYTES; i++) {
376        size_t objsize_vnode = vnode_objsize(ObjType_VNode_ARM_l2);
377        assert(objsize_vnode == ARM_L2_TABLE_BYTES);
378        caps_create_new(
379                        ObjType_VNode_ARM_l2,
380                        mem_to_local_phys((lvaddr_t)init_l2) + i*objsize_vnode,
381                        objsize_vnode, 0, my_core_id,
382                        caps_locate_slot(CNODE(spawn_state.pagecn),
383                            pagecn_pagemap++)
384                        );
385    }
386
387    /*
388     * Initialize init page tables - this just wires the L1
389     * entries through to the corresponding L2 entries.
390     */
391    STATIC_ASSERT(0 == (INIT_VBASE % ARM_L1_SECTION_BYTES), "");
392    for (lvaddr_t vaddr = INIT_VBASE;
393         vaddr < INIT_SPACE_LIMIT;
394         vaddr += ARM_L1_SECTION_BYTES) {
395        uintptr_t section = (vaddr - INIT_VBASE) / ARM_L1_SECTION_BYTES;
396        uintptr_t l2_off = section * ARM_L2_TABLE_BYTES;
397        lpaddr_t paddr = mem_to_local_phys((lvaddr_t)init_l2) + l2_off;
398        paging_map_user_pages_l1((lvaddr_t)init_l1, vaddr, paddr);
399    }
400
401    MSG("Calling paging_context_switch with address = %"PRIxLVADDR"\n",
402           mem_to_local_phys((lvaddr_t) init_l1));
403    paging_context_switch(mem_to_local_phys((lvaddr_t)init_l1));
404}
405
406/* Locate the first device region below 4GB listed in the multiboot memory
407 * map, and truncate it to fit. */
408static void
409first_device_region(lpaddr_t *base, lpaddr_t *length) {
410    struct multiboot_info *mb=
411        (struct multiboot_info *)core_data->multiboot_header;
412
413    lvaddr_t mmap_vaddr= local_phys_to_mem((lpaddr_t)mb->mmap_addr);
414    for(uint32_t i= 0; i < mb->mmap_length; i++) {
415        struct multiboot_mmap *mmap= (struct multiboot_mmap *)mmap_vaddr;
416
417        if(mmap->type == MULTIBOOT_MEM_TYPE_DEVICE) {
418            uint64_t base64=   mmap->base_addr;
419            uint64_t length64= mmap->length;
420
421            if(base64 > (uint64_t)UINT32_MAX) {
422                MSG("device region %"PRIu32" lies above 4GB.\n", i);
423            }
424            else if(base64 + (length64 - 1) > (uint64_t)UINT32_MAX) {
425                MSG("device region %"PRIu32" extends beyond 4GB, "
426                    "truncating it.\n", i);
427                length64= ((uint64_t)UINT32_MAX - base64) + 1;
428            }
429
430            *base=   (lpaddr_t)base64;
431            *length= (lpaddr_t)length64;
432            return;
433        }
434
435        mmap_vaddr+= mmap->size;
436    }
437
438    panic("No device regions specified in multiboot memory map.\n");
439}
440
441static struct dcb *
442spawn_init_common(const char *name, int argc, const char *argv[],
443                  lpaddr_t bootinfo_phys, alloc_phys_func alloc_phys,
444                  alloc_phys_aligned_func alloc_phys_aligned)
445{
446    MSG("spawn_init_common %s\n", name);
447
448    lvaddr_t paramaddr;
449    struct dcb *init_dcb = spawn_module(&spawn_state, name,
450                                        argc, argv,
451                                        bootinfo_phys, INIT_ARGS_VBASE,
452                                        alloc_phys, alloc_phys_aligned,
453                                        &paramaddr);
454
455    init_page_tables();
456
457    MSG("about to call mem_to_local_phys with lvaddr=%"PRIxLVADDR"\n",
458           init_l1);
459
460    init_dcb->vspace = mem_to_local_phys((lvaddr_t)init_l1);
461
462    spawn_init_map(init_l2, INIT_VBASE, INIT_ARGS_VBASE,
463                   spawn_state.args_page, ARGS_SIZE, INIT_PERM_RW);
464
465
466    // Map dispatcher
467    spawn_init_map(init_l2, INIT_VBASE, INIT_DISPATCHER_VBASE,
468                   mem_to_local_phys(init_dcb->disp), DISPATCHER_SIZE,
469                   INIT_PERM_RW);
470
471    /* Locate the memory-mapped device region. */
472    lpaddr_t device_base, device_length;
473    first_device_region(&device_base, &device_length);
474    MSG("Using device region at PA:0x%"PRIx32"-0x%"PRIx32"\n",
475            device_base, device_base + (device_length - 1));
476    if((1UL << log2ceil(device_length)) != device_length) {
477        panic("Device region isn't a power of two in size.\n");
478    }
479
480    /*
481     * We create the capability to the devices at this stage and store it
482     * in the TASKCN_SLOT_IO, where on x86 the IO capability is stored for
483     * device access on PCI.
484     *
485     * PCI is not available on our existing ARMv7 platforms, but this may be a
486     * problem in future.
487     */
488    struct cte *iocap=
489        caps_locate_slot(CNODE(spawn_state.taskcn), TASKCN_SLOT_IO);
490    errval_t err=
491        caps_create_new(ObjType_DevFrame, device_base, device_length,
492                        device_length, my_core_id, iocap);
493    assert(err_is_ok(err));
494
495    struct dispatcher_shared_generic *disp
496        = get_dispatcher_shared_generic(init_dcb->disp);
497    struct dispatcher_shared_arm *disp_arm
498        = get_dispatcher_shared_arm(init_dcb->disp);
499
500    /* Initialize dispatcher */
501    disp->disabled = true;
502    strncpy(disp->name, argv[0], DISP_NAME_LEN);
503
504    /* tell init the vspace addr of its dispatcher */
505    disp->udisp = INIT_DISPATCHER_VBASE;
506
507    /* Write the context ID for init - see arch/arm/dispatch.c. */
508    cp15_write_contextidr(((uint32_t)init_dcb) & ~MASK(8));
509
510    disp_arm->enabled_save_area.named.r0   = paramaddr;
511    disp_arm->enabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
512    arch_set_thread_register(INIT_DISPATCHER_VBASE);
513
514    MSG("spawn_init_common: starting from=%"PRIxLVADDR"\n");
515
516    dump_dispatcher(disp);
517
518    return init_dcb;
519}
520
521
/**
 * \brief Spawn the first (BSP) init domain.
 *
 * Allocates and maps the bootinfo page, spawns init's address space and
 * dispatcher, loads its ELF image, and creates the module and physical
 * memory capabilities it will manage.
 *
 * NOTE: the order of bsp_alloc_phys* calls is significant — the
 * bsp_alloc_phys(0) call near the end reads the allocation watermark, so
 * everything init needs must be allocated before create_phys_caps() is
 * told where free memory begins.
 *
 * \param name  multiboot module name of the init binary.
 * \return      init's DCB.
 */
struct dcb *
spawn_bsp_init(const char *name)
{
    MSG("spawn_bsp_init\n");

    /* Only the first core can run this code */
    assert(cpu_is_bsp());

    /* Allocate bootinfo */
    lpaddr_t bootinfo_phys = bsp_alloc_phys_aligned(BOOTINFO_SIZE, BASE_PAGE_SIZE);
    memset((void *)local_phys_to_mem(bootinfo_phys), 0, BOOTINFO_SIZE);

    /* Construct cmdline args: init receives the VA where bootinfo will be
     * mapped, as a decimal string. */
    char bootinfochar[16];
    snprintf(bootinfochar, sizeof(bootinfochar), "%u", INIT_BOOTINFO_VBASE);
    const char *argv[] = { "init", bootinfochar };
    int argc = 2;

    struct dcb *init_dcb =
        spawn_init_common(name, argc, argv, bootinfo_phys,
                          bsp_alloc_phys, bsp_alloc_phys_aligned);

    // Map bootinfo
    spawn_init_map(init_l2, INIT_VBASE, INIT_BOOTINFO_VBASE,
                   bootinfo_phys, BOOTINFO_SIZE, INIT_PERM_RW);

    struct startup_l2_info l2_info = { init_l2, INIT_VBASE };

    genvaddr_t init_ep, got_base;
    load_init_image(&l2_info, BSP_INIT_MODULE_NAME, &init_ep, &got_base);

    /* Seed init's register state: GOT pointer in r9 for both save areas,
     * entry point in the disabled-area PC, user mode with FIQs masked. */
    struct dispatcher_shared_arm *disp_arm =
        get_dispatcher_shared_arm(init_dcb->disp);
    disp_arm->enabled_save_area.named.r9   = got_base;
    disp_arm->got_base = got_base;

    disp_arm->disabled_save_area.named.pc   = init_ep;
    disp_arm->disabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
    disp_arm->disabled_save_area.named.r9   = got_base;

    /* Create caps for init to use */
    create_module_caps(&spawn_state);
    // bsp_alloc_phys(0) returns the current watermark without allocating.
    lpaddr_t init_alloc_end = bsp_alloc_phys(0); // XXX
    create_phys_caps(init_alloc_end);

    /* Fill bootinfo struct */
    //bootinfo->mem_spawn_core = KERNEL_IMAGE_SIZE; // Size of kernel

    return init_dcb;
}
572
/**
 * \brief Spawn the monitor domain on a non-BSP (APP) core.
 *
 * Builds the monitor's address space and dispatcher, hands it a frame cap
 * for the inter-core URPC channel, loads the monitor ELF image passed via
 * core_data, and seeds its register state.
 *
 * \param new_core_data  per-core boot data (URPC frame, monitor module, ...).
 * \param name           name to give the spawned domain.
 * \return               the monitor's DCB.
 */
struct dcb *spawn_app_init(struct arm_core_data *new_core_data, const char *name)
{
    errval_t err;

    /* Construct cmdline args */
    // Core id of the core that booted this core
    char coreidchar[10];
    snprintf(coreidchar, sizeof(coreidchar), "%d", core_data->src_core_id);

    // IPI channel id of core that booted this core
    char chanidchar[30];
    snprintf(chanidchar, sizeof(chanidchar), "chanid=%"PRIu32, core_data->chan_id);

    // Arch id of the core that booted this core
    char archidchar[30];
    snprintf(archidchar, sizeof(archidchar), "archid=%d",
             core_data->src_arch_id);

    // 5th slot left NULL-initialized (only 4 arguments are passed)
    const char *argv[5] = { name, coreidchar, chanidchar, archidchar };
    int argc = 4;

    struct dcb *init_dcb=
        spawn_init_common(name, argc, argv, 0, app_alloc_phys, app_alloc_phys_aligned);

    // Urpc frame cap
    struct cte *urpc_frame_cte =
        caps_locate_slot(CNODE(spawn_state.taskcn), TASKCN_SLOT_MON_URPC);
    // XXX: Create as devframe so the memory is not zeroed out
    err = caps_create_new(ObjType_DevFrame,
                          core_data->urpc_frame_base,
                          core_data->urpc_frame_size,
                          core_data->urpc_frame_size,
                          my_core_id,
                          urpc_frame_cte);
    assert(err_is_ok(err));
    // ... then retype it in place to a normal Frame for the monitor.
    urpc_frame_cte->cap.type = ObjType_Frame;
    lpaddr_t urpc_ptr = gen_phys_to_local_phys(urpc_frame_cte->cap.u.frame.base);

    /* Map urpc frame at MON_URPC_BASE */
    spawn_init_map(init_l2, INIT_VBASE, MON_URPC_VBASE, urpc_ptr, MON_URPC_SIZE,
                           INIT_PERM_RW);

    struct startup_l2_info l2_info = { init_l2, INIT_VBASE };

    // elf load the domain
    lvaddr_t monitor_binary=
        local_phys_to_mem(core_data->monitor_module.mod_start);
    // NOTE(review): mod_end is treated as INCLUSIVE here (+1) — confirm
    // this matches the boot driver's convention for monitor_module.
    size_t monitor_binary_size=
        core_data->monitor_module.mod_end -
        core_data->monitor_module.mod_start + 1;
    genvaddr_t entry_point, got_base=0;
    err = elf_load(EM_ARM, startup_alloc_init, &l2_info,
                monitor_binary, monitor_binary_size, &entry_point);
    if (err_is_fail(err)) {
        //err_print_calltrace(err);
        panic("ELF load of init module failed!");
    }

    // TODO: Fix application linkage so that it's non-PIC.
    struct Elf32_Shdr* got_shdr =
        elf32_find_section_header_name(monitor_binary, monitor_binary_size,
                                       ".got");
    if (got_shdr) {
        got_base = got_shdr->sh_addr;
    }

    /* Seed the monitor's register state: GOT pointer in r9 for both save
     * areas, entry point in the disabled-area PC, user mode, FIQs masked. */
    struct dispatcher_shared_arm *disp_arm =
        get_dispatcher_shared_arm(init_dcb->disp);
    disp_arm->enabled_save_area.named.r9   = got_base;
    disp_arm->got_base = got_base;

    disp_arm->disabled_save_area.named.pc   = entry_point;
    disp_arm->disabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
    disp_arm->disabled_save_area.named.r9   = got_base;
    arch_set_thread_register(INIT_DISPATCHER_VBASE);

    return init_dcb;
}
651
/**
 * \brief Final stage of ARMv7 kernel startup.
 *
 * Initializes this core's physical memory allocator, spawns the init
 * domain (BSP core) or the monitor domain (APP cores), and enters it via
 * dispatch().  Does not return.
 */
void arm_kernel_startup(void)
{
    MSG("arm_kernel_startup entered \n");
    struct dcb *init_dcb;

    if (cpu_is_bsp()) {
        MSG("Doing BSP related bootup \n");

        struct multiboot_info *mb=
            (struct multiboot_info *)core_data->multiboot_header;
        // Free memory starts past whichever ends later: the multiboot
        // data or the kernel image itself.
        size_t max_addr = max(multiboot_end_addr(mb),
                              (uintptr_t)&kernel_final_byte);

        /* Initialize the location to allocate phys memory from */
        bsp_init_alloc_addr = mem_to_local_phys(max_addr);

        /* Initial KCB was allocated by the boot driver. */
        assert(kcb_current);

        // Bring up init
        init_dcb = spawn_bsp_init(BSP_INIT_MODULE_NAME);
    } else {
        MSG("Doing non-BSP related bootup \n");

        /* The boot driver handed us a physical KCB pointer; translate it
         * to a kernel-virtual address before first use. */
        kcb_current = (struct kcb *)
            local_phys_to_mem((lpaddr_t) kcb_current);

        /* Initialize the allocator with the information passed to us */
        app_alloc_phys_start = core_data->memory_base_start;
        app_alloc_phys_end   = app_alloc_phys_start + core_data->memory_bytes;

        init_dcb = spawn_app_init(core_data, APP_INIT_MODULE_NAME);

        // NOTE(review): presumably acknowledges the IPI that woke this
        // core out of reset — confirm against the boot driver.
        uint32_t irq = gic_get_active_irq();
        gic_ack_irq(irq);
    }

    /* XXX - this really shouldn't be necessary. */
    MSG("Trying to enable interrupts\n");
    // __asm volatile ("CPSIE aif");
    MSG("Done enabling interrupts\n");

    /* printf("HOLD BOOTUP - SPINNING\n"); */
    /* while (1); */
    /* printf("THIS SHOULD NOT HAPPEN\n"); */

    // enable interrupt forwarding to cpu
    // FIXME: PS: enable this as it is needed for multicore setup.
    //gic_cpu_interface_enable();

    // Should not return
    MSG("Calling dispatch from arm_kernel_startup, start address is=%"PRIxLVADDR"\n",
           get_dispatcher_shared_arm(init_dcb->disp)->enabled_save_area.named.r0);
    dispatch(init_dcb);
    panic("Error spawning init!");

}
709