1/*
2 * Copyright (c) 2009,2010,2015, ETH Zurich.
3 * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
4 * All rights reserved.
5 *
6 * This file is distributed under the terms in the attached LICENSE file.
7 * If you do not find this file, copies can be found by writing to:
8 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
9 */
10
11#include <kernel.h>
12#include <dispatch.h>
13#include <string.h>
14#include <stdio.h>
15
16#include <barrelfish_kpi/init.h>
17#include <barrelfish_kpi/syscalls.h>
18#include <elf/elf.h>
19
20#include <arm_hal.h>
21#include <paging_kernel_arch.h>
22#include <exceptions.h>
23#include <sysreg.h>
24#include <cpiobin.h>
25#include <init.h>
26#include <barrelfish_kpi/arm_core_data.h>
27#include <kernel_multiboot2.h>
28#include <offsets.h>
29#include <startup_arch.h>
30#include <systime.h>
31#include <arch/arm/platform.h>
32
33#include <arch/arm/startup_arm.h>
34
35#include <target/aarch64/barrelfish_kpi/paging_arm_v8.h>
36
37#include <global.h>
38#include <kcb.h>
39
40#include <efi.h>
41
/// Shorthand for the address named by the capability stored in a cte.
#define CNODE(cte)              get_address(&(cte)->cap)

/// Emit a startup-subsystem trace line with the current function and line.
#define STARTUP_PROGRESS()      debug(SUBSYS_STARTUP, "%s:%d\n",          \
                                      __FUNCTION__, __LINE__);

/// Kernel log message, prefixed with the architecture name.
#define MSG(format, ...) printk( LOG_NOTE, "ARMv8-A: "format, ## __VA_ARGS__ )


/* Optional build-time prefix for module paths; empty by default. */
#if !defined(BF_BINARY_PREFIX)
#   define BF_BINARY_PREFIX
#endif

/// Multiboot module names of the binaries spawned on the BSP and APP cores.
#define BSP_INIT_MODULE_NAME    BF_BINARY_PREFIX "armv8/sbin/init"
#define APP_INIT_MODULE_NAME    BF_BINARY_PREFIX "armv8/sbin/monitor"
56
57
//static phys_mmap_t* g_phys_mmap;        // Physical memory map
static union armv8_ttable_entry *init_l0; // L0 page table for init
static union armv8_ttable_entry *init_l1; // L1 page table for init
static union armv8_ttable_entry *init_l2; // L2 page tables for init
static union armv8_ttable_entry *init_l3; // L3 page tables for init

// Bookkeeping for the init domain being spawned (cnodes, slot counters,
// args page); filled in by spawn_module() via spawn_init_common().
static struct spawn_state spawn_state;

/// Pointer to bootinfo structure for init (kernel-virtual; BSP core only)
struct bootinfo* bootinfo = NULL;

/**
 * Each kernel has a local copy of global and locks. However, during booting and
 * kernel relocation, these are set to point to global of the pristine kernel,
 * so that all the kernels can share it.
 */
//static  struct global myglobal;
struct global *global;
76
/**
 * \brief Round 'value' up to the nearest multiple of 'unit'.
 *
 * 'unit' must be a non-zero power of two.
 */
static inline uintptr_t round_up(uintptr_t value, size_t unit)
{
    assert(0 == (unit & (unit - 1)));
    uintptr_t mask = (uintptr_t)unit - 1;
    return (value + mask) & ~mask;
}
83
/**
 * \brief Round 'value' down to the nearest multiple of 'unit'.
 *
 * 'unit' must be a non-zero power of two.
 */
static inline uintptr_t round_down(uintptr_t value, size_t unit)
{
    assert(0 == (unit & (unit - 1)));
    uintptr_t mask = (uintptr_t)unit - 1;
    return value & ~mask;
}
90
91/**
92 * Map frames into init process address space. Init has a contiguous set of
93 * l3 entries so this is straightforward.
94 *
95 * @param l3_table      pointer to init's L3 table.
96 * @param l3_base       virtual address represented by first L3 table entry
97 * @param va_base       virtual address to map.
98 * @param pa_base       physical address to associate with virtual address.
99 * @param bytes        number of bytes to map.
100 * @param l3_flags      ARM L3 small page flags for mapped pages.
101 */
102static void spawn_init_map(union armv8_ttable_entry *l3_table, lvaddr_t l3_base,
103                           lvaddr_t va_base, lpaddr_t pa_base, size_t bytes,
104                           uintptr_t  l3_flags)
105{
106    assert(va_base >= l3_base);
107    assert(0 == (va_base & (BASE_PAGE_SIZE - 1)));
108    assert(0 == (pa_base & (BASE_PAGE_SIZE - 1)));
109    assert(0 == (bytes & (BASE_PAGE_SIZE - 1)));
110
111    long bi = (va_base - l3_base) / BASE_PAGE_SIZE;
112    long li = bi + bytes / BASE_PAGE_SIZE;
113
114    while (bi < li) {
115        /* XXX: we should check not to overrun here */
116        paging_set_l3_entry(&l3_table[bi], pa_base, l3_flags);
117        pa_base += BASE_PAGE_SIZE;
118        bi++;
119    }
120}
121
122
123static uint32_t elf_to_l3_flags(uint32_t eflags)
124{
125    switch (eflags & (PF_W|PF_R))
126    {
127      case PF_W|PF_R:
128        return (VMSAv8_64_L3_USR_RW |
129                VMSAv8_64_L3_CACHEABLE |
130                VMSAv8_64_L3_BUFFERABLE);
131      case PF_R:
132        return (VMSAv8_64_L3_USR_RO |
133                VMSAv8_64_L3_CACHEABLE |
134                VMSAv8_64_L3_BUFFERABLE);
135      default:
136        panic("Unknown ELF flags combination.");
137    }
138}
139
/// Describes init's L3 table window; passed as 'state' to startup_alloc_init().
struct startup_l3_info
{
    union armv8_ttable_entry *l3_table; ///< first of init's contiguous L3 tables
    lvaddr_t l3_base;                   ///< virtual address covered by the first entry
};
145
/**
 * \brief Page-allocation callback for elf_load().
 *
 * Allocates physical memory covering [gvbase, gvbase + bytes), maps it into
 * init's address space with permissions derived from the ELF segment flags,
 * and returns a kernel-virtual pointer where the loader can copy the segment.
 *
 * \param state   actually a struct startup_l3_info describing init's L3 window.
 * \param gvbase  virtual base of the segment (need not be page aligned).
 * \param bytes   segment size in bytes.
 * \param flags   ELF program-header flags (PF_R / PF_W).
 * \param ret     [out] kernel-virtual address corresponding to gvbase.
 *
 * \returns SYS_ERR_OK always.
 *          NOTE(review): a failed allocation (pa == 0) or zero-size request
 *          is not reported as an error — the caller gets SYS_ERR_OK with
 *          *ret == 0. Verify that elf_load() tolerates a NULL destination
 *          before relying on this path.
 */
static errval_t startup_alloc_init(void* state, genvaddr_t gvbase, size_t bytes,
                                   uint32_t flags, void **ret)
{
    const struct startup_l3_info* s2i = (const struct startup_l3_info*)state;

    /* Page-align the request: [sv, lv) covers the segment, 'off' is the
     * offset of gvbase within its first page. */
    lvaddr_t sv = round_down((lvaddr_t)gvbase, BASE_PAGE_SIZE);
    size_t   off = (lvaddr_t)gvbase - sv;
    lvaddr_t lv = round_up((lvaddr_t)gvbase + bytes, BASE_PAGE_SIZE);
    lpaddr_t pa;

    //STARTUP_PROGRESS();
    /* BSP and APP cores draw early physical memory from different pools. */
    if(cpu_is_bsp())
        pa = bsp_alloc_phys_aligned((lv - sv), BASE_PAGE_SIZE);
    else
        pa = app_alloc_phys_aligned((lv - sv), BASE_PAGE_SIZE);

    if (lv > sv && (pa != 0))
    {
        spawn_init_map(s2i->l3_table, s2i->l3_base, sv,
                       pa, lv - sv, elf_to_l3_flags(flags));
        *ret = (void*)(local_phys_to_mem(pa) + off);
    }
    else
    {
        *ret = 0;
    }
    return SYS_ERR_OK;
}
174
175static void
176load_init_image(
177    struct startup_l3_info* l3i,
178    const char *name,
179    genvaddr_t* init_ep,
180    genvaddr_t* got_base
181    )
182{
183    lvaddr_t elf_base;
184    size_t elf_bytes;
185    errval_t err;
186
187    *init_ep = *got_base = 0;
188
189    /* Load init ELF64 binary */
190    struct multiboot_info *multiboot =
191            (struct multiboot_info *) local_phys_to_mem(
192                    armv8_glbl_core_data->multiboot_image.base);
193    struct multiboot_tag_module_64 *module = multiboot2_find_module_64(
194            multiboot->tags, multiboot->total_size - 8, name);
195    if (module == NULL) {
196        panic("Could not find init module!");
197    }
198
199    elf_base =  local_phys_to_mem(module->mod_start);
200    elf_bytes = MULTIBOOT_MODULE_SIZE(*module);
201
202    debug(SUBSYS_STARTUP, "load_init_image %p %08x\n", elf_base, elf_bytes);
203    printf("load_init_image %p %08x\n", elf_base, elf_bytes);
204
205    err = elf_load(EM_AARCH64, startup_alloc_init, l3i,
206            elf_base, elf_bytes, init_ep);
207    if (err_is_fail(err)) {
208        //err_print_calltrace(err);
209        panic("ELF load of " BSP_INIT_MODULE_NAME " failed!\n");
210    }
211
212    // TODO: Fix application linkage so that it's non-PIC.
213    struct Elf64_Shdr* got_shdr =
214        elf64_find_section_header_name((lvaddr_t)elf_base, elf_bytes, ".got");
215    if (got_shdr)
216    {
217        *got_base = got_shdr->sh_addr;
218    }
219}
220
221
222/// Setup the module cnode, which contains frame caps to all multiboot modules
223void create_module_caps(struct spawn_state *st)
224{
225    errval_t err;
226
227    /* Create caps for multiboot modules */
228    struct multiboot_info *multiboot =
229        (struct multiboot_info *)local_phys_to_mem(armv8_glbl_core_data->multiboot_image.base);
230
231    // Allocate strings area
232    lpaddr_t mmstrings_phys = bsp_alloc_phys(BASE_PAGE_SIZE);
233    lvaddr_t mmstrings_base = local_phys_to_mem(mmstrings_phys);
234    lvaddr_t mmstrings = mmstrings_base;
235
236    // create cap for strings area in first slot of modulecn
237    assert(st->modulecn_slot == 0);
238    err = caps_create_new(ObjType_Frame, mmstrings_phys, BASE_PAGE_SIZE,
239                          BASE_PAGE_SIZE, my_core_id,
240                          caps_locate_slot(CNODE(st->modulecn),
241                                           st->modulecn_slot++));
242    assert(err_is_ok(err));
243
244    //Nag
245    bootinfo->regions_length = 0;
246
247    /* Walk over multiboot modules, creating frame caps */
248    size_t position = 0;
249    struct mem_region *region;
250
251    lpaddr_t acpi_base = (lpaddr_t)-1;
252    /* add the ACPI regions */
253    struct multiboot_tag_new_acpi *acpi_new;
254    acpi_new = (struct multiboot_tag_new_acpi *)
255           multiboot2_find_tag(multiboot->tags, multiboot->total_size - 8, MULTIBOOT_TAG_TYPE_ACPI_NEW);
256    if (acpi_new) {
257        acpi_base = mem_to_local_phys((lvaddr_t)&acpi_new->rsdp[0]);
258    } else {
259        struct multiboot_tag_old_acpi *acpi_old;
260        acpi_old = (struct multiboot_tag_old_acpi *)
261           multiboot2_find_tag(multiboot->tags, multiboot->total_size - 8, MULTIBOOT_TAG_TYPE_ACPI_OLD);
262        if (acpi_old) {
263            acpi_base = mem_to_local_phys((lvaddr_t)&acpi_old->rsdp[0]);
264        }
265    }
266
267    if (acpi_base != (lpaddr_t)-1) {
268        region = &bootinfo->regions[bootinfo->regions_length++];
269        region->mr_base = acpi_base;
270        region->mr_type = RegionType_ACPI_TABLE;
271    }
272
273    /* add the module regions */
274    position = 0;
275    struct multiboot_tag_module_64 *module = (struct multiboot_tag_module_64 *)
276            multiboot2_find_tag(multiboot->tags, multiboot->total_size - 8, MULTIBOOT_TAG_TYPE_MODULE_64);
277    while (module) {
278        // Set memory regions within bootinfo
279        region = &bootinfo->regions[bootinfo->regions_length++];
280
281        genpaddr_t remain = module->mod_end - module->mod_start;
282        genpaddr_t base_addr = local_phys_to_gen_phys(module->mod_start);
283        region->mr_type = RegionType_Module;
284        region->mr_base = base_addr;
285        region->mrmod_slot = st->modulecn_slot;  // first slot containing caps
286        region->mrmod_size = remain;  // size of image _in bytes_
287        region->mrmod_data = mmstrings - mmstrings_base; // offset of string in area
288
289        // round up to page size for caps
290        remain = ROUND_UP(remain, BASE_PAGE_SIZE);
291        assert((base_addr & BASE_PAGE_MASK) == 0);
292        assert((remain & BASE_PAGE_MASK) == 0);
293
294        assert(st->modulecn_slot < cnode_get_slots(&st->modulecn->cap));
295        // create as DevFrame cap to avoid zeroing memory contents
296        err = caps_create_new(ObjType_DevFrame, base_addr, remain,
297                              remain, my_core_id,
298                              caps_locate_slot(CNODE(st->modulecn),
299                                               st->modulecn_slot++));
300        assert(err_is_ok(err));
301
302        // Copy multiboot module string to mmstrings area
303        strcpy((char *)mmstrings, module->cmdline);
304        mmstrings += strlen(module->cmdline) + 1;
305        assert(mmstrings < mmstrings_base + BASE_PAGE_SIZE);
306
307        module = ((void *) module) + module->size;
308        position += module->size;
309        module = (struct multiboot_tag_module_64 *) multiboot2_find_tag(
310                (struct multiboot_tag *)module, (multiboot->total_size - 8) - position,
311                MULTIBOOT_TAG_TYPE_MODULE_64);
312    }
313}
314
/**
 * \brief Create caps for a physical region, excluding a reserved window.
 *
 * If [reserved_start, reserved_end] overlaps [region_base, region_base +
 * region_size], caps are created only for the parts of the region before
 * and after the reserved window; otherwise one cap range covers the whole
 * region. A region fully inside the window produces no caps at all.
 */
static void
create_phys_caps_region(lpaddr_t reserved_start, lpaddr_t reserved_end, lpaddr_t region_base,
        size_t region_size, enum region_type region_type) {
    errval_t err = SYS_ERR_OK;
    if (reserved_start <= region_base + region_size && region_base <= reserved_end) {
        // reserved overlaps with region
        if (region_base < reserved_start) {
            // part of the region that precedes the reserved window
            err = create_caps_to_cnode(region_base, reserved_start - region_base, region_type, &spawn_state, bootinfo);
        }
        assert(err_is_ok(err));
        if (region_base + region_size > reserved_end) {
            // part of the region that follows the reserved window
            err = create_caps_to_cnode(reserved_end, region_base + region_size - reserved_end, region_type, &spawn_state, bootinfo);
        }
    } else {
        // no overlap: cover the entire region with one cap range
        err = create_caps_to_cnode(region_base, region_size, region_type, &spawn_state, bootinfo);
    }
    assert(err_is_ok(err));
}
333
/// Create physical address range or RAM caps to unused physical memory
static void create_phys_caps(lpaddr_t reserved_start, lpaddr_t reserved_end)
{
    /* Walk multiboot MMAP structure, and create appropriate caps for memory */
    struct multiboot_tag_efi_mmap *mmap = (struct multiboot_tag_efi_mmap *)
            local_phys_to_mem(armv8_glbl_core_data->efi_mmap);

    lpaddr_t last_end_addr = 0;
    for (size_t i = 0; i < (mmap->size - sizeof(struct multiboot_tag_efi_mmap)) / mmap->descr_size; i++) {
        // Descriptors are 'descr_size' apart, which may exceed
        // sizeof(efi_memory_descriptor); index by bytes, not by struct.
        efi_memory_descriptor *desc = (efi_memory_descriptor *)(mmap->efi_mmap + mmap->descr_size * i);

        // Classify the EFI memory type: conventional/persistent memory
        // becomes empty (usable) RAM; everything else is platform data.
        enum region_type region_type = RegionType_Max;
        switch(desc->Type) {
            case EfiConventionalMemory:
               region_type = RegionType_Empty;
               break;
            case EfiPersistentMemory :
                region_type = RegionType_Empty;
                break;
            case EfiACPIReclaimMemory :
                region_type = RegionType_PlatformData;
                break;
            default:
               region_type = RegionType_PlatformData;
           break;
        };

        if (last_end_addr < desc->PhysicalStart) {
            // create cap for gap in mmap
            create_phys_caps_region(reserved_start, reserved_end, last_end_addr, desc->PhysicalStart - last_end_addr, RegionType_PhyAddr);
        }
        last_end_addr = desc->PhysicalStart + desc->NumberOfPages * BASE_PAGE_SIZE;

        create_phys_caps_region(reserved_start, reserved_end, desc->PhysicalStart, desc->NumberOfPages * BASE_PAGE_SIZE, region_type);
    }

    // Cover the rest of the 48-bit physical address space beyond the last
    // EFI-described region with a physical-address cap range.
    size_t size = (1UL << 48) - last_end_addr;


    create_phys_caps_region(reserved_start, reserved_end, last_end_addr, size, RegionType_PhyAddr);
}
375
/**
 * \brief Allocate and wire up init's four-level page-table hierarchy.
 *
 * Allocates zeroed, contiguous L0..L3 tables, creates a VNode cap for each
 * table in init's page cnode, then links L0->L1, L1->L2 and L2->L3 entries
 * so the L3 tables cover [ARMV8_INIT_VBASE, ARMV8_INIT_SPACE_LIMIT).
 * Results are published in the file-scope init_l0..init_l3 pointers.
 */
static void init_page_tables(void)
{
    // BSP and APP cores draw early physical memory from different allocators.
    lpaddr_t (*alloc_phys_aligned)(size_t size, size_t align);
    if (cpu_is_bsp()) {
        alloc_phys_aligned = bsp_alloc_phys_aligned;
    } else {
        alloc_phys_aligned = app_alloc_phys_aligned;
    }

    // Create page table for init
    const size_t l0_size = VMSAv8_64_PTABLE_NUM_ENTRIES * INIT_L0_SIZE * sizeof(union armv8_ttable_entry);
    init_l0 = (void *) local_phys_to_mem(alloc_phys_aligned(l0_size, VMSAv8_64_PTABLE_SIZE));
    memset(init_l0, 0, l0_size);

    // Each lower level is scaled by the per-level INIT_Lx_SIZE factor so
    // the tables at each level form one contiguous allocation.
    const size_t l1_size = l0_size * INIT_L1_SIZE;
    init_l1 = (void *) local_phys_to_mem(alloc_phys_aligned(l1_size, VMSAv8_64_PTABLE_SIZE));
    memset(init_l1, 0, l1_size);

    const size_t l2_size = l1_size * INIT_L2_SIZE;
    init_l2 = (void *) local_phys_to_mem(alloc_phys_aligned(l2_size, VMSAv8_64_PTABLE_SIZE));
    memset(init_l2, 0, l2_size);

    const size_t l3_size = l2_size * INIT_L3_SIZE;
    init_l3 = (void *) local_phys_to_mem(alloc_phys_aligned(l3_size, VMSAv8_64_PTABLE_SIZE));
    memset(init_l3, 0, l3_size);

    /* Map pagetables into page CN */
    int pagecn_pagemap = 0;

    /*
     * AARCH64 has:
     *
     * L0 has 1 entry.
     * L1 has 1 entry.
     * L2 Coarse has 16 entries (512 * 8B = 4KB).
     * L3 Coarse has 16*512 entries (512 * 8B = 4KB).
     *
     */

    printk(LOG_NOTE, "init page tables: l0=%p, l1=%p, l2=%p, l3=%p\n",
            init_l0, init_l1, init_l2, init_l3);

    // Slot 0 of the page cnode: the single L0 root table.
    caps_create_new(
            ObjType_VNode_AARCH64_l0,
            mem_to_local_phys((lvaddr_t)init_l0),
            vnode_objsize(ObjType_VNode_AARCH64_l0), 0,
                        my_core_id,
            caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
    );

    // One VNode cap per L1 table, in consecutive slots.
    for (size_t i = 0; i < INIT_L1_SIZE; i++) {
        size_t objsize_vnode = vnode_objsize(ObjType_VNode_AARCH64_l1);
        assert(objsize_vnode == BASE_PAGE_SIZE);
        caps_create_new(
                ObjType_VNode_AARCH64_l1,
                mem_to_local_phys((lvaddr_t)init_l1) + (i * objsize_vnode),
                objsize_vnode, 0, my_core_id,
                caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
        );
    }

    //STARTUP_PROGRESS();
    // One VNode cap per L2 table, in consecutive slots.
    for(size_t i = 0; i < INIT_L2_SIZE; i++) {
        size_t objsize_vnode = vnode_objsize(ObjType_VNode_AARCH64_l2);
        assert(objsize_vnode == BASE_PAGE_SIZE);
        caps_create_new(
                ObjType_VNode_AARCH64_l2,
                mem_to_local_phys((lvaddr_t)init_l2) + (i * objsize_vnode),
                objsize_vnode, 0, my_core_id,
                caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
        );
    }

    // Map L3 into successive slots in pagecn
    for(size_t i = 0; i < INIT_L3_SIZE; i++) {
        size_t objsize_vnode = vnode_objsize(ObjType_VNode_AARCH64_l3);
        assert(objsize_vnode == BASE_PAGE_SIZE);
        caps_create_new(
                ObjType_VNode_AARCH64_l3,
                mem_to_local_phys((lvaddr_t)init_l3) + (i * objsize_vnode),
                objsize_vnode, 0,
                my_core_id,
                caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
        );
    }

    /*
     * Initialize init page tables - this just wires the L0
     * entries through to the corresponding L1 entries.
     */
    for(lvaddr_t vaddr = ARMV8_INIT_VBASE;
        vaddr < ARMV8_INIT_SPACE_LIMIT;
        vaddr += VMSAv8_64_L0_SIZE)
    {
        uintptr_t section = (vaddr - ARMV8_INIT_VBASE) / VMSAv8_64_L0_SIZE;
        uintptr_t l1_off = section * VMSAv8_64_PTABLE_SIZE;
        lpaddr_t paddr = mem_to_local_phys((lvaddr_t)init_l1) + l1_off;
        paging_map_table_l0(init_l0, vaddr, paddr);
    }
    /*
     * Initialize init page tables - this just wires the L1
     * entries through to the corresponding L2 entries.
     */
    for(lvaddr_t vaddr = ARMV8_INIT_VBASE;
        vaddr < ARMV8_INIT_SPACE_LIMIT;
        vaddr += VMSAv8_64_L1_BLOCK_SIZE)
    {
        uintptr_t section = (vaddr - ARMV8_INIT_VBASE) / VMSAv8_64_L1_BLOCK_SIZE;
        uintptr_t l2_off = section * VMSAv8_64_PTABLE_SIZE;
        lpaddr_t paddr = mem_to_local_phys((lvaddr_t)init_l2) + l2_off;
        paging_map_table_l1(init_l1, vaddr, paddr);
    }

    /*
     * Initialize init page tables - this just wires the L2
     * entries through to the corresponding L3 entries.
     */
    STATIC_ASSERT(0 == (ARMV8_INIT_VBASE % VMSAv8_64_L2_BLOCK_SIZE), "");
    for(lvaddr_t vaddr = ARMV8_INIT_VBASE;
        vaddr < ARMV8_INIT_SPACE_LIMIT;
        vaddr += VMSAv8_64_L2_BLOCK_SIZE)
    {
        uintptr_t section = (vaddr - ARMV8_INIT_VBASE) / VMSAv8_64_L2_BLOCK_SIZE;
        uintptr_t l3_off = section * VMSAv8_64_PTABLE_SIZE;

        lpaddr_t paddr = mem_to_local_phys((lvaddr_t)init_l3) + l3_off;

        paging_map_table_l2(init_l2, vaddr, paddr);
    }

}
507
/**
 * \brief Spawning steps shared by BSP and APP cores.
 *
 * Creates the init/monitor domain via spawn_module(), builds its page
 * tables, maps the args page and dispatcher frame into its address space,
 * and initializes the dispatcher shared structure.
 *
 * \param name               name of the domain being spawned.
 * \param argc/argv          command line handed to the new domain.
 * \param bootinfo_phys      physical address of bootinfo (0 on APP cores).
 * \param alloc_phys         physical allocator for this core.
 * \param alloc_phys_aligned aligned variant of the allocator.
 *
 * \returns the DCB of the newly created domain.
 */
static struct dcb *spawn_init_common(const char *name,
                                     int argc, const char *argv[],
                                     lpaddr_t bootinfo_phys,
                                     alloc_phys_func alloc_phys,
                                     alloc_phys_aligned_func alloc_phys_aligned)
{
    struct dispatcher_shared_generic *disp;
    struct dispatcher_shared_aarch64 *disp_aarch64;

    MSG("spawn_init_common %s\n", name);

    lvaddr_t paramaddr;
    struct dcb *init_dcb = spawn_module(&spawn_state, name, argc, argv,
                                        bootinfo_phys, INIT_ARGS_VBASE,
                                        alloc_phys, alloc_phys_aligned,
                                        &paramaddr);
    /* initialize page tables */
    init_page_tables();

    /* point the domain's vspace at the freshly built L0 root table */
    init_dcb->vspace = mem_to_local_phys((lvaddr_t)init_l0);

    /* map the spawn args page at its well-known virtual address */
    spawn_init_map(init_l3, ARMV8_INIT_VBASE, INIT_ARGS_VBASE,
                       spawn_state.args_page, ARGS_SIZE, INIT_PERM_RW);

    /* Map dispatcher */
    spawn_init_map(init_l3, ARMV8_INIT_VBASE, INIT_DISPATCHER_VBASE,
                   mem_to_local_phys(init_dcb->disp), DISPATCHER_FRAME_SIZE,
                   INIT_PERM_RW);

    disp = get_dispatcher_shared_generic(init_dcb->disp);
    disp_aarch64 = get_dispatcher_shared_aarch64(init_dcb->disp);

    /* Initialize dispatcher */
    disp->disabled = true;
    strncpy(disp->name, argv[0], DISP_NAME_LEN);

    /* Tell init the vspace addr of its dispatcher. */
    disp->udisp = INIT_DISPATCHER_VBASE;

    disp->systime_frequency = systime_frequency;

    /* TODO: write the context ID for init */

    /* Set the thread ID register to point to the shared structure. */

    /* x0 carries the args address into the domain; start with IRQs masked */
    disp_aarch64->enabled_save_area.named.x0   = paramaddr;
    disp_aarch64->enabled_save_area.named.spsr = AARCH64_MODE_USR | CPSR_I_MASK;
    sysreg_write_tpidrro_el0((uint64_t)disp->udisp);

    return init_dcb;
}
559
/**
 * \brief Spawn the init domain on the BSP core.
 *
 * Allocates and maps bootinfo, spawns the common init domain, loads the
 * init ELF image, and populates bootinfo with module and physical-memory
 * caps.
 *
 * \param name  multiboot module name of the init binary.
 * \returns the DCB of the newly created init domain.
 */
struct dcb *spawn_bsp_init(const char *name)
{
    MSG("spawning '%s' on BSP core\n", name);
    /* Only the first core can run this code */
    assert(cpu_is_bsp());

    /* Allocate bootinfo */
    lpaddr_t bootinfo_phys = bsp_alloc_phys_aligned(BOOTINFO_SIZE, BASE_PAGE_SIZE);
    memset((void *)local_phys_to_mem(bootinfo_phys), 0, BOOTINFO_SIZE);

    /* store pointer to bootinfo in kernel virtual memory */
    bootinfo = (struct bootinfo *) local_phys_to_mem(bootinfo_phys);

    /* Construct cmdline args: init receives the vaddr where bootinfo will
     * be mapped in its own address space. */
    char bootinfochar[16];
    snprintf(bootinfochar, sizeof(bootinfochar), "%u", INIT_BOOTINFO_VBASE);
    const char *argv[] = { "init", bootinfochar };
    int argc = 2;

    /* perform common spawning of init domain */
    struct dcb *init_dcb = spawn_init_common(name, argc, argv,bootinfo_phys,
            bsp_alloc_phys, bsp_alloc_phys_aligned);

    /* map boot info into init's VSPACE */
    spawn_init_map(init_l3, ARMV8_INIT_VBASE, INIT_BOOTINFO_VBASE, bootinfo_phys,
                   BOOTINFO_SIZE, INIT_PERM_RW);

    /* load the image */
    genvaddr_t init_ep, got_base;
    struct startup_l3_info l3_info = { init_l3, ARMV8_INIT_VBASE };
    load_init_image(&l3_info, BSP_INIT_MODULE_NAME, &init_ep, &got_base);

    MSG("init loaded with entry=0x%" PRIxGENVADDR " and GOT=0x%" PRIxGENVADDR "\n",
         init_ep, got_base);

    struct dispatcher_shared_aarch64 *disp_aarch64 =
            get_dispatcher_shared_aarch64(init_dcb->disp);

    /* setting GOT pointers */
    disp_aarch64->got_base = got_base;
    /* XXX - Why does the kernel do this? -DC */
    disp_aarch64->enabled_save_area.named.x10  = got_base;
    disp_aarch64->disabled_save_area.named.x10  = got_base;

    /* setting entry points */
    disp_aarch64->disabled_save_area.named.pc   = init_ep;
    disp_aarch64->disabled_save_area.named.spsr = AARCH64_MODE_USR | CPSR_F_MASK;

    /* Create caps for init to use */
    create_module_caps(&spawn_state);
    /* NOTE(review): presumably bsp_alloc_phys(0) returns the current
     * allocation watermark, marking the end of memory already consumed by
     * the kernel and init — confirm against the allocator's contract. */
    lpaddr_t init_alloc_end = bsp_alloc_phys(0);
    create_phys_caps(armv8_glbl_core_data->start_kernel_ram, init_alloc_end);

    /* Fill bootinfo struct */
    bootinfo->mem_spawn_core = KERNEL_IMAGE_SIZE; // Size of kernel

    return init_dcb;
}
618
/**
 * \brief Spawn the monitor domain on an APP core.
 *
 * Builds the command line from the boot parameters in 'core_data', spawns
 * the common init domain, creates and maps the monitor URPC frame, and
 * ELF-loads the monitor binary handed over by the booting core.
 *
 * \param core_data  boot parameters written by the core that booted us.
 * \param name       name of the monitor binary.
 * \returns the DCB of the newly created domain.
 */
struct dcb *spawn_app_init(struct armv8_core_data *core_data,
                           const char *name)
{
    errval_t err;

    MSG("spawning '%s' on APP core\n", name);

    /* Only the app core can run this code */
    assert(!cpu_is_bsp());

    /* Construct cmdline args */
    // Core id of the core that booted this core
    char coreidchar[10];
    snprintf(coreidchar, sizeof(coreidchar), "%d", core_data->src_core_id);

    // IPI channel id of core that booted this core
    char chanidchar[30];
    snprintf(chanidchar, sizeof(chanidchar), "chanid=%"PRIu32, core_data->chan_id);

    // Arch id of the core that booted this core
    char archidchar[30];
    snprintf(archidchar, sizeof(archidchar), "archid=%d",
            core_data->src_arch_id);

    // The fifth slot stays zero-initialized, NULL-terminating the vector.
    const char *argv[5] = { name, coreidchar, chanidchar, archidchar };
    int argc = 4;



    struct dcb *init_dcb= spawn_init_common(name, argc, argv, 0, app_alloc_phys,
                                            app_alloc_phys_aligned);


    MSG("creating monitor URPC frame cap\n");
    // Urpc frame cap
    struct cte *urpc_frame_cte = caps_locate_slot(CNODE(spawn_state.taskcn),
                                                  TASKCN_SLOT_MON_URPC);

    // XXX: Create as devframe so the memory is not zeroed out
    err = caps_create_new(ObjType_DevFrame,
                          core_data->urpc_frame.base,
                          core_data->urpc_frame.length,
                          core_data->urpc_frame.length,
                          my_core_id,
                          urpc_frame_cte);
    assert(err_is_ok(err));
    // Retype in place to an ordinary frame now that creation skipped zeroing.
    urpc_frame_cte->cap.type = ObjType_Frame;
    lpaddr_t urpc_ptr = gen_phys_to_local_phys(urpc_frame_cte->cap.u.frame.base);


    /* Map urpc frame at MON_URPC_BASE */
    MSG("mapping URPC frame cap %" PRIxLPADDR" \n",urpc_ptr );
    spawn_init_map(init_l3, ARMV8_INIT_VBASE, MON_URPC_VBASE, urpc_ptr,
                   MON_URPC_SIZE, INIT_PERM_RW);

    struct startup_l3_info l3_info = { init_l3, ARMV8_INIT_VBASE };

    // elf load the domain
    genvaddr_t entry_point, got_base=0;

    MSG("loading elf '%s' @ %" PRIxLPADDR "\n", name,
        local_phys_to_mem(core_data->monitor_binary.base));

    err = elf_load(EM_AARCH64, startup_alloc_init, &l3_info,
            local_phys_to_mem(core_data->monitor_binary.base),
            core_data->monitor_binary.length, &entry_point);
    if (err_is_fail(err)) {
        //err_print_calltrace(err);
        panic("ELF load of init module failed!");
    }

    // TODO: Fix application linkage so that it's non-PIC.
    struct Elf64_Shdr* got_shdr;
    got_shdr = elf64_find_section_header_name(local_phys_to_mem(core_data->monitor_binary.base),
                                           core_data->monitor_binary.length, ".got");
    if (got_shdr)
    {
        got_base = got_shdr->sh_addr;
    }

    MSG("init loaded with entry=0x%" PRIxGENVADDR " and GOT=0x%" PRIxGENVADDR "\n",
        entry_point, got_base);

    struct dispatcher_shared_aarch64 *disp_aarch64 =
            get_dispatcher_shared_aarch64(init_dcb->disp);

    /* hand the GOT base to the domain (see matching XXX in spawn_bsp_init) */
    disp_aarch64->got_base = got_base;
    disp_aarch64->enabled_save_area.named.x10  = got_base;
    disp_aarch64->disabled_save_area.named.x10  = got_base;

    /* setting entry points */
    disp_aarch64->disabled_save_area.named.pc   = entry_point;
    disp_aarch64->disabled_save_area.named.spsr = AARCH64_MODE_USR | CPSR_F_MASK;
    //arch_set_thread_register(INIT_DISPATCHER_VBASE);

    MSG("init dcb set up\n");

    return init_dcb;

}
719
/**
 * \brief Final stage of kernel startup: spawn init/monitor and dispatch it.
 *
 * On the BSP core: initializes the boot-time physical allocator, allocates
 * the first KCB and spawns init. On APP cores: adopts the allocator range
 * and KCB handed over in the core data and spawns the monitor. Ends by
 * dispatching the new domain; does not return.
 */
void arm_kernel_startup(void)
{
    /* Initialize the core_data */
    /* Used when bringing up other cores, must be at consistent global address
     * seen by all cores */

    struct dcb *init_dcb;

    if (cpu_is_bsp()) {
        MSG("Doing BSP related bootup \n");

        /* Initialize the location to allocate phys memory from */
        printf("start_free_ram = 0x%lx\n", armv8_glbl_core_data->start_free_ram);
        bsp_init_alloc_addr = armv8_glbl_core_data->start_free_ram;

        /* allocate initial KCB */
        kcb_current= (struct kcb *)local_phys_to_mem(
                bsp_alloc_phys(sizeof(*kcb_current)));
        assert(kcb_current);
        memset(kcb_current, 0, sizeof(*kcb_current));

        init_dcb = spawn_bsp_init(BSP_INIT_MODULE_NAME);
    } else {
        MSG("Doing non-BSP related bootup \n");

        /* Initialize the allocator with the memory window handed over by
         * the booting core. */
        app_alloc_phys_start = (armv8_glbl_core_data->memory.base);
        app_alloc_phys_end   = (armv8_glbl_core_data->memory.length + app_alloc_phys_start);

        MSG("Memory: %lx, %lx, size=%zu kB\n", app_alloc_phys_start, app_alloc_phys_end,
            (app_alloc_phys_end - app_alloc_phys_start + 1) >> 10);

        /* APP cores reuse the KCB created for them by the booting core. */
        kcb_current= (struct kcb *)local_phys_to_mem(armv8_glbl_core_data->kcb);

        init_dcb = spawn_app_init(armv8_glbl_core_data, APP_INIT_MODULE_NAME);
    }
    // enable interrupt forwarding to cpu

    MSG("Calling dispatch from arm_kernel_startup, entry point %#"PRIxLVADDR"\n",
            get_dispatcher_shared_aarch64(init_dcb->disp)->disabled_save_area.named.pc);

#ifndef CONFIG_ONESHOT_TIMER
    systime_set_timer(kernel_timeslice);
#endif

    // Should not return
    dispatch(init_dcb);

    panic("Error spawning init!");

}
771