/**
 * \file
 * \brief Boot driver arch specific parts for ARM CPUs
 */
/*
 * Copyright (c) 2017, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */


#include <barrelfish/barrelfish.h>
#include <barrelfish_kpi/paging_arch.h>
#include <barrelfish_kpi/platform.h>
#include <barrelfish/syscall_arch.h>
#include <target/aarch64/barrelfish_kpi/arm_core_data.h>
#include <offsets.h>
#include <barrelfish/deferred.h>
#include <acpi_client/acpi_client.h>
#include <if/acpi_defs.h>

#include <hw_records_arch.h>

#include <skb/skb.h>

#include "../../coreboot.h"

extern coreid_t my_arch_id;
extern struct capref ipi_cap;

/// XXX: make this configurable...
#define ARMV8_KERNEL_STACK_SIZE (16 * 1024)

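/**
 * \brief Mailbox page used when a core is started via the parking protocol.
 *
 * This mirrors the 4 KiB parking-protocol mailbox layout: the first 2 KiB
 * (processor ID, jump address and context argument) are written by the OS,
 * the second 2 KiB are reserved for the firmware side of the mailbox.
 */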
struct armv8_parking_page
{
    uint32_t processor_id;
    uint32_t reserved;
    genpaddr_t jump_address;
    genpaddr_t context;
    uint8_t reserved_os[2048 - 24];
    uint8_t reserved_firmware[2048];
};
STATIC_ASSERT_SIZEOF(struct armv8_parking_page, 4096);

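/**
 * \brief Publish an entry point in the parking-protocol mailbox.
 *
 * The ordering matters: the processor ID is first invalidated (all ones),
 * the context and jump address are made visible with barriers, and only then
 * is the target's own ID written, so the parked core cannot observe a valid
 * ID together with a stale jump address.
 */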
static void parking_write_mailbox(struct armv8_parking_page *mailbox,
                                  uint32_t procid, genpaddr_t entry,
                                  genpaddr_t context)
{
    mailbox->context = context;

    /* Change the Processor ID to all ones */
    mailbox->processor_id = 0xffffffff;

    /* Execute a data synchronization barrier */
    __asm volatile("dsb   sy\n"
                   "dmb   sy\n"
                   "isb     \n");

    /* Change the jump address to the required value */
    mailbox->jump_address = entry;

    /* Execute a data synchronization barrier */
    __asm volatile("dsb   sy\n"
                   "dmb   sy\n"
                   "isb     \n");

    /* Program the correct Processor ID in the mailbox */
    mailbox->processor_id = procid;
}


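/**
 * \brief Ask the kernel to start another core.
 *
 * This is a capability invocation on the IPI capability; the kernel performs
 * the actual power-on (e.g. a PSCI CPU_ON call or a parking-protocol
 * release). psci_use_hvc selects whether PSCI calls are issued via HVC
 * instead of SMC.
 */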
static inline errval_t
invoke_monitor_spawn_core(hwid_t core_id, enum cpu_type cpu_type,
                          genpaddr_t entry, genpaddr_t context,
                          uint64_t psci_use_hvc)
{
    return cap_invoke6(ipi_cap, IPICmd_Send_Start, core_id, cpu_type,
                       entry, context, psci_use_hvc).error;
}

struct arch_config
{
    genpaddr_t arch_page_size;
    size_t stack_size;
    char boot_driver_binary[256];
    char boot_driver_entry[256];
    char cpu_driver_binary[256];
    char monitor_binary[256];
    bool psci_use_hvc;
};

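/**
 * \brief Obtain the boot configuration for a core from the SKB.
 *
 * Queries the SKB (Prolog facts) for the boot driver binary, its entry
 * symbol for this hwid, the CPU driver and monitor binaries, and whether
 * PSCI should be called via HVC.
 */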
static errval_t get_arch_config(hwid_t hwid, struct arch_config * config)
{
    errval_t err;

    err = skb_client_connect();
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Failed to connect to the SKB.");
    }

    /* Query the SKB for the CPU driver to use. */
    err = skb_execute_query("cpu_driver(S), write(res(S)).");
    if (err_is_fail(err)) {
        DEBUG_SKB_ERR(err, "skb_execute_query");
        return err;
    }
    err = skb_read_output("res(%255[^)])", config->cpu_driver_binary);
    if (err_is_fail(err)) return err;

    /* Query the SKB for the monitor binary to use. */
    err = skb_execute_query("monitor(S), write(res(S)).");
    if (err_is_fail(err)) {
        DEBUG_SKB_ERR(err, "skb_execute_query");
        return err;
    }
    err = skb_read_output("res(%255[^)])", config->monitor_binary);
    if (err_is_fail(err)) return err;

    /* Query the SKB for the boot driver entry symbol for this core. */
    err = skb_execute_query("boot_driver_entry(%"PRIu64",T), entry_symbol(T,S),"
                            " write(res(S)).", hwid);
    if (err_is_fail(err)) {
        printf("error: %s\n", skb_get_error_output());
        return err;
    }

    err = skb_read_output("res(%255[^)])", config->boot_driver_entry);
    if (err_is_fail(err)) {
        return err;
    }

    /* Query the SKB for the boot driver binary to use. */
    err = skb_execute_query("boot_driver(S), write(res(S)).");
    if (err_is_fail(err)) {
        printf("error: %s\n", skb_get_error_output());
        return err;
    }

    err = skb_read_output("res(%255[^)])", config->boot_driver_binary);
    if (err_is_fail(err)) {
        return err;
    }

    /* Query the SKB for the PSCI conduit (HVC vs. SMC). */
    err = skb_execute_query("psci_use_hvc(C), write(C).");
    if (err_is_fail(err)) {
        printf("error: %s\n", skb_get_error_output());
        return err;
    }
    /* Read into an int first: scanning %d through a bool* would overwrite
     * adjacent bytes. */
    int psci_use_hvc;
    err = skb_read_output("%d", &psci_use_hvc);
    if (err_is_fail(err)) {
        return err;
    }
    config->psci_use_hvc = (psci_use_hvc != 0);

    config->arch_page_size = BASE_PAGE_SIZE;
    config->stack_size = ARMV8_KERNEL_STACK_SIZE;

    return SYS_ERR_OK;
}


struct mem_info {
    size_t                size;
    struct capref         cap;
    void                  *buf;
    struct frame_identity frameid;
};

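/**
 * \brief Allocate (and optionally map) a frame for the target core.
 *
 * The frame is identified so its physical address is known, and the
 * capability is marked as remote, since the memory will be handed to the
 * new core.
 */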
static errval_t mem_alloc(size_t size, bool map, struct mem_info *mem_info)
{
    errval_t err;

    DEBUG("mem_alloc=%zu bytes\n", size);

    memset(mem_info, 0, sizeof(*mem_info));

    err = frame_alloc(&mem_info->cap, size, &mem_info->size);
    if (err_is_fail(err)) {
        return err;
    }

    err = invoke_frame_identify(mem_info->cap, &mem_info->frameid);
    if (err_is_fail(err)) {
        err = err_push(err, LIB_ERR_FRAME_IDENTIFY);
        goto out_err;
    }

    if (map) {
        err = vspace_map_one_frame(&mem_info->buf, mem_info->size, mem_info->cap,
                                   NULL, NULL);
        if (err_is_fail(err)) {
            err = err_push(err, LIB_ERR_VSPACE_MAP);
            goto out_err;
        }
    }

    // Mark memory as remote
    err = cap_mark_remote(mem_info->cap);
    if (err_is_fail(err)) {
        vspace_unmap(mem_info->buf);
        goto out_err;
    }

    return SYS_ERR_OK;

out_err:
    cap_delete(mem_info->cap);
    memset(mem_info, 0, sizeof(*mem_info));
    return err;
}

static errval_t mem_free(struct mem_info *mem_info)
{
    errval_t err;

    if (mem_info->buf) {
        err = vspace_unmap(mem_info->buf);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "failed to unmap\n");
        }
    }
    if (!capref_is_null(mem_info->cap)) {
        err = cap_destroy(mem_info->cap);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "failed to destroy cap");
        }
    }

    return SYS_ERR_OK;
}


static errval_t cpu_memory_alloc(size_t size, struct mem_info *mem_info)
{
    return mem_alloc(size, true, mem_info);
}

static errval_t app_memory_alloc(size_t size, struct mem_info *mem_info)
{
    return mem_alloc(size, false, mem_info);
}


struct module_blob {
    size_t             size;    ///< size of the binary in memory
    lvaddr_t           vaddr;   ///< virtual address of the binary in memory
    genpaddr_t         paddr;   ///< physical address of the memory
    struct capref      frame;
    struct mem_region *mem_region;
};

static errval_t
get_module_info(const char *name, struct module_blob *blob)
{
    errval_t err;

    DEBUG("getting module %s\n", name);

    err = lookup_module(name, &blob->vaddr,
                        &blob->paddr, &blob->size);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Cannot look up module");
        return err_push(err, SPAWN_ERR_FIND_MODULE);
    }

    return SYS_ERR_OK;
}

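/**
 * \brief Apply ELF relocations to a loaded (single-segment) kernel image.
 *
 * Only R_AARCH64_RELATIVE entries are handled: each target word gets
 * addend + (load address - link address) + kernel_offset. The write goes
 * through our own mapping of the frame (segment_vdelta), while the stored
 * value reflects where the image will run (segment_delta + kernel_offset).
 */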
static errval_t
relocate_elf(struct module_blob *binary, struct mem_info *mem,
             lvaddr_t kernel_offset) {

    DEBUG("Relocating kernel image.\n");

    struct Elf64_Ehdr *ehdr = (struct Elf64_Ehdr *)binary->vaddr;

    size_t shnum = ehdr->e_shnum;
    struct Elf64_Phdr *phdr = (struct Elf64_Phdr *)(binary->vaddr + ehdr->e_phoff);
    struct Elf64_Shdr *shead = (struct Elf64_Shdr *)(binary->vaddr + (uintptr_t)ehdr->e_shoff);

    /* Search for relocation sections. */
    for (size_t i = 0; i < shnum; i++) {

        struct Elf64_Shdr *shdr = &shead[i];
        if (shdr->sh_type == SHT_REL || shdr->sh_type == SHT_RELA) {
            if (shdr->sh_info != 0) {
                DEBUG("I expected global relocations, but got"
                      " section-specific ones.\n");
                return ELF_ERR_HEADER;
            }

            uint64_t segment_elf_base  = phdr[0].p_vaddr;
            uint64_t segment_load_base = mem->frameid.base;
            uint64_t segment_delta     = segment_load_base - segment_elf_base;
            uint64_t segment_vdelta    = (uintptr_t)mem->buf - segment_elf_base;

            size_t rsize;
            if (shdr->sh_type == SHT_REL) {
                rsize = sizeof(struct Elf64_Rel);
            } else {
                rsize = sizeof(struct Elf64_Rela);
            }

            assert(rsize == shdr->sh_entsize);
            size_t nrel = shdr->sh_size / rsize;

            void *reldata = (void *)(binary->vaddr + shdr->sh_offset);

            /* Iterate through the relocations. */
            for (size_t ii = 0; ii < nrel; ii++) {
                void *reladdr = reldata + ii * rsize;

                switch (shdr->sh_type) {
                    case SHT_REL:
                        DEBUG("SHT_REL unimplemented.\n");
                        return ELF_ERR_PROGHDR;
                    case SHT_RELA:
                    {
                        struct Elf64_Rela *rel = reladdr;

                        uint64_t offset = rel->r_offset;
                        uint64_t sym    = ELF64_R_SYM(rel->r_info);
                        uint64_t type   = ELF64_R_TYPE(rel->r_info);
                        uint64_t addend = rel->r_addend;

                        uint64_t *rel_target = (void *)offset + segment_vdelta;

                        switch (type) {
                            case R_AARCH64_RELATIVE:
                                if (sym != 0) {
                                    DEBUG("Relocation references a"
                                          " dynamic symbol, which is"
                                          " unsupported.\n");
                                    return ELF_ERR_PROGHDR;
                                }

                                /* Delta(S) + A */
                                *rel_target = addend + segment_delta + kernel_offset;
                                break;

                            default:
                                DEBUG("Unsupported relocation type %" PRIu64 "\n",
                                      type);
                                return ELF_ERR_PROGHDR;
                        }
                    }
                    break;
                    default:
                        DEBUG("Unexpected section type\n");
                        break;
                }
            }
        }
    }

    return SYS_ERR_OK;
}

static errval_t elf_check_header(lvaddr_t addr, size_t size)
{
    struct Elf64_Ehdr *ehdr = (struct Elf64_Ehdr *)addr;

    // Check for valid file size
    if (size < sizeof(struct Elf64_Ehdr)) {
        return ELF_ERR_FILESZ;
    }

    if (ehdr->e_ident[EI_CLASS] != ELFCLASS64 || ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
        return ELF_ERR_HEADER;
    }

    if (ehdr->e_ident[EI_OSABI] != ELFOSABI_STANDALONE
        && ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE) {
        DEBUG("Warning: Compiled for OS ABI %d.  Wrong compiler?\n",
              ehdr->e_ident[EI_OSABI]);
        return ELF_ERR_HEADER;
    }

    if (ehdr->e_machine != EM_AARCH64) {
        DEBUG("Error: Not AArch64\n");
        return ELF_ERR_HEADER;
    }

    if (ehdr->e_type != ET_EXEC) {
        DEBUG("Warning: CPU driver isn't executable! Continuing anyway.\n");
    }

    // More sanity checks
    if (ehdr->e_phoff + ehdr->e_phentsize * ehdr->e_phnum > size
        || ehdr->e_phentsize != sizeof(struct Elf64_Phdr)) {
        return ELF_ERR_PROGHDR;
    }

    return SYS_ERR_OK;
}

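/**
 * \brief Copy the single loadable segment of an ELF image into its frame.
 *
 * Copies the file contents, zeroes the BSS, and translates the given entry
 * point into the physical address at which it was loaded.
 */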
static errval_t load_elf_binary(struct module_blob *binary, struct mem_info *mem,
                                genvaddr_t entry_point, genvaddr_t *reloc_entry_point)
{
    struct Elf64_Ehdr *ehdr = (struct Elf64_Ehdr *)binary->vaddr;

    /* Load the CPU driver from its ELF image. */
    bool found_entry_point = 0;
    bool loaded = 0;

    struct Elf64_Phdr *phdr = (struct Elf64_Phdr *)(binary->vaddr + ehdr->e_phoff);
    for (size_t i = 0; i < ehdr->e_phnum; i++) {
        if (phdr[i].p_type != PT_LOAD) {
            DEBUG("Segment %zu load address 0x%" PRIx64 ", file size %" PRIu64
                  ", memory size 0x%" PRIx64 " SKIP\n", i, phdr[i].p_vaddr,
                  phdr[i].p_filesz, phdr[i].p_memsz);
            continue;
        }

        DEBUG("Segment %zu load address 0x%" PRIx64 ", file size %" PRIu64
              ", memory size 0x%" PRIx64 " LOAD\n", i, phdr[i].p_vaddr,
              phdr[i].p_filesz, phdr[i].p_memsz);

        if (loaded) {
            USER_PANIC("Expected only one loadable segment!\n");
        }
        loaded = 1;

        void *dest = mem->buf;
        lpaddr_t dest_phys = mem->frameid.base;

        assert(phdr[i].p_offset + phdr[i].p_memsz <= mem->frameid.bytes);

        /* copy loadable part */
        memcpy(dest, (void *)(binary->vaddr + phdr[i].p_offset), phdr[i].p_filesz);

        /* zero out BSS section */
        memset(dest + phdr[i].p_filesz, 0, phdr[i].p_memsz - phdr[i].p_filesz);

        if (!found_entry_point) {
            if (entry_point >= phdr[i].p_vaddr
                && entry_point - phdr[i].p_vaddr < phdr[i].p_memsz) {
                *reloc_entry_point = (dest_phys + (entry_point - phdr[i].p_vaddr));
                found_entry_point = 1;
            }
        }
    }

    if (!found_entry_point) {
        USER_PANIC("No entry point loaded\n");
    }

    return SYS_ERR_OK;
}


static errval_t elf_find_entry(struct module_blob *binary, const char *sym,
                               genvaddr_t *ret_entry)
{
    if (sym && strlen(sym) > 0) {
        DEBUG("Looking for entry: '%s'\n", sym);
        struct Elf64_Sym *entry;
        entry = elf64_find_symbol_by_name(binary->vaddr, binary->size, sym, 0,
                                          STT_FUNC, 0);
        if (!entry) {
            DEBUG("Entry '%s' not found\n", sym);
            return ELF_ERR_PROGHDR;
        }
        *ret_entry = entry->st_value;
    } else {
        *ret_entry = ((struct Elf64_Ehdr *)binary->vaddr)->e_entry;
    }

    return SYS_ERR_OK;
}

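/**
 * \brief Load and relocate both the boot driver and the CPU driver.
 *
 * The boot driver is relocated to run at its physical load address
 * (offset 0); the CPU driver is relocated into the kernel virtual window,
 * so KERNEL_OFFSET is added to its relocations and entry point.
 */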
static errval_t load_boot_and_cpu_driver(struct arch_config *cfg,
                                         struct module_blob *boot_driver,
                                         struct mem_info *boot_mem,
                                         struct module_blob *cpu_driver,
                                         struct mem_info *cpu_mem,
                                         genvaddr_t *ret_boot_entry,
                                         genvaddr_t *ret_cpu_entry) {

    errval_t err;

    err = elf_check_header(boot_driver->vaddr, boot_driver->size);
    if (err_is_fail(err)) {
        return err;
    }

    err = elf_check_header(cpu_driver->vaddr, cpu_driver->size);
    if (err_is_fail(err)) {
        return err;
    }

    genvaddr_t boot_entry_point = 0;
    err = elf_find_entry(boot_driver, cfg->boot_driver_entry, &boot_entry_point);
    if (err_is_fail(err)) {
        return err;
    }

    DEBUG("Unrelocated entry point in boot driver: '%s' @ %" PRIxGENVADDR "\n",
          cfg->boot_driver_entry, boot_entry_point);

    genvaddr_t cpu_entry_point = 0;
    err = elf_find_entry(cpu_driver, "arch_init", &cpu_entry_point);
    if (err_is_fail(err)) {
        return err;
    }

    DEBUG("Unrelocated entry point in cpu driver: '%s' @ %" PRIxGENVADDR "\n",
          "arch_init", cpu_entry_point);

    err = load_elf_binary(boot_driver, boot_mem, boot_entry_point, &boot_entry_point);
    if (err_is_fail(err)) {
        return err;
    }

    err = load_elf_binary(cpu_driver, cpu_mem, cpu_entry_point, &cpu_entry_point);
    if (err_is_fail(err)) {
        return err;
    }

    err = relocate_elf(boot_driver, boot_mem, 0);
    if (err_is_fail(err)) {
        return err;
    }

    err = relocate_elf(cpu_driver, cpu_mem, KERNEL_OFFSET);
    if (err_is_fail(err)) {
        return err;
    }

    DEBUG("Relocated boot driver entry point is %" PRIxGENVADDR "\n", boot_entry_point);
    DEBUG("Relocated cpu driver entry point is %" PRIxGENVADDR "\n", cpu_entry_point);

    *ret_boot_entry = boot_entry_point;
    *ret_cpu_entry = cpu_entry_point + KERNEL_OFFSET;

    return SYS_ERR_OK;
}

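/**
 * \brief Determine how the core has to be booted.
 *
 * Reads the parkingVersion and parkedAddress fields of the core's
 * hw.processor record. If the core uses the parking protocol, the
 * pre-reserved parking page is obtained via the ACPI service and mapped so
 * the mailbox can be written; otherwise parking_page is left zeroed.
 */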
static errval_t get_boot_protocol(coreid_t core_id, uint32_t *parking_version,
                                  struct mem_info *parking_page)
{
    errval_t err;

    char* record = NULL;
    err = oct_get(&record, "hw.processor.%"PRIuCOREID"", core_id);
    if (err_is_fail(err)) {
        goto out;
    }

    uint64_t parked_address, _parking_version;
    err = oct_read(record, "_ { parkingVersion: %d, parkedAddress: %d}",
                   &_parking_version, &parked_address);
    if (err_is_fail(err)) {
        goto out;
    }

    *parking_version = _parking_version;

    debug_printf("Parking Version: %u, ParkedAddress= 0x%lx\n", *parking_version,
                 parked_address);

    if (*parking_version) {
        struct acpi_binding* acl = get_acpi_binding();

        err = slot_alloc(&parking_page->cap);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "slot_alloc for mm_realloc_range_proxy");
        }

        /* Request the frame covering the parked address from the ACPI
         * service (12 is presumably the size in bits, i.e. one 4 KiB frame). */
        errval_t error_code;
        err = acl->rpc_tx_vtbl.mm_realloc_range_proxy(acl, 12, parked_address,
                                                      &parking_page->cap,
                                                      &error_code);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "mm_realloc_range_proxy failed.");
            goto out;
        }
        if (err_is_fail(error_code)) {
            DEBUG_ERR(error_code, "mm_realloc_range_proxy returned failure.");
            err = error_code;
            goto out;
        }

        err = vspace_map_one_frame(&parking_page->buf, 4096, parking_page->cap,
                                   NULL, NULL);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "failed to map parking page\n");
            goto out;
        }

        parking_page->size = 4096;
        parking_page->frameid.base = parked_address;
        parking_page->frameid.bytes = 4096;

    } else {
        memset(parking_page, 0, sizeof(*parking_page));
        *parking_version = 0;
    }

out:
    if (record) {
        free(record);
    }
    return err;
}

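/**
 * \brief Boot a new core: load the boot and CPU drivers and start it.
 *
 * The sequence is: look up the boot configuration in the SKB, determine the
 * boot protocol (PSCI or parking), look up and load the boot driver, CPU
 * driver and monitor images, fill in the armv8_core_data structure at the
 * base of the stack frame, write the parking mailbox if required, and
 * finally invoke the kernel to start the core at the boot driver entry.
 */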
errval_t spawn_xcore_monitor(coreid_t coreid, hwid_t hwid,
                             enum cpu_type cpu_type,
                             const char *cmdline,
                             struct frame_identity urpc_frame_id,
                             struct capref kcb)
{
    DEBUG("Booting: %" PRIuCOREID ", hwid=%" PRIxHWID "\n", coreid, hwid);

    struct arch_config config;

    errval_t err;

    if (cpu_type != CPU_ARM8) {
        return SPAWN_ERR_UNKNOWN_TARGET_ARCH;
    }

    err = get_arch_config(hwid, &config);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to obtain architecture configuration");
        return err;
    }

    struct mem_info parking_mem;
    uint32_t parking_version = 0;
    err = get_boot_protocol(coreid, &parking_version, &parking_mem);
    if (err_is_fail(err)) {
        return err;
    }

    DEBUG("boot driver: %s\n", config.boot_driver_binary);
    DEBUG("boot driver entry: %s\n", config.boot_driver_entry);
    DEBUG("cpu driver: %s\n", config.cpu_driver_binary);
    DEBUG("monitor: %s\n", config.monitor_binary);

    // compute size of frame needed and allocate it
    DEBUG("%s:%s:%d: urpc_frame_id.base=%"PRIxGENPADDR"\n",
          __FILE__, __FUNCTION__, __LINE__, urpc_frame_id.base);
    DEBUG("%s:%s:%d: urpc_frame_id.size=0x%" PRIuGENSIZE "\n",
          __FILE__, __FUNCTION__, __LINE__, urpc_frame_id.bytes);

    // XXX: Caching these for now, until we have unmap

    struct module_blob boot_binary;
    err = get_module_info(config.boot_driver_binary, &boot_binary);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Cannot look up module");
        return err;
    }

    struct module_blob cpu_binary;
    err = get_module_info(config.cpu_driver_binary, &cpu_binary);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Cannot look up module");
        return err;
    }

    struct module_blob monitor_binary;
    err = get_module_info(config.monitor_binary, &monitor_binary);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Cannot look up module");
        return err;
    }

    size_t elf_size = ROUND_UP(elf_virtual_size(boot_binary.vaddr),
                               config.arch_page_size);

    struct mem_info boot_mem;
    err = cpu_memory_alloc(elf_size, &boot_mem);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Cannot allocate space for the boot driver.");
        return err;
    }

    DEBUG("BOOTMEM: %lx, %zu kb\n", boot_mem.frameid.base, boot_mem.size >> 10);

    elf_size = ROUND_UP(elf_virtual_size(cpu_binary.vaddr), config.arch_page_size);

    struct mem_info cpu_mem;
    err = cpu_memory_alloc(elf_size, &cpu_mem);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Cannot allocate space for the new CPU driver.");
        return err;
    }

    DEBUG("CPUMEM: %lx, %zu kb\n", cpu_mem.frameid.base, cpu_mem.size >> 10);

    elf_size = ROUND_UP(elf_virtual_size(monitor_binary.vaddr), config.arch_page_size);

    struct mem_info monitor_mem;
    err = app_memory_alloc(elf_size + ARMV8_CORE_DATA_PAGES * config.arch_page_size,
                           &monitor_mem);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Cannot allocate space for the monitor.");
        return err;
    }

    DEBUG("DATAMEM: %lx, %zu kb\n", monitor_mem.frameid.base,
          monitor_mem.frameid.bytes >> 10);

    /*
     * The layout of this frame is:
     *  [ARMv8 CORE DATA]
     *  [KERNEL STACK]
     */
    struct mem_info stack_mem;
    err = cpu_memory_alloc(config.stack_size + config.arch_page_size, &stack_mem);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Cannot allocate space for the kernel stack.");
        return err;
    }

    DEBUG("STACKMEM: %lx, %zu kb\n", stack_mem.frameid.base,
          stack_mem.frameid.bytes >> 10);


    /* Load the boot and CPU drivers and compute their relocated entry points. */
    genvaddr_t boot_entry, cpu_driver_entry;
    err = load_boot_and_cpu_driver(&config, &boot_binary, &boot_mem, &cpu_binary,
                                   &cpu_mem, &boot_entry, &cpu_driver_entry);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Cannot load the kernel.");
        return err;
    }

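    /*
     * The armv8_core_data structure lives at the base of the stack frame and
     * is handed to the new CPU driver: it carries the stack bounds, the CPU
     * driver entry point and command line, the monitor's memory and binary,
     * the URPC frame, the KCB and the core IDs involved.
     */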
    DEBUG("Writing core data structure...\n");
    struct armv8_core_data *core_data = (struct armv8_core_data *)stack_mem.buf;

    core_data->boot_magic = ARMV8_BOOTMAGIC_PSCI;
    core_data->cpu_driver_stack = stack_mem.frameid.base + stack_mem.frameid.bytes - 16;
    core_data->cpu_driver_stack_limit = stack_mem.frameid.base + BASE_PAGE_SIZE;

    DEBUG("kernel stack: 0x%" PRIxLPADDR "..0x%" PRIxLPADDR "\n",
          core_data->cpu_driver_stack_limit,
          core_data->cpu_driver_stack);

    core_data->cpu_driver_entry = cpu_driver_entry;

    core_data->memory.base = monitor_mem.frameid.base;
    core_data->memory.length = monitor_mem.frameid.bytes;

    core_data->urpc_frame.base = urpc_frame_id.base;
    core_data->urpc_frame.length = urpc_frame_id.bytes;

    core_data->monitor_binary.base   = monitor_binary.paddr;
    core_data->monitor_binary.length = monitor_binary.size;

    core_data->src_core_id       = disp_get_core_id();
    core_data->src_arch_id       = my_arch_id;
    core_data->dst_core_id       = coreid;
    core_data->dst_arch_id       = hwid;

    struct frame_identity fid;
    err = invoke_frame_identify(kcb, &fid);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Invoke frame identity for KCB failed. "
                            "Did you add the syscall handler for that architecture?");
    }

    DEBUG("%s:%s:%d: fid.base is 0x%"PRIxGENPADDR"\n",
          __FILE__, __FUNCTION__, __LINE__, fid.base);
    core_data->kcb = (genpaddr_t) fid.base;
#ifdef CONFIG_FLOUNDER_BACKEND_UMP_IPI
    core_data->chan_id           = chanid;
#endif

    if (cmdline != NULL) {
        // copy as much of command line as will fit
        snprintf(core_data->cpu_driver_cmdline, sizeof(core_data->cpu_driver_cmdline),
                 "%s %s", config.cpu_driver_binary, cmdline);
        // ensure termination
        core_data->cpu_driver_cmdline[sizeof(core_data->cpu_driver_cmdline) - 1] = '\0';

        DEBUG("%s:%s:%d: %s\n", __FILE__, __FUNCTION__, __LINE__, core_data->cpu_driver_cmdline);
    }

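    /*
     * For a parked core, publish the boot driver entry point and the physical
     * address of the core data (passed as context) through the mailbox before
     * the core is started.
     */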
    if (parking_version) {
        assert(parking_mem.buf);
        parking_write_mailbox(parking_mem.buf, hwid, boot_entry,
                              stack_mem.frameid.base);
    }

    __asm volatile("dsb   sy\n"
                   "dmb   sy\n"
                   "isb     \n");

    /* start */

    DEBUG("invoking boot start hwid=%lx entry=%lx context=%lx\n",
          hwid, boot_entry, stack_mem.frameid.base);

    err = invoke_monitor_spawn_core(hwid, cpu_type, boot_entry, stack_mem.frameid.base,
                                    config.psci_use_hvc);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to spawn the cpu\n");
    }

    if (parking_version) {
        debug_printf("WAITING FOR ACK!\n");

        debug_printf("ACKNOWLEDGED!\n");
    }

    err = mem_free(&stack_mem);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "mem_free failed");
    }

    err = mem_free(&cpu_mem);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "mem_free failed");
    }

    err = mem_free(&monitor_mem);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "mem_free failed");
    }

    err = mem_free(&boot_mem);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "mem_free failed");
    }

    err = mem_free(&parking_mem);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "mem_free failed");
    }

    return SYS_ERR_OK;
}

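/**
 * \brief Look up the hardware ID and CPU type of a core.
 *
 * Reads the core's hw.processor record from octopus; fails with
 * SYS_ERR_CORE_NOT_FOUND if the core is not marked as enabled.
 */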
errval_t get_core_info(coreid_t core_id, hwid_t* hw_id, enum cpu_type* cpu_type)
{
    char* record = NULL;
    errval_t err = oct_get(&record, "hw.processor.%"PRIuCOREID"", core_id);
    if (err_is_fail(err)) {
        goto out;
    }

    uint64_t enabled, type, barrelfish_id;
    err = oct_read(record, "_ { " HW_PROCESSOR_GENERIC_FIELDS " }",
                   &enabled, &barrelfish_id, hw_id, &type);
    if (err_is_fail(err)) {
        goto out;
    }

    if (!enabled) {
        /* XXX: better error code */
        err = SYS_ERR_CORE_NOT_FOUND;
    }

    *cpu_type = (enum cpu_type) type;

out:
    if (record) {
        free(record);
    }
    return err;
}