/**
 * \file
 * \brief Architecture-specific parts of the boot driver for ARM CPUs
 */
/*
 * Copyright (c) 2014,2017 ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */

#include "../../coreboot.h"

#include <barrelfish_kpi/paging_arch.h>
#include <barrelfish_kpi/platform.h>
#include <barrelfish/syscall_arch.h>
#include <target/arm/barrelfish_kpi/arm_core_data.h>

#include <skb/skb.h>

/// Round up n to the next multiple of size; size must be a power of two
#define ROUND_UP(n, size)           ((((n) + (size) - 1)) & (~((size) - 1)))
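// For example, ROUND_UP(0x1234, 0x1000) == 0x2000, and ROUND_UP(0x2000,
// 0x1000) == 0x2000; with a non-power-of-two size the bitmask would give
// wrong results.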

struct monitor_allocate_state {
    void          *vbase;
    genvaddr_t     elfbase;
};

struct xcore_bind_handler {
    coreid_t                    coreid;
    enum cpu_type               cputype;
    struct monitor_binding      *binding;
};

extern coreid_t my_arch_id;
extern struct capref ipi_cap;
errval_t get_core_info(coreid_t core_id,
                       hwid_t* hw_id,
                       enum cpu_type* cpu_type) {
    char* record = NULL;
    errval_t err = oct_get(&record, "hw.processor.%"PRIuCOREID"", core_id);
    if (err_is_fail(err)) return err;

    int enabled, type;
    err = oct_read(record, "_ { hw_id: %d, enabled: %d, type: %d}",
                   hw_id, &enabled, &type);
    // Check the error before asserting, so we never test an uninitialized
    // 'enabled' after a failed read.
    if (err_is_fail(err)) return err;
    assert(enabled);

    *cpu_type = (enum cpu_type) type;
    return SYS_ERR_OK;
}

#if 0
static errval_t monitor_elfload_allocate(void *state, genvaddr_t base,
                                         size_t size, uint32_t flags,
                                         void **retbase)
{
    struct monitor_allocate_state *s = state;

    *retbase = (char *)s->vbase + base - s->elfbase;
    return SYS_ERR_OK;
}
#endif

struct module_blob {
    size_t             size;
    lvaddr_t           vaddr;
    genpaddr_t         paddr;
    struct mem_region *mem_region;
};

static errval_t
module_blob_map(const char *name, struct module_blob *blob)
{
    errval_t err;

    err = lookup_module(name, &blob->vaddr,
                        &blob->paddr, &blob->size);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "Cannot look up module");
        return err_push(err, SPAWN_ERR_FIND_MODULE);
    }

    return SYS_ERR_OK;
}

static errval_t
cpu_memory_prepare(size_t *size,
                   struct capref *cap_ret, void **buf_ret,
                   struct frame_identity *frameid)
{
    errval_t err;
    struct capref cap;
    void *buf;

    err = frame_alloc(&cap, *size, size);
    if (err_is_fail(err)) {
        USER_PANIC("Failed to allocate %zu bytes of memory\n", *size);
    }

#ifdef __gem5__
    // XXX: We map the frame for the new kernel as uncacheable. Gem5 has a
    // problem when one core has caching on and writes to a location that
    // another core reads from without caches enabled. On real hardware one
    // could clean/flush the cache, but Gem5 doesn't support cache maintenance
    // operations for ARM.
    err = vspace_map_one_frame_attr(&buf, *size, cap,
                                    VREGION_FLAGS_READ_WRITE_NOCACHE,
                                    NULL, NULL);
#else
    err = vspace_map_one_frame(&buf, *size, cap, NULL, NULL);
#endif
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    // Mark memory as remote
    err = cap_mark_remote(cap);
    if (err_is_fail(err)) {
        return err;
    }

    err = frame_identify(cap, frameid);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_IDENTIFY);
    }

    *cap_ret = cap;
    *buf_ret = buf;
    return SYS_ERR_OK;
}

static errval_t
cpu_memory_cleanup(struct capref cap, void *buf)
{
    errval_t err;

    err = vspace_unmap(buf);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "vspace unmap CPU driver memory failed");
    }

    // XXX: Should not delete the remote cap
    err = cap_destroy(cap);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "cap_destroy failed");
    }

    return SYS_ERR_OK;
}

static errval_t
spawn_memory_prepare(size_t size, struct capref *cap_ret,
                     struct frame_identity *frameid)
{
    errval_t err;
    struct capref cap;

    err = frame_alloc(&cap, size, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_ALLOC);
    }

    // Mark memory as remote
    err = cap_mark_remote(cap);
    if (err_is_fail(err)) {
        return err;
    }

    err = frame_identify(cap, frameid);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "frame_identify failed");
    }

    *cap_ret = cap;
    return SYS_ERR_OK;
}

static errval_t
spawn_memory_cleanup(struct capref cap)
{
    errval_t err;
    err = cap_destroy(cap);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "cap_destroy failed");
    }

    return SYS_ERR_OK;
}

#if 0
static errval_t
elf_load_and_relocate(lvaddr_t blob_start, size_t blob_size,
                      void *to, lvaddr_t reloc_dest,
                      uintptr_t *reloc_entry)
{
    genvaddr_t entry; // entry point of the loaded ELF image
    struct Elf32_Ehdr *head = (struct Elf32_Ehdr *)blob_start;
    struct Elf32_Shdr *symhead, *rel, *symtab;
    errval_t err;

    //state.vbase = (void *)ROUND_UP(to, ARM_L1_ALIGN);
    struct monitor_allocate_state state;
    state.vbase   = to;
    state.elfbase = elf_virtual_base(blob_start);

    err = elf_load(head->e_machine,
                   monitor_elfload_allocate,
                   &state,
                   blob_start, blob_size,
                   &entry);
    if (err_is_fail(err)) {
        return err;
    }

    // Relocate to new physical base address
    symhead = (struct Elf32_Shdr *)(blob_start + (uintptr_t)head->e_shoff);
    rel = elf32_find_section_header_type(symhead, head->e_shnum, SHT_REL);
    symtab = elf32_find_section_header_type(symhead, head->e_shnum, SHT_DYNSYM);
    assert(rel != NULL && symtab != NULL);

    elf32_relocate(reloc_dest, state.elfbase,
                   (struct Elf32_Rel *)(blob_start + rel->sh_offset),
                   rel->sh_size,
                   (struct Elf32_Sym *)(blob_start + symtab->sh_offset),
                   symtab->sh_size,
                   state.elfbase, state.vbase);

    *reloc_entry = entry - state.elfbase + reloc_dest;
    return SYS_ERR_OK;
}
#endif

/**
 * \brief Spawn a new core.
 *
 * \param core_id  Hardware ID of the core to try booting
 * \param cpu_type Architecture of the core to boot
 * \param entry    Kernel entry point in physical memory
 */
static inline errval_t
invoke_monitor_spawn_core(coreid_t core_id, enum cpu_type cpu_type,
                          forvaddr_t entry)
{
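    /* The 64-bit entry address is split into two 32-bit invocation
     * arguments (high word first) and reassembled on the kernel side. */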
    return cap_invoke5(ipi_cap, IPICmd_Send_Start, core_id, cpu_type,
            (uintptr_t)(entry >> 32), (uintptr_t) entry).error;
}

static void
print_build_id(const char *data, size_t length) {
    // Cast to unsigned char so bytes >= 0x80 aren't sign-extended by %02x.
    for(size_t i= 0; i < length; i++) printf("%02x", (unsigned char)data[i]);
}

static int
compare_build_ids(const char *data1, size_t length1,
                  const char *data2, size_t length2) {
    if(length1 != length2) return 0;

    for(size_t i= 0; i < length1; i++) {
        if(data1[i] != data2[i]) return 0;
    }

    return 1;
}

/* Return the first program header of type 'type'. */
static struct Elf32_Phdr *
elf32_find_segment_type(void *elfdata, uint32_t type) {
    struct Elf32_Ehdr *ehdr= (struct Elf32_Ehdr *)elfdata;

    if(!IS_ELF(*ehdr) ||
       ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
       ehdr->e_machine != EM_ARM) {
        return NULL;
    }

    void *phdrs_base= (void *)(elfdata + ehdr->e_phoff);

    for(size_t i= 0; i < ehdr->e_phnum; i++) {
        struct Elf32_Phdr *phdr= phdrs_base + i * ehdr->e_phentsize;

        if(phdr->p_type == type) return phdr;
    }

    return NULL;
}

static errval_t
load_cpu_relocatable_segment(void *elfdata, void *out, lvaddr_t vbase,
                             lvaddr_t text_base, lvaddr_t *got_base) {
    /* Find the full loadable segment, as it contains the dynamic table. */
    struct Elf32_Phdr *phdr_full= elf32_find_segment_type(elfdata, PT_LOAD);
    if(!phdr_full) return ELF_ERR_HEADER;
    void *full_segment_data= elfdata + phdr_full->p_offset;

    printf("Loadable segment at V:%08"PRIx32"\n", phdr_full->p_vaddr);

    /* Find the relocatable segment to load. */
    struct Elf32_Phdr *phdr= elf32_find_segment_type(elfdata, PT_BF_RELOC);
    if(!phdr) return ELF_ERR_HEADER;

    printf("Relocatable segment at V:%08"PRIx32"\n", phdr->p_vaddr);

    /* Copy the raw segment data. */
    void *in= elfdata + phdr->p_offset;
    assert(phdr->p_filesz <= phdr->p_memsz);
    memcpy(out, in, phdr->p_filesz);

    /* Find the dynamic segment. */
    struct Elf32_Phdr *phdr_dyn= elf32_find_segment_type(elfdata, PT_DYNAMIC);
    if(!phdr_dyn) return ELF_ERR_HEADER;

    printf("Dynamic segment at V:%08"PRIx32"\n", phdr_dyn->p_vaddr);

    /* The location of the dynamic segment is specified by its *virtual
     * address* (vaddr), relative to the loadable segment, and *not* by its
     * p_offset, as for every other segment. */
    struct Elf32_Dyn *dyn=
        full_segment_data + (phdr_dyn->p_vaddr - phdr_full->p_vaddr);

    /* There is no *entsize field for dynamic entries. */
    size_t n_dyn= phdr_dyn->p_filesz / sizeof(struct Elf32_Dyn);

    /* Find the relocations (REL only). */
    void *rel_base= NULL;
    size_t relsz= 0, relent= 0;
    void *dynsym_base= NULL;
    const char *dynstr_base= NULL;
    size_t syment= 0, strsz= 0;
    for(size_t i= 0; i < n_dyn; i++) {
        switch(dyn[i].d_tag) {
            /* There shouldn't be any RELA relocations. */
            case DT_RELA:
                return ELF_ERR_HEADER;

            case DT_REL:
                if(rel_base != NULL) return ELF_ERR_HEADER;

                /* Pointers in the DYNAMIC table are *virtual* addresses,
                 * relative to the vaddr base of the segment that contains
                 * them. */
                rel_base= full_segment_data +
                    (dyn[i].d_un.d_ptr - phdr_full->p_vaddr);
                break;

            case DT_RELSZ:
                relsz= dyn[i].d_un.d_val;
                break;

            case DT_RELENT:
                relent= dyn[i].d_un.d_val;
                break;

            case DT_SYMTAB:
                dynsym_base= full_segment_data +
                    (dyn[i].d_un.d_ptr - phdr_full->p_vaddr);
                break;

            case DT_SYMENT:
                syment= dyn[i].d_un.d_val;
                break;

            case DT_STRTAB:
                dynstr_base= full_segment_data +
                    (dyn[i].d_un.d_ptr - phdr_full->p_vaddr);
                break;

            case DT_STRSZ:
                strsz= dyn[i].d_un.d_val;
                break;
        }
    }
    if(rel_base == NULL || relsz == 0 || relent == 0 ||
       dynsym_base == NULL || syment == 0 ||
       dynstr_base == NULL || strsz == 0)
        return ELF_ERR_HEADER;

    /* XXX - The dynamic segment doesn't actually tell us the size of the
     * dynamic symbol table, which is very annoying.  We should fix this by
     * defining and implementing a standard format for dynamic executables on
     * Barrelfish, using DT_PLTGOT.  Currently, GNU ld refuses to generate
     * that for the CPU driver binary. */
    assert((size_t)dynstr_base > (size_t)dynsym_base);
    size_t dynsym_len= (size_t)dynstr_base - (size_t)dynsym_base;

    /* Walk the symbol table to find got_base. */
    size_t dynsym_offset= 0;
    struct Elf32_Sym *got_sym= NULL;
    while(dynsym_offset < dynsym_len) {
        got_sym= dynsym_base + dynsym_offset;
        if(!strcmp(dynstr_base + got_sym->st_name, "got_base")) break;

        dynsym_offset+= syment;
    }
    if(dynsym_offset >= dynsym_len) {
        printf("got_base not found.\n");
        return ELF_ERR_HEADER;
    }

    /* Addresses in the relocatable segment are relocated to the
     * newly-allocated region, relative to their addresses in the relocatable
     * segment.  Addresses outside the relocatable segment are relocated to
     * the shared text segment, relative to their position in the
     * originally-loaded segment. */
    uint32_t relocatable_offset= vbase - phdr->p_vaddr;
    uint32_t text_offset= text_base - phdr_full->p_vaddr;
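
    /* Illustrative example (hypothetical numbers): if the relocatable
     * segment was linked at p_vaddr 0x80000 and is now placed at vbase
     * 0x101000, relocatable_offset is 0x81000, and every pointer into the
     * segment is shifted by that amount in the loop below. */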

    /* Relocate the got_base within the relocatable segment. */
    *got_base= vbase + (got_sym->st_value - phdr->p_vaddr);

    /* Process the relocations. */
    size_t n_rel= relsz / relent;
    printf("Have %zu relocations of size %zu\n", n_rel, relent);
    for(size_t i= 0; i < n_rel; i++) {
        struct Elf32_Rel *rel= rel_base + i * relent;

        size_t sym=  ELF32_R_SYM(rel->r_info);
        size_t type= ELF32_R_TYPE(rel->r_info);

        /* We should only see relative relocations (R_ARM_RELATIVE) against
         * sections (symbol 0). */
        if(sym != 0 || type != R_ARM_RELATIVE) return ELF_ERR_HEADER;

        uint32_t offset_in_seg= rel->r_offset - phdr->p_vaddr;
        uint32_t *value= out + offset_in_seg;

        uint32_t offset;
        if(*value >= phdr->p_vaddr &&
           (*value - phdr->p_vaddr) < phdr->p_memsz) {
            /* We have a relocation to an address *inside* the relocatable
             * segment. */
            offset= relocatable_offset;
            //printf("r ");
        }
        else {
            /* We have a relocation to an address in the shared text
             * segment. */
            offset= text_offset;
            //printf("t ");
        }

        //printf("REL@%08"PRIx32" %08"PRIx32" -> %08"PRIx32"\n",
        //       rel->r_offset, *value, *value + offset);
        *value+= offset;
    }

    return SYS_ERR_OK;
}

/* XXX - this currently only clones the running kernel. */
errval_t spawn_xcore_monitor(coreid_t coreid, hwid_t hwid,
                             enum cpu_type cpu_type,
                             const char *cmdline,
                             struct frame_identity urpc_frame_id,
                             struct capref kcb)
{
    char cpuname[256], monitorname[256];
    genpaddr_t arch_page_size;
    errval_t err;

    if(cpu_type != CPU_ARM7)
        return SPAWN_ERR_UNKNOWN_TARGET_ARCH;

    /* XXX - ignore the command line passed in.  Fixing this requires
     * cross-architecture changes. */
    cmdline= NULL;

    err = skb_client_connect();
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Failed to connect to the SKB.");
    }

    arch_page_size= BASE_PAGE_SIZE;

    /* Query the SKB for the CPU driver to use. */
    err= skb_execute_query("arm_core(%d,T), cpu_driver(T,S), write(res(S)).",
                           hwid);
    if (err_is_fail(err)) {
        DEBUG_SKB_ERR(err, "skb_execute_query");
        return err;
    }
    err= skb_read_output("res(%255[^)])", cpuname);
    if (err_is_fail(err)) return err;

    /* Query the SKB for the monitor binary to use. */
    err= skb_execute_query("arm_core(%d,T), monitor(T,S), write(res(S)).",
                           hwid);
    if (err_is_fail(err)) {
        DEBUG_SKB_ERR(err, "skb_execute_query");
        return err;
    }
    err= skb_read_output("res(%255[^)])", monitorname);
    if (err_is_fail(err)) return err;

    // map cpu and monitor module
    // XXX: caching these for now, until we have unmap
    static struct module_blob cpu_blob, monitor_blob;
    err = module_blob_map(cpuname, &cpu_blob);
    if (err_is_fail(err)) return err;
    err = module_blob_map(monitorname, &monitor_blob);
    if (err_is_fail(err)) return err;

    // Find the CPU driver's relocatable segment.
    struct Elf32_Phdr *rel_phdr=
        elf32_find_segment_type((void *)cpu_blob.vaddr, PT_BF_RELOC);
    if(!rel_phdr) return ELF_ERR_HEADER;

    // Allocate memory for the new core_data struct, and the relocated kernel
    // data segment.
    assert(sizeof(struct arm_core_data) <= arch_page_size);
    struct {
        size_t                size;
        struct capref         cap;
        void                  *buf;
        struct frame_identity frameid;
    } coredata_mem = {
        .size = arch_page_size + rel_phdr->p_memsz,
    };
    err = cpu_memory_prepare(&coredata_mem.size,
                             &coredata_mem.cap,
                             &coredata_mem.buf,
                             &coredata_mem.frameid);
    if (err_is_fail(err)) return err;

    /* Zero the memory. */
    memset(coredata_mem.buf, 0, coredata_mem.size);

    /* The relocated kernel segment will sit one page in. */
    void *rel_seg_buf= coredata_mem.buf + arch_page_size;
    lpaddr_t rel_seg_kvaddr=
        (lpaddr_t)coredata_mem.frameid.base + arch_page_size;

    printf("Allocated %"PRIu64"B for core_data at KV:0x%08"PRIx32"\n",
            arch_page_size, (lpaddr_t)coredata_mem.frameid.base);
    printf("Allocated %"PRIu64"B for CPU driver BSS at KV:0x%08"PRIx32"\n",
            coredata_mem.frameid.bytes - arch_page_size, rel_seg_kvaddr);

    /* Set up the core_data struct in the new kernel. */
    struct arm_core_data *core_data = (struct arm_core_data *)coredata_mem.buf;

    // Initialise the KCB and core data, using that of the running kernel.
    err= invoke_kcb_clone(kcb, coredata_mem.cap);
    if(err_is_fail(err)) return err;

    printf("Reusing text segment at KV:0x%08"PRIx32"\n",
           core_data->kernel_load_base);

    // Check that the build ID matches our binary.
    struct Elf32_Shdr *build_id_shdr=
        elf32_find_section_header_name(cpu_blob.vaddr, cpu_blob.size,
                ".note.gnu.build-id");
    if(!build_id_shdr) return ELF_ERR_HEADER;

    // Find the GNU build ID note section
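    // An ELF note is laid out as an Elf32_Nhdr (n_namesz, n_descsz, n_type)
    // followed by the name (here "GNU\0", 4 bytes, already 4-byte aligned)
    // and then the descriptor, which for NT_GNU_BUILD_ID holds the raw build
    // ID bytes; the offset arithmetic below relies on that layout.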
    struct Elf32_Nhdr *build_id_nhdr=
        (struct Elf32_Nhdr *)(cpu_blob.vaddr + build_id_shdr->sh_offset);
    assert(build_id_nhdr->n_type == NT_GNU_BUILD_ID);
    size_t build_id_len= build_id_nhdr->n_descsz;
    const char *build_id_data=
        ((const char *)build_id_nhdr) +
        sizeof(struct Elf32_Nhdr) +
        build_id_nhdr->n_namesz;

    // Check that the binary we're loading matches the kernel we're cloning.
    assert(build_id_len <= MAX_BUILD_ID);
    if(!compare_build_ids(build_id_data,
                          build_id_len,
                          core_data->build_id.data,
                          core_data->build_id.length)) {
        printf("Build ID mismatch: ");
        print_build_id(build_id_data, build_id_len);
        printf(" != ");
        print_build_id(core_data->build_id.data, core_data->build_id.length);
        printf("\n");
        return ELF_ERR_HEADER;
    }

    // Load and relocate the new kernel's relocatable segment
    err= load_cpu_relocatable_segment(
            (void *)cpu_blob.vaddr, rel_seg_buf, rel_seg_kvaddr,
            core_data->kernel_load_base, &core_data->got_base);
    if(err_is_fail(err)) return err;

    /* Chunk of memory to load monitor on the app core */
    struct capref spawn_mem_cap;
    struct frame_identity spawn_mem_frameid;
    err = spawn_memory_prepare(ARM_CORE_DATA_PAGES*arch_page_size,
                               &spawn_mem_cap,
                               &spawn_mem_frameid);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "spawn_memory_prepare");
        return err;
    }

    struct Elf32_Ehdr *head32 = (struct Elf32_Ehdr *)cpu_blob.vaddr;
    core_data->kernel_elf.size = sizeof(struct Elf32_Shdr);
    core_data->kernel_elf.addr = cpu_blob.paddr + (uintptr_t)head32->e_shoff;
    core_data->kernel_elf.num  = head32->e_shnum;

    core_data->kernel_module.mod_start = cpu_blob.paddr;
    core_data->kernel_module.mod_end   = cpu_blob.paddr + cpu_blob.size;

    core_data->urpc_frame_base     = urpc_frame_id.base;
    assert((1UL << log2ceil(urpc_frame_id.bytes)) == urpc_frame_id.bytes);
    core_data->urpc_frame_size     = urpc_frame_id.bytes;

    core_data->monitor_module.mod_start = monitor_blob.paddr;
    core_data->monitor_module.mod_end = monitor_blob.paddr + monitor_blob.size;

    core_data->memory_base_start   = spawn_mem_frameid.base;
    assert((1UL << log2ceil(spawn_mem_frameid.bytes)) == spawn_mem_frameid.bytes);
    core_data->memory_bytes        = spawn_mem_frameid.bytes;
    core_data->src_core_id         = disp_get_core_id();
    core_data->src_arch_id         = my_arch_id;
    core_data->dst_core_id         = coreid;
#ifdef CONFIG_FLOUNDER_BACKEND_UMP_IPI
    core_data->chan_id             = chanid;
#endif
    struct frame_identity fid;
    err = invoke_kcb_identify(kcb, &fid);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Invoke frame identity for KCB failed. "
                            "Did you add the syscall handler for that architecture?");
    }
    core_data->kcb = (genpaddr_t) fid.base;

    if (cmdline != NULL) {
        // copy as much of the command line as will fit
        strncpy(core_data->cmdline_buf, cmdline,
                sizeof(core_data->cmdline_buf));
        // ensure termination
        core_data->cmdline_buf[sizeof(core_data->cmdline_buf) - 1] = '\0';
    }
    core_data->cmdline=
        coredata_mem.frameid.base +
        (lvaddr_t)((void *)core_data->cmdline_buf - (void *)core_data);
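    /* core_data->cmdline is the *physical* address of cmdline_buf: the
     * buffer's offset within the core_data struct, added to the physical
     * base of the frame the struct lives in. */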

    /* Ensure that everything we just wrote is cleaned sufficiently that the
     * target core can read it. */
    sys_armv7_cache_clean_poc((void *)(uint32_t)coredata_mem.frameid.base,
                              (void *)((uint32_t)coredata_mem.frameid.base +
                                       (uint32_t)coredata_mem.frameid.bytes - 1));

    /* Invoke kernel capability to boot new core */
    // XXX: Confusing address translation between local/generic physical
    // addresses here.
    err = invoke_monitor_spawn_core(hwid, cpu_type, coredata_mem.frameid.base);
    if (err_is_fail(err)) {
        return err_push(err, MON_ERR_SPAWN_CORE);
    }

    err = cpu_memory_cleanup(coredata_mem.cap, coredata_mem.buf);
    if (err_is_fail(err)) {
        return err;
    }

    err = spawn_memory_cleanup(spawn_mem_cap);
    if (err_is_fail(err)) {
        return err;
    }

    return SYS_ERR_OK;
}