1/* 2 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) 3 * 4 * SPDX-License-Identifier: BSD-2-Clause 5 */ 6 7#include <autoconf.h> 8#include <capdl_loader_app/gen_config.h> 9 10#include <assert.h> 11#include <inttypes.h> 12#include <limits.h> 13 14#include <stdio.h> 15#include <string.h> 16#include <stdint.h> 17#include <inttypes.h> 18#include <sel4platsupport/platsupport.h> 19#include <cpio/cpio.h> 20#include <simple-default/simple-default.h> 21 22#include <vka/kobject_t.h> 23#include <utils/util.h> 24#include <sel4/sel4.h> 25#include <sel4utils/sel4_zf_logif.h> 26#include "capdl.h" 27 28#ifdef CONFIG_DEBUG_BUILD 29#include <utils/attribute.h> 30#include <muslcsys/vsyscall.h> 31#endif 32 33#include "capdl_spec.h" 34 35#ifdef CONFIG_ARCH_ARM 36#include <capdl_loader_app/platform_info.h> 37#endif 38 39#ifdef CONFIG_ARCH_RISCV 40#define seL4_PageDirIndexBits seL4_PageTableIndexBits 41#define PT_LEVEL_SLOT(vaddr, level) (((vaddr) >> ((seL4_PageTableIndexBits * (level-1)) + seL4_PageBits)) & MASK(seL4_PageTableIndexBits)) 42#endif 43 44#define PML4_SLOT(vaddr) ((vaddr >> (seL4_PDPTIndexBits + seL4_PageDirIndexBits + seL4_PageTableIndexBits + seL4_PageBits)) & MASK(seL4_PML4IndexBits)) 45#define PDPT_SLOT(vaddr) ((vaddr >> (seL4_PageDirIndexBits + seL4_PageTableIndexBits + seL4_PageBits)) & MASK(seL4_PDPTIndexBits)) 46#define PD_SLOT(vaddr) ((vaddr >> (seL4_PageTableIndexBits + seL4_PageBits)) & MASK(seL4_PageDirIndexBits)) 47#define PT_SLOT(vaddr) ((vaddr >> seL4_PageBits) & MASK(seL4_PageTableIndexBits)) 48#define PGD_SLOT(vaddr) ((vaddr >> (seL4_PUDIndexBits + seL4_PageDirIndexBits + seL4_PageTableIndexBits + seL4_PageBits)) & MASK(seL4_PGDIndexBits)) 49#define PUD_SLOT(vaddr) ((vaddr >> (seL4_PageDirIndexBits + seL4_PageTableIndexBits + seL4_PageBits)) & MASK(seL4_PUDIndexBits)) 50 51#define CAPDL_SHARED_FRAMES 52 53#define STACK_ALIGNMENT_BYTES 16 54 55#define MAX_STREAM_IDS 60 56 57static seL4_CPtr capdl_to_sel4_orig[CONFIG_CAPDL_LOADER_MAX_OBJECTS]; 58static 
seL4_CPtr capdl_to_sel4_copy[CONFIG_CAPDL_LOADER_MAX_OBJECTS]; 59static seL4_CPtr capdl_to_sel4_irq[CONFIG_CAPDL_LOADER_MAX_OBJECTS]; 60static seL4_CPtr capdl_to_sched_ctrl[CONFIG_MAX_NUM_NODES]; 61/* For static allocation, this maps from untyped derivation index to cslot. 62 * Otherwise, this stores the result of sort_untypeds. */ 63static seL4_CPtr untyped_cptrs[CONFIG_MAX_NUM_BOOTINFO_UNTYPED_CAPS]; 64 65static seL4_CPtr free_slot_start, free_slot_end; 66 67static seL4_CPtr first_arm_iospace; 68 69// Hack for seL4_TCB_WriteRegisters because we can't take the address of local variables. 70static seL4_UserContext global_user_context; 71 72extern char _capdl_archive[]; 73extern char _capdl_archive_end[]; 74 75/* This symbol is provided by the GNU linker and points at the start/end of our 76 * ELF image. 77 */ 78extern char __executable_start[]; 79extern char _end[]; 80 81/* A region at which to map destination frames during loading 82 * This is expected to be initialised by 'init_copy_addr' on system init */ 83uintptr_t copy_addr; 84 85uint32_t sid_number = 0; 86/* In the case where we just want a 4K page and we cannot allocate 87 * a page table ourselves, we use this pre allocated region that 88 * is guaranteed to have a pagetable */ 89/* Make this slot large enough for 64KiB frames which is the largest 90 * last-level page size across all architectures that we support. 
*/ 91static char copy_addr_with_pt[PAGE_SIZE_4K * 16] __attribute__((aligned(PAGE_SIZE_4K * 16))); 92 93static seL4_BootInfoHeader *extended_bootinfo_table[SEL4_BOOTINFO_HEADER_NUM] = {0}; 94 95/* helper functions ---------------------------------------------------------------------------- */ 96 97static seL4_CPtr get_free_slot(void) 98{ 99 return free_slot_start; 100} 101 102static void next_free_slot(void) 103{ 104 free_slot_start++; 105 ZF_LOGF_IF(free_slot_start >= free_slot_end, "Ran out of free slots!"); 106} 107 108typedef enum {MOVE, COPY} init_cnode_mode; 109typedef enum {ORIG, DUP, IRQ, SCHED_CTRL} seL4_cap_type; 110 111static seL4_CPtr orig_caps(CDL_ObjID object_id) 112{ 113 assert(object_id < CONFIG_CAPDL_LOADER_MAX_OBJECTS); 114 return capdl_to_sel4_orig[object_id]; 115} 116 117static seL4_CPtr dup_caps(CDL_ObjID object_id) 118{ 119 assert(object_id < CONFIG_CAPDL_LOADER_MAX_OBJECTS); 120 return capdl_to_sel4_copy[object_id]; 121} 122 123static seL4_CPtr irq_caps(CDL_IRQ irq) 124{ 125 assert(irq < CONFIG_CAPDL_LOADER_MAX_OBJECTS); 126 return capdl_to_sel4_irq[irq]; 127} 128 129static seL4_CPtr sched_ctrl_caps(CDL_Core id) 130{ 131 assert(id < CONFIG_MAX_NUM_NODES); 132 return capdl_to_sched_ctrl[id]; 133} 134 135static void add_sel4_cap(CDL_ObjID object_id, seL4_cap_type type, seL4_CPtr slot) 136{ 137 if (type == ORIG) { 138 capdl_to_sel4_orig[object_id] = slot; 139 } else if (type == DUP) { 140 capdl_to_sel4_copy[object_id] = slot; 141 } else if (type == IRQ) { 142 capdl_to_sel4_irq[object_id] = slot; 143 } else if (type == SCHED_CTRL) { 144 capdl_to_sched_ctrl[object_id] = slot; 145 } 146} 147 148static CDL_Object 149*get_spec_object(CDL_Model *spec, CDL_ObjID object_id) 150{ 151 return &spec->objects[object_id]; 152} 153 154static seL4_Word get_capData(CDL_CapData d) 155{ 156 switch (d.tag) { 157 case CDL_CapData_Badge: 158 return d.badge; 159 case CDL_CapData_Guard: 160 return seL4_CNode_CapData_new(d.guard_bits, d.guard_size).words[0]; 161 case 
CDL_CapData_Raw: 162 return d.data; 163 default: 164 ZF_LOGF("invalid cap data"); 165 return seL4_NilData; 166 } 167} 168 169static CDL_Cap *get_cap_at(CDL_Object *obj, unsigned int slot) 170{ 171 for (unsigned int i = 0; i < CDL_Obj_NumSlots(obj); i++) { 172 CDL_CapSlot *s = CDL_Obj_GetSlot(obj, i); 173 if (CDL_CapSlot_Slot(s) == slot) { 174 return CDL_CapSlot_Cap(s); 175 } 176 } 177 178 /* Not found. */ 179 return NULL; 180} 181 182#ifdef CONFIG_ARCH_X86_64 183static CDL_Cap *get_cdl_frame_pdpt(CDL_ObjID root, uintptr_t vaddr, CDL_Model *spec) 184{ 185 CDL_Object *cdl_pml4 = get_spec_object(spec, root); 186 CDL_Cap *pdpt_cap = get_cap_at(cdl_pml4, PML4_SLOT(vaddr)); 187 if (pdpt_cap == NULL) { 188 ZF_LOGF("Could not find PD cap %s[%d]", CDL_Obj_Name(cdl_pml4), (int)PML4_SLOT(vaddr)); 189 } 190 return pdpt_cap; 191} 192 193static CDL_Cap *get_cdl_frame_pd(CDL_ObjID root, uintptr_t vaddr, CDL_Model *spec) 194{ 195 CDL_Cap *pdpt_cap = get_cdl_frame_pdpt(root, vaddr, spec); 196 CDL_Object *cdl_pdpt = get_spec_object(spec, CDL_Cap_ObjID(pdpt_cap)); 197 CDL_Cap *pd_cap = get_cap_at(cdl_pdpt, PDPT_SLOT(vaddr)); 198 if (pd_cap == NULL) { 199 ZF_LOGF("Could not find PD cap %s[%d]", CDL_Obj_Name(cdl_pdpt), (int)PDPT_SLOT(vaddr)); 200 } 201 return pd_cap; 202} 203#endif 204 205#ifdef CONFIG_ARCH_AARCH64 206static CDL_Cap *get_cdl_frame_pud(CDL_ObjID root, uintptr_t vaddr, CDL_Model *spec) 207{ 208 CDL_Object *cdl_pgd = get_spec_object(spec, root); 209 CDL_Cap *pud_cap = get_cap_at(cdl_pgd, PGD_SLOT(vaddr)); 210 if (pud_cap == NULL) { 211 ZF_LOGF("Could not find PUD cap %s[%d]", CDL_Obj_Name(cdl_pgd), (int)PGD_SLOT(vaddr)); 212 } 213 return pud_cap; 214} 215 216static CDL_Cap *get_cdl_frame_pd(CDL_ObjID root, uintptr_t vaddr, CDL_Model *spec) 217{ 218#if CDL_PT_NUM_LEVELS == 3 219 CDL_Object *cdl_pud = get_spec_object(spec, root); 220#else 221 CDL_Cap *pud_cap = get_cdl_frame_pud(root, vaddr, spec); 222 CDL_Object *cdl_pud = get_spec_object(spec, CDL_Cap_ObjID(pud_cap)); 
223#endif 224 CDL_Cap *pd_cap = get_cap_at(cdl_pud, PUD_SLOT(vaddr)); 225 if (pd_cap == NULL) { 226 ZF_LOGF("Could not find PD cap %s[%d]", CDL_Obj_Name(cdl_pud), (int)PUD_SLOT(vaddr)); 227 } 228 return pd_cap; 229} 230#endif 231 232#ifndef CONFIG_ARCH_RISCV 233static CDL_Cap *get_cdl_frame_pt(CDL_ObjID pd, uintptr_t vaddr, CDL_Model *spec) 234{ 235#if defined(CONFIG_ARCH_X86_64) || defined(CONFIG_ARCH_AARCH64) 236 CDL_Cap *pd_cap = get_cdl_frame_pd(pd, vaddr, spec); 237 CDL_Object *cdl_pd = get_spec_object(spec, CDL_Cap_ObjID(pd_cap)); 238#else 239 CDL_Object *cdl_pd = get_spec_object(spec, pd); 240#endif 241 CDL_Cap *pt_cap = get_cap_at(cdl_pd, PD_SLOT(vaddr)); 242 if (pt_cap == NULL) { 243 ZF_LOGF("Could not find PT cap %s[%d]", CDL_Obj_Name(cdl_pd), (int)PD_SLOT(vaddr)); 244 } 245 return pt_cap; 246} 247 248#else /* CONFIG_ARCH_RISCV */ 249 250/** 251 * Do a recursive traversal from the top to bottom of a page table structure to 252 * get the cap for a particular page table object for a certain vaddr at a certain 253 * level. The level variable treats level==CONFIG_PT_LEVELS as the root page table 254 * object, and level 0 as the bottom level 4k frames. 
255 */ 256static CDL_Cap *get_cdl_frame_pt_recurse(CDL_ObjID root, uintptr_t vaddr, CDL_Model *spec, int level) 257{ 258 CDL_Object *cdl_pt = NULL; 259 if (level < CONFIG_PT_LEVELS) { 260 CDL_Cap *pt_cap = get_cdl_frame_pt_recurse(root, vaddr, spec, level + 1); 261 cdl_pt = get_spec_object(spec, CDL_Cap_ObjID(pt_cap)); 262 } else { 263 cdl_pt = get_spec_object(spec, root); 264 } 265 CDL_Cap *pt_cap_ret = get_cap_at(cdl_pt, PT_LEVEL_SLOT(vaddr, level)); 266 if (pt_cap_ret == NULL) { 267 ZF_LOGF("Could not find PD cap %s[%d]", CDL_Obj_Name(cdl_pt), (int)PT_LEVEL_SLOT(vaddr, level)); 268 } 269 return pt_cap_ret; 270} 271 272static CDL_Cap *get_cdl_frame_pt(CDL_ObjID pd, uintptr_t vaddr, CDL_Model *spec) 273{ 274 return get_cdl_frame_pt_recurse(pd, vaddr, spec, 2); 275} 276 277#endif 278 279static CDL_Cap *get_cdl_frame_cap(CDL_ObjID pd, uintptr_t vaddr, CDL_Model *spec) 280{ 281 CDL_Cap *pt_cap = get_cdl_frame_pt(pd, vaddr, spec); 282 283 /* Check if the PT cap is actually a large frame cap. */ 284 if (pt_cap->type == CDL_FrameCap) { 285 return pt_cap; 286 } 287 288 CDL_Object *cdl_pt = get_spec_object(spec, CDL_Cap_ObjID(pt_cap)); 289 CDL_Cap *frame_cap = get_cap_at(cdl_pt, PT_SLOT(vaddr)); 290 if (frame_cap == NULL) { 291 ZF_LOGF("Could not find frame cap %s[%d]", CDL_Obj_Name(cdl_pt), (int)PT_SLOT(vaddr)); 292 } 293 294 return frame_cap; 295} 296 297/* elf file loading hack - prefill objects with the data defined in the elf file */ 298static seL4_CPtr get_frame_cap(CDL_ObjID pd, uintptr_t vaddr, CDL_Model *spec) 299{ 300 return orig_caps(CDL_Cap_ObjID(get_cdl_frame_cap(pd, vaddr, spec))); 301} 302 303void init_copy_frame(seL4_BootInfo *bootinfo) 304{ 305 /* An original frame will be mapped, backing copy_addr_with_pt. For 306 * correctness we should unmap this before mapping into this 307 * address. We locate the frame cap by looking in boot info 308 * and knowing that the userImageFrames are ordered by virtual 309 * address in our address space. 
The flush is probably not 310 * required, but doesn't hurt to be cautious. 311 */ 312 313 /* Find the number of frames in the user image according to 314 * bootinfo, and compare that to the number of frames backing 315 * the image computed by comparing start and end symbols. If 316 * these numbers are different, assume the image was padded 317 * to the left. */ 318 unsigned int num_user_image_frames_reported = 319 bootinfo->userImageFrames.end - bootinfo->userImageFrames.start; 320 unsigned int num_user_image_frames_measured = 321 (ROUND_UP((uintptr_t)&_end, PAGE_SIZE_4K) - 322 (uintptr_t)&__executable_start) / PAGE_SIZE_4K; 323 324 if (num_user_image_frames_reported < num_user_image_frames_measured) { 325 ZF_LOGE("Too few frames caps in bootinfo to back user image"); 326 return; 327 } 328 329 size_t additional_user_image_bytes = 330 (num_user_image_frames_reported - num_user_image_frames_measured) * PAGE_SIZE_4K; 331 332 if (additional_user_image_bytes > (uintptr_t)&__executable_start) { 333 ZF_LOGE("User image padding too high to fit before start symbol"); 334 return; 335 } 336 337 uintptr_t lowest_mapped_vaddr = 338 (uintptr_t)&__executable_start - additional_user_image_bytes; 339 340 seL4_CPtr copy_addr_frame = bootinfo->userImageFrames.start + 341 ((uintptr_t)copy_addr_with_pt) / PAGE_SIZE_4K - 342 lowest_mapped_vaddr / PAGE_SIZE_4K; 343 /* We currently will assume that we are on a 32-bit platform 344 * that has a single PD, followed by all the PTs. 
So to find 345 * our PT in the paging objects list we just need to add 1 346 * to skip the PD */ 347 seL4_CPtr copy_addr_pt = bootinfo->userImagePaging.start + 1 + 348 PD_SLOT(((uintptr_t)copy_addr)) - PD_SLOT(((uintptr_t)&__executable_start)); 349#if defined(CONFIG_ARCH_X86_64) || defined(CONFIG_ARCH_AARCH64) 350 /* guess that there is one PDPT and PML4 on x86_64 or one PGD and PUD on aarch64 */ 351 copy_addr_pt += 2; 352#endif 353#ifdef CONFIG_ARCH_RISCV 354 /* The base case assumes that there is 2 levels paging structure and already skips 355 * the top level and level after that. We then also need to skip the remaining levels */ 356 copy_addr_pt += CONFIG_PT_LEVELS - 2; 357#endif 358 int error; 359 360 for (int i = 0; i < sizeof(copy_addr_with_pt) / PAGE_SIZE_4K; i++) { 361 error = seL4_ARCH_Page_Unmap(copy_addr_frame + i); 362 ZF_LOGF_IFERR(error, ""); 363 } 364} 365 366#if !CONFIG_CAPDL_LOADER_STATIC_ALLOC 367/* Sort the untyped objects from largest to smallest. 368 * This ensures that fragmentation is eliminated if the objects 369 * themselves are also sorted, largest to smallest. 370 * 371 * Sorting done using counting sort. 372 */ 373static unsigned int sort_untypeds(seL4_BootInfo *bootinfo) 374{ 375 seL4_CPtr untyped_start = bootinfo->untyped.start; 376 seL4_CPtr untyped_end = bootinfo->untyped.end; 377 378 ZF_LOGD("Sorting untypeds..."); 379 380 seL4_Word count[CONFIG_WORD_SIZE] = {0}; 381 382 // Count how many untypeds there are of each size. 383 for (seL4_Word untyped_index = 0; untyped_index != untyped_end - untyped_start; untyped_index++) { 384 if (!bootinfo->untypedList[untyped_index].isDevice) { 385 count[bootinfo->untypedList[untyped_index].sizeBits] += 1; 386 } 387 } 388 389 // Calculate the starting index for each untyped. 
390 seL4_Word total = 0; 391 for (seL4_Word size = CONFIG_WORD_SIZE - 1; size != 0; size--) { 392 seL4_Word oldCount = count[size]; 393 count[size] = total; 394 total += oldCount; 395 } 396 397 unsigned int num_normal_untypes = 0; 398 399 // Store untypeds in untyped_cptrs array. 400 for (seL4_Word untyped_index = 0; untyped_index != untyped_end - untyped_start; untyped_index++) { 401 if (bootinfo->untypedList[untyped_index].isDevice) { 402 ZF_LOGD("Untyped %3d (cptr=%p) (addr=%p) is of size %2d. Skipping as it is device", 403 untyped_index, (void *)(untyped_start + untyped_index), 404 (void *)bootinfo->untypedList[untyped_index].paddr, 405 bootinfo->untypedList[untyped_index].sizeBits); 406 } else { 407 ZF_LOGD("Untyped %3d (cptr=%p) (addr=%p) is of size %2d. Placing in slot %d...", 408 untyped_index, (void *)(untyped_start + untyped_index), 409 (void *)bootinfo->untypedList[untyped_index].paddr, 410 bootinfo->untypedList[untyped_index].sizeBits, 411 count[bootinfo->untypedList[untyped_index].sizeBits]); 412 413 untyped_cptrs[count[bootinfo->untypedList[untyped_index].sizeBits]] = untyped_start + untyped_index; 414 count[bootinfo->untypedList[untyped_index].sizeBits] += 1; 415 num_normal_untypes++; 416 } 417 } 418 419 return num_normal_untypes; 420} 421#endif /* !CONFIG_CAPDL_LOADER_STATIC_ALLOC */ 422 423static void parse_bootinfo(seL4_BootInfo *bootinfo, CDL_Model *spec) 424{ 425 ZF_LOGD("Parsing bootinfo..."); 426 427 free_slot_start = bootinfo->empty.start; 428 free_slot_end = bootinfo->empty.end; 429 430 /* When using libsel4platsupport for printing support, we end up using some 431 * of our free slots during serial port initialisation. Skip over these to 432 * avoid failing our own allocations. Note, this value is just hardcoded 433 * for the amount of slots this initialisation currently uses up. 434 * JIRA: CAMKES-204. 
435 */ 436 free_slot_start += 16; 437 438 ZF_LOGD(" %ld free cap slots, from %ld to %ld", 439 (long)(free_slot_end - free_slot_start), 440 (long)free_slot_start, 441 (long)free_slot_end); 442 443 /* We need to be able to actual store caps to the maximum number of objects 444 * we may be dealing with. 445 * This check can still pass and initialisation fail as we need extra slots 446 * for duplicates for CNodes and TCBs. 447 */ 448 assert(free_slot_end - free_slot_start >= CONFIG_CAPDL_LOADER_MAX_OBJECTS); 449 450 451#if CONFIG_CAPDL_LOADER_STATIC_ALLOC 452 /* 453 * Make sure the untypeds in the model correspond to what we got 454 * from bootinfo. 455 */ 456 int bi_start = 0; 457 for (int u = 0; u < spec->num_untyped; u++) { 458 bool found = false; 459 int num_untyped = bootinfo->untyped.end - bootinfo->untyped.start; 460 CDL_Object *ut = &spec->objects[spec->untyped[u].untyped]; 461 assert(CDL_Obj_Type(ut) == CDL_Untyped); 462 463 for (int i = bi_start; i < num_untyped; i++) { 464 seL4_Word ut_paddr = bootinfo->untypedList[i].paddr; 465 if (bootinfo->untypedList[i].paddr == ut->paddr) { 466 seL4_Uint8 ut_size = bootinfo->untypedList[i].sizeBits; 467 ZF_LOGF_IF(ut_size != ut->size_bits, 468 "Ut at %p in incorrect size, expected %u got %u", 469 ut->paddr, ut->size_bits, ut_size); 470 untyped_cptrs[u] = bootinfo->untyped.start + i; 471 found = true; 472 if (i == bi_start) { 473 bi_start++; 474 } 475 } 476 } 477 ZF_LOGF_IF(!found, "Failed to find ut for %p", ut->paddr); 478 } 479#else 480 /* Probably an inconsistency in the build configuration, so fail now. 
*/ 481 ZF_LOGF_IF(spec->num_untyped, "spec has static alloc, but loader is compiled for dynamic"); 482#endif 483 484#if CONFIG_CAPDL_LOADER_PRINT_UNTYPEDS 485 int num_untyped = bootinfo->untyped.end - bootinfo->untyped.start; 486 ZF_LOGD(" Untyped memory (%d)", num_untyped); 487 for (int i = 0; i < num_untyped; i++) { 488 uintptr_t ut_paddr = bootinfo->untypedList[i].paddr; 489 uintptr_t ut_size = bootinfo->untypedList[i].sizeBits; 490 bool ut_isDevice = bootinfo->untypedList[i].isDevice; 491 ZF_LOGD(" 0x%016" PRIxPTR " - 0x%016" PRIxPTR " (%s)", ut_paddr, 492 ut_paddr + BIT(ut_size), ut_isDevice ? "device" : "memory"); 493 } 494#endif 495 496 ZF_LOGD("Loader is running in domain %d", bootinfo->initThreadDomain); 497 498 first_arm_iospace = bootinfo->ioSpaceCaps.start; 499} 500 501#if !CONFIG_CAPDL_LOADER_STATIC_ALLOC 502static int find_device_object(seL4_Word paddr, seL4_Word type, int size_bits, seL4_CPtr free_slot, 503 CDL_ObjID obj_id, seL4_BootInfo *bootinfo, CDL_Model *spec) 504{ 505 int error; 506 seL4_CPtr hold_slot = 0; 507 /* See if an overlapping object was already created, can only do this for frames. 508 * Any overlapping object will be the previous one, since objects are created in 509 * order of physical address */ 510 if (type != seL4_UntypedObject && obj_id > 0) { 511 CDL_ObjID prev = obj_id - 1; 512 CDL_Object *obj = &spec->objects[prev]; 513 if (CDL_Obj_Type(obj) == CDL_Frame && 514 obj->frame_extra.paddr == paddr && 515 CDL_Obj_SizeBits(obj) == size_bits) { 516 /* Attempt to copy the cap */ 517 error = seL4_CNode_Copy(seL4_CapInitThreadCNode, free_slot, CONFIG_WORD_SIZE, 518 seL4_CapInitThreadCNode, orig_caps(prev), CONFIG_WORD_SIZE, seL4_AllRights); 519 ZF_LOGF_IFERR(error, ""); 520 return 0; 521 } 522 } 523 /* Assume we are allocating from a device untyped. 
Do a linear search for it */ 524 for (unsigned int i = 0; i < bootinfo->untyped.end - bootinfo->untyped.start; i++) { 525 if (bootinfo->untypedList[i].paddr <= (uintptr_t)paddr && 526 bootinfo->untypedList[i].paddr + BIT(bootinfo->untypedList[i].sizeBits) - 1 >= (uintptr_t)paddr + BIT(size_bits) - 1) { 527 /* just allocate objects until we get the one we want. To do this 528 * correctly we cannot just destroy the cap we allocate, since 529 * if it's the only frame from the untyped this will reset the 530 * freeIndex in the kernel, resulting in the next allocation 531 * giving the same object. To prevent this we need to hold 532 * a single allocation to (lock) the untyped, allowing us to 533 * allocate and delete over the rest of the untyped. In order 534 * to get a free slot we assume that the slot immediately after 535 * us is not yet allocated. We'll give it back though :) */ 536 while (1) { 537 error = seL4_Untyped_Retype(bootinfo->untyped.start + i, type, size_bits, 538 seL4_CapInitThreadCNode, 0, 0, free_slot, 1); 539 ZF_LOGF_IFERR(error, ""); 540 seL4_ARCH_Page_GetAddress_t addr; 541 if (type == seL4_UntypedObject) { 542 /* if it's an untyped, create a temporary frame in it 543 * to get the address from */ 544 error = seL4_Untyped_Retype(free_slot, arch_kobject_get_type(KOBJECT_FRAME, seL4_PageBits), seL4_PageBits, 545 seL4_CapInitThreadCNode, 0, 0, free_slot + 2, 1); 546 ZF_LOGF_IFERR(error, ""); 547 addr = seL4_ARCH_Page_GetAddress(free_slot + 2); 548 error = seL4_CNode_Delete(seL4_CapInitThreadCNode, free_slot + 2, CONFIG_WORD_SIZE); 549 ZF_LOGF_IFERR(error, ""); 550 } else { 551 addr = seL4_ARCH_Page_GetAddress(free_slot); 552 } 553 ZF_LOGF_IFERR(addr.error, "Could not get address for untyped cap."); 554 if (addr.paddr == (uintptr_t)paddr) { 555 /* nailed it */ 556 /* delete any holding cap */ 557 if (hold_slot) { 558 error = seL4_CNode_Delete(seL4_CapInitThreadCNode, hold_slot, CONFIG_WORD_SIZE); 559 ZF_LOGF_IFERR(error, ""); 560 } 561 return 0; 562 } 
563 ZF_LOGF_IF(addr.paddr > (uintptr_t)paddr, "device frames probably not ordered by physical address"); 564 565 /* if we are currently using a hold slot we can just delete the cap, otherwise start the hold */ 566 if (hold_slot) { 567 error = seL4_CNode_Delete(seL4_CapInitThreadCNode, free_slot, CONFIG_WORD_SIZE); 568 ZF_LOGF_IFERR(error, ""); 569 } else { 570 hold_slot = free_slot + 1; 571 error = seL4_CNode_Move(seL4_CapInitThreadCNode, hold_slot, CONFIG_WORD_SIZE, seL4_CapInitThreadCNode, free_slot, 572 CONFIG_WORD_SIZE); 573 ZF_LOGF_IFERR(error, ""); 574 } 575 } 576 } 577 } 578 return -1; 579} 580 581bool isDeviceObject(CDL_Object *obj) 582{ 583 return CDL_Obj_Paddr(obj) != 0; 584} 585#endif /* !CONFIG_CAPDL_LOADER_STATIC_ALLOC */ 586 587/* Create objects */ 588static int retype_untyped(seL4_CPtr free_slot, seL4_CPtr free_untyped, 589 seL4_ArchObjectType object_type, int object_size) 590{ 591 seL4_CPtr root = seL4_CapInitThreadCNode; 592 int node_index = 0; 593 int node_depth = 0; 594 int node_offset = free_slot; 595 596 int no_objects = 1; 597 598 ZF_LOGF_IF(object_type >= seL4_ObjectTypeCount, 599 "Invalid object type %zu size %zu", 600 (size_t) object_type, (size_t) object_size); 601 602 return seL4_Untyped_Retype(free_untyped, object_type, object_size, 603 root, node_index, node_depth, node_offset, no_objects); 604 605} 606 607unsigned int create_object(CDL_Model *spec, CDL_Object *obj, CDL_ObjID id, seL4_BootInfo *info, seL4_CPtr untyped_slot, 608 unsigned int free_slot) 609{ 610 int obj_size = CDL_Obj_SizeBits(obj); 611 seL4_ArchObjectType obj_type; 612 613 switch (CDL_Obj_Type(obj)) { 614 case CDL_Frame: 615 obj_type = kobject_get_type(KOBJECT_FRAME, obj_size); 616 break; 617 case CDL_ASIDPool: 618 obj_type = CDL_Untyped; 619 obj_size = seL4_ASIDPoolBits; 620 break; 621#ifdef CONFIG_KERNEL_MCS 622 case CDL_SchedContext: 623 obj_size = kobject_get_size(KOBJECT_SCHED_CONTEXT, obj_size); 624 obj_type = (seL4_ArchObjectType) CDL_Obj_Type(obj); 625 break; 
626#endif 627 default: 628 obj_type = (seL4_ArchObjectType) CDL_Obj_Type(obj); 629 } 630 631 ZF_LOGD_IF(CDL_Obj_Type(obj) == CDL_CNode, " (CNode of size %d bits)", obj_size); 632 633 seL4_Error err = seL4_NoError; 634 635#ifdef CONFIG_ARCH_X86 636 if (CDL_Obj_Type(obj) == CDL_IOPorts) { 637 err = seL4_X86_IOPortControl_Issue(seL4_CapIOPortControl, obj->start, obj->end, seL4_CapInitThreadCNode, free_slot, 638 CONFIG_WORD_SIZE); 639 ZF_LOGF_IF(err != seL4_NoError, "Failed to allocate IOPort for range [%d,%d]", (int)obj->start, (int)obj->end); 640 return seL4_NoError; 641 } 642#endif 643 644 /* There can be multiple sids per context bank, currently only 1 sid per cb is implemented for the vms. 645 * When this gets extended we need to decide to add sid number -> cb number map into the haskell / python tool 646 * or generate the capdl spec so that the order remains correct here e.g a list a stream ids followed by the cb they 647 * are mapped to, the cb condition here (1***) will the reset the stream id number back to 0 for the next context bank. 
648 */ 649#ifdef CONFIG_ARM_SMMU 650 if (CDL_Obj_Type(obj) == CDL_SID) { 651 err = seL4_ARM_SIDControl_GetSID(seL4_CapSMMUSIDControl, sid_number, seL4_CapInitThreadCNode, free_slot, 652 CONFIG_WORD_SIZE); 653 ZF_LOGF_IF(err != seL4_NoError, "Failed to allocate SID cap"); 654 sid_number++; 655 ZF_LOGF_IF(sid_number > MAX_STREAM_IDS, "Stream ID numbers exhausted"); 656 return seL4_NoError; 657 } else if (CDL_Obj_Type(obj) == CDL_CB) { 658 err = seL4_ARM_CBControl_GetCB(seL4_CapSMMUCBControl, CDL_CB_Bank(obj), seL4_CapInitThreadCNode, free_slot, 659 CONFIG_WORD_SIZE); 660 ZF_LOGF_IF(err != seL4_NoError, "Failed to allocate CB cap"); 661 sid_number = 0; //(1***) 662 return seL4_NoError; 663 } 664#endif 665 666#if !CONFIG_CAPDL_LOADER_STATIC_ALLOC 667 if (isDeviceObject(obj)) { 668 seL4_Word paddr = CDL_Obj_Paddr(obj); 669 ZF_LOGD(" device frame/untyped, paddr = %p, size = %d bits", (void *) paddr, obj_size); 670 671 /* This is a device object. Look for it in bootinfo. */ 672 err = find_device_object(paddr, obj_type, obj_size, free_slot, id, info, spec); 673 ZF_LOGF_IF(err != seL4_NoError, "Failed to find device frame/untyped at paddr = %p", (void *) paddr); 674 return seL4_NoError; 675 } 676#endif 677 678 /* It's not a device object, or it's a statically allocated device 679 * object, so we don't need to search for it. */ 680 return retype_untyped(free_slot, untyped_slot, obj_type, obj_size); 681} 682 683static int requires_creation(CDL_ObjectType type) 684{ 685 switch (type) { 686 case CDL_Interrupt: 687#ifdef CONFIG_ARCH_X86 688 case CDL_IODevice: 689 case CDL_IOAPICInterrupt: 690 case CDL_MSIInterrupt: 691#endif /* CONFIG_ARCH_X86 */ 692#ifdef CONFIG_ARCH_ARM 693 case CDL_ARMIODevice: 694 case CDL_ARMInterrupt: 695#endif /* CONFIG_ARCH_ARM */ 696 return false; 697 default: 698 return true; 699 } 700} 701 702#if CONFIG_CAPDL_LOADER_STATIC_ALLOC 703 704/* 705 * Spec was statically allocated; just run its untyped derivation steps. 
706 */ 707static void create_objects(CDL_Model *spec, seL4_BootInfo *bootinfo) 708{ 709 ZF_LOGD("Creating objects..."); 710 711 unsigned int free_slot_index = 0; 712 713 /* First, allocate most objects and update the spec database with 714 the cslot locations. The exception is ASIDPools, where 715 create_object only allocates the backing untypeds. */ 716 for (int ut_index = 0; ut_index < spec->num_untyped; ut_index++) { 717 CDL_UntypedDerivation *ud = &spec->untyped[ut_index]; 718 seL4_CPtr untyped_cptr = untyped_cptrs[ut_index]; 719 for (int child = 0; child < ud->num; child++) { 720 CDL_ObjID obj_id = ud->children[child]; 721 seL4_CPtr free_slot = free_slot_start + free_slot_index; 722 CDL_Object *obj = &spec->objects[obj_id]; 723 CDL_ObjectType capdl_obj_type = CDL_Obj_Type(obj); 724 725 ZF_LOGV("Creating object %s in slot %ld, from untyped %lx...", 726 CDL_Obj_Name(obj), (long)free_slot, (long)untyped_cptr); 727 728 ZF_LOGF_IF(!requires_creation(capdl_obj_type), 729 "object %s is in static allocation, but requires_creation is false", 730 CDL_Obj_Name(obj)); 731 seL4_Error err = create_object(spec, obj, obj_id, bootinfo, untyped_cptr, free_slot); 732 if (err == seL4_NoError) { 733 add_sel4_cap(obj_id, ORIG, free_slot); 734 free_slot_index++; 735 } else { 736 /* Exit with failure. */ 737 ZF_LOGF_IFERR(err, "Untyped retype failed with unexpected error"); 738 } 739 } 740 } 741 742 /* Now, we turn the backing untypeds into ASID pools, in the order 743 given by the ASID slot allocation policy. This fixes the layout 744 inside the kernel's ASID table, which ensures consistency with 745 verification models. 
*/ 746 if (spec->num_asid_slots > 1) { 747 ZF_LOGD("Creating ASID pools..."); 748 } 749 for (seL4_Word asid_high = 1; asid_high < spec->num_asid_slots; asid_high++) { 750 CDL_ObjID obj_id = spec->asid_slots[asid_high]; 751 seL4_CPtr asidpool_ut = orig_caps(obj_id); 752 seL4_CPtr asidpool_slot = free_slot_start + free_slot_index; 753 754 seL4_Error err = seL4_ARCH_ASIDControl_MakePool(seL4_CapASIDControl, asidpool_ut, 755 seL4_CapInitThreadCNode, asidpool_slot, 756 CONFIG_WORD_SIZE); 757 ZF_LOGF_IFERR(err, "Failed to create ASID pool #%d from ut slot %ld into slot %ld", 758 (int)asid_high, (long)asidpool_ut, (long)asidpool_slot); 759 760 // update to point to our new ASID pool 761 add_sel4_cap(obj_id, ORIG, asidpool_slot); 762 free_slot_index++; 763 } 764 765 // Update the free slot to go past all the objects we just made. 766 free_slot_start += free_slot_index; 767} 768 769#else /* !CONFIG_CAPDL_LOADER_STATIC_ALLOC */ 770 771/* 772 * Spec was not statically allocated; run a simple allocator. 773 * 774 * For best results, this relies on capDL-tool grouping device objects 775 * by address and sorting other objects from largest to smallest, to 776 * minimise memory fragmentation. See CapDL/PrintC.hs. 777 */ 778static void create_objects(CDL_Model *spec, seL4_BootInfo *bootinfo) 779{ 780 /* Sort untypeds from largest to smallest. */ 781 unsigned int num_normal_untypes = sort_untypeds(bootinfo); 782 783 ZF_LOGD("Creating objects..."); 784 785 /* First, allocate most objects and update the spec database with 786 the cslot locations. The exception is ASIDPools, where 787 create_object only allocates the backing untypeds. 
*/ 788 unsigned int obj_id_index = 0; 789 unsigned int free_slot_index = 0; 790 unsigned int ut_index = 0; 791 792 // Each time through the loop either: 793 // - we successfully create an object, and move to the next object to create 794 // OR 795 // - we fail to create an object, and move to the next untyped object 796 797 while (obj_id_index < spec->num && ut_index < num_normal_untypes) { 798 CDL_ObjID obj_id = obj_id_index; 799 seL4_CPtr free_slot = free_slot_start + free_slot_index; 800 seL4_CPtr untyped_cptr = untyped_cptrs[ut_index]; 801 CDL_Object *obj = &spec->objects[obj_id_index]; 802 CDL_ObjectType capdl_obj_type = CDL_Obj_Type(obj); 803 804 ZF_LOGV("Creating object %s in slot %ld, from untyped %lx...", CDL_Obj_Name(obj), (long)free_slot, 805 (long)untyped_cptr); 806 807 if (requires_creation(capdl_obj_type)) { 808 /* at this point we are definitely creating an object - figure out what it is */ 809 seL4_Error err = create_object(spec, obj, obj_id, bootinfo, untyped_cptr, free_slot); 810 if (err == seL4_NoError) { 811 add_sel4_cap(obj_id, ORIG, free_slot); 812 free_slot_index++; 813 } else if (err == seL4_NotEnoughMemory) { 814 /* go to the next untyped to allocate objects - this one is empty */ 815 ut_index++; 816 /* we failed to process the current object, go back 1 */ 817 obj_id_index--; 818 } else { 819 /* Exit with failure. */ 820 ZF_LOGF_IFERR(err, "Untyped retype failed with unexpected error"); 821 } 822 } 823 obj_id_index++; 824 } 825 826 if (obj_id_index != spec->num) { 827 /* We didn't iterate through all the objects. */ 828 ZF_LOGF("Ran out of untyped memory while creating objects."); 829 } 830 831 /* Now, we turn the backing untypeds into ASID pools, in the order 832 given by the ASID slot allocation policy. This fixes the layout 833 inside the kernel's ASID table, which ensures consistency with 834 verification models. 
*/ 835 if (spec->num_asid_slots > 1) { 836 ZF_LOGD("Creating ASID pools..."); 837 } 838 for (seL4_Word asid_high = 1; asid_high < spec->num_asid_slots; asid_high++) { 839 CDL_ObjID obj_id = spec->asid_slots[asid_high]; 840 seL4_CPtr asid_ut = orig_caps(obj_id); 841 seL4_CPtr asid_slot = free_slot_start + free_slot_index; 842 843 seL4_Error err = seL4_ARCH_ASIDControl_MakePool(seL4_CapASIDControl, asid_ut, 844 seL4_CapInitThreadCNode, asid_slot, 845 CONFIG_WORD_SIZE); 846 ZF_LOGF_IFERR(err, "Failed to create asid pool"); 847 848 // update to point to our new ASID pool 849 add_sel4_cap(obj_id, ORIG, asid_slot); 850 free_slot_index++; 851 } 852 853 // Update the free slot to go past all the objects we just made. 854 free_slot_start += free_slot_index; 855} 856 857#endif /* !CONFIG_CAPDL_LOADER_STATIC_ALLOC */ 858 859static void create_irq_cap(CDL_IRQ irq, CDL_Object *obj, seL4_CPtr free_slot) 860{ 861 seL4_CPtr root = seL4_CapInitThreadCNode; 862 int index = free_slot; 863 int depth = CONFIG_WORD_SIZE; 864 int error; 865 866 switch (CDL_Obj_Type(obj)) { 867#if defined(CONFIG_ARCH_X86) 868 case CDL_IOAPICInterrupt: 869 error = seL4_IRQControl_GetIOAPIC(seL4_CapIRQControl, root, index, depth, 870 obj->ioapicirq_extra.ioapic, obj->ioapicirq_extra.ioapic_pin, 871 obj->ioapicirq_extra.level, obj->ioapicirq_extra.polarity, 872 irq); 873 break; 874 case CDL_MSIInterrupt: 875 error = seL4_IRQControl_GetMSI(seL4_CapIRQControl, root, index, depth, 876 obj->msiirq_extra.pci_bus, obj->msiirq_extra.pci_dev, 877 obj->msiirq_extra.pci_fun, obj->msiirq_extra.handle, irq); 878 break; 879#elif defined(CONFIG_ARCH_ARM) 880 case CDL_ARMInterrupt: 881#if CONFIG_MAX_NUM_NODES > 1 882 error = seL4_IRQControl_GetTriggerCore(seL4_CapIRQControl, irq, obj->armirq_extra.trigger, 883 root, index, depth, obj->armirq_extra.target); 884#else 885 error = seL4_IRQControl_GetTrigger(seL4_CapIRQControl, irq, obj->armirq_extra.trigger, 886 root, index, depth); 887#endif 888 break; 889#endif 890 default: 
891 error = seL4_IRQControl_Get(seL4_CapIRQControl, irq, root, index, depth); 892 } 893 ZF_LOGF_IFERR(error, "Failed to create irq cap"); 894 add_sel4_cap(irq, IRQ, index); 895} 896 897static void create_irq_caps(CDL_Model *spec) 898{ 899 ZF_LOGD("Creating irq handler caps..."); 900 901 for (CDL_IRQ irq = 0; irq < spec->num_irqs; irq++) { 902 if (spec->irqs[irq] != INVALID_OBJ_ID) { 903 seL4_CPtr free_slot = get_free_slot(); 904 905 ZF_LOGD(" Creating irq handler cap for IRQ %d...", irq); 906 create_irq_cap(irq, &spec->objects[spec->irqs[irq]], free_slot); 907 next_free_slot(); 908 } 909 } 910} 911 912/* Mint a cap that will not be given to the user */ 913/* Used for badging interrupt notifications and, in the RT kernel, fault eps */ 914static void mint_cap(CDL_ObjID object_id, int free_slot, seL4_Word badge) 915{ 916 seL4_CapRights_t rights = seL4_AllRights; 917 918 seL4_CPtr dest_root = seL4_CapInitThreadCNode; 919 int dest_index = free_slot; 920 int dest_depth = CONFIG_WORD_SIZE; 921 922 seL4_CPtr src_root = seL4_CapInitThreadCNode; 923 int src_index = orig_caps(object_id); 924 int src_depth = CONFIG_WORD_SIZE; 925 926 int error = seL4_CNode_Mint(dest_root, dest_index, dest_depth, 927 src_root, src_index, src_depth, rights, 928 badge); 929 ZF_LOGF_IF(error, "Failed to mint cap"); 930} 931 932/* Duplicate capabilities */ 933static void duplicate_cap(CDL_ObjID object_id, int free_slot) 934{ 935 seL4_CapRights_t rights = seL4_AllRights; 936 937 seL4_CPtr dest_root = seL4_CapInitThreadCNode; 938 int dest_index = free_slot; 939 int dest_depth = CONFIG_WORD_SIZE; 940 941 seL4_CPtr src_root = seL4_CapInitThreadCNode; 942 int src_index = orig_caps(object_id); 943 int src_depth = CONFIG_WORD_SIZE; 944 945 int error = seL4_CNode_Copy(dest_root, dest_index, dest_depth, 946 src_root, src_index, src_depth, rights); 947 ZF_LOGF_IFERR(error, ""); 948 949 add_sel4_cap(object_id, DUP, dest_index); 950} 951 952static void duplicate_caps(CDL_Model *spec) 953{ 954 
ZF_LOGD("Duplicating CNodes..."); 955 for (CDL_ObjID obj_id = 0; obj_id < spec->num; obj_id++) { 956 if (spec->objects[obj_id].type == CDL_CNode || spec->objects[obj_id].type == CDL_TCB) { 957 ZF_LOGD(" Duplicating %s...", CDL_Obj_Name(&spec->objects[obj_id])); 958 int free_slot = get_free_slot(); 959 duplicate_cap(obj_id, free_slot); 960 next_free_slot(); 961 } 962 } 963} 964 965static void create_sched_ctrl_caps(seL4_BootInfo *bi) 966{ 967#ifdef CONFIG_KERNEL_MCS 968 for (seL4_Word i = 0; i <= bi->schedcontrol.end - bi->schedcontrol.start; i++) { 969 add_sel4_cap(i, SCHED_CTRL, i + bi->schedcontrol.start); 970 } 971#endif 972} 973 974/* Initialise SCs */ 975static void init_sc(CDL_Model *spec, CDL_ObjID sc, CDL_Core affinity) 976{ 977 CDL_Object *cdl_sc = get_spec_object(spec, sc); 978 979 uint64_t UNUSED budget = CDL_SC_Budget(cdl_sc); 980 uint64_t UNUSED period = CDL_SC_Period(cdl_sc); 981 seL4_Word UNUSED data = CDL_SC_Data(cdl_sc); 982 983 ZF_LOGD("budget: %llu, period: %llu, data: %u", budget, period, data); 984 985 seL4_CPtr UNUSED seL4_sc = orig_caps(sc); 986 seL4_CPtr UNUSED sched_control = sched_ctrl_caps(affinity); 987#ifdef CONFIG_KERNEL_MCS 988 /* Assign the sched context to run on the CPU that the root task runs on. 
*/
    int error = seL4_SchedControl_Configure(sched_control,
                                            seL4_sc, budget, period, 0, data);
    ZF_LOGF_IFERR(error, "");
#endif
}

/* Initialise TCBs */
/* Wire up a TCB's CSpace, VSpace, IPC buffer, fault endpoint(s), scheduling
 * parameters, affinity, VCPU binding and (debug builds) name, as described
 * by the spec. Missing caps are tolerated with a warning where the kernel
 * API allows it. */
static void init_tcb(CDL_Model *spec, CDL_ObjID tcb)
{
    CDL_Object *cdl_tcb = get_spec_object(spec, tcb);

    CDL_Cap *cdl_cspace_root = get_cap_at(cdl_tcb, CDL_TCB_CTable_Slot);
    if (cdl_cspace_root == NULL) {
        ZF_LOGD("Could not find CSpace cap for %s", CDL_Obj_Name(cdl_tcb));
    }
    CDL_Cap *cdl_vspace_root = get_cap_at(cdl_tcb, CDL_TCB_VTable_Slot);
    if (cdl_vspace_root == NULL) {
        ZF_LOGD("Could not find VSpace cap for %s", CDL_Obj_Name(cdl_tcb));
    }
    CDL_Cap *cdl_ipcbuffer = get_cap_at(cdl_tcb, CDL_TCB_IPCBuffer_Slot);
    if (cdl_ipcbuffer == NULL) {
        ZF_LOGD(" Warning: TCB has no IPC buffer");
    }
#if defined(CONFIG_ARM_HYPERVISOR_SUPPORT) || defined(CONFIG_VTX)
    CDL_Cap *cdl_vcpu = get_cap_at(cdl_tcb, CDL_TCB_VCPU_SLOT);
#endif

    CDL_Cap *cdl_sc = get_cap_at(cdl_tcb, CDL_TCB_SC_Slot);

    seL4_Word ipcbuffer_addr = CDL_TCB_IPCBuffer_Addr(cdl_tcb);
    uint8_t priority = CDL_TCB_Priority(cdl_tcb);
    CDL_Core UNUSED affinity = CDL_TCB_Affinity(cdl_tcb);
    uint8_t UNUSED max_priority = CDL_TCB_MaxPriority(cdl_tcb);

    seL4_CPtr sel4_tcb = orig_caps(tcb);

    /* Resolve spec caps to the loader's original seL4 caps; 0 stands for
     * "not present". */
    seL4_CPtr sel4_cspace_root = cdl_cspace_root == NULL ? 0 : orig_caps(CDL_Cap_ObjID(cdl_cspace_root));
    seL4_CPtr sel4_vspace_root = cdl_vspace_root ? orig_caps(CDL_Cap_ObjID(cdl_vspace_root)) : 0;
    seL4_CPtr sel4_ipcbuffer = cdl_ipcbuffer ? orig_caps(CDL_Cap_ObjID(cdl_ipcbuffer)) : 0;
    seL4_CPtr UNUSED sel4_sc = cdl_sc ? orig_caps(CDL_Cap_ObjID(cdl_sc)) : 0;
#if defined(CONFIG_ARM_HYPERVISOR_SUPPORT) || defined(CONFIG_VTX)
    seL4_CPtr sel4_vcpu = cdl_vcpu ? orig_caps(CDL_Cap_ObjID(cdl_vcpu)) : 0;
#endif

    seL4_CPtr sel4_fault_ep;
    seL4_CPtr UNUSED sel4_tempfault_ep;
    seL4_CPtr badged_sel4_fault_ep;

    if (config_set(CONFIG_KERNEL_MCS)) {
        /* Fault ep cptrs are in the caller's cspace */

        CDL_Cap *cdl_fault_ep = get_cap_at(cdl_tcb, CDL_TCB_FaultEP_Slot);
        if (cdl_fault_ep == NULL) {
            ZF_LOGW(" Warning: TCB has no fault endpoint");
        }

        CDL_Cap *cdl_tempfault_ep = get_cap_at(cdl_tcb, CDL_TCB_TemporalFaultEP_Slot);
        if (cdl_tempfault_ep == NULL) {
            ZF_LOGW(" Warning: TCB has no temporal fault endpoint");
        }

        sel4_fault_ep = cdl_fault_ep ? orig_caps(CDL_Cap_ObjID(cdl_fault_ep)) : 0;
        sel4_tempfault_ep = cdl_tempfault_ep ? orig_caps(CDL_Cap_ObjID(cdl_tempfault_ep)) : 0;

        if (sel4_fault_ep != 0) {
            /* A non-zero badge means the fault EP must be delivered badged:
             * mint a badged copy into a fresh slot and use that instead. */
            seL4_Word fault_ep_badge = get_capData(CDL_Cap_Data(cdl_fault_ep));
            if (fault_ep_badge != 0) {
                badged_sel4_fault_ep = (seL4_CPtr) get_free_slot();
                mint_cap(CDL_Cap_ObjID(cdl_fault_ep), badged_sel4_fault_ep,
                         fault_ep_badge);
                next_free_slot();
                sel4_fault_ep = badged_sel4_fault_ep;

            }
        }
    } else {
        /* Fault ep cptrs are in the configured thread's cspace */
        sel4_fault_ep = cdl_tcb->tcb_extra.fault_ep;
    }

    /* CapData (guard) values for the CSpace/VSpace roots, if present. */
    seL4_Word sel4_cspace_root_data = seL4_NilData;
    seL4_Word sel4_vspace_root_data = seL4_NilData;
    if (cdl_cspace_root != NULL) {
        sel4_cspace_root_data = get_capData(CDL_Cap_Data(cdl_cspace_root));
    }
    if (cdl_vspace_root != NULL) {
        sel4_vspace_root_data = get_capData(CDL_Cap_Data(cdl_vspace_root));
    }

    /*
     * seL4_TCB_Configure requires a valid CSpace, VSpace and IPC buffer cap to
     * succeed at assigning any of them. We first try and perform seL4_TCB_Configure
     * but if any of these objects are missing we fall back to only trying to assign
     * an IPC buffer if we have one using seL4_TCB_SetIPCBuffer. We print an error
     * if a CSpace is available but a VSpace is not or if there is a VSpace but no CSpace.
     */
    int error;
#ifdef CONFIG_KERNEL_MCS
    if (sel4_sc) {
        init_sc(spec, CDL_Cap_ObjID(cdl_sc), affinity);
    }

    if (cdl_cspace_root && cdl_vspace_root && sel4_ipcbuffer) {
        error = seL4_TCB_Configure(sel4_tcb,
                                   sel4_cspace_root, sel4_cspace_root_data,
                                   sel4_vspace_root, sel4_vspace_root_data,
                                   ipcbuffer_addr, sel4_ipcbuffer);
        ZF_LOGF_IFERR(error, "");
    } else {
        /* NOTE(review): pointers are passed for the %PRIxPTR fields here —
         * presumably only used as "present/absent" diagnostics; confirm. */
        ZF_LOGE_IFERR(cdl_cspace_root || cdl_vspace_root || sel4_ipcbuffer,
                      "Could not call seL4_TCB_Configure as not all required objects provided: "
                      "VSpace: %"PRIxPTR", CSpace: %"PRIxPTR", IPC Buffer: %"PRIxPTR, cdl_vspace_root, cdl_cspace_root, sel4_ipcbuffer);

        if (sel4_ipcbuffer) {
            error = seL4_TCB_SetIPCBuffer(sel4_tcb, ipcbuffer_addr, sel4_ipcbuffer);
            ZF_LOGF_IFERR(error, "");
        }
    }

    error = seL4_TCB_SetSchedParams(sel4_tcb, seL4_CapInitThreadTCB, max_priority, priority,
                                    sel4_sc, sel4_fault_ep);
    ZF_LOGF_IFERR(error, "");

    error = seL4_TCB_SetTimeoutEndpoint(sel4_tcb, sel4_tempfault_ep);
#else
    if (cdl_cspace_root && cdl_vspace_root && sel4_ipcbuffer) {
        error = seL4_TCB_Configure(sel4_tcb, sel4_fault_ep,
                                   sel4_cspace_root, sel4_cspace_root_data,
                                   sel4_vspace_root, sel4_vspace_root_data,
                                   ipcbuffer_addr, sel4_ipcbuffer);
        ZF_LOGF_IFERR(error, "");
    } else {
        ZF_LOGE_IFERR(cdl_cspace_root || cdl_vspace_root || sel4_ipcbuffer,
                      "Could not call seL4_TCB_Configure as not all required objects provided: "
                      "VSpace: %"PRIxPTR", CSpace: %"PRIxPTR", IPC Buffer: %"PRIxPTR, cdl_vspace_root, cdl_cspace_root, sel4_ipcbuffer);

        if (sel4_ipcbuffer) {
            error = seL4_TCB_SetIPCBuffer(sel4_tcb, ipcbuffer_addr, sel4_ipcbuffer);
            ZF_LOGF_IFERR(error, "");
        }
    }

    error = seL4_TCB_SetSchedParams(sel4_tcb, seL4_CapInitThreadTCB, max_priority, priority);
#endif

    ZF_LOGF_IFERR(error, "");

#if CONFIG_MAX_NUM_NODES > 1
    error = seL4_TCB_SetAffinity(sel4_tcb, affinity);
#endif

    /* On single-node builds this re-checks the previous error value, which
     * is already known to be zero — harmless. */
    ZF_LOGF_IFERR(error, "");

#if defined(CONFIG_ARM_HYPERVISOR_SUPPORT) || defined(CONFIG_VTX)
    if (sel4_vcpu) {
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        int error = seL4_ARM_VCPU_SetTCB(sel4_vcpu, sel4_tcb);
#else //CONFIG_VTX
        int error = seL4_X86_VCPU_SetTCB(sel4_vcpu, sel4_tcb);
#endif
        ZF_LOGF_IFERR(error, "Failed to bind TCB %s to VCPU %s",
                      CDL_Obj_Name(cdl_tcb), CDL_Obj_Name(get_spec_object(spec, CDL_Cap_ObjID(cdl_vcpu))));
    }
#endif

#ifdef CONFIG_DEBUG_BUILD
    /* Name the thread after its TCB name if possible. We need to do some
     * juggling first to ensure the name will not overflow the IPC buffer.
     */
    char safe_name[seL4_MsgMaxLength * sizeof(seL4_Word)];
    const char *name = CDL_Obj_Name(cdl_tcb);
    if (name != NULL) {
        /* Bounded copy with explicit NUL termination (strncpy alone does
         * not guarantee termination). */
        strncpy(safe_name, name, sizeof(safe_name) - 1);
        safe_name[sizeof(safe_name) - 1] = '\0';
        (void)seL4_DebugNameThread(sel4_tcb, safe_name);
    }
#endif
}

/* Set a TCB's registers (entry point, stack, initial arguments) and its
 * scheduling domain. Arguments beyond those passed in registers are written
 * onto the thread's stack, which requires temporarily mapping the stack
 * frame into the loader's own address space. */
static void configure_tcb(CDL_Model *spec, CDL_ObjID tcb)
{
    seL4_CPtr sel4_tcb = dup_caps(tcb);

    CDL_Object *cdl_tcb = get_spec_object(spec, tcb);
    const seL4_Word *argv = cdl_tcb->tcb_extra.init;
    int argc = cdl_tcb->tcb_extra.init_sz;

    uintptr_t pc = CDL_TCB_PC(cdl_tcb);
    uintptr_t sp = CDL_TCB_SP(cdl_tcb);

    if (sp % (sizeof(uintptr_t) * 2) != 0) {
        ZF_LOGF("TCB %s's stack pointer is not dword-aligned", CDL_Obj_Name(&spec->objects[tcb]));
    }
    /* Number of initial arguments delivered in registers (arch-dependent). */
    int reg_args = 0;
#if defined(CONFIG_ARCH_ARM)
    /* On ARM, the first four arguments go in registers.
*/
    reg_args = 4;
#endif
#if defined(CONFIG_ARCH_IA32) && defined(CONFIG_CAPDL_LOADER_CC_REGISTERS)
    reg_args = 4;
#endif
#if defined(CONFIG_ARCH_X86_64)
    reg_args = 4;
#endif
#if defined(CONFIG_ARCH_RISCV)
    reg_args = 4;
#endif

    if (argc > reg_args) {
#ifdef CONFIG_CAPDL_LOADER_CC_REGISTERS
        ZF_LOGF("TCB %s has more than four arguments, which is not supported using"
                " the register calling convention", CDL_Obj_Name(&spec->objects[tcb]));
#else //!CONFIG_CAPDL_LOADER_CC_REGISTERS
        /* We need to map the TCB's stack into our address space because there
         * are arguments to write.
         */

        /* Find the TCB's PD. */
        CDL_Cap *cdl_vspace_root = get_cap_at(cdl_tcb, CDL_TCB_VTable_Slot);
        CDL_ObjID pd = CDL_Cap_ObjID(cdl_vspace_root);

        if (STACK_ALIGNMENT_BYTES % sizeof(*argv)) {
            ZF_LOGF("Stack alignment requirement not evenly divisible by argument size");
        }

        /* The stack pointer of new threads will initially be aligned to
         * STACK_ALIGNMENT_BYTES bytes. Any padding required to enforce
         * this alignment will come before any stack arguments.
         */

        unsigned int num_stack_args = argc - reg_args; // positive because argc > reg_args
        unsigned int args_per_alignment = (STACK_ALIGNMENT_BYTES / sizeof(*argv));
        unsigned int num_unaligned_args = num_stack_args % args_per_alignment;

        if (num_unaligned_args != 0) {
            unsigned int num_padding_args = args_per_alignment - num_unaligned_args;
            unsigned int num_padding_bytes = num_padding_args * sizeof(*argv);
            sp -= num_padding_bytes;
        }

        /* Find and map the frame representing the TCB's stack. Note that we do
         * `sp - sizeof(uintptr_t)` because the stack pointer may be on a page
         * boundary.
         */
        seL4_CPtr frame = get_frame_cap(pd, sp - sizeof(uintptr_t), spec);
        /* FIXME: The above could actually fail messily if the user has given a
         * spec with stack pointers that point outside the ELF image.
         */
        seL4_ARCH_VMAttributes attribs = seL4_ARCH_Default_VMAttributes;
#ifdef CONFIG_ARCH_ARM
        attribs |= seL4_ARM_ExecuteNever;
#endif
        int error = seL4_ARCH_Page_Map(frame, seL4_CapInitThreadPD, (seL4_Word)copy_addr_with_pt,
                                       seL4_ReadWrite, attribs);
        ZF_LOGF_IFERR(error, "");

        /* Write all necessary arguments to the TCB's stack. */
        for (int i = argc - 1; i >= 0 && i >= reg_args; i--) {
            if (i != argc - 1 && sp % PAGE_SIZE_4K == 0) {
                /* We could support this case with more complicated logic, but
                 * choose not to.
                 */
                ZF_LOGF("TCB %s's initial arguments cause its stack to cross a page boundary",
                        CDL_Obj_Name(&spec->objects[tcb]));
            }
            sp -= sizeof(seL4_Word);
            /* Write through the loader's temporary mapping of the stack frame. */
            *(seL4_Word *)(copy_addr_with_pt + sp % PAGE_SIZE_4K) = argv[i];
        }

#ifdef CONFIG_ARCH_ARM
        /* Flush the written arguments so the target sees them even through
         * an instruction-side or differently-attributed mapping. */
        error = seL4_ARM_Page_Unify_Instruction(frame, 0, PAGE_SIZE_4K);
        ZF_LOGF_IFERR(error, "");
#endif //CONFIG_ARCH_ARM
        error = seL4_ARCH_Page_Unmap(frame);
        ZF_LOGF_IFERR(error, "");
#endif //CONFIG_CAPDL_LOADER_CC_REGISTERS
    }

    /* Build the initial register file: entry point, (possibly padded) stack
     * pointer, and up to four register-passed arguments, per architecture. */
    seL4_UserContext regs = {
#if defined(CONFIG_ARCH_ARM)
        .pc = pc,
        .sp = sp,
#ifdef CONFIG_ARCH_AARCH32
        .r0 = argc > 0 ? argv[0] : 0,
        .r1 = argc > 1 ? argv[1] : 0,
        .r2 = argc > 2 ? argv[2] : 0,
        .r3 = argc > 3 ? argv[3] : 0,
#else // CONFIG_ARCH_AARCH64
        .x0 = argc > 0 ? argv[0] : 0,
        .x1 = argc > 1 ? argv[1] : 0,
        .x2 = argc > 2 ? argv[2] : 0,
        .x3 = argc > 3 ? argv[3] : 0,
#endif // CONFIG_ARCH_AARCH32
#elif defined(CONFIG_ARCH_IA32)
        .eip = pc,
        .esp = sp,
#ifdef CONFIG_CAPDL_LOADER_CC_REGISTERS
        .eax = argc > 2 ? argv[2] : 0,
        .ebx = argc > 3 ? argv[3] : 0,
        .ecx = argc > 0 ? argv[0] : 0,
        .edx = argc > 1 ? argv[1] : 0,
#endif
#elif defined(CONFIG_ARCH_X86_64)
        .rip = pc,
        .rsp = sp,
        .rdi = argc > 0 ? argv[0] : 0,
        .rsi = argc > 1 ? argv[1] : 0,
        .rdx = argc > 2 ? argv[2] : 0,
        .rcx = argc > 3 ? argv[3] : 0,
#elif defined(CONFIG_ARCH_RISCV)
        .pc = pc,
        .sp = sp,
        .a0 = argc > 0 ? argv[0] : 0,
        .a1 = argc > 1 ? argv[1] : 0,
        .a2 = argc > 2 ? argv[2] : 0,
        .a3 = argc > 3 ? argv[3] : 0,
#endif
    };
    ZF_LOGD(" Setting up _start()");
    ZF_LOGD(" pc = %p", (void *)pc);
    ZF_LOGD(" sp = %p", (void *)sp);
    for (int i = 0; i < argc; i++) {
        ZF_LOGD(" arg%d = %p", i, (void *)argv[i]);
    }

    global_user_context = regs;

    int error = seL4_TCB_WriteRegisters(sel4_tcb, false, 0,
                                        sizeof(seL4_UserContext) / sizeof(seL4_Word),
                                        &global_user_context);
    ZF_LOGF_IFERR(error, "");

    uint32_t UNUSED domain = CDL_TCB_Domain(cdl_tcb);
    ZF_LOGD(" Assigning thread to domain %u...", domain);
    error = seL4_DomainSet_Set(seL4_CapDomain, domain, sel4_tcb);
    ZF_LOGF_IFERR(error, "");
}

/* Initialise then configure every TCB in the spec. */
static void init_tcbs(CDL_Model *spec)
{
    ZF_LOGD("Initialising TCBs...");
    for (CDL_ObjID obj_id = 0; obj_id < spec->num; obj_id++) {
        if (spec->objects[obj_id].type == CDL_TCB) {
            ZF_LOGD(" Initialising %s...", CDL_Obj_Name(&spec->objects[obj_id]));
            init_tcb(spec, obj_id);

            ZF_LOGD(" Configuring %s...", CDL_Obj_Name(&spec->objects[obj_id]));
            configure_tcb(spec, obj_id);
        }
    }
}


/* Bind the notification endpoint given in the spec (if any) to the IRQ's
 * handler cap, minting a badged copy of the endpoint when a badge is set. */
static void init_irq(CDL_Model *spec, CDL_IRQ irq_no)
{
    seL4_CPtr irq_handler_cap = irq_caps(irq_no);

    CDL_Object *cdl_irq = get_spec_object(spec, spec->irqs[irq_no]);
    assert(cdl_irq != NULL);

#ifdef CONFIG_ARCH_X86
    assert(cdl_irq->type == CDL_Interrupt || cdl_irq->type == CDL_IOAPICInterrupt || cdl_irq->type ==
           CDL_MSIInterrupt);
#elif CONFIG_ARCH_ARM
    assert(cdl_irq->type == CDL_Interrupt || cdl_irq->type == CDL_ARMInterrupt);
#else
    assert(cdl_irq->type == CDL_Interrupt);
#endif

    if (cdl_irq->size_bits != 0) {
        ZF_LOGF("Misconfigured IRQ; an IRQ must have a size of 0.");
    }
    if (cdl_irq->slots.num > 1) {
        ZF_LOGF("Misconfigured IRQ; an IRQ cannot have more than one assigned endpoint.");
    }

    if (cdl_irq->slots.num == 1) {
        /* This IRQ is bound. */
        CDL_Cap *endpoint_cap = &cdl_irq->slots.slot[0].cap;
        seL4_CPtr endpoint_cptr;

        /* Non-zero badge: mint a badged copy of the endpoint into a fresh
         * slot; otherwise use the original endpoint cap directly. */
        seL4_Word badge = get_capData(CDL_Cap_Data(endpoint_cap));
        if (badge) {
            endpoint_cptr = (seL4_CPtr)get_free_slot();
            mint_cap(CDL_Cap_ObjID(endpoint_cap), endpoint_cptr, badge);
            next_free_slot();
        } else {
            endpoint_cptr = orig_caps(CDL_Cap_ObjID(endpoint_cap));
        }

        int error = seL4_IRQHandler_SetNotification(irq_handler_cap, endpoint_cptr);
        ZF_LOGF_IFERR(error, "");
    }
}

/* Bind endpoints for every IRQ object present in the spec. */
static void init_irqs(CDL_Model *spec)
{
    ZF_LOGD("Initialising IRQ handler caps...");

    for (CDL_IRQ irq = 0; irq < spec->num_irqs; irq++) {
        if (spec->irqs[irq] != INVALID_OBJ_ID) {
            ZF_LOGD(" Initialising handler for IRQ %d...", irq);
            init_irq(spec, irq);
        }
    }
}

/* Initialise virtual address spaces */
/* Assign the root task's ASID pool to the given top-level paging object. */
static void set_asid(CDL_Model *spec UNUSED, CDL_ObjID page)
{
    seL4_CPtr sel4_page = orig_caps(page);
    int error = seL4_ARCH_ASIDPool_Assign(seL4_CapInitThreadASIDPool, sel4_page);
    ZF_LOGF_IFERR(error, "");
}

/* Assign an ASID to every top-level page directory / PML4 / PGD in the spec. */
static void init_pd_asids(CDL_Model *spec)
{
    ZF_LOGD("Initialising Page Directory ASIDs...");

    for (CDL_ObjID obj_id = 0; obj_id < spec->num; obj_id++) {
        CDL_ObjectType type = CDL_TOP_LEVEL_PD;
        if (spec->objects[obj_id].type == type) {
            ZF_LOGD(" Initialising pd/pml4 ASID %s...",
                    CDL_Obj_Name(&spec->objects[obj_id]));
            set_asid(spec, obj_id);
        }
    }
}

/* Map a single frame or page table cap into the VSpace rooted at `pd_id`
 * at `vaddr`. For frames, a fresh copy of the cap is made per mapping
 * (shared-frame support) and recorded in the spec cap slot for later use. */
static void map_page(CDL_Model *spec UNUSED, CDL_Cap *page_cap, CDL_ObjID pd_id,
                     seL4_CapRights_t rights, seL4_Word vaddr)
{
    CDL_ObjID page = CDL_Cap_ObjID(page_cap);

    // TODO: We should not be using the original cap here
    seL4_CPtr sel4_page = orig_caps(page);
    seL4_CPtr sel4_pd = orig_caps(pd_id);
#ifdef CONFIG_CAPDL_LOADER_WRITEABLE_PAGES
    /* Make instruction pages writeable to support software breakpoints */
    if (seL4_CapRights_get_capAllowGrant(rights)) {
        rights = seL4_CapRights_set_capAllowWrite(rights, true);
    }
#endif
    seL4_ARCH_VMAttributes vm_attribs = CDL_Cap_VMAttributes(page_cap);
    ZF_LOGD(" Mapping %s into %s with rights={G: %d, R: %d, W: %d}, vaddr=0x%x, vm_attribs=0x%x",
            CDL_Obj_Name(&spec->objects[page]),
            CDL_Obj_Name(&spec->objects[pd_id]),
            seL4_CapRights_get_capAllowGrant(rights),
            seL4_CapRights_get_capAllowRead(rights),
            seL4_CapRights_get_capAllowWrite(rights),
            vaddr, vm_attribs);

    if (CDL_Cap_Type(page_cap) == CDL_PTCap) {
        int error = seL4_ARCH_PageTable_Map(sel4_page, sel4_pd, vaddr, vm_attribs);
        ZF_LOGF_IFERR(error, "");

    } else if (CDL_Cap_Type(page_cap) == CDL_FrameCap) {
#ifdef CAPDL_SHARED_FRAMES
        /* hack to support shared frames: create a new cap for each mapping */
        int dest_index = get_free_slot();

        int error_0 = seL4_CNode_Copy(seL4_CapInitThreadCNode, dest_index, CONFIG_WORD_SIZE,
                                      seL4_CapInitThreadCNode, sel4_page, CONFIG_WORD_SIZE, seL4_AllRights);
        ZF_LOGF_IFERR(error_0, "");

        next_free_slot();
        sel4_page = dest_index;
#endif

        /* XXX: Write-only mappings are silently downgraded by the kernel to
         * kernel-only. This is clearly not what the user intended if they
         * passed us a write-only mapping. Help them out by upgrading it here.
         */
        if (seL4_CapRights_get_capAllowWrite(rights)) {
            rights = seL4_CapRights_set_capAllowRead(rights, true);
        }

#ifdef CONFIG_ARCH_ARM
        /* No grant right implies the page is never executed: mark XN. */
        if (!seL4_CapRights_get_capAllowGrant(rights)) {
            vm_attribs |= seL4_ARM_ExecuteNever;
        }
#endif

        /* Store the cap used for mapping the page in the CDL_Cap in the
         * corresponding page table/directory slot for later use. */
        page_cap->mapped_frame_cap = sel4_page;

        // FIXME: Add support for super-pages.
        int error = seL4_ARCH_Page_Map(sel4_page, sel4_pd, vaddr, rights, vm_attribs);
        if (error) {
            /* Try and retrieve some useful information to help the user
             * diagnose the error.
             */
            ZF_LOGE("Failed to map frame ");
            seL4_ARCH_Page_GetAddress_t addr UNUSED = seL4_ARCH_Page_GetAddress(sel4_page);
            if (addr.error) {
                ZF_LOGE("<unknown physical address (error = %d)>", addr.error);
            } else {
                ZF_LOGE("%p", (void *)addr.paddr);
            }
            ZF_LOGE(" -> %p (error = %d)", (void *)vaddr, error);
            ZF_LOGF_IFERR(error, "");
        }
#ifdef CONFIG_ARCH_ARM
        /* When seL4 creates a new frame object it zeroes the associated memory
         * through a cached kernel mapping. If we are configuring a cached
         * mapping for the target, standard coherence protocols ensure
         * everything works as expected. However, if we are configuring an
         * uncached mapping for the target, the dirty zero data cached from the
         * kernel's mapping is likely flushed to memory at some time in the
         * future causing an unpleasant surprise for the target whose own
         * uncached writes are mysteriously overwritten. To prevent this, we
         * unify the mapping here, flushing the cached data from the kernel's
         * mapping.
         */
        seL4_Word size_bits = spec->objects[page].size_bits;
        assert(size_bits <= sizeof(uintptr_t) * CHAR_BIT - 1 && "illegal object size");

        seL4_ARCH_Page_GetAddress_t addr = seL4_ARCH_Page_GetAddress(sel4_page);
        /* Only flush frames backed by the first RAM region; device frames
         * (non-zero spec paddr) are skipped. */
        if (addr.paddr >= memory_region[0].start && addr.paddr <= memory_region[0].end) {
            if (!(vm_attribs & seL4_ARM_PageCacheable) && CDL_Obj_Paddr(&spec->objects[page]) == 0) {
                error = seL4_ARM_Page_CleanInvalidate_Data(sel4_page, 0, BIT(size_bits));
                ZF_LOGF_IFERR(error, "");
            }

            if (seL4_CapRights_get_capAllowGrant(rights)) {
                error = seL4_ARM_Page_Unify_Instruction(sel4_page, 0, BIT(size_bits));
                ZF_LOGF_IFERR(error, "");
            }
        }
#endif
    } else {
        ZF_LOGF("attempt to map something that is not a frame or PT");
    }
}

#if defined(CONFIG_ARCH_X86_64) || defined(CONFIG_ARCH_AARCH64) || defined(CONFIG_ARCH_RISCV)

/* Map every frame held by a level-3 (leaf) paging structure. */
static void init_level_3(CDL_Model *spec, CDL_ObjID level_0_obj, uintptr_t level_3_base, CDL_ObjID level_3_obj)
{
    CDL_Object *obj = get_spec_object(spec, level_3_obj);
    for (unsigned long slot_index = 0; slot_index < CDL_Obj_NumSlots(obj); slot_index++) {
        CDL_CapSlot *slot = CDL_Obj_GetSlot(obj, slot_index);
        unsigned long obj_slot = CDL_CapSlot_Slot(slot);
        uintptr_t base = level_3_base + (obj_slot << (seL4_PageBits));
        CDL_Cap *frame_cap = CDL_CapSlot_Cap(slot);
        seL4_CapRights_t frame_rights = CDL_seL4_Cap_Rights(frame_cap);
        map_page(spec, frame_cap, level_0_obj, frame_rights, base);
    }
}

/* Map each slot of a level-2 paging structure: either a (large) frame
 * directly, or a level-3 table which is then recursed into. */
static void init_level_2(CDL_Model *spec, CDL_ObjID level_0_obj, uintptr_t level_2_base, CDL_ObjID level_2_obj)
{
    CDL_Object *obj = get_spec_object(spec, level_2_obj);
    for (unsigned long slot_index = 0; slot_index < CDL_Obj_NumSlots(obj); slot_index++) {
        CDL_CapSlot *slot = CDL_Obj_GetSlot(obj, slot_index);
        unsigned long obj_slot = CDL_CapSlot_Slot(slot);
        uintptr_t base = level_2_base + (obj_slot << (CDL_PT_LEVEL_3_IndexBits + seL4_PageBits));
        CDL_Cap *level_3_cap = CDL_CapSlot_Cap(slot);
        CDL_ObjID level_3_obj = CDL_Cap_ObjID(level_3_cap);
        if (CDL_Cap_Type(level_3_cap) == CDL_FrameCap) {
            seL4_CapRights_t frame_rights = CDL_seL4_Cap_Rights(level_3_cap);
            map_page(spec, level_3_cap, level_0_obj, frame_rights, base);
        } else {
            seL4_ARCH_VMAttributes vm_attribs = CDL_Cap_VMAttributes(level_3_cap);
            CDL_PT_LEVEL_3_MAP(orig_caps(level_3_obj), orig_caps(level_0_obj), base, vm_attribs);
            init_level_3(spec, level_0_obj, base, level_3_obj);
        }
    }
}

/* Map each slot of a level-1 paging structure: either a (huge) frame
 * directly, or a level-2 structure which is then recursed into. */
static void init_level_1(CDL_Model *spec, CDL_ObjID level_0_obj, uintptr_t level_1_base, CDL_ObjID level_1_obj)
{
    CDL_Object *obj = get_spec_object(spec, level_1_obj);
    for (unsigned int slot_index = 0; slot_index < CDL_Obj_NumSlots(obj); slot_index++) {
        CDL_CapSlot *slot = CDL_Obj_GetSlot(obj, slot_index);
        unsigned long obj_slot = CDL_CapSlot_Slot(slot);
        uintptr_t base = level_1_base + (obj_slot << (CDL_PT_LEVEL_2_IndexBits + CDL_PT_LEVEL_3_IndexBits + seL4_PageBits));
        CDL_Cap *level_2_cap = CDL_CapSlot_Cap(slot);
        CDL_ObjID level_2_obj = CDL_Cap_ObjID(level_2_cap);
        if (CDL_Cap_Type(level_2_cap) == CDL_FrameCap) {
            seL4_CapRights_t frame_rights = CDL_seL4_Cap_Rights(level_2_cap);
            map_page(spec, level_2_cap, level_0_obj, frame_rights, base);
        } else {
            seL4_ARCH_VMAttributes vm_attribs = CDL_Cap_VMAttributes(level_2_cap);
            CDL_PT_LEVEL_2_MAP(orig_caps(level_2_obj), orig_caps(level_0_obj), base, vm_attribs);
            init_level_2(spec, level_0_obj, base, level_2_obj);
        }
    }
}

/* Map each level-1 structure held by the top-level (level-0) paging object
 * and recurse. Only meaningful on 4-level configurations, hence the guard
 * on CDL_PT_LEVEL_1_MAP. */
static void init_level_0(CDL_Model *spec, CDL_ObjID level_0_obj, uintptr_t level_0_base, CDL_ObjID level_0_obj_unused)
{
    CDL_Object *obj = get_spec_object(spec, level_0_obj);
    for (unsigned long slot_index = 0; slot_index < CDL_Obj_NumSlots(obj); slot_index++) {
        CDL_CapSlot *slot = CDL_Obj_GetSlot(obj, slot_index);
        unsigned long obj_slot = CDL_CapSlot_Slot(slot);
        uintptr_t base = (level_0_base + obj_slot) << (CDL_PT_LEVEL_1_IndexBits + CDL_PT_LEVEL_2_IndexBits +
                                                       CDL_PT_LEVEL_3_IndexBits + seL4_PageBits);
        CDL_Cap *level_1_cap = CDL_CapSlot_Cap(slot);
        CDL_ObjID level_1_obj = CDL_Cap_ObjID(level_1_cap);
        seL4_ARCH_VMAttributes vm_attribs = CDL_Cap_VMAttributes(level_1_cap);
#ifdef CDL_PT_LEVEL_1_MAP
        CDL_PT_LEVEL_1_MAP(orig_caps(level_1_obj), orig_caps(level_0_obj), base, vm_attribs);
        init_level_1(spec, level_0_obj, base, level_1_obj);
#else
        ZF_LOGF("CDL_PT_LEVEL_1_MAP is not defined");
#endif
    }
}

#else

/* Map one PD slot (a page table or large frame) at the vaddr implied by
 * its slot number. */
static void map_page_directory_slot(CDL_Model *spec UNUSED, CDL_ObjID pd_id, CDL_CapSlot *pd_slot)
{
    ZF_LOGD(" Mapping slot %d in %s", pd_slot->slot, CDL_Obj_Name(&spec->objects[pd_id]));
    CDL_Cap *page_cap = CDL_CapSlot_Cap(pd_slot);

    seL4_Word page_vaddr = CDL_CapSlot_Slot(pd_slot) << (seL4_PageTableIndexBits + seL4_PageBits);
    seL4_CapRights_t page_rights = CDL_seL4_Cap_Rights(page_cap);

    map_page(spec, page_cap, pd_id, page_rights, page_vaddr);
}

/* Map every slot of a page directory. */
static void map_page_directory(CDL_Model *spec, CDL_ObjID pd_id)
{
    CDL_Object *cdl_pd = get_spec_object(spec, pd_id);

    for (unsigned int slot_index = 0; slot_index < CDL_Obj_NumSlots(cdl_pd); slot_index++) {
        map_page_directory_slot(spec, pd_id, CDL_Obj_GetSlot(cdl_pd, slot_index));
    }
}

/* Map one frame from a page table into the VSpace rooted at `pd`. */
static void map_page_table_slot(CDL_Model *spec UNUSED, CDL_ObjID pd, CDL_ObjID pt UNUSED,
                                seL4_Word pt_vaddr, CDL_CapSlot *pt_slot)
{
    CDL_Cap *page_cap = CDL_CapSlot_Cap(pt_slot);

    seL4_Word page_vaddr = pt_vaddr + (CDL_CapSlot_Slot(pt_slot) << seL4_PageBits);
    seL4_CapRights_t page_rights = CDL_seL4_Cap_Rights(page_cap);

    /* NOTE(review): this log prints pt_vaddr (the table's base) while the
     * actual mapping below is at page_vaddr — looks intentional as a
     * per-table context line, but confirm. */
    ZF_LOGD(" Mapping %s into %s[%d] with rights={G: %d, R: %d, W: %d}, vaddr=0x%" PRIxPTR "",
            CDL_Obj_Name(&spec->objects[pt]), CDL_Obj_Name(&spec->objects[pd]), pt_slot->slot,
            seL4_CapRights_get_capAllowGrant(page_rights),
            seL4_CapRights_get_capAllowRead(page_rights),
            seL4_CapRights_get_capAllowWrite(page_rights),
            (uintptr_t)pt_vaddr);

    map_page(spec, page_cap, pd, page_rights, page_vaddr);
}

/* If a PD slot holds a page table, map all of that table's frames. */
static void map_page_table_slots(CDL_Model *spec, CDL_ObjID pd, CDL_CapSlot *pd_slot)
{
    CDL_Cap *page_cap = CDL_CapSlot_Cap(pd_slot);
    CDL_ObjID page = CDL_Cap_ObjID(page_cap);

    seL4_Word page_vaddr = CDL_CapSlot_Slot(pd_slot) << (seL4_PageTableIndexBits + seL4_PageBits);

    if (CDL_Cap_Type(page_cap) == CDL_PTCap) {
        CDL_Object *obj = get_spec_object(spec, page);
        for (unsigned int slot_index = 0; slot_index < CDL_Obj_NumSlots(obj); slot_index++) {
            map_page_table_slot(spec, pd, page, page_vaddr, CDL_Obj_GetSlot(obj, slot_index));
        }
    }
}

/* Map the frames of every page table referenced by the given PD. */
static void map_page_directory_page_tables(CDL_Model *spec, CDL_ObjID pd)
{
    CDL_Object *cdl_pd = get_spec_object(spec, pd);
    for (unsigned int slot_index = 0; slot_index < CDL_Obj_NumSlots(cdl_pd); slot_index++) {
        map_page_table_slots(spec, pd, CDL_Obj_GetSlot(cdl_pd, slot_index));
    }
}
#endif

/* Populate every VSpace in the spec: recursively on 2/3/4-level
 * architectures, or in two passes (tables, then frames) on 2-level
 * legacy configurations. */
static void init_vspace(CDL_Model *spec)
{
    ZF_LOGD("Initialising VSpaces...");

#if defined(CONFIG_ARCH_X86_64) || defined(CONFIG_ARCH_AARCH64) || defined(CONFIG_ARCH_RISCV)
    /* Have no understanding of the logic of model of whatever the hell the
       other code in this function is doing as it is pure gibberish. For
       x86_64 and aarch64 we will just do the obvious recursive initialization */
    ZF_LOGD("================================");
    for (CDL_ObjID obj_id = 0; obj_id < spec->num; obj_id++) {
        if (spec->objects[obj_id].type == CDL_TOP_LEVEL_PD) {
            ZF_LOGD(" Initialising top level %s...", CDL_Obj_Name(&spec->objects[obj_id]));
            /* Start recursion at the level matching the configured paging depth. */
            if (CDL_PT_NUM_LEVELS == 4) {
                init_level_0(spec, obj_id, 0, obj_id);
            } else if (CDL_PT_NUM_LEVELS == 3) {
                init_level_1(spec, obj_id, 0, obj_id);
            } else if (CDL_PT_NUM_LEVELS == 2) {
                init_level_2(spec, obj_id, 0, obj_id);
            } else {
                ZF_LOGF("Unsupported CDL_PT_NUM_LEVELS value: \"%d\"", CDL_PT_NUM_LEVELS);
            }
        }
    }
#else
    ZF_LOGD("================================");
    ZF_LOGD("Initialising page directories...");

    for (CDL_ObjID obj_id = 0; obj_id < spec->num; obj_id++) {
        if (spec->objects[obj_id].type == CDL_PD) {
            ZF_LOGD(" Initialising page directory %s...", CDL_Obj_Name(&spec->objects[obj_id]));
            map_page_directory(spec, obj_id);
        }
    }

    ZF_LOGD("===========================");
    ZF_LOGD("Initialising page tables...");
    for (CDL_ObjID obj_id = 0; obj_id < spec->num; obj_id++) {
        if (spec->objects[obj_id].type == CDL_PD) {
            ZF_LOGD(" Initialising page tables in %s...", CDL_Obj_Name(&spec->objects[obj_id]));
            map_page_directory_page_tables(spec, obj_id);
        }
    }
#endif
}

/* True for cap types whose badge/rights semantics follow endpoints
 * (endpoints, notifications, reply objects). */
static bool ep_related_cap(CDL_CapType cap)
{
    switch (cap) {
    case CDL_EPCap:
    case CDL_NotificationCap:
    case CDL_ReplyCap:
        return true;
    default:
        return false;
    }
}

/* Initialise capability spaces */
static void init_cnode_slot(CDL_Model *spec, init_cnode_mode mode, CDL_ObjID cnode_id, CDL_CapSlot *cnode_slot)
{
    CDL_Cap *target_cap = CDL_CapSlot_Cap(cnode_slot);
    CDL_ObjID target_cap_obj = CDL_Cap_ObjID(target_cap);
    CDL_IRQ target_cap_irq =
CDL_Cap_IRQ(target_cap); 1726 1727 CDL_CapType target_cap_type = CDL_Cap_Type(target_cap); 1728 seL4_CapRights_t target_cap_rights = CDL_seL4_Cap_Rights(target_cap); 1729 1730 // For endpoint this is the badge, for cnodes, this is the (encoded) guard. 1731 seL4_Word target_cap_data = get_capData(CDL_Cap_Data(target_cap)); 1732 1733 /* To support moving original caps, we need a spec with a CDT (most don't). 1734 * This shoud probably become a separate configuration option for when to 1735 * use the CDT, and when to just copy. For now, let's just copy. 1736 */ 1737 bool move_cap = false; //FIXME 1738 bool is_ep_cap = ep_related_cap(target_cap_type); 1739 bool is_irq_handler_cap = (target_cap_type == CDL_IRQHandlerCap); 1740 bool is_frame_cap = (target_cap_type == CDL_FrameCap); 1741 1742 CDL_Object *dest_obj = get_spec_object(spec, cnode_id); 1743 uint8_t dest_size = CDL_Obj_SizeBits(dest_obj); 1744 1745 // Use a copy of the cap to reference the destination, in case the original has already been moved. 1746 seL4_CPtr dest_root = dup_caps(cnode_id); 1747 int dest_index = CDL_CapSlot_Slot(cnode_slot); 1748 uint8_t dest_depth = dest_size; 1749 1750 // Use an original cap to reference the object to copy. 
1751 seL4_CPtr src_root = seL4_CapInitThreadCNode; 1752 int src_index; 1753 switch (target_cap_type) { 1754#ifdef CONFIG_ARCH_X86 1755 case CDL_IOSpaceCap: 1756 src_index = seL4_CapIOSpace; 1757 break; 1758#endif 1759#if defined(CONFIG_ARCH_ARM) 1760 case CDL_ARMIOSpaceCap: 1761 src_index = first_arm_iospace + target_cap_data; 1762 target_cap_data = seL4_NilData; 1763 break; 1764#endif 1765 case CDL_IRQHandlerCap: 1766 src_index = irq_caps(target_cap_irq); 1767 break; 1768 case CDL_IRQControlCap: 1769 /* there's only one */ 1770 src_index = seL4_CapIRQControl; 1771 move_cap = true; 1772 is_irq_handler_cap = true; 1773 break; 1774 case CDL_SchedControlCap: 1775 src_index = sched_ctrl_caps(CDL_Cap_ObjID(target_cap)); 1776 break; 1777 case CDL_DomainCap: 1778 /* there's only one */ 1779 src_index = seL4_CapDomain; 1780 move_cap = false; 1781 break; 1782 case CDL_ASIDControlCap: 1783 /* there's only one */ 1784 src_index = seL4_CapASIDControl; 1785 move_cap = false; 1786 break; 1787 default: 1788 src_index = orig_caps(target_cap_obj); 1789 break; 1790 } 1791 1792 uint8_t src_depth = CONFIG_WORD_SIZE; 1793 1794 if (mode == MOVE && move_cap) { 1795 if (is_ep_cap || is_irq_handler_cap) { 1796 ZF_LOGD("moving..."); 1797 int error = seL4_CNode_Move(dest_root, dest_index, dest_depth, 1798 src_root, src_index, src_depth); 1799 ZF_LOGF_IFERR(error, ""); 1800 } else { 1801 ZF_LOGD("mutating (with badge/guard %p)...", (void *)target_cap_data); 1802 int error = seL4_CNode_Mutate(dest_root, dest_index, dest_depth, 1803 src_root, src_index, src_depth, target_cap_data); 1804 ZF_LOGF_IFERR(error, ""); 1805 } 1806 } else if (mode == COPY && !move_cap) { 1807 if (is_frame_cap && target_cap->mapping_container_id != INVALID_OBJ_ID) { 1808 ZF_LOGD("moving mapped..."); 1809 /* The spec requires the frame cap in the current slot be the same one 1810 * used to perform the mapping of the frame in some container (either 1811 * a page table or page directory). 
*/ 1812 CDL_ObjID container_id = target_cap->mapping_container_id; 1813 seL4_Word slot_index = target_cap->mapping_slot; 1814 1815 /* Look up the container object which contains the mapping. */ 1816 CDL_Object *container = get_spec_object(spec, container_id); 1817 assert(container); 1818 /* When the frame was mapped in, a copy of the cap was first created, 1819 * and the copy used for the mapping. This copy is the cap that must 1820 * be moved into the current slot. */ 1821 CDL_Cap *frame_cap = get_cap_at(container, slot_index); 1822 assert(frame_cap); 1823 assert(frame_cap->type == CDL_FrameCap); 1824 seL4_CPtr mapped_frame_cap = frame_cap->mapped_frame_cap; 1825 1826 /* Move the cap to the frame used for the mapping into the destination slot. */ 1827 int error = seL4_CNode_Move(dest_root, dest_index, dest_depth, 1828 src_root, mapped_frame_cap, src_depth); 1829 ZF_LOGF_IFERR(error, ""); 1830 } else { 1831 ZF_LOGD("minting (with badge/guard %p)...", (void *)target_cap_data); 1832 int error = seL4_CNode_Mint(dest_root, dest_index, dest_depth, 1833 src_root, src_index, src_depth, target_cap_rights, target_cap_data); 1834 ZF_LOGF_IFERR(error, ""); 1835 } 1836 } else { 1837 ZF_LOGV("skipping"); 1838 } 1839} 1840 1841static void init_cnode(CDL_Model *spec, init_cnode_mode mode, CDL_ObjID cnode) 1842{ 1843 CDL_Object *cdl_cnode = get_spec_object(spec, cnode); 1844 for (unsigned int slot_index = 0; slot_index < CDL_Obj_NumSlots(cdl_cnode); slot_index++) { 1845 if (CDL_Obj_GetSlot(cdl_cnode, slot_index)->cap.type == CDL_IRQHandlerCap) { 1846 CDL_IRQ UNUSED irq = CDL_Obj_GetSlot(cdl_cnode, slot_index)->cap.irq; 1847 ZF_LOGD(" Populating slot %d with cap to IRQ %d, name %s...", 1848 CDL_Obj_GetSlot(cdl_cnode, slot_index)->slot, irq, 1849 CDL_Obj_Name(&spec->objects[spec->irqs[irq]])); 1850 } else { 1851 ZF_LOGD(" Populating slot %d with cap to %s...", 1852 CDL_Obj_GetSlot(cdl_cnode, slot_index)->slot, 1853 CDL_Obj_Name(&spec->objects[CDL_Obj_GetSlot(cdl_cnode, 
slot_index)->cap.obj_id])); 1854 } 1855 init_cnode_slot(spec, mode, cnode, CDL_Obj_GetSlot(cdl_cnode, slot_index)); 1856 } 1857} 1858 1859static void init_cspace(CDL_Model *spec) 1860{ 1861 ZF_LOGD("Copying Caps..."); 1862 for (CDL_ObjID obj_id = 0; obj_id < spec->num; obj_id++) { 1863 if (spec->objects[obj_id].type == CDL_CNode) { 1864 ZF_LOGD(" Copying into %s...", CDL_Obj_Name(&spec->objects[obj_id])); 1865 init_cnode(spec, COPY, obj_id); 1866 } 1867 } 1868 1869 ZF_LOGD("Moving Caps..."); 1870 for (CDL_ObjID obj_id = 0; obj_id < spec->num; obj_id++) { 1871 if (spec->objects[obj_id].type == CDL_CNode) { 1872 ZF_LOGD(" Moving into %s...", CDL_Obj_Name(&spec->objects[obj_id])); 1873 init_cnode(spec, MOVE, obj_id); 1874 } 1875 } 1876} 1877 1878static void start_threads(CDL_Model *spec) 1879{ 1880 ZF_LOGD("Starting threads..."); 1881 for (CDL_ObjID obj_id = 0; obj_id < spec->num; obj_id++) { 1882 if (spec->objects[obj_id].type == CDL_TCB && spec->objects[obj_id].tcb_extra.resume) { 1883 ZF_LOGD(" Starting %s...", CDL_Obj_Name(&spec->objects[obj_id])); 1884 seL4_CPtr tcb = orig_caps(obj_id); 1885 int error = seL4_TCB_Resume(tcb); 1886 ZF_LOGF_IFERR(error, ""); 1887 } 1888 } 1889} 1890 1891 1892static void init_copy_addr(seL4_BootInfo *bi) 1893{ 1894 /* We need a page sized and aligned region at which to map the 1895 * destination frame during loading. We know we have free memory 1896 * after the end of our binary image + any additional frames 1897 * the kernel has mapped. The kernel maps 1 frame for IPC buffer 1898 * 1 frame for bootinfo and on some platforms additional extended 1899 * bootinfo frames. So we skip these frames and then round up to 1900 * the next 16mb alignment where we can map in a pagetable. 
1901 */ 1902 uintptr_t bi_start = (uintptr_t)bi; 1903 copy_addr = ROUND_UP(bi_start + PAGE_SIZE_4K + bi->extraLen, 0x1000000); 1904} 1905 1906static void cache_extended_bootinfo_headers(seL4_BootInfo *bi) 1907{ 1908 uintptr_t cur = (uintptr_t)bi + PAGE_SIZE_4K; 1909 uintptr_t end = cur + bi->extraLen; 1910 1911 while (cur < end) { 1912 seL4_BootInfoHeader *header = (seL4_BootInfoHeader *)cur; 1913 extended_bootinfo_table[header->id] = header; 1914 cur += header->len; 1915 } 1916} 1917 1918static void fill_frame_with_bootinfo(uintptr_t base, CDL_FrameFill_Element_t frame_fill) 1919{ 1920 CDL_FrameFill_BootInfoEnum_t bi_type = frame_fill.bi_type.type; 1921 switch (bi_type) { 1922 case CDL_FrameFill_BootInfo_X86_VBE: 1923 case CDL_FrameFill_BootInfo_X86_TSC_Freq: 1924 case CDL_FrameFill_BootInfo_FDT: 1925 break; 1926 default: 1927 ZF_LOGF("Unable to parse extra information for \"bootinfo\", given \"%d\"", 1928 bi_type); 1929 } 1930 1931 uintptr_t dest = base + frame_fill.dest_offset; 1932 size_t max_len = frame_fill.dest_len; 1933 size_t block_offset = frame_fill.bi_type.src_offset; 1934 seL4_BootInfoHeader *header = extended_bootinfo_table[bi_type]; 1935 1936 /* Check if the bootinfo has been found */ 1937 if (header) { 1938 /* Don't copy past the bootinfo */ 1939 size_t copy_len = header->len < block_offset ? 0 : header->len - block_offset; 1940 /* Don't copy more than what the frame can hold */ 1941 copy_len = MIN(copy_len, max_len); 1942 void *copy_start = (void *) header + block_offset; 1943 memcpy((void *) dest, copy_start, copy_len); 1944 } else { 1945 /* Bootinfo hasn't been found. 
1946 * If we're at the start of a block, copy an empty header across, otherwise skip the copy */ 1947 if (block_offset == 0) { 1948 seL4_BootInfoHeader empty = (seL4_BootInfoHeader) { 1949 .id = -1, .len = -1 1950 }; 1951 size_t copy_len = MIN(sizeof(empty), max_len); 1952 memcpy((void *)base, &empty, copy_len); 1953 } 1954 } 1955} 1956 1957static void fill_frame_with_filedata(uintptr_t base, CDL_FrameFill_Element_t frame_fill) 1958{ 1959 unsigned long file_size; 1960 unsigned long cpio_size = _capdl_archive_end - _capdl_archive; 1961 void *file = cpio_get_file(_capdl_archive, cpio_size, frame_fill.file_data_type.filename, &file_size); 1962 memcpy((void *)base + frame_fill.dest_offset, file + frame_fill.file_data_type.file_offset, frame_fill.dest_len); 1963} 1964 1965static void init_frame(CDL_Model *spec, CDL_ObjID obj_id, CDL_FrameFill_Element_t frame_fill) 1966{ 1967 seL4_CPtr cap = orig_caps(obj_id); 1968 1969 /* get the cap to the original object */ 1970 /* try a large mapping */ 1971 uintptr_t base = (uintptr_t)copy_addr; 1972 int error = seL4_ARCH_Page_Map(cap, seL4_CapInitThreadPD, (seL4_Word)copy_addr, 1973 seL4_ReadWrite, seL4_ARCH_Default_VMAttributes); 1974 if (error == seL4_FailedLookup) { 1975 /* try a small mapping */ 1976 base = (uintptr_t)copy_addr_with_pt; 1977 error = seL4_ARCH_Page_Map(cap, seL4_CapInitThreadPD, (seL4_Word)copy_addr_with_pt, 1978 seL4_ReadWrite, seL4_ARCH_Default_VMAttributes); 1979 } 1980 ZF_LOGF_IFERR(error, ""); 1981 1982 ssize_t max = BIT(spec->objects[obj_id].size_bits) - frame_fill.dest_offset; 1983 ZF_LOGF_IF(frame_fill.dest_len > max, "Bad spec, fill frame with len larger than frame size"); 1984 1985 /* Check for which type */ 1986 switch (frame_fill.type) { 1987 case CDL_FrameFill_BootInfo: 1988 fill_frame_with_bootinfo(base, frame_fill); 1989 break; 1990 case CDL_FrameFill_FileData: 1991 fill_frame_with_filedata(base, frame_fill); 1992 break; 1993 default: 1994 ZF_LOGF("Unsupported frame fill type %u", 
frame_fill.type); 1995 } 1996 1997 /* Unmap the frame */ 1998 error = seL4_ARCH_Page_Unmap(cap); 1999 ZF_LOGF_IFERR(error, ""); 2000} 2001 2002static void init_frames(CDL_Model *spec) 2003{ 2004 for (CDL_ObjID obj_id = 0; obj_id < spec->num; obj_id++) { 2005 if (spec->objects[obj_id].type == CDL_Frame) { 2006 for (int i = 0; i < CONFIG_CAPDL_LOADER_FILLS_PER_FRAME 2007 && spec->objects[obj_id].frame_extra.fill[i].type != CDL_FrameFill_None; i++) { 2008 CDL_FrameFill_Element_t frame_fill = spec->objects[obj_id].frame_extra.fill[i]; 2009 init_frame(spec, obj_id, frame_fill); 2010 } 2011 } 2012 } 2013} 2014 2015static void init_scs(CDL_Model *spec) 2016{ 2017 ZF_LOGD(" Initialising SCs"); 2018 for (CDL_ObjID obj_id = 0; obj_id < spec->num; obj_id++) { 2019 if (spec->objects[obj_id].type == CDL_SchedContext) { 2020 ZF_LOGD(" Initialising %s...", CDL_Obj_Name(&spec->objects[obj_id])); 2021 /* all scs get configured on core 0, any scs that should be bound to a tcb will 2022 be reconfigured for the correct core in init_tcbs */ 2023 init_sc(spec, obj_id, 0); 2024 } 2025 } 2026} 2027 2028#ifdef CONFIG_ARCH_RISCV 2029/** 2030 * RISC-V uses a PageTable object as all table objects in the address structure. 2031 * in several places this loader assumes that the root VSpace object is a unique 2032 * object type and can be iterated over in the spec for performing operations on 2033 * a vspace_root. This function updates the CDL object type of all PageTables that 2034 * exist in a TCB VSpace slot to CDL_PT_ROOT_ALIAS which allows the rest of the 2035 * loader to treat the roots as unique objects. 
2036 */ 2037static void mark_vspace_roots(CDL_Model *spec) 2038{ 2039 ZF_LOGD("Marking top level PageTables as CDL_PT_ROOT_ALIAS..."); 2040 2041 for (CDL_ObjID obj_id = 0; obj_id < spec->num; obj_id++) { 2042 CDL_ObjectType type = CDL_TCB; 2043 if (spec->objects[obj_id].type == type) { 2044 CDL_ObjID root = CDL_Cap_ObjID(get_cap_at(get_spec_object(spec, obj_id), CDL_TCB_VTable_Slot)); 2045 ZF_LOGD(" Updating vspace_root: %d", root); 2046 spec->objects[root].type = CDL_PT_ROOT_ALIAS; 2047 } 2048 } 2049} 2050#endif 2051 2052 2053static void init_system(CDL_Model *spec) 2054{ 2055 seL4_BootInfo *bootinfo = platsupport_get_bootinfo(); 2056 simple_t simple; 2057 2058 cache_extended_bootinfo_headers(bootinfo); 2059 init_copy_addr(bootinfo); 2060 2061 simple_default_init_bootinfo(&simple, bootinfo); 2062 2063 init_copy_frame(bootinfo); 2064 2065 parse_bootinfo(bootinfo, spec); 2066 2067 create_objects(spec, bootinfo); 2068#ifdef CONFIG_ARCH_RISCV 2069 /* 2070 * This needs to be called after create_objects as it modifies parts of the 2071 * spec that create_objects uses, but are _hopefully_ safe to change after. 2072 */ 2073 mark_vspace_roots(spec); 2074#endif 2075 create_irq_caps(spec); 2076 if (config_set(CONFIG_KERNEL_MCS)) { 2077 create_sched_ctrl_caps(bootinfo); 2078 } 2079 duplicate_caps(spec); 2080 2081 init_irqs(spec); 2082 init_pd_asids(spec); 2083 init_frames(spec); 2084 init_vspace(spec); 2085 init_scs(spec); 2086 init_tcbs(spec); 2087 init_cspace(spec); 2088 start_threads(spec); 2089 2090 ZF_LOGD("%d of %d CSlots used (%.2LF%%)", get_free_slot(), 2091 BIT(CONFIG_ROOT_CNODE_SIZE_BITS), 2092 ((long double)get_free_slot() / BIT(CONFIG_ROOT_CNODE_SIZE_BITS)) 2093 * 100); 2094 2095} 2096 2097#ifdef CONFIG_DEBUG_BUILD 2098/* We need to give malloc enough memory for musllibc to allocate memory 2099 * for stdin, stdout, and stderr. 
*/ 2100extern char *morecore_area; 2101extern size_t morecore_size; 2102#define DEBUG_LIBC_MORECORE_SIZE 4096 2103static char debug_libc_morecore_area[DEBUG_LIBC_MORECORE_SIZE]; 2104 2105static void CONSTRUCTOR(MUSLCSYS_WITH_VSYSCALL_PRIORITY) init_bootinfo(void) 2106{ 2107 /* Init memory area for musl. */ 2108 morecore_area = debug_libc_morecore_area; 2109 morecore_size = DEBUG_LIBC_MORECORE_SIZE; 2110 2111 /* Allow us to print via seL4_Debug_PutChar. */ 2112 platsupport_serial_setup_bootinfo_failsafe(); 2113} 2114#endif 2115 2116int main(void) 2117{ 2118 ZF_LOGI("Starting CapDL Loader..."); 2119 init_system(&capdl_spec); 2120 ZF_LOGI(A_RESET A_FG_G "CapDL Loader done, suspending..." A_RESET ""); 2121 seL4_TCB_Suspend(seL4_CapInitThreadTCB); 2122} 2123