/*
 * Copyright 2020, DornerWorks
 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
 * Copyright 2015, 2016 Hesham Almatary <heshamelmatary@gmail.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <types.h>
#include <benchmark/benchmark.h>
#include <api/failures.h>
#include <api/syscall.h>
#include <kernel/boot.h>
#include <kernel/cspace.h>
#include <kernel/thread.h>
#include <object/tcb.h>
#include <machine/io.h>
#include <model/preemption.h>
#include <model/statedata.h>
#include <object/cnode.h>
#include <object/untyped.h>
#include <arch/api/invocation.h>
#include <arch/kernel/vspace.h>
#include <linker.h>
#include <arch/machine.h>
#include <plat/machine/hardware.h>
#include <kernel/stack.h>
#include <util.h>

struct resolve_ret {
    paddr_t frameBase;
    vm_page_size_t frameSize;
    bool_t valid;
};
typedef struct resolve_ret resolve_ret_t;

static exception_t performPageGetAddress(void *vbase_ptr);

static word_t CONST RISCVGetWriteFromVMRights(vm_rights_t vm_rights)
{
    /* Write-only frame cap rights not currently supported. */
    return vm_rights == VMReadWrite;
}

static inline word_t CONST RISCVGetReadFromVMRights(vm_rights_t vm_rights)
{
    /* Write-only frame cap rights not currently supported.
     * Kernel-only conveys no user rights. */
    return vm_rights != VMKernelOnly;
}

static inline bool_t isPTEPageTable(pte_t *pte)
{
    return pte_ptr_get_valid(pte) &&
           !(pte_ptr_get_read(pte) || pte_ptr_get_write(pte) || pte_ptr_get_execute(pte));
}
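
/* Note on the PTE encoding (RISC-V privileged spec): a valid PTE with
 * R = W = X = 0 is not a leaf mapping but a pointer to the next level of the
 * page table; any other valid combination is a leaf. isPTEPageTable relies on
 * exactly this encoding. For example, V=1,R=1,W=1,X=0 maps a readable and
 * writable page, while V=1,R=0,W=0,X=0 merely points at a next-level table. */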

/** Helper function meant only to be used for mapping the kernel
 * window.
 *
 * Maps all pages with full RWX and supervisor perms by default.
 */
static pte_t pte_next(word_t phys_addr, bool_t is_leaf)
{
    word_t ppn = (word_t)(phys_addr >> 12);

    uint8_t read = is_leaf ? 1 : 0;
    uint8_t write = read;
    uint8_t exec = read;

    return pte_new(ppn,
                   0,     /* sw */
                   1,     /* dirty */
                   1,     /* accessed */
                   1,     /* global */
                   0,     /* user */
                   exec,  /* execute */
                   write, /* write */
                   read,  /* read */
                   1      /* valid */
                  );
}

/* ==================== BOOT CODE STARTS HERE ==================== */

BOOT_CODE void map_kernel_frame(paddr_t paddr, pptr_t vaddr, vm_rights_t vm_rights)
{
#if __riscv_xlen == 32
    paddr = ROUND_DOWN(paddr, RISCV_GET_LVL_PGSIZE_BITS(0));
    assert((paddr % RISCV_GET_LVL_PGSIZE(0)) == 0);
    kernel_root_pageTable[RISCV_GET_PT_INDEX(vaddr, 0)] = pte_next(paddr, true);
#else
    if (vaddr >= KDEV_BASE) {
        /* Map devices in 2nd-level page table */
        paddr = ROUND_DOWN(paddr, RISCV_GET_LVL_PGSIZE_BITS(1));
        assert((paddr % RISCV_GET_LVL_PGSIZE(1)) == 0);
        kernel_image_level2_dev_pt[RISCV_GET_PT_INDEX(vaddr, 1)] = pte_next(paddr, true);
    } else {
        paddr = ROUND_DOWN(paddr, RISCV_GET_LVL_PGSIZE_BITS(0));
        assert((paddr % RISCV_GET_LVL_PGSIZE(0)) == 0);
        kernel_root_pageTable[RISCV_GET_PT_INDEX(vaddr, 0)] = pte_next(paddr, true);
    }
#endif
}
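
/* The kernel window below is mapped with the largest (level-0) page size so
 * that all of physical memory is covered by a handful of root-level PTEs: on
 * Sv39 each such mapping is a 1GiB gigapage, on Sv32 a 4MiB megapage. */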

BOOT_CODE VISIBLE void map_kernel_window(void)
{
    /* mapping of KERNEL_ELF_BASE (virtual address) to kernel's
     * KERNEL_ELF_PHYS_BASE */
    assert(CONFIG_PT_LEVELS > 1 && CONFIG_PT_LEVELS <= 4);

    /* kernel window starts at PPTR_BASE */
    word_t pptr = PPTR_BASE;

    /* first we map in memory from PADDR_BASE */
    word_t paddr = PADDR_BASE;
    while (pptr < PPTR_TOP) {
        assert(IS_ALIGNED(pptr, RISCV_GET_LVL_PGSIZE_BITS(0)));
        assert(IS_ALIGNED(paddr, RISCV_GET_LVL_PGSIZE_BITS(0)));

        kernel_root_pageTable[RISCV_GET_PT_INDEX(pptr, 0)] = pte_next(paddr, true);

        pptr += RISCV_GET_LVL_PGSIZE(0);
        paddr += RISCV_GET_LVL_PGSIZE(0);
    }
    /* now we should be mapping the 1GiB kernel base */
    assert(pptr == PPTR_TOP);
    pptr = ROUND_DOWN(KERNEL_ELF_BASE, RISCV_GET_LVL_PGSIZE_BITS(0));
    paddr = ROUND_DOWN(KERNEL_ELF_PADDR_BASE, RISCV_GET_LVL_PGSIZE_BITS(0));

#if __riscv_xlen == 32
    kernel_root_pageTable[RISCV_GET_PT_INDEX(pptr, 0)] = pte_next(paddr, true);
    pptr += RISCV_GET_LVL_PGSIZE(0);
    paddr += RISCV_GET_LVL_PGSIZE(0);
#ifdef CONFIG_KERNEL_LOG_BUFFER
    kernel_root_pageTable[RISCV_GET_PT_INDEX(KS_LOG_PPTR, 0)] =
        pte_next(kpptr_to_paddr(kernel_image_level2_log_buffer_pt), false);
#endif
#else
    word_t index = 0;
    /* The kernel image is mapped twice: we locate the two indices in the root
     * page table and point both at the same second-level page table. */
    kernel_root_pageTable[RISCV_GET_PT_INDEX(KERNEL_ELF_PADDR_BASE + PPTR_BASE_OFFSET, 0)] =
        pte_next(kpptr_to_paddr(kernel_image_level2_pt), false);
    kernel_root_pageTable[RISCV_GET_PT_INDEX(pptr, 0)] =
        pte_next(kpptr_to_paddr(kernel_image_level2_pt), false);
    while (pptr < PPTR_TOP + RISCV_GET_LVL_PGSIZE(0)) {
        kernel_image_level2_pt[index] = pte_next(paddr, true);
        index++;
        pptr += RISCV_GET_LVL_PGSIZE(1);
        paddr += RISCV_GET_LVL_PGSIZE(1);
    }

    /* Map kernel device page table */
    kernel_root_pageTable[RISCV_GET_PT_INDEX(KDEV_BASE, 0)] =
        pte_next(kpptr_to_paddr(kernel_image_level2_dev_pt), false);
#endif

    /* There should be 1GiB free where we put device mapping */
    assert(pptr == UINTPTR_MAX - RISCV_GET_LVL_PGSIZE(0) + 1);
    map_kernel_devices();
}

BOOT_CODE void map_it_pt_cap(cap_t vspace_cap, cap_t pt_cap)
{
    lookupPTSlot_ret_t pt_ret;
    pte_t *targetSlot;
    vptr_t vptr = cap_page_table_cap_get_capPTMappedAddress(pt_cap);
    pte_t *lvl1pt = PTE_PTR(pptr_of_cap(vspace_cap));

    /* pt to be mapped */
    pte_t *pt = PTE_PTR(pptr_of_cap(pt_cap));

    /* Get PT slot to install the address in */
    pt_ret = lookupPTSlot(lvl1pt, vptr);

    targetSlot = pt_ret.ptSlot;

    *targetSlot = pte_new(
                      (addrFromPPtr(pt) >> seL4_PageBits),
                      0, /* sw */
                      1, /* dirty */
                      1, /* accessed */
                      0, /* global */
                      0, /* user */
                      0, /* execute */
                      0, /* write */
                      0, /* read */
                      1  /* valid */
                  );
    sfence();
}

BOOT_CODE void map_it_frame_cap(cap_t vspace_cap, cap_t frame_cap)
{
    pte_t *lvl1pt = PTE_PTR(pptr_of_cap(vspace_cap));
    pte_t *frame_pptr = PTE_PTR(pptr_of_cap(frame_cap));
    vptr_t frame_vptr = cap_frame_cap_get_capFMappedAddress(frame_cap);

    /* We deal with a frame as 4KiB */
    lookupPTSlot_ret_t lu_ret = lookupPTSlot(lvl1pt, frame_vptr);
    assert(lu_ret.ptBitsLeft == seL4_PageBits);

    pte_t *targetSlot = lu_ret.ptSlot;

    *targetSlot = pte_new(
                      (pptr_to_paddr(frame_pptr) >> seL4_PageBits),
                      0, /* sw */
                      1, /* dirty */
                      1, /* accessed */
                      0, /* global */
                      1, /* user */
                      1, /* execute */
                      1, /* write */
                      1, /* read */
                      1  /* valid */
                  );
    sfence();
}

BOOT_CODE cap_t create_unmapped_it_frame_cap(pptr_t pptr, bool_t use_large)
{
    cap_t cap = cap_frame_cap_new(
                    asidInvalid, /* capFMappedASID */
                    pptr,        /* capFBasePtr */
                    0,           /* capFSize */
                    0,           /* capFVMRights */
                    0,           /* capFIsDevice */
                    0            /* capFMappedAddress */
                );

    return cap;
}

/* Create a page table for the initial thread */
static BOOT_CODE cap_t create_it_pt_cap(cap_t vspace_cap, pptr_t pptr, vptr_t vptr, asid_t asid)
{
    cap_t cap;
    cap = cap_page_table_cap_new(
              asid, /* capPTMappedASID */
              pptr, /* capPTBasePtr */
              1,    /* capPTIsMapped */
              vptr  /* capPTMappedAddress */
          );

    map_it_pt_cap(vspace_cap, cap);
    return cap;
}

BOOT_CODE word_t arch_get_n_paging(v_region_t it_v_reg)
{
    word_t n = 0;
    for (int i = 0; i < CONFIG_PT_LEVELS - 1; i++) {
        n += get_n_paging(it_v_reg, RISCV_GET_LVL_PGSIZE_BITS(i));
    }
    return n;
}

/* Create an address space for the initial thread.
 * This includes the page directory and page tables */
BOOT_CODE cap_t create_it_address_space(cap_t root_cnode_cap, v_region_t it_v_reg)
{
    cap_t lvl1pt_cap;
    vptr_t pt_vptr;

    copyGlobalMappings(PTE_PTR(rootserver.vspace));

    lvl1pt_cap =
        cap_page_table_cap_new(
            IT_ASID,                    /* capPTMappedASID */
            (word_t) rootserver.vspace, /* capPTBasePtr */
            1,                          /* capPTIsMapped */
            (word_t) rootserver.vspace  /* capPTMappedAddress */
        );

    seL4_SlotPos slot_pos_before = ndks_boot.slot_pos_cur;
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapInitThreadVSpace), lvl1pt_cap);

    /* create all n-level PT caps necessary to cover the userland image in 4KiB pages */
    for (int i = 0; i < CONFIG_PT_LEVELS - 1; i++) {

        for (pt_vptr = ROUND_DOWN(it_v_reg.start, RISCV_GET_LVL_PGSIZE_BITS(i));
             pt_vptr < it_v_reg.end;
             pt_vptr += RISCV_GET_LVL_PGSIZE(i)) {
            if (!provide_cap(root_cnode_cap,
                             create_it_pt_cap(lvl1pt_cap, it_alloc_paging(), pt_vptr, IT_ASID))
               ) {
                return cap_null_cap_new();
            }
        }

    }

    seL4_SlotPos slot_pos_after = ndks_boot.slot_pos_cur;
    ndks_boot.bi_frame->userImagePaging = (seL4_SlotRegion) {
        slot_pos_before, slot_pos_after
    };

    return lvl1pt_cap;
}

BOOT_CODE void activate_kernel_vspace(void)
{
    setVSpaceRoot(kpptr_to_paddr(&kernel_root_pageTable), 0);
}

BOOT_CODE void write_it_asid_pool(cap_t it_ap_cap, cap_t it_lvl1pt_cap)
{
    asid_pool_t *ap = ASID_POOL_PTR(pptr_of_cap(it_ap_cap));
    ap->array[IT_ASID] = PTE_PTR(pptr_of_cap(it_lvl1pt_cap));
    riscvKSASIDTable[IT_ASID >> asidLowBits] = ap;
}

/* ==================== BOOT CODE FINISHES HERE ==================== */

static findVSpaceForASID_ret_t findVSpaceForASID(asid_t asid)
{
    findVSpaceForASID_ret_t ret;
    asid_pool_t *poolPtr;
    pte_t *vspace_root;

    poolPtr = riscvKSASIDTable[asid >> asidLowBits];
    if (!poolPtr) {
        current_lookup_fault = lookup_fault_invalid_root_new();

        ret.vspace_root = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    }

    vspace_root = poolPtr->array[asid & MASK(asidLowBits)];
    if (!vspace_root) {
        current_lookup_fault = lookup_fault_invalid_root_new();

        ret.vspace_root = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    }

    ret.vspace_root = vspace_root;
    ret.status = EXCEPTION_NONE;
    return ret;
}

void copyGlobalMappings(pte_t *newLvl1pt)
{
    unsigned long i;
    pte_t *global_kernel_vspace = kernel_root_pageTable;

    for (i = RISCV_GET_PT_INDEX(PPTR_BASE, 0); i < BIT(PT_INDEX_BITS); i++) {
        newLvl1pt[i] = global_kernel_vspace[i];
    }
}
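
/* Every VSpace shares the kernel's upper mappings: copyGlobalMappings copies
 * the root-level entries from the index of PPTR_BASE upwards into a new
 * top-level page table, so the kernel window is present and identical in all
 * address spaces. */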

word_t *PURE lookupIPCBuffer(bool_t isReceiver, tcb_t *thread)
{
    word_t w_bufferPtr;
    cap_t bufferCap;
    vm_rights_t vm_rights;

    w_bufferPtr = thread->tcbIPCBuffer;
    bufferCap = TCB_PTR_CTE_PTR(thread, tcbBuffer)->cap;

    if (unlikely(cap_get_capType(bufferCap) != cap_frame_cap)) {
        return NULL;
    }
    if (unlikely(cap_frame_cap_get_capFIsDevice(bufferCap))) {
        return NULL;
    }

    vm_rights = cap_frame_cap_get_capFVMRights(bufferCap);
    if (likely(vm_rights == VMReadWrite ||
               (!isReceiver && vm_rights == VMReadOnly))) {
        word_t basePtr, pageBits;

        basePtr = cap_frame_cap_get_capFBasePtr(bufferCap);
        pageBits = pageBitsForSize(cap_frame_cap_get_capFSize(bufferCap));
        return (word_t *)(basePtr + (w_bufferPtr & MASK(pageBits)));
    } else {
        return NULL;
    }
}

static inline pte_t *getPPtrFromHWPTE(pte_t *pte)
{
    return PTE_PTR(ptrFromPAddr(pte_ptr_get_ppn(pte) << seL4_PageTableBits));
}

lookupPTSlot_ret_t lookupPTSlot(pte_t *lvl1pt, vptr_t vptr)
{
    lookupPTSlot_ret_t ret;

    word_t level = CONFIG_PT_LEVELS - 1;
    pte_t *pt = lvl1pt;

    /* this is how many bits we potentially have left to decode. Initially we have the
     * full address space to decode, and every time we walk this will be reduced. The
     * final value of this after the walk is the size of the frame that can be inserted,
     * or already exists, in ret.ptSlot. The following formulation is an invariant of
     * the loop: */
    ret.ptBitsLeft = PT_INDEX_BITS * level + seL4_PageBits;
    ret.ptSlot = pt + ((vptr >> ret.ptBitsLeft) & MASK(PT_INDEX_BITS));

    while (isPTEPageTable(ret.ptSlot) && likely(0 < level)) {
        level--;
        ret.ptBitsLeft -= PT_INDEX_BITS;
        pt = getPPtrFromHWPTE(ret.ptSlot);
        ret.ptSlot = pt + ((vptr >> ret.ptBitsLeft) & MASK(PT_INDEX_BITS));
    }

    return ret;
}
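
/* Worked example of the walk above (Sv39: CONFIG_PT_LEVELS == 3,
 * PT_INDEX_BITS == 9, seL4_PageBits == 12): initially
 * ptBitsLeft = 9 * 2 + 12 = 30, so the root slot is indexed by vptr bits
 * [38:30]. Each step down a level subtracts 9: after one step 21 bits remain
 * (a 2MiB mapping could sit here), after two steps 12 (a 4KiB mapping).
 * Callers compare ptBitsLeft with pageBitsForSize() to check for a size
 * match. */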

exception_t handleVMFault(tcb_t *thread, vm_fault_type_t vm_faultType)
{
    uint64_t addr;

    addr = read_stval();

    switch (vm_faultType) {
    case RISCVLoadPageFault:
    case RISCVLoadAccessFault:
        current_fault = seL4_Fault_VMFault_new(addr, RISCVLoadAccessFault, false);
        return EXCEPTION_FAULT;
    case RISCVStorePageFault:
    case RISCVStoreAccessFault:
        current_fault = seL4_Fault_VMFault_new(addr, RISCVStoreAccessFault, false);
        return EXCEPTION_FAULT;
    case RISCVInstructionPageFault:
    case RISCVInstructionAccessFault:
        current_fault = seL4_Fault_VMFault_new(addr, RISCVInstructionAccessFault, true);
        return EXCEPTION_FAULT;

    default:
        fail("Invalid VM fault type");
    }
}

void deleteASIDPool(asid_t asid_base, asid_pool_t *pool)
{
    /* Haskell error: "ASID pool's base must be aligned" */
    assert(IS_ALIGNED(asid_base, asidLowBits));

    if (riscvKSASIDTable[asid_base >> asidLowBits] == pool) {
        riscvKSASIDTable[asid_base >> asidLowBits] = NULL;
        setVMRoot(NODE_STATE(ksCurThread));
    }
}

static exception_t performASIDControlInvocation(void *frame, cte_t *slot, cte_t *parent, asid_t asid_base)
{
    /** AUXUPD: "(True, typ_region_bytes (ptr_val \<acute>frame) 12)" */
    /** GHOSTUPD: "(True, gs_clear_region (ptr_val \<acute>frame) 12)" */
    cap_untyped_cap_ptr_set_capFreeIndex(&(parent->cap),
                                         MAX_FREE_INDEX(cap_untyped_cap_get_capBlockSize(parent->cap)));

    memzero(frame, BIT(pageBitsForSize(RISCV_4K_Page)));
    /** AUXUPD: "(True, ptr_retyps 1 (Ptr (ptr_val \<acute>frame) :: asid_pool_C ptr))" */

    cteInsert(
        cap_asid_pool_cap_new(
            asid_base,      /* capASIDBase */
            WORD_REF(frame) /* capASIDPool */
        ),
        parent,
        slot
    );
    /* Haskell error: "ASID pool's base must be aligned" */
    assert((asid_base & MASK(asidLowBits)) == 0);
    riscvKSASIDTable[asid_base >> asidLowBits] = (asid_pool_t *)frame;

    return EXCEPTION_NONE;
}

static exception_t performASIDPoolInvocation(asid_t asid, asid_pool_t *poolPtr, cte_t *vspaceCapSlot)
{
    cap_t cap = vspaceCapSlot->cap;
    pte_t *regionBase = PTE_PTR(cap_page_table_cap_get_capPTBasePtr(cap));
    cap = cap_page_table_cap_set_capPTMappedASID(cap, asid);
    cap = cap_page_table_cap_set_capPTMappedAddress(cap, 0);
    cap = cap_page_table_cap_set_capPTIsMapped(cap, 1);
    vspaceCapSlot->cap = cap;

    copyGlobalMappings(regionBase);

    poolPtr->array[asid & MASK(asidLowBits)] = regionBase;

    return EXCEPTION_NONE;
}

void deleteASID(asid_t asid, pte_t *vspace)
{
    asid_pool_t *poolPtr;

    poolPtr = riscvKSASIDTable[asid >> asidLowBits];
    if (poolPtr != NULL && poolPtr->array[asid & MASK(asidLowBits)] == vspace) {
        hwASIDFlush(asid);
        poolPtr->array[asid & MASK(asidLowBits)] = NULL;
        setVMRoot(NODE_STATE(ksCurThread));
    }
}

void unmapPageTable(asid_t asid, vptr_t vptr, pte_t *target_pt)
{
    findVSpaceForASID_ret_t find_ret = findVSpaceForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE)) {
        /* nothing to do */
        return;
    }
    /* We won't ever unmap a top level page table */
    assert(find_ret.vspace_root != target_pt);
    pte_t *ptSlot = NULL;
    pte_t *pt = find_ret.vspace_root;

    for (word_t i = 0; i < CONFIG_PT_LEVELS - 1 && pt != target_pt; i++) {
        ptSlot = pt + RISCV_GET_PT_INDEX(vptr, i);
        if (unlikely(!isPTEPageTable(ptSlot))) {
            /* couldn't find it */
            return;
        }
        pt = getPPtrFromHWPTE(ptSlot);
    }

    if (pt != target_pt) {
        /* didn't find it */
        return;
    }
    /* If we found a pt then ptSlot won't be null */
    assert(ptSlot != NULL);
    *ptSlot = pte_new(
                  0, /* phy_address */
                  0, /* sw */
                  0, /* dirty */
                  0, /* accessed */
                  0, /* global */
                  0, /* user */
                  0, /* execute */
                  0, /* write */
                  0, /* read */
                  0  /* valid */
              );
    sfence();
}

static pte_t pte_pte_invalid_new(void)
{
    return (pte_t) {
        0
    };
}

void unmapPage(vm_page_size_t page_size, asid_t asid, vptr_t vptr, pptr_t pptr)
{
    findVSpaceForASID_ret_t find_ret;
    lookupPTSlot_ret_t lu_ret;

    find_ret = findVSpaceForASID(asid);
    if (find_ret.status != EXCEPTION_NONE) {
        return;
    }

    lu_ret = lookupPTSlot(find_ret.vspace_root, vptr);
    if (unlikely(lu_ret.ptBitsLeft != pageBitsForSize(page_size))) {
        return;
    }
    if (!pte_ptr_get_valid(lu_ret.ptSlot) || isPTEPageTable(lu_ret.ptSlot)
        || (pte_ptr_get_ppn(lu_ret.ptSlot) << seL4_PageBits) != pptr_to_paddr((void *)pptr)) {
        return;
    }

    lu_ret.ptSlot[0] = pte_pte_invalid_new();
    sfence();
}

void setVMRoot(tcb_t *tcb)
{
    cap_t threadRoot;
    asid_t asid;
    pte_t *lvl1pt;
    findVSpaceForASID_ret_t find_ret;

    threadRoot = TCB_PTR_CTE_PTR(tcb, tcbVTable)->cap;

    if (cap_get_capType(threadRoot) != cap_page_table_cap) {
        setVSpaceRoot(kpptr_to_paddr(&kernel_root_pageTable), 0);
        return;
    }

    lvl1pt = PTE_PTR(cap_page_table_cap_get_capPTBasePtr(threadRoot));

    asid = cap_page_table_cap_get_capPTMappedASID(threadRoot);
    find_ret = findVSpaceForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE || find_ret.vspace_root != lvl1pt)) {
        setVSpaceRoot(kpptr_to_paddr(&kernel_root_pageTable), 0);
        return;
    }

    setVSpaceRoot(addrFromPPtr(lvl1pt), asid);
}
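
/* Note on setVMRoot: if the thread's VSpace root is stale (not a page-table
 * cap, or the ASID no longer resolves to this root), the kernel falls back to
 * installing its own root page table with ASID 0, which contains only kernel
 * mappings, so execution can continue safely until the thread is given a
 * valid VSpace. */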

bool_t CONST isValidVTableRoot(cap_t cap)
{
    return (cap_get_capType(cap) == cap_page_table_cap &&
            cap_page_table_cap_get_capPTIsMapped(cap));
}

exception_t checkValidIPCBuffer(vptr_t vptr, cap_t cap)
{
    if (unlikely(cap_get_capType(cap) != cap_frame_cap)) {
        userError("Requested IPC Buffer is not a frame cap.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(cap_frame_cap_get_capFIsDevice(cap))) {
        userError("Specifying a device frame as an IPC buffer is not permitted.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(!IS_ALIGNED(vptr, seL4_IPCBufferSizeBits))) {
        userError("Requested IPC Buffer location 0x%x is not aligned.",
                  (int)vptr);
        current_syscall_error.type = seL4_AlignmentError;
        return EXCEPTION_SYSCALL_ERROR;
    }

    return EXCEPTION_NONE;
}

vm_rights_t CONST maskVMRights(vm_rights_t vm_rights, seL4_CapRights_t cap_rights_mask)
{
    if (vm_rights == VMReadOnly && seL4_CapRights_get_capAllowRead(cap_rights_mask)) {
        return VMReadOnly;
    }
    if (vm_rights == VMReadWrite && seL4_CapRights_get_capAllowRead(cap_rights_mask)) {
        if (!seL4_CapRights_get_capAllowWrite(cap_rights_mask)) {
            return VMReadOnly;
        } else {
            return VMReadWrite;
        }
    }
    return VMKernelOnly;
}

/* The rest of the file implements the RISCV object invocations */

static pte_t CONST makeUserPTE(paddr_t paddr, bool_t executable, vm_rights_t vm_rights)
{
    word_t write = RISCVGetWriteFromVMRights(vm_rights);
    word_t read = RISCVGetReadFromVMRights(vm_rights);
    if (unlikely(!read && !write && !executable)) {
        return pte_pte_invalid_new();
    } else {
        return pte_new(
                   paddr >> seL4_PageBits,
                   0,          /* sw */
                   1,          /* dirty */
                   1,          /* accessed */
                   0,          /* global */
                   1,          /* user */
                   executable, /* execute */
                   write,      /* write */
                   read,       /* read */
                   1           /* valid */
               );
    }
}

static inline bool_t CONST checkVPAlignment(vm_page_size_t sz, word_t w)
{
    return (w & MASK(pageBitsForSize(sz))) == 0;
}
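
/* For example, on Sv39 a RISCV_Mega_Page has pageBitsForSize() == 21, so
 * checkVPAlignment accepts only virtual addresses whose low 21 bits are
 * zero. */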

static exception_t decodeRISCVPageTableInvocation(word_t label, word_t length,
                                                  cte_t *cte, cap_t cap, extra_caps_t extraCaps,
                                                  word_t *buffer)
{
    if (label == RISCVPageTableUnmap) {
        if (unlikely(!isFinalCapability(cte))) {
            userError("RISCVPageTableUnmap: cannot unmap if more than one cap exists");
            current_syscall_error.type = seL4_RevokeFirst;
            return EXCEPTION_SYSCALL_ERROR;
        }
        /* Ensure that if the page table is mapped, it is not a top level table */
        if (likely(cap_page_table_cap_get_capPTIsMapped(cap))) {
            asid_t asid = cap_page_table_cap_get_capPTMappedASID(cap);
            findVSpaceForASID_ret_t find_ret = findVSpaceForASID(asid);
            pte_t *pte = PTE_PTR(cap_page_table_cap_get_capPTBasePtr(cap));
            if (unlikely(find_ret.status == EXCEPTION_NONE &&
                         find_ret.vspace_root == pte)) {
                userError("RISCVPageTableUnmap: cannot call unmap on top level PageTable");
                current_syscall_error.type = seL4_RevokeFirst;
                return EXCEPTION_SYSCALL_ERROR;
            }
        }

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPageTableInvocationUnmap(cap, cte);
    }

    if (unlikely((label != RISCVPageTableMap))) {
        userError("RISCVPageTable: Illegal Operation");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(length < 2 || extraCaps.excaprefs[0] == NULL)) {
        userError("RISCVPageTable: truncated message");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }
    if (unlikely(cap_page_table_cap_get_capPTIsMapped(cap))) {
        userError("RISCVPageTable: PageTable is already mapped.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    word_t vaddr = getSyscallArg(0, buffer);
    cap_t lvl1ptCap = extraCaps.excaprefs[0]->cap;

    if (unlikely(cap_get_capType(lvl1ptCap) != cap_page_table_cap ||
                 cap_page_table_cap_get_capPTIsMapped(lvl1ptCap) == asidInvalid)) {
        userError("RISCVPageTableMap: Invalid top-level PageTable.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;

        return EXCEPTION_SYSCALL_ERROR;
    }

    pte_t *lvl1pt = PTE_PTR(cap_page_table_cap_get_capPTBasePtr(lvl1ptCap));
    asid_t asid = cap_page_table_cap_get_capPTMappedASID(lvl1ptCap);

    if (unlikely(vaddr >= USER_TOP)) {
        userError("RISCVPageTableMap: Virtual address cannot be in kernel window.");
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 0;

        return EXCEPTION_SYSCALL_ERROR;
    }

    findVSpaceForASID_ret_t find_ret = findVSpaceForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE)) {
        userError("RISCVPageTableMap: ASID lookup failed");
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(find_ret.vspace_root != lvl1pt)) {
        userError("RISCVPageTableMap: ASID lookup failed");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    lookupPTSlot_ret_t lu_ret = lookupPTSlot(lvl1pt, vaddr);

    /* if there is already something mapped (valid is set) or we have traversed far enough
     * that a page table is not valid to map then tell the user that they have to delete
     * something before they can put a PT here */
    if (lu_ret.ptBitsLeft == seL4_PageBits || pte_ptr_get_valid(lu_ret.ptSlot)) {
        userError("RISCVPageTableMap: All objects mapped at this address");
        current_syscall_error.type = seL4_DeleteFirst;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* Get the slot to install the PT in */
    pte_t *ptSlot = lu_ret.ptSlot;

    paddr_t paddr = addrFromPPtr(
                        PTE_PTR(cap_page_table_cap_get_capPTBasePtr(cap)));
    pte_t pte = pte_new((paddr >> seL4_PageBits),
                        0, /* sw */
                        1, /* dirty */
                        1, /* accessed */
                        0, /* global */
                        0, /* user */
                        0, /* execute */
                        0, /* write */
                        0, /* read */
                        1  /* valid */
                       );

    cap = cap_page_table_cap_set_capPTIsMapped(cap, 1);
    cap = cap_page_table_cap_set_capPTMappedASID(cap, asid);
    cap = cap_page_table_cap_set_capPTMappedAddress(cap, (vaddr & ~MASK(lu_ret.ptBitsLeft)));

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return performPageTableInvocationMap(cap, cte, pte, ptSlot);
}
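
/* RISCVPageTableMap expects two message registers (the virtual address and a
 * vm_attributes word, the latter currently unused on this path) plus the
 * top-level page-table cap in extraCaps[0]; RISCVPageTableUnmap takes no
 * arguments. Frame invocations are decoded below. */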

static exception_t decodeRISCVFrameInvocation(word_t label, word_t length,
                                              cte_t *cte, cap_t cap, extra_caps_t extraCaps,
                                              word_t *buffer)
{
    switch (label) {
    case RISCVPageMap: {
        if (unlikely(length < 3 || extraCaps.excaprefs[0] == NULL)) {
            userError("RISCVPageMap: Truncated message.");
            current_syscall_error.type = seL4_TruncatedMessage;
            return EXCEPTION_SYSCALL_ERROR;
        }

        word_t vaddr = getSyscallArg(0, buffer);
        word_t w_rightsMask = getSyscallArg(1, buffer);
        vm_attributes_t attr = vmAttributesFromWord(getSyscallArg(2, buffer));
        cap_t lvl1ptCap = extraCaps.excaprefs[0]->cap;

        vm_page_size_t frameSize = cap_frame_cap_get_capFSize(cap);
        vm_rights_t capVMRights = cap_frame_cap_get_capFVMRights(cap);

        if (unlikely(cap_get_capType(lvl1ptCap) != cap_page_table_cap ||
                     !cap_page_table_cap_get_capPTIsMapped(lvl1ptCap))) {
            userError("RISCVPageMap: Bad PageTable cap.");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;
            return EXCEPTION_SYSCALL_ERROR;
        }

        pte_t *lvl1pt = PTE_PTR(cap_page_table_cap_get_capPTBasePtr(lvl1ptCap));
        asid_t asid = cap_page_table_cap_get_capPTMappedASID(lvl1ptCap);

        findVSpaceForASID_ret_t find_ret = findVSpaceForASID(asid);
        if (unlikely(find_ret.status != EXCEPTION_NONE)) {
            userError("RISCVPageMap: No PageTable for ASID");
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(find_ret.vspace_root != lvl1pt)) {
            userError("RISCVPageMap: ASID lookup failed");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* check the vaddr is valid */
        word_t vtop = vaddr + BIT(pageBitsForSize(frameSize)) - 1;
        if (unlikely(vtop >= USER_TOP)) {
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 0;
            return EXCEPTION_SYSCALL_ERROR;
        }
        if (unlikely(!checkVPAlignment(frameSize, vaddr))) {
            current_syscall_error.type = seL4_AlignmentError;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Check if this page is already mapped */
        lookupPTSlot_ret_t lu_ret = lookupPTSlot(lvl1pt, vaddr);
        if (unlikely(lu_ret.ptBitsLeft != pageBitsForSize(frameSize))) {
            current_lookup_fault = lookup_fault_missing_capability_new(lu_ret.ptBitsLeft);
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            return EXCEPTION_SYSCALL_ERROR;
        }

        asid_t frame_asid = cap_frame_cap_get_capFMappedASID(cap);
        if (unlikely(frame_asid != asidInvalid)) {
            /* this frame is already mapped */
            if (frame_asid != asid) {
                userError("RISCVPageMap: Attempting to remap a frame that does not belong to the passed address space");
                current_syscall_error.type = seL4_InvalidCapability;
                current_syscall_error.invalidCapNumber = 1;
                return EXCEPTION_SYSCALL_ERROR;
            }
            word_t mapped_vaddr = cap_frame_cap_get_capFMappedAddress(cap);
            if (unlikely(mapped_vaddr != vaddr)) {
                userError("RISCVPageMap: attempting to map frame into multiple addresses");
                current_syscall_error.type = seL4_InvalidArgument;
                current_syscall_error.invalidArgumentNumber = 0;
                return EXCEPTION_SYSCALL_ERROR;
            }
            /* this check is redundant, as lookupPTSlot does not stop on a page
             * table PTE */
            if (unlikely(isPTEPageTable(lu_ret.ptSlot))) {
                userError("RISCVPageMap: no mapping to remap.");
                current_syscall_error.type = seL4_DeleteFirst;
                return EXCEPTION_SYSCALL_ERROR;
            }
        } else {
            /* check this vaddr isn't already mapped */
            if (unlikely(pte_ptr_get_valid(lu_ret.ptSlot))) {
                userError("Virtual address already mapped");
                current_syscall_error.type = seL4_DeleteFirst;
                return EXCEPTION_SYSCALL_ERROR;
            }
        }
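
        /* At this point the slot is suitable: the rights requested by the
         * caller are intersected with the rights on the frame cap itself,
         * e.g. a VMReadWrite cap masked with a read-only rights word yields a
         * VMReadOnly mapping (see maskVMRights above). */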
        vm_rights_t vmRights = maskVMRights(capVMRights, rightsFromWord(w_rightsMask));
        paddr_t frame_paddr = addrFromPPtr((void *) cap_frame_cap_get_capFBasePtr(cap));
        cap = cap_frame_cap_set_capFMappedASID(cap, asid);
        cap = cap_frame_cap_set_capFMappedAddress(cap, vaddr);

        bool_t executable = !vm_attributes_get_riscvExecuteNever(attr);
        pte_t pte = makeUserPTE(frame_paddr, executable, vmRights);
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPageInvocationMapPTE(cap, cte, pte, lu_ret.ptSlot);
    }

    case RISCVPageUnmap: {
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPageInvocationUnmap(cap, cte);
    }

    case RISCVPageGetAddress: {

        /* Check that there are enough message registers */
        assert(n_msgRegisters >= 1);

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPageGetAddress((void *)cap_frame_cap_get_capFBasePtr(cap));
    }

    default:
        userError("RISCVPage: Illegal operation.");
        current_syscall_error.type = seL4_IllegalOperation;

        return EXCEPTION_SYSCALL_ERROR;
    }

}

exception_t decodeRISCVMMUInvocation(word_t label, word_t length, cptr_t cptr,
                                     cte_t *cte, cap_t cap, extra_caps_t extraCaps,
                                     word_t *buffer)
{
    switch (cap_get_capType(cap)) {

    case cap_page_table_cap:
        return decodeRISCVPageTableInvocation(label, length, cte, cap, extraCaps, buffer);

    case cap_frame_cap:
        return decodeRISCVFrameInvocation(label, length, cte, cap, extraCaps, buffer);

    case cap_asid_control_cap: {
        word_t i;
        asid_t asid_base;
        word_t index;
        word_t depth;
        cap_t untyped;
        cap_t root;
        cte_t *parentSlot;
        cte_t *destSlot;
        lookupSlot_ret_t lu_ret;
        void *frame;
        exception_t status;

        if (label != RISCVASIDControlMakePool) {
            current_syscall_error.type = seL4_IllegalOperation;

            return EXCEPTION_SYSCALL_ERROR;
        }

        if (length < 2 || extraCaps.excaprefs[0] == NULL
            || extraCaps.excaprefs[1] == NULL) {
            current_syscall_error.type = seL4_TruncatedMessage;
            return EXCEPTION_SYSCALL_ERROR;
        }

        index = getSyscallArg(0, buffer);
        depth = getSyscallArg(1, buffer);
        parentSlot = extraCaps.excaprefs[0];
        untyped = parentSlot->cap;
        root = extraCaps.excaprefs[1]->cap;

        /* Find first free pool */
        for (i = 0; i < nASIDPools && riscvKSASIDTable[i]; i++);

        if (i == nASIDPools) {
            /* no unallocated pool is found */
            current_syscall_error.type = seL4_DeleteFirst;

            return EXCEPTION_SYSCALL_ERROR;
        }

        asid_base = i << asidLowBits;

        if (cap_get_capType(untyped) != cap_untyped_cap ||
            cap_untyped_cap_get_capBlockSize(untyped) != seL4_ASIDPoolBits ||
            cap_untyped_cap_get_capIsDevice(untyped)) {
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;

            return EXCEPTION_SYSCALL_ERROR;
        }

        status = ensureNoChildren(parentSlot);
        if (status != EXCEPTION_NONE) {
            return status;
        }

        frame = WORD_PTR(cap_untyped_cap_get_capPtr(untyped));

        lu_ret = lookupTargetSlot(root, index, depth);
        if (lu_ret.status != EXCEPTION_NONE) {
            return lu_ret.status;
        }
        destSlot = lu_ret.slot;

        status = ensureEmptySlot(destSlot);
        if (status != EXCEPTION_NONE) {
            return status;
        }

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performASIDControlInvocation(frame, destSlot, parentSlot, asid_base);
    }
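
    /* ASIDs are managed in two levels: riscvKSASIDTable holds up to
     * nASIDPools pools indexed by the ASID's high bits, and each pool covers
     * BIT(asidLowBits) ASIDs. MakePool (above) retypes an untyped of size
     * seL4_ASIDPoolBits into a pool for the first free high-bits slot; Assign
     * (below) picks the first free ASID within one pool, skipping ASID 0,
     * which is reserved as asidInvalid. */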
    case cap_asid_pool_cap: {
        cap_t vspaceCap;
        cte_t *vspaceCapSlot;
        asid_pool_t *pool;
        word_t i;
        asid_t asid;

        if (label != RISCVASIDPoolAssign) {
            current_syscall_error.type = seL4_IllegalOperation;

            return EXCEPTION_SYSCALL_ERROR;
        }
        if (extraCaps.excaprefs[0] == NULL) {
            current_syscall_error.type = seL4_TruncatedMessage;

            return EXCEPTION_SYSCALL_ERROR;
        }

        vspaceCapSlot = extraCaps.excaprefs[0];
        vspaceCap = vspaceCapSlot->cap;

        if (unlikely(
                cap_get_capType(vspaceCap) != cap_page_table_cap ||
                cap_page_table_cap_get_capPTIsMapped(vspaceCap))) {
            userError("RISCVASIDPool: Invalid vspace root.");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;

            return EXCEPTION_SYSCALL_ERROR;
        }

        pool = riscvKSASIDTable[cap_asid_pool_cap_get_capASIDBase(cap) >> asidLowBits];
        if (!pool) {
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            current_lookup_fault = lookup_fault_invalid_root_new();
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (pool != ASID_POOL_PTR(cap_asid_pool_cap_get_capASIDPool(cap))) {
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 0;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Find first free ASID */
        asid = cap_asid_pool_cap_get_capASIDBase(cap);
        for (i = 0; i < BIT(asidLowBits) && (asid + i == 0 || pool->array[i]); i++);

        if (i == BIT(asidLowBits)) {
            current_syscall_error.type = seL4_DeleteFirst;

            return EXCEPTION_SYSCALL_ERROR;
        }

        asid += i;

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performASIDPoolInvocation(asid, pool, vspaceCapSlot);
    }
    default:
        fail("Invalid arch cap type");
    }
}

exception_t performPageTableInvocationMap(cap_t cap, cte_t *ctSlot,
                                          pte_t pte, pte_t *ptSlot)
{
    ctSlot->cap = cap;
    *ptSlot = pte;
    sfence();

    return EXCEPTION_NONE;
}

exception_t performPageTableInvocationUnmap(cap_t cap, cte_t *ctSlot)
{
    if (cap_page_table_cap_get_capPTIsMapped(cap)) {
        pte_t *pt = PTE_PTR(cap_page_table_cap_get_capPTBasePtr(cap));
        unmapPageTable(
            cap_page_table_cap_get_capPTMappedASID(cap),
            cap_page_table_cap_get_capPTMappedAddress(cap),
            pt
        );
        clearMemory((void *)pt, seL4_PageTableBits);
    }
    cap_page_table_cap_ptr_set_capPTIsMapped(&(ctSlot->cap), 0);

    return EXCEPTION_NONE;
}
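
/* Note: performPageTableInvocationUnmap zeroes the page table with
 * clearMemory after removing it from the VSpace, so no valid entries survive
 * in the object if it is later reused. */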

static exception_t performPageGetAddress(void *vbase_ptr)
{
    paddr_t capFBasePtr;

    /* Get the physical address of this frame. */
    capFBasePtr = addrFromPPtr(vbase_ptr);

    /* return it in the first message register */
    setRegister(NODE_STATE(ksCurThread), msgRegisters[0], capFBasePtr);
    setRegister(NODE_STATE(ksCurThread), msgInfoRegister,
                wordFromMessageInfo(seL4_MessageInfo_new(0, 0, 0, 1)));

    return EXCEPTION_NONE;
}

static exception_t updatePTE(pte_t pte, pte_t *base)
{
    *base = pte;
    sfence();
    return EXCEPTION_NONE;
}

exception_t performPageInvocationMapPTE(cap_t cap, cte_t *ctSlot,
                                        pte_t pte, pte_t *base)
{
    ctSlot->cap = cap;
    return updatePTE(pte, base);
}

exception_t performPageInvocationUnmap(cap_t cap, cte_t *ctSlot)
{
    if (cap_frame_cap_get_capFMappedASID(cap) != asidInvalid) {
        unmapPage(cap_frame_cap_get_capFSize(cap),
                  cap_frame_cap_get_capFMappedASID(cap),
                  cap_frame_cap_get_capFMappedAddress(cap),
                  cap_frame_cap_get_capFBasePtr(cap)
                 );
    }

    cap_t slotCap = ctSlot->cap;
    slotCap = cap_frame_cap_set_capFMappedAddress(slotCap, 0);
    slotCap = cap_frame_cap_set_capFMappedASID(slotCap, asidInvalid);
    ctSlot->cap = slotCap;

    return EXCEPTION_NONE;
}

#ifdef CONFIG_PRINTING
void Arch_userStackTrace(tcb_t *tptr)
{
    cap_t threadRoot = TCB_PTR_CTE_PTR(tptr, tcbVTable)->cap;
    if (!isValidVTableRoot(threadRoot)) {
        printf("Invalid vspace\n");
        return;
    }

    word_t sp = getRegister(tptr, SP);
    if (!IS_ALIGNED(sp, seL4_WordSizeBits)) {
        printf("SP %p not aligned", (void *) sp);
        return;
    }

    pte_t *vspace_root = PTE_PTR(pptr_of_cap(threadRoot));
    for (int i = 0; i < CONFIG_USER_STACK_TRACE_LENGTH; i++) {
        word_t address = sp + (i * sizeof(word_t));
        lookupPTSlot_ret_t ret = lookupPTSlot(vspace_root, address);
        if (pte_ptr_get_valid(ret.ptSlot) && !isPTEPageTable(ret.ptSlot)) {
            pptr_t pptr = (pptr_t)(getPPtrFromHWPTE(ret.ptSlot));
            word_t *value = (word_t *)((word_t)pptr + (address & MASK(ret.ptBitsLeft)));
            printf("0x%lx: 0x%lx\n", (long) address, (long) *value);
        } else {
            printf("0x%lx: INVALID\n", (long) address);
        }
    }
}
#endif
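
/* With CONFIG_KERNEL_LOG_BUFFER enabled, user level can donate a large-page
 * frame that the kernel maps at KS_LOG_PPTR for benchmark logging; the
 * mapping is established below. */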
#ifdef CONFIG_KERNEL_LOG_BUFFER
exception_t benchmark_arch_map_logBuffer(word_t frame_cptr)
{
    lookupCapAndSlot_ret_t lu_ret;
    vm_page_size_t frameSize;
    pptr_t frame_pptr;

    /* faulting section */
    lu_ret = lookupCapAndSlot(NODE_STATE(ksCurThread), frame_cptr);

    if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
        userError("Invalid cap #%lu.", frame_cptr);
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);

        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cap_get_capType(lu_ret.cap) != cap_frame_cap) {
        userError("Invalid cap. The log buffer must be backed by a frame cap.");
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);

        return EXCEPTION_SYSCALL_ERROR;
    }

    frameSize = cap_frame_cap_get_capFSize(lu_ret.cap);

    if (frameSize != RISCV_Mega_Page) {
        userError("Invalid frame size. The kernel expects a large-page log buffer.");
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);

        return EXCEPTION_SYSCALL_ERROR;
    }

    frame_pptr = cap_frame_cap_get_capFBasePtr(lu_ret.cap);

    ksUserLogBuffer = pptr_to_paddr((void *) frame_pptr);

#if __riscv_xlen == 32
    paddr_t physical_address = ksUserLogBuffer;
    for (word_t i = 0; i < BIT(PT_INDEX_BITS); i += 1) {
        kernel_image_level2_log_buffer_pt[i] = pte_next(physical_address, true);
        physical_address += BIT(PAGE_BITS);
    }
    assert(physical_address - ksUserLogBuffer == BIT(seL4_LargePageBits));
#else
    kernel_image_level2_dev_pt[RISCV_GET_PT_INDEX(KS_LOG_PPTR, 1)] = pte_next(ksUserLogBuffer, true);
#endif

    sfence();

    return EXCEPTION_NONE;
}
#endif /* CONFIG_KERNEL_LOG_BUFFER */