/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

/*
 * Architecture-specific (ARM/AArch32) object-type operations: cap derivation,
 * rights masking, finalisation, region/object identity, object sizes and
 * retyping. Mirrors the Haskell specification (Arch.ObjectType); comments
 * below note the places where the C implementation deliberately deviates.
 */

#include <config.h>
#include <types.h>
#include <api/failures.h>
#include <kernel/vspace.h>
#include <object/structures.h>
#include <arch/machine.h>
#include <arch/model/statedata.h>
#include <arch/object/objecttype.h>
#include <arch/machine/tlb.h>
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
#include <arch/object/vcpu.h>
#endif

/* Return true iff the given object type is one of the ARM frame (page)
 * object types, i.e. an object backing user-mappable memory. */
bool_t Arch_isFrameType(word_t type)
{
    switch (type) {
    case seL4_ARM_SmallPageObject:
        return true;
    case seL4_ARM_LargePageObject:
        return true;
    case seL4_ARM_SectionObject:
        return true;
    case seL4_ARM_SuperSectionObject:
        return true;
    default:
        return false;
    }
}

/* Compute the cap that is placed in a destination slot when an arch cap is
 * copied/derived:
 *  - page table / page directory caps may only be derived once mapped
 *    (i.e. with an assigned ASID); otherwise a syscall error is raised,
 *  - frame caps are derived with their mapping (ASID) stripped, so the
 *    derived copy is an unmapped frame cap,
 *  - the remaining arch caps derive to themselves unchanged.
 */
deriveCap_ret_t Arch_deriveCap(cte_t *slot, cap_t cap)
{
    deriveCap_ret_t ret;

    switch (cap_get_capType(cap)) {
    case cap_page_table_cap:
        if (cap_page_table_cap_get_capPTIsMapped(cap)) {
            ret.cap = cap;
            ret.status = EXCEPTION_NONE;
        } else {
            userError("Deriving an unmapped PT cap");
            current_syscall_error.type = seL4_IllegalOperation;
            ret.cap = cap_null_cap_new();
            ret.status = EXCEPTION_SYSCALL_ERROR;
        }
        return ret;

    case cap_page_directory_cap:
        if (cap_page_directory_cap_get_capPDIsMapped(cap)) {
            ret.cap = cap;
            ret.status = EXCEPTION_NONE;
        } else {
            userError("Deriving a PD cap without an assigned ASID");
            current_syscall_error.type = seL4_IllegalOperation;
            ret.cap = cap_null_cap_new();
            ret.status = EXCEPTION_SYSCALL_ERROR;
        }
        return ret;

    /* This is a deviation from haskell, which has only
     * one frame cap type on ARM */
    case cap_small_frame_cap:
        /* Clearing the mapped ASID marks the derived copy as unmapped. */
        ret.cap = cap_small_frame_cap_set_capFMappedASID(cap, asidInvalid);
        ret.status = EXCEPTION_NONE;
        return ret;

    case cap_frame_cap:
        ret.cap = cap_frame_cap_set_capFMappedASID(cap, asidInvalid);
        ret.status = EXCEPTION_NONE;
        return ret;

    case cap_asid_control_cap:
    case cap_asid_pool_cap:
        ret.cap = cap;
        ret.status = EXCEPTION_NONE;
        return ret;

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    case cap_vcpu_cap:
        ret.cap = cap;
        ret.status = EXCEPTION_NONE;
        return ret;
#endif

#ifdef CONFIG_TK1_SMMU
    case cap_io_space_cap:
        ret.cap = cap;
        ret.status = EXCEPTION_NONE;
        return ret;

    case cap_io_page_table_cap:
        if (cap_io_page_table_cap_get_capIOPTIsMapped(cap)) {
            ret.cap = cap;
            ret.status = EXCEPTION_NONE;
        } else {
            userError("Deriving a IOPT cap without an assigned IOASID");
            current_syscall_error.type = seL4_IllegalOperation;
            ret.cap = cap_null_cap_new();
            ret.status = EXCEPTION_SYSCALL_ERROR;
        }
        return ret;
#endif
    default:
        /* This assert has no equivalent in haskell,
         * as the options are restricted by type */
        fail("Invalid arch cap");
    }
}

/* No ARM arch cap carries mutable cap data (badge/guard), so updating cap
 * data is the identity. 'preserve' and 'data' are intentionally unused. */
cap_t CONST Arch_updateCapData(bool_t preserve, word_t data, cap_t cap)
{
    return cap;
}

/* Apply a rights mask to a cap: only frame caps carry VM rights on ARM,
 * so for both frame cap types the stored rights are intersected with the
 * requested mask; all other arch caps are returned unchanged. */
cap_t CONST Arch_maskCapRights(seL4_CapRights_t cap_rights_mask, cap_t cap)
{
    if (cap_get_capType(cap) == cap_small_frame_cap) {
        vm_rights_t vm_rights;

        vm_rights = vmRightsFromWord(
                        cap_small_frame_cap_get_capFVMRights(cap));
        vm_rights = maskVMRights(vm_rights, cap_rights_mask);
        return cap_small_frame_cap_set_capFVMRights(cap,
                                                    wordFromVMRights(vm_rights));
    } else if (cap_get_capType(cap) == cap_frame_cap) {
        vm_rights_t vm_rights;

        vm_rights = vmRightsFromWord(
                        cap_frame_cap_get_capFVMRights(cap));
        vm_rights = maskVMRights(vm_rights, cap_rights_mask);
        return cap_frame_cap_set_capFVMRights(cap,
                                              wordFromVMRights(vm_rights));
    } else {
        return cap;
    }
}

/* Perform arch-specific cleanup when a cap is deleted.
 *
 * 'final' is true when this is the last cap to the object. Mapped frame
 * caps are always unmapped (even when not final, since each cap records
 * its own mapping); ASID pools, page directories, page tables, VCPUs and
 * SMMU objects are only torn down on final deletion.
 *
 * Always returns a null remainder and null cleanup info: no ARM arch
 * object requires a reply/zombie cap or deferred cleanup.
 */
finaliseCap_ret_t Arch_finaliseCap(cap_t cap, bool_t final)
{
    finaliseCap_ret_t fc_ret;

    switch (cap_get_capType(cap)) {
    case cap_asid_pool_cap:
        if (final) {
            deleteASIDPool(cap_asid_pool_cap_get_capASIDBase(cap),
                           ASID_POOL_PTR(cap_asid_pool_cap_get_capASIDPool(cap)));
        }
        break;

    case cap_page_directory_cap:
        if (final && cap_page_directory_cap_get_capPDIsMapped(cap)) {
            deleteASID(cap_page_directory_cap_get_capPDMappedASID(cap),
                       PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(cap)));
        }
        break;

    case cap_page_table_cap:
        if (final && cap_page_table_cap_get_capPTIsMapped(cap)) {
            unmapPageTable(
                cap_page_table_cap_get_capPTMappedASID(cap),
                cap_page_table_cap_get_capPTMappedAddress(cap),
                PTE_PTR(cap_page_table_cap_get_capPTBasePtr(cap)));
        }
        break;

    case cap_small_frame_cap:
        /* A non-zero mapped ASID means this cap records a live mapping. */
        if (cap_small_frame_cap_get_capFMappedASID(cap)) {
#ifdef CONFIG_TK1_SMMU
            /* SMMU-mapped frames are unmapped from the IO address space
             * instead of the CPU page tables. */
            if (isIOSpaceFrameCap(cap)) {
                unmapIOPage(cap);
                break;
            }
#endif

            unmapPage(ARMSmallPage,
                      cap_small_frame_cap_get_capFMappedASID(cap),
                      cap_small_frame_cap_get_capFMappedAddress(cap),
                      (void *)cap_small_frame_cap_get_capFBasePtr(cap));
        }
        break;

    case cap_frame_cap:
        if (cap_frame_cap_get_capFMappedASID(cap)) {
#ifdef CONFIG_KERNEL_LOG_BUFFER
            /* If the last cap to the user-level log buffer frame is being revoked,
             * reset the ksLog so that the kernel doesn't log anymore
             */
            if (unlikely(cap_frame_cap_get_capFSize(cap) == ARMSection)) {
                if (pptr_to_paddr((void *)cap_frame_cap_get_capFBasePtr(cap)) == ksUserLogBuffer) {
                    ksUserLogBuffer = 0;

                    /* Invalidate log page table entries */
                    clearMemory((void *) armKSGlobalLogPT, BIT(seL4_PageTableBits));

                    cleanCacheRange_PoU((pptr_t) &armKSGlobalLogPT[0],
                                        (pptr_t) &armKSGlobalLogPT[0] + BIT(seL4_PageTableBits),
                                        addrFromPPtr((void *)&armKSGlobalLogPT[0]));

                    for (int idx = 0; idx < BIT(PT_INDEX_BITS); idx++) {
                        invalidateTranslationSingle(KS_LOG_PPTR + (idx << seL4_PageBits));
                    }

                    userError("Log buffer frame is invalidated, kernel can't benchmark anymore");
                }
            }
#endif /* CONFIG_KERNEL_LOG_BUFFER */

            unmapPage(cap_frame_cap_get_capFSize(cap),
                      cap_frame_cap_get_capFMappedASID(cap),
                      cap_frame_cap_get_capFMappedAddress(cap),
                      (void *)cap_frame_cap_get_capFBasePtr(cap));
        }
        break;

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    case cap_vcpu_cap:
        if (final) {
            vcpu_finalise(VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap)));
        }
        break;
#endif

#ifdef CONFIG_TK1_SMMU
    case cap_io_space_cap:
        if (final) {
            clearIOPageDirectory(cap);
        }
        break;

    case cap_io_page_table_cap:
        if (final && cap_io_page_table_cap_get_capIOPTIsMapped(cap)) {
            deleteIOPageTable(cap);
        }
        break;
#endif

    default:
        break;
    }

    fc_ret.remainder = cap_null_cap_new();
    fc_ret.cleanupInfo = cap_null_cap_new();
    return fc_ret;
}

/* Decide whether cap_a covers (at least) the kernel-object region that
 * cap_b refers to — the arch half of the CDT's sameRegionAs relation.
 * Frame caps compare by address-range containment (a's range must contain
 * b's); all other cap types compare by object pointer / identity, and
 * differing types are never the same region.
 */
bool_t CONST Arch_sameRegionAs(cap_t cap_a, cap_t cap_b)
{
    switch (cap_get_capType(cap_a)) {
    case cap_small_frame_cap:
    case cap_frame_cap:
        if (cap_get_capType(cap_b) == cap_small_frame_cap ||
            cap_get_capType(cap_b) == cap_frame_cap) {
            word_t botA, botB, topA, topB;
            botA = generic_frame_cap_get_capFBasePtr(cap_a);
            botB = generic_frame_cap_get_capFBasePtr(cap_b);
            topA = botA + MASK(pageBitsForSize(generic_frame_cap_get_capFSize(cap_a)));
            topB = botB + MASK(pageBitsForSize(generic_frame_cap_get_capFSize(cap_b)));
            /* a's [botA, topA] must contain b's [botB, topB]; the final
             * conjunct guards against wrap-around in b's range. */
            return ((botA <= botB) && (topA >= topB) && (botB <= topB));
        }
        break;

    case cap_page_table_cap:
        if (cap_get_capType(cap_b) == cap_page_table_cap) {
            return cap_page_table_cap_get_capPTBasePtr(cap_a) ==
                   cap_page_table_cap_get_capPTBasePtr(cap_b);
        }
        break;

    case cap_page_directory_cap:
        if (cap_get_capType(cap_b) == cap_page_directory_cap) {
            return cap_page_directory_cap_get_capPDBasePtr(cap_a) ==
                   cap_page_directory_cap_get_capPDBasePtr(cap_b);
        }
        break;

    case cap_asid_control_cap:
        /* There is a single ASID control object, so any two ASID control
         * caps refer to the same region. */
        if (cap_get_capType(cap_b) == cap_asid_control_cap) {
            return true;
        }
        break;

    case cap_asid_pool_cap:
        if (cap_get_capType(cap_b) == cap_asid_pool_cap) {
            return cap_asid_pool_cap_get_capASIDPool(cap_a) ==
                   cap_asid_pool_cap_get_capASIDPool(cap_b);
        }
        break;

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    case cap_vcpu_cap:
        if (cap_get_capType(cap_b) == cap_vcpu_cap) {
            return cap_vcpu_cap_get_capVCPUPtr(cap_a) ==
                   cap_vcpu_cap_get_capVCPUPtr(cap_b);
        }
        break;
#endif

#ifdef CONFIG_TK1_SMMU
    case cap_io_space_cap:
        if (cap_get_capType(cap_b) == cap_io_space_cap) {
            return cap_io_space_cap_get_capModuleID(cap_a) ==
                   cap_io_space_cap_get_capModuleID(cap_b);
        }
        break;

    case cap_io_page_table_cap:
        if (cap_get_capType(cap_b) == cap_io_page_table_cap) {
            return cap_io_page_table_cap_get_capIOPTBasePtr(cap_a) ==
                   cap_io_page_table_cap_get_capIOPTBasePtr(cap_b);
        }
        break;
#endif
    }

    return false;
}

/* Decide whether two arch caps refer to the same kernel object (used e.g.
 * when checking for Retype over existing objects). Frame caps of differing
 * sizes or differing types are distinct objects even when their regions
 * overlap; same-type frame caps additionally require matching base pointer
 * and matching device/non-device status. Everything else falls back to
 * Arch_sameRegionAs.
 */
bool_t CONST Arch_sameObjectAs(cap_t cap_a, cap_t cap_b)
{
    if (cap_get_capType(cap_a) == cap_small_frame_cap) {
        if (cap_get_capType(cap_b) == cap_small_frame_cap) {
            return ((cap_small_frame_cap_get_capFBasePtr(cap_a) ==
                     cap_small_frame_cap_get_capFBasePtr(cap_b)) &&
                    ((cap_small_frame_cap_get_capFIsDevice(cap_a) == 0) ==
                     (cap_small_frame_cap_get_capFIsDevice(cap_b) == 0)));
        } else if (cap_get_capType(cap_b) == cap_frame_cap) {
            /* Small frame vs. (large) frame is never the same object. */
            return false;
        }
    }
    if (cap_get_capType(cap_a) == cap_frame_cap) {
        if (cap_get_capType(cap_b) == cap_frame_cap) {
            return ((cap_frame_cap_get_capFBasePtr(cap_a) ==
                     cap_frame_cap_get_capFBasePtr(cap_b)) &&
                    (cap_frame_cap_get_capFSize(cap_a) ==
                     cap_frame_cap_get_capFSize(cap_b)) &&
                    ((cap_frame_cap_get_capFIsDevice(cap_a) == 0) ==
                     (cap_frame_cap_get_capFIsDevice(cap_b) == 0)));
        } else if (cap_get_capType(cap_b) == cap_small_frame_cap) {
            return false;
        }
    }
    return Arch_sameRegionAs(cap_a, cap_b);
}

/* Return the size, in address bits (log2 bytes), of the given ARM object
 * type. Fails on non-arch or invalid types. */
word_t Arch_getObjectSize(word_t t)
{
    switch (t) {
    case seL4_ARM_SmallPageObject:
        return ARMSmallPageBits;
    case seL4_ARM_LargePageObject:
        return ARMLargePageBits;
    case seL4_ARM_SectionObject:
        return ARMSectionBits;
    case seL4_ARM_SuperSectionObject:
        return ARMSuperSectionBits;
    case seL4_ARM_PageTableObject:
        return PTE_SIZE_BITS + PT_INDEX_BITS;
    case seL4_ARM_PageDirectoryObject:
        return PDE_SIZE_BITS + PD_INDEX_BITS;
#ifdef CONFIG_TK1_SMMU
    case seL4_ARM_IOPageTableObject:
        return seL4_IOPageTableBits;
#endif
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    case seL4_ARM_VCPUObject:
        return VCPU_SIZE_BITS;
#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */
    default:
        fail("Invalid object type");
        return 0;
    }
}

/* Initialise a freshly retyped region of memory as an ARM object and return
 * the initial cap to it. 'regionBase' is the (already zeroed) kernel virtual
 * base of the region, 'userSize' is unused on ARM (object sizes are fixed),
 * and 'deviceMemory' marks frames backing device rather than RAM memory.
 *
 * The AUXUPD/GHOSTUPD annotations are consumed by the C verification
 * framework (they retype the region in the proof's heap model) and must be
 * kept in sync with the code; they are not ordinary comments.
 *
 * New frame caps start unmapped (asidInvalid) with VMReadWrite rights.
 */
cap_t Arch_createObject(object_t t, void *regionBase, word_t userSize, bool_t deviceMemory)
{
    switch (t) {
    case seL4_ARM_SmallPageObject:
        if (deviceMemory) {
            /** AUXUPD: "(True, ptr_retyps 1
                     (Ptr (ptr_val \<acute>regionBase) :: user_data_device_C ptr))" */
            /** GHOSTUPD: "(True, gs_new_frames vmpage_size.ARMSmallPage
                                    (ptr_val \<acute>regionBase)
                                    (unat ARMSmallPageBits))" */
        } else {
            /** AUXUPD: "(True, ptr_retyps 1
                     (Ptr (ptr_val \<acute>regionBase) :: user_data_C ptr))" */
            /** GHOSTUPD: "(True, gs_new_frames vmpage_size.ARMSmallPage
                                    (ptr_val \<acute>regionBase)
                                    (unat ARMSmallPageBits))" */
        }
        return cap_small_frame_cap_new(
                   ASID_LOW(asidInvalid), VMReadWrite,
                   0, !!deviceMemory,
#ifdef CONFIG_TK1_SMMU
                   0,
#endif
                   ASID_HIGH(asidInvalid),
                   (word_t)regionBase);

    case seL4_ARM_LargePageObject:
        if (deviceMemory) {
            /** AUXUPD: "(True, ptr_retyps 16
                     (Ptr (ptr_val \<acute>regionBase) :: user_data_device_C ptr))" */
            /** GHOSTUPD: "(True, gs_new_frames vmpage_size.ARMLargePage
                                    (ptr_val \<acute>regionBase)
                                    (unat ARMLargePageBits))" */
        } else {
            /** AUXUPD: "(True, ptr_retyps 16
                     (Ptr (ptr_val \<acute>regionBase) :: user_data_C ptr))" */
            /** GHOSTUPD: "(True, gs_new_frames vmpage_size.ARMLargePage
                                    (ptr_val \<acute>regionBase)
                                    (unat ARMLargePageBits))" */
        }
        return cap_frame_cap_new(
                   ARMLargePage, ASID_LOW(asidInvalid), VMReadWrite,
                   0, !!deviceMemory, ASID_HIGH(asidInvalid),
                   (word_t)regionBase);

    case seL4_ARM_SectionObject:
        /* Retype counts differ under hypervisor support because the page
         * size granularity of the verification heap model differs. */
        if (deviceMemory) {
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
            /** AUXUPD: "(True, ptr_retyps 512
                     (Ptr (ptr_val \<acute>regionBase) :: user_data_device_C ptr))" */
#else /* CONFIG_ARM_HYPERVISOR_SUPPORT */
            /** AUXUPD: "(True, ptr_retyps 256
                     (Ptr (ptr_val \<acute>regionBase) :: user_data_device_C ptr))" */
#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */
            /** GHOSTUPD: "(True, gs_new_frames vmpage_size.ARMSection
                                    (ptr_val \<acute>regionBase)
                                    (unat ARMSectionBits))" */
        } else {
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
            /** AUXUPD: "(True, ptr_retyps 512
                     (Ptr (ptr_val \<acute>regionBase) :: user_data_C ptr))" */
#else /* CONFIG_ARM_HYPERVISOR_SUPPORT */
            /** AUXUPD: "(True, ptr_retyps 256
                     (Ptr (ptr_val \<acute>regionBase) :: user_data_C ptr))" */
#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */
            /** GHOSTUPD: "(True, gs_new_frames vmpage_size.ARMSection
                                    (ptr_val \<acute>regionBase)
                                    (unat ARMSectionBits))" */
        }
        return cap_frame_cap_new(
                   ARMSection, ASID_LOW(asidInvalid), VMReadWrite,
                   0, !!deviceMemory, ASID_HIGH(asidInvalid),
                   (word_t)regionBase);

    case seL4_ARM_SuperSectionObject:
        if (deviceMemory) {
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
            /** AUXUPD: "(True, ptr_retyps 8192
                     (Ptr (ptr_val \<acute>regionBase) :: user_data_device_C ptr))" */
#else /* CONFIG_ARM_HYPERVISOR_SUPPORT */
            /** AUXUPD: "(True, ptr_retyps 4096
                     (Ptr (ptr_val \<acute>regionBase) :: user_data_device_C ptr))" */
#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */
            /** GHOSTUPD: "(True, gs_new_frames vmpage_size.ARMSuperSection
                                    (ptr_val \<acute>regionBase)
                                    (unat ARMSuperSectionBits))" */
        } else {
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
            /** AUXUPD: "(True, ptr_retyps 8192
                     (Ptr (ptr_val \<acute>regionBase) :: user_data_C ptr))" */
#else /* CONFIG_ARM_HYPERVISOR_SUPPORT */
            /** AUXUPD: "(True, ptr_retyps 4096
                     (Ptr (ptr_val \<acute>regionBase) :: user_data_C ptr))" */
#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */
            /** GHOSTUPD: "(True, gs_new_frames vmpage_size.ARMSuperSection
                                    (ptr_val \<acute>regionBase)
                                    (unat ARMSuperSectionBits))" */
        }
        return cap_frame_cap_new(
                   ARMSuperSection, ASID_LOW(asidInvalid), VMReadWrite,
                   0, !!deviceMemory, ASID_HIGH(asidInvalid),
                   (word_t)regionBase);

    case seL4_ARM_PageTableObject:
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        /** AUXUPD: "(True, ptr_retyps 1
                 (Ptr (ptr_val \<acute>regionBase) :: (pte_C[512]) ptr))" */
#else /* CONFIG_ARM_HYPERVISOR_SUPPORT */
        /** AUXUPD: "(True, ptr_retyps 1
                 (Ptr (ptr_val \<acute>regionBase) :: (pte_C[256]) ptr))" */
#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */

        return cap_page_table_cap_new(false, asidInvalid, 0,
                                      (word_t)regionBase);

    case seL4_ARM_PageDirectoryObject:
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        /** AUXUPD: "(True, ptr_retyps 1
                 (Ptr (ptr_val \<acute>regionBase) :: (pde_C[2048]) ptr))" */
#else /* CONFIG_ARM_HYPERVISOR_SUPPORT */
        /** AUXUPD: "(True, ptr_retyps 1
                 (Ptr (ptr_val \<acute>regionBase) :: (pde_C[4096]) ptr))" */
#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */
        /* A new PD must contain the kernel's global mappings, and the page
         * table walker does not snoop the D-cache on all ARM cores, so the
         * entries are cleaned to the point of unification. */
        copyGlobalMappings((pde_t *)regionBase);
        cleanCacheRange_PoU((word_t)regionBase,
                            (word_t)regionBase + (1 << (PD_INDEX_BITS + PDE_SIZE_BITS)) - 1,
                            addrFromPPtr(regionBase));

        return cap_page_directory_cap_new(false, asidInvalid,
                                          (word_t)regionBase);
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    case seL4_ARM_VCPUObject:
        /** AUXUPD: "(True, ptr_retyp
          (Ptr (ptr_val \<acute>regionBase) :: vcpu_C ptr))" */
        vcpu_init(VCPU_PTR(regionBase));
        return cap_vcpu_cap_new(VCPU_REF(regionBase));
#endif

#ifdef CONFIG_TK1_SMMU
    case seL4_ARM_IOPageTableObject:
        /* When the untyped was zeroed it was cleaned to the PoU, but the SMMUs
         * typically pull directly from RAM, so we do a further clean to RAM here */
        cleanCacheRange_RAM((word_t)regionBase,
                            (word_t)regionBase + (1 << seL4_IOPageTableBits) - 1,
                            addrFromPPtr(regionBase));
        return cap_io_page_table_cap_new(0, asidInvalid, (word_t)regionBase, 0);
#endif
    default:
        /*
         * This is a conflation of the haskell error: "Arch.createNewCaps
         * got an API type" and the case where an invalid object type is
         * passed (which is impossible in haskell).
         */
        fail("Arch_createObject got an API type or invalid object type");
    }
}

/* Dispatch an arch cap invocation to the appropriate decoder: SMMU and
 * VCPU caps (when configured) go to their own decoders; everything else is
 * an MMU invocation. */
exception_t Arch_decodeInvocation(word_t invLabel, word_t length, cptr_t cptr,
                                  cte_t *slot, cap_t cap, extra_caps_t excaps,
                                  bool_t call, word_t *buffer)
{
    /* The C parser cannot handle a switch statement with only a default
     * case. So we need to do some gymnastics to remove the switch if
     * there are no other cases */
#if defined(CONFIG_TK1_SMMU) || defined(CONFIG_ARM_HYPERVISOR_SUPPORT)
    switch (cap_get_capType(cap)) {
#ifdef CONFIG_TK1_SMMU
    case cap_io_space_cap:
        return decodeARMIOSpaceInvocation(invLabel, cap);
    case cap_io_page_table_cap:
        return decodeARMIOPTInvocation(invLabel, length, slot, cap, excaps, buffer);
#endif
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    case cap_vcpu_cap:
        return decodeARMVCPUInvocation(invLabel, length, cptr, slot, cap, excaps, call, buffer);
#endif /* end of CONFIG_ARM_HYPERVISOR_SUPPORT */
    default:
#else
    {
#endif
        return decodeARMMMUInvocation(invLabel, length, cptr, slot, cap, excaps, buffer);
    }
}

/* Arch-specific cleanup before a TCB is deleted: release any FPU state the
 * thread owns and, under hypervisor support, dissociate its VCPU. */
void
Arch_prepareThreadDelete(tcb_t *thread)
{
#ifdef CONFIG_HAVE_FPU
    fpuThreadDelete(thread);
#endif

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    if (thread->tcbArch.tcbVCPU) {
        dissociateVCPUTCB(thread->tcbArch.tcbVCPU, thread);
    }
#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */
}