/**
 * \file
 * \brief Capability system user code
 */

/*
 * Copyright (c) 2007-2010, 2012, 2016, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */

#include <stdint.h>
#include <stdbool.h>
#include <barrelfish/barrelfish.h>
#include <barrelfish/cspace.h>
#include <barrelfish/caddr.h>
#include <barrelfish/lmp_endpoints.h>
#include <if/monitor_defs.h>
#include <if/monitor_blocking_defs.h>
#include <if/if_types.h>
#include <barrelfish/monitor_client.h>
#include <trace/trace.h>
#include <stdio.h>

/// Initializer for a cnoderef to the root (L1) CNode of our own cspace
#define ROOT_CNODE_INIT { \
    .croot = CPTR_ROOTCN, \
    .cnode = 0, \
    .level = CNODE_TYPE_ROOT, }

/// Root CNode
struct cnoderef cnode_root = ROOT_CNODE_INIT;

/// Initializer for a cnoderef to the task CNode (dispatcher-private caps)
#define TASK_CNODE_INIT { \
    .croot = CPTR_ROOTCN, \
    .cnode = CPTR_TASKCN_BASE, \
    .level = CNODE_TYPE_OTHER, }

/// Initializer for a cnoderef to the page CNode (top-level VNode caps)
#define PAGE_CNODE_INIT { \
    .croot = CPTR_ROOTCN, \
    .cnode = CPTR_PAGECN_BASE, \
    .level = CNODE_TYPE_OTHER, }

/// Task CNode
struct cnoderef cnode_task = TASK_CNODE_INIT;

/// Base CNode: preallocated base-page-sized RAM caps
struct cnoderef cnode_base = {
    .cnode = CPTR_BASE_PAGE_CN_BASE,
    .level = CNODE_TYPE_OTHER,
    .croot = CPTR_ROOTCN,
};

/// Super CNode
struct cnoderef cnode_super = {
    .cnode = CPTR_SUPERCN_BASE,
    .level = CNODE_TYPE_OTHER,
    .croot = CPTR_ROOTCN,
};

/// Page CNode
struct cnoderef cnode_page = PAGE_CNODE_INIT;

/// Module CNode
struct cnoderef cnode_module = {
    .cnode = CPTR_MODULECN_BASE,
    .level = CNODE_TYPE_OTHER,
    .croot = CPTR_ROOTCN,
};

/// Capability to Root CNode
struct capref cap_root = {
    .cnode = TASK_CNODE_INIT,
    .slot = TASKCN_SLOT_ROOTCN
};

/// Capability for IRQ table
struct capref cap_irq = {
    .cnode = TASK_CNODE_INIT,
    .slot = TASKCN_SLOT_IRQ
};

/// Capability for legacy IO
struct capref cap_io = {
    .cnode = TASK_CNODE_INIT,
    .slot = TASKCN_SLOT_IO
};

/// Capability for endpoint to self
struct capref cap_selfep = {
    .cnode = TASK_CNODE_INIT,
    .slot = TASKCN_SLOT_SELFEP
};

/// Capability for dispatcher
struct capref cap_dispatcher = {
    .cnode = TASK_CNODE_INIT,
    .slot = TASKCN_SLOT_DISPATCHER
};

/// Capability for dispatcher frame
struct capref cap_dispframe = {
    .cnode = TASK_CNODE_INIT,
    .slot = TASKCN_SLOT_DISPFRAME
};

/// Capability for ArgSpace
struct capref cap_argcn = {
    .cnode = ROOT_CNODE_INIT,
    .slot = ROOTCN_SLOT_ARGCN
};

/// Capability for monitor endpoint
struct capref cap_monitorep = {
    .cnode = TASK_CNODE_INIT,
    .slot = TASKCN_SLOT_MONITOREP
};

/// Capability for kernel (only in monitor)
struct capref cap_kernel = {
    .cnode = TASK_CNODE_INIT,
    .slot = TASKCN_SLOT_KERNELCAP
};

/// Capability for IPI sending (only in monitor)
struct capref cap_ipi = {
    .cnode = TASK_CNODE_INIT,
    .slot = TASKCN_SLOT_IPI
};

/// Capability for performance monitoring
struct capref cap_perfmon = {
    .cnode = TASK_CNODE_INIT,
    .slot = TASKCN_SLOT_PERF_MON
};

/// Capability for endpoint to init (only in monitor/mem_serv)
struct capref cap_initep = {
    .cnode = TASK_CNODE_INIT,
    .slot = TASKCN_SLOT_INITEP
};

/// Session ID
struct capref cap_sessionid = {
    .cnode = TASK_CNODE_INIT,
    .slot = TASKCN_SLOT_SESSIONID
};

/// Process manager cap, allows creating domains.
struct capref cap_procmng = {
    .cnode = TASK_CNODE_INIT,
    .slot = TASKCN_SLOT_PROC_MNG
};

/// Domain ID cap.
158struct capref cap_domainid = { 159 .cnode = TASK_CNODE_INIT, 160 .slot = TASKCN_SLOT_DOMAINID 161}; 162 163/// Root PML4 VNode 164struct capref cap_vroot = { 165 .cnode = PAGE_CNODE_INIT, 166 .slot = PAGECN_SLOT_VROOT, 167}; 168 169static inline bool backoff(int count) 170{ 171 // very crude exponential backoff based upon core id 172 int yieldcnt = 2^count * disp_get_core_id(); 173 for (int i=0; i<yieldcnt; i++) { 174 thread_yield(); 175 } 176 return true; 177} 178 179/** 180 * \brief Retype a capability into one or more new capabilities, going through 181 * the monitor to ensure consistancy with other cores. Only necessary for 182 * caps that have been sent remotely. 183 */ 184static errval_t cap_retype_remote(struct capref src_root, struct capref dest_root, 185 capaddr_t src, gensize_t offset, enum objtype new_type, 186 gensize_t objsize, size_t count, capaddr_t to, 187 capaddr_t slot, int to_level) 188{ 189 errval_t err, remote_cap_err; 190 struct monitor_blocking_binding *mrc = get_monitor_blocking_binding(); 191 if (!mrc) { 192 err = monitor_client_blocking_rpc_init(); 193 mrc = get_monitor_blocking_binding(); 194 if (err_is_fail(err) || !mrc) { 195 return LIB_ERR_MONITOR_RPC_NULL; 196 } 197 } 198 199 int send_count = 0; 200 do { 201 if (capcmp(src_root, dest_root)) { 202 dest_root = NULL_CAP; 203 } 204 err = mrc->rpc_tx_vtbl.remote_cap_retype(mrc, src_root, dest_root, src, 205 offset, (uint64_t)new_type, objsize, 206 count, to, slot, to_level, &remote_cap_err); 207 if (err_is_fail(err)){ 208 DEBUG_ERR(err, "remote cap retype\n"); 209 } 210 } while (err_no(remote_cap_err) == MON_ERR_REMOTE_CAP_RETRY && backoff(++send_count)); 211 212 return remote_cap_err; 213 214} 215 216 217/** 218 * \brief Delete the given capability, going through the monitor to ensure 219 * consistancy with other cores. Only necessary for caps that have been sent 220 * remotely. 
221 * 222 * \param cap Capability to be deleted 223 * 224 * Deletes (but does not revoke) the given capability, allowing the CNode slot 225 * to be reused. 226 */ 227static errval_t cap_delete_remote(struct capref root, capaddr_t src, uint8_t level) 228{ 229 errval_t err, remote_cap_err; 230 struct monitor_blocking_binding *mrc = get_monitor_blocking_binding(); 231 if (!mrc) { 232 err = monitor_client_blocking_rpc_init(); 233 mrc = get_monitor_blocking_binding(); 234 if (err_is_fail(err) || !mrc) { 235 return LIB_ERR_MONITOR_RPC_NULL; 236 } 237 } 238 239 int count = 0; 240 do { 241 err = mrc->rpc_tx_vtbl.remote_cap_delete(mrc, root, src, level, 242 &remote_cap_err); 243 if (err_is_fail(err)){ 244 DEBUG_ERR(err, "remote cap delete\n"); 245 } 246 } while (err_no(remote_cap_err) == MON_ERR_REMOTE_CAP_RETRY && backoff(++count)); 247 248 return remote_cap_err; 249} 250 251/** 252 * \brief Revoke (delete all copies and descendants of) the given capability, 253 * going through the monitor to ensure consistancy with other cores. Only 254 * necessary for caps that have been sent remotely. 255 * 256 * \param cap Capability to be revoked 257 * 258 * Deletes all copies and descendants of the given capability, but not the 259 * capability itself. If this succeeds, the capability is guaranteed to be 260 * the only copy in the system. 
261 */ 262static errval_t cap_revoke_remote(struct capref root, capaddr_t src, uint8_t level) 263{ 264 errval_t err, remote_cap_err; 265 struct monitor_blocking_binding *mrc = get_monitor_blocking_binding(); 266 if (!mrc) { 267 err = monitor_client_blocking_rpc_init(); 268 mrc = get_monitor_blocking_binding(); 269 if (err_is_fail(err) || !mrc) { 270 return LIB_ERR_MONITOR_RPC_NULL; 271 } 272 } 273 274 int count = 0; 275 do { 276 err = mrc->rpc_tx_vtbl.remote_cap_revoke(mrc, root, src, level, 277 &remote_cap_err); 278 if (err_is_fail(err)){ 279 DEBUG_ERR(err, "remote cap delete\n"); 280 } 281 } while (err_no(remote_cap_err) == MON_ERR_REMOTE_CAP_RETRY && backoff(++count)); 282 283 return remote_cap_err; 284} 285 286/** 287 * \brief Retype (part of) a capability into one or more new capabilities 288 * 289 * \param dest_start Location of first destination slot, which must be empty 290 * \param src Source capability to retype 291 * \param offset Offset into source capability 292 * \param new_type Kernel object type to retype to. 293 * \param objsize Size of created objects in bytes 294 * (ignored for fixed-size objects) 295 * \param count The number of new objects to create 296 * 297 * When retyping IRQSrc capabilities, offset and objsize represent the start 298 * and end of the to be created interrupt range. Count must be 1 for IRQSrc. 299 * 300 * Retypes (part of) the given source capability into a number of new 301 * capabilities, which may be of the same or of different type. The new 302 * capabilities are created in the slots starting from dest_start, which must 303 * all be empty and lie in the same CNode. The number of objects created is 304 * determined by the argument `count`. 
305 */ 306errval_t cap_retype(struct capref dest_start, struct capref src, gensize_t offset, 307 enum objtype new_type, gensize_t objsize, size_t count) 308{ 309 errval_t err; 310 311 // Address of destination cspace 312 capaddr_t dcs_addr = get_croot_addr(dest_start); 313 // Address of the cap to the destination CNode 314 capaddr_t dcn_addr = get_cnode_addr(dest_start); 315 // Depth/Level of destination cnode 316 enum cnode_type dcn_level = get_cnode_level(dest_start); 317 // Address of source cspace 318 capaddr_t scp_root = get_croot_addr(src); 319 // Address of source capability 320 capaddr_t scp_addr = get_cap_addr(src); 321 322 err = invoke_cnode_retype(cap_root, scp_root, scp_addr, offset, new_type, 323 objsize, count, dcs_addr, dcn_addr, dcn_level, 324 dest_start.slot); 325 326 if (err_no(err) == SYS_ERR_RETRY_THROUGH_MONITOR) { 327 struct capref src_root = get_croot_capref(src); 328 struct capref dest_root = get_croot_capref(dest_start); 329 TRACE(CAPOPS, USER_RETYPE_RPC, 0); 330 err = cap_retype_remote(src_root, dest_root, scp_addr, offset, new_type, 331 objsize, count, dcn_addr, dest_start.slot, 332 dcn_level); 333 TRACE(CAPOPS, USER_RETYPE_RPC_DONE, 0); 334 return err; 335 } else { 336 return err; 337 } 338} 339 340 341/** 342 * \brief Create a capability 343 * 344 * \param dest Location where to create the cap, which must be empty. 345 * \param type Kernel object type to create. 346 * \param size Size of the created capability in bytes. 347 * (ignored for fixed-size objects) 348 * 349 * Only certain types of capabilities can be created this way. If invoked on 350 * a capability type, that is not creatable at runtime the error 351 * SYS_ERR_TYPE_NOT_CREATABLE is returned. Most capabilities have to be retyped 352 * from other capabilities with cap_retype(). 
353 */ 354errval_t cap_create(struct capref dest, enum objtype type, size_t size) 355{ 356 errval_t err; 357 358 // Address of the cap to the destination CNode 359 capaddr_t dest_cnode_cptr = get_cnode_addr(dest); 360 enum cnode_type dest_cnode_level = get_cnode_level(dest); 361 362 err = invoke_cnode_create(cap_root, type, size, dest_cnode_cptr, 363 dest_cnode_level, dest.slot); 364 365 return err; 366} 367 368/** 369 * \brief Delete the given capability 370 * 371 * \param cap Capability to be deleted 372 * 373 * Deletes (but does not revoke) the given capability, allowing the CNode slot 374 * to be reused. 375 */ 376errval_t cap_delete(struct capref cap) 377{ 378 errval_t err; 379 struct capref croot = get_croot_capref(cap); 380 capaddr_t caddr = get_cap_addr(cap); 381 enum cnode_type level = get_cap_level(cap); 382 383 err = invoke_cnode_delete(croot, caddr, level); 384 385 if (err_no(err) == SYS_ERR_RETRY_THROUGH_MONITOR) { 386 TRACE(CAPOPS, USER_DELETE_RPC, 0); 387 err = cap_delete_remote(croot, caddr, level); 388 TRACE(CAPOPS, USER_DELETE_RPC_DONE, 0); 389 return err; 390 } else { 391 return err; 392 } 393} 394 395/** 396 * \brief Revoke (delete all copies and descendants of) the given capability 397 * 398 * \param cap Capability to be revoked 399 * 400 * Deletes all copies and descendants of the given capability, but not the 401 * capability itself. If this succeeds, the capability is guaranteed to be 402 * the only copy in the system. 
403 */ 404errval_t cap_revoke(struct capref cap) 405{ 406 errval_t err; 407 struct capref croot = get_croot_capref(cap); 408 capaddr_t caddr = get_cap_addr(cap); 409 enum cnode_type level = get_cap_level(cap); 410 411 err = invoke_cnode_revoke(croot, caddr, level); 412 413 if (err_no(err) == SYS_ERR_RETRY_THROUGH_MONITOR) { 414 TRACE(CAPOPS, USER_REVOKE_RPC, 0); 415 err = cap_revoke_remote(croot, caddr, level); 416 TRACE(CAPOPS, USER_REVOKE_RPC_DONE, 0); 417 return err; 418 } else { 419 return err; 420 } 421} 422 423/** 424 * \brief Destroy a capability, i.e. delete it and free the slot. 425 * 426 * \param cap Capability to be destroyed 427 */ 428errval_t cap_destroy(struct capref cap) 429{ 430 errval_t err; 431 err = cap_delete(cap); 432 if (err_is_fail(err)) { 433 return err; 434 } 435 436 err = slot_free(cap); 437 if (err_is_fail(err)) { 438 return err_push(err, LIB_ERR_WHILE_FREEING_SLOT); 439 } 440 441 return SYS_ERR_OK; 442} 443 444/** 445 * \brief Replace own L1 CNode 446 * 447 * \param new the replacement L1 CNode 448 * \param ret the slot to put the old L1 CNode 449 */ 450errval_t root_cnode_resize(struct capref new, struct capref ret) 451{ 452 assert(get_croot_addr(new) == CPTR_ROOTCN); 453 assert(get_cap_level(new) == CNODE_TYPE_COUNT); 454 capaddr_t new_cptr = get_cap_addr(new); 455 456 assert(get_croot_addr(ret) == CPTR_ROOTCN); 457 assert(get_cap_level(ret) == CNODE_TYPE_COUNT); 458 capaddr_t retcn_ptr= get_cnode_addr(ret); 459 460 return invoke_cnode_resize(cap_root, new_cptr, retcn_ptr, ret.slot); 461} 462 463/** 464 * \brief Create a CNode from a given RAM capability in a specific slot 465 * 466 * \param dest location in which to place newly-created CNode cap 467 * \param src location of RAM capability to be retyped to new CNode 468 * \param cnoderef cnoderef struct, filled-in if non-NULL with relevant info 469 * \param slots number of slots in created CNode 470 * must match size of RAM capability. 
471 * 472 * This function requires that dest refer to an existing but empty slot. It 473 * retypes the given memory to a new CNode. 474 */ 475errval_t cnode_create_from_mem(struct capref dest, struct capref src, 476 enum objtype cntype, struct cnoderef *cnoderef, 477 size_t slots) 478{ 479 errval_t err; 480 481 if (cntype != ObjType_L1CNode && 482 cntype != ObjType_L2CNode) 483 { 484 return LIB_ERR_CNODE_TYPE; 485 } 486 487 488 // Retype it to the destination 489 err = cap_retype(dest, src, 0, cntype, slots * (1UL << OBJBITS_CTE), 1); 490 if (err_is_fail(err)) { 491 return err_push(err, LIB_ERR_CAP_RETYPE); 492 } 493 494 // Construct the cnoderef to return 495 if (cnoderef != NULL) { 496 enum cnode_type ref_cntype = cntype == ObjType_L1CNode ? CNODE_TYPE_ROOT : CNODE_TYPE_OTHER; 497 *cnoderef = build_cnoderef(dest, ref_cntype); 498 } 499 500 return SYS_ERR_OK; 501} 502 503/** 504 * \brief Create a CNode from newly-allocated RAM in a newly-allocated slot 505 * 506 * \param ret_dest capref struct to be filled-in with location of CNode 507 * \param cnoderef cnoderef struct, filled-in if non-NULL with relevant info 508 * \param slots Minimum number of slots in created CNode 509 * \param retslots If non-NULL, filled in with the number of slots in created CNode 510 */ 511errval_t cnode_create(struct capref *ret_dest, struct cnoderef *cnoderef, 512 cslot_t slots, cslot_t *retslots) 513{ 514 USER_PANIC("cnode_create deprecated; use cnode_create_l1, cnode_create_l2, or cnode_create_foreign_l2: %p %p %p %p\n", 515 __builtin_return_address(0), 516#ifdef __x86_64__ 517 __builtin_return_address(1), 518 __builtin_return_address(2), 519 __builtin_return_address(3) 520#else 521 NULL, NULL, NULL 522#endif 523 ); 524 return LIB_ERR_NOT_IMPLEMENTED; 525} 526 527/** 528 * \brief Create a L2 CNode from newly-allocated RAM in a newly-allocated slot 529 * 530 * \param ret_dest capref struct to be filled-in with location of CNode 531 * \param cnoderef cnoderef struct, filled-in if 
non-NULL with relevant info 532 * 533 * This function always creates a L2 CNode which contains 256 capabilities 534 */ 535errval_t cnode_create_l2(struct capref *ret_dest, struct cnoderef *cnoderef) 536{ 537 errval_t err; 538 539 // Allocate a slot in root cn for destination 540 assert(ret_dest != NULL); 541 err = slot_alloc_root(ret_dest); 542 if (err_is_fail(err)) { 543 DEBUG_ERR(err, "slot_alloc_root"); 544 return err_push(err, LIB_ERR_SLOT_ALLOC); 545 } 546 547 cslot_t retslots; 548 err = cnode_create_raw(*ret_dest, cnoderef, ObjType_L2CNode, 549 L2_CNODE_SLOTS, &retslots); 550 if (retslots != L2_CNODE_SLOTS) { 551 debug_printf("Unable to create properly sized L2 CNode: got %"PRIuCSLOT" slots instead of %"PRIuCSLOT"\n", 552 retslots, (cslot_t)L2_CNODE_SLOTS); 553 } 554 return err; 555} 556 557errval_t cnode_create_l1(struct capref *ret_dest, struct cnoderef *cnoderef) 558{ 559 errval_t err; 560 561 // Allocate a slot in root cn for destination 562 assert(ret_dest != NULL); 563 err = slot_alloc(ret_dest); 564 if (err_is_fail(err)) { 565 return err_push(err, LIB_ERR_SLOT_ALLOC); 566 } 567 568 cslot_t retslots; 569 err = cnode_create_raw(*ret_dest, cnoderef, ObjType_L1CNode, 570 L2_CNODE_SLOTS, &retslots); 571 if (retslots != L2_CNODE_SLOTS) { 572 debug_printf("Unable to create initial L1 CNode: got %"PRIuCSLOT" slots instead of %"PRIuCSLOT"\n", 573 retslots, (cslot_t)L2_CNODE_SLOTS); 574 } 575 return err; 576} 577 578/** 579 * \brief Create a CNode for another cspace from newly-allocated RAM in a 580 * newly-allocated slot 581 * 582 * \param dest_l1 capref to L1 (root) cnode of destination cspace 583 * \param dest_slot slot to fill with new cnode in destination L1 cnode 584 * \param cnoderef cnoderef struct, filled-in if non-NULL with relevant info 585 * 586 * This function creates a CNode which contains 256 capabilities initially 587 * and puts it in a slot in our cspace. 
588 */ 589errval_t cnode_create_foreign_l2(struct capref dest_l1, cslot_t dest_slot, 590 struct cnoderef *cnoderef) 591{ 592 errval_t err; 593 594 if (capref_is_null(dest_l1)) { 595 return LIB_ERR_CROOT_NULL; 596 } 597 assert(!capref_is_null(dest_l1)); 598 599 struct capref dest; 600 dest.cnode = build_cnoderef(dest_l1, CNODE_TYPE_ROOT); 601 dest.slot = dest_slot; 602 603 cslot_t retslots; 604 err = cnode_create_raw(dest, NULL, ObjType_L2CNode, L2_CNODE_SLOTS, &retslots); 605 if (retslots != L2_CNODE_SLOTS) { 606 debug_printf("Unable to create properly sized foreign CNode: " 607 "got %"PRIuCSLOT" slots instead of %"PRIuCSLOT"\n", 608 retslots, (cslot_t)L2_CNODE_SLOTS); 609 } 610 611 // Create proper cnoderef for foreign L2 612 if (cnoderef) { 613 cnoderef->croot = get_cap_addr(dest_l1); 614 cnoderef->cnode = ROOTCN_SLOT_ADDR(dest_slot); 615 cnoderef->level = CNODE_TYPE_OTHER; 616 } 617 return err; 618} 619 620/** 621 * \brief early allocator for L2 CNode sized RAM caps 622 * 623 * This function returns the preallocated RAM caps stored in 624 * ROOTCN_SLOT_EARLY_CN_CN. 625 */ 626/// Base CNode 627static struct cnoderef cnode_earlycn = { 628 .cnode = ROOTCN_SLOT_ADDR(ROOTCN_SLOT_EARLY_CN_CN), 629 .level = CNODE_TYPE_OTHER, 630 .croot = CPTR_ROOTCN, 631}; 632 633errval_t ram_alloc_fixed_cn(struct capref *retcap); 634errval_t ram_alloc_fixed_cn(struct capref *retcap) 635{ 636 // We keep track of which slots we've used in the dispatcher's 637 // ram_alloc_state. 
638 struct ram_alloc_state *state = get_ram_alloc_state(); 639 640 if (state->earlycn_capnum >= EARLY_CNODE_ALLOCATED_SLOTS) { 641 debug_printf("trying to allocate slot %d!\n", state->earlycn_capnum); 642 return LIB_ERR_RAM_ALLOC_FIXED_EXHAUSTED; 643 } 644 645 retcap->cnode = cnode_earlycn; 646 retcap->slot = state->earlycn_capnum++; 647 648 return SYS_ERR_OK; 649} 650 651/** 652 * \brief Create a CNode from newly-allocated RAM in the given slot 653 * 654 * \param dest location in which to place CNode cap 655 * \param cnoderef cnoderef struct, filled-in if non-NULL with relevant info 656 * \param cntype, type of new cnode 657 * \param slots Minimum number of slots in created CNode 658 * \param retslots If non-NULL, filled in with the number of slots in created CNode 659 * 660 * This function requires that dest refer to an existing but empty slot. It 661 * allocates memory (using #ram_alloc), and retypes that memory to a new CNode. 662 * The intermediate ram cap is destroyed. 663 */ 664errval_t cnode_create_raw(struct capref dest, struct cnoderef *cnoderef, 665 enum objtype cntype, cslot_t slots, cslot_t *retslots) 666{ 667 errval_t err; 668 struct capref ram; 669 670 assert(slots > 0); 671 672 if (cntype != ObjType_L1CNode && 673 cntype != ObjType_L2CNode) 674 { 675 return LIB_ERR_CNODE_TYPE; 676 } 677 678 if (slots < L2_CNODE_SLOTS || 679 (cntype == ObjType_L2CNode && slots != L2_CNODE_SLOTS)) 680 { 681 return LIB_ERR_CNODE_SLOTS; 682 } 683 684 if (retslots != NULL) { 685 *retslots = slots; 686 } 687 688 // XXX: mem_serv should serve non-power-of-two requests 689 uint8_t bits = log2ceil(slots); 690 assert(slots >= (1UL << bits)); 691 692 // Allocate some memory 693 err = ram_alloc(&ram, bits + OBJBITS_CTE); 694 if (err_no(err) == LIB_ERR_RAM_ALLOC_WRONG_SIZE) { 695 // early cnode alloc request, use special allocator 696 err = ram_alloc_fixed_cn(&ram); 697 } 698 if (err_is_fail(err)) { 699 return err_push(err, LIB_ERR_RAM_ALLOC); 700 } 701 702 err = 
cnode_create_from_mem(dest, ram, cntype, cnoderef, slots); 703 if (err_is_fail(err)) { 704 return err_push(err, LIB_ERR_CNODE_CREATE_FROM_MEM); 705 } 706 707 err = cap_destroy(ram); 708 if (err_is_fail(err)) { 709 return err_push(err, LIB_ERR_CAP_DESTROY); 710 } 711 712 return SYS_ERR_OK; 713} 714 715/** 716 * \brief Create CNode with a given guard 717 * 718 * \param dest Location where to place the cnode 719 * \param cnoderef Filled in cnoderef struct if non-NULL 720 * \param slots Minimum number of slots in created CNode 721 * \param retslots If non-NULL, filled in with the number of slots in created CNode 722 * \param guard The guard value to set 723 * \param guard_size The length of the guard in bits 724 * 725 * This function requires that dest refer to an existing but empty slot. It 726 * allocates memory (using #ram_alloc), and retypes that memory to a new CNode 727 * with the given guard value and size. An intermediate slot is used in order to 728 * set the guard value. 729 */ 730errval_t cnode_create_with_guard(struct capref dest, struct cnoderef *cnoderef, 731 cslot_t slots, cslot_t *retslots, 732 uint64_t guard, uint8_t guard_size) 733{ 734 USER_PANIC("%s: GPT CNodes are deprecated\n", __FUNCTION__); 735} 736 737static errval_t create_ram_descendant(struct capref dest, enum objtype type, 738 uint8_t bits, size_t *retbytes) 739{ 740 errval_t err; 741 if (bits < BASE_PAGE_BITS) { 742 bits = BASE_PAGE_BITS; 743 } 744 745 struct capref ram; 746 err = ram_alloc(&ram, bits); 747 if (err_is_fail(err)) { 748 if (err_no(err) == MM_ERR_NOT_FOUND || 749 err_no(err) == LIB_ERR_RAM_ALLOC_WRONG_SIZE) { 750 return err_push(err, LIB_ERR_RAM_ALLOC_MS_CONSTRAINTS); 751 } 752 return err_push(err, LIB_ERR_RAM_ALLOC); 753 } 754 755 err = cap_retype(dest, ram, 0, type, (1UL << bits), 1); 756 if (err_is_fail(err)) { 757 return err_push(err, LIB_ERR_CAP_RETYPE); 758 } 759 760 err = cap_destroy(ram); 761 if (err_is_fail(err)) { 762 return err_push(err, LIB_ERR_CAP_DESTROY); 763 } 
764 765 if (retbytes != NULL) { 766 *retbytes = 1UL << bits; 767 } 768 769 return SYS_ERR_OK; 770} 771 772static errval_t create_mappable_cap(struct capref dest, enum objtype type, 773 size_t bytes, size_t *retbytes) 774{ 775 if (!type_is_mappable(type)) { 776 return SYS_ERR_DEST_TYPE_INVALID; 777 } 778 779 assert(bytes > 0); 780 uint8_t bits = log2ceil(bytes); 781 assert((1UL << bits) >= bytes); 782 783 return create_ram_descendant(dest, type, bits, retbytes); 784 785} 786 787/** 788 * \brief Create a VNode in newly-allocated memory 789 * 790 * \param dest location to place new VNode cap 791 * \param type VNode type to create 792 * 793 * This function requires that dest refer to an existing but empty slot. 794 * The intermidiate ram cap is destroyed. 795 */ 796errval_t vnode_create(struct capref dest, enum objtype type) 797{ 798 errval_t err; 799 800 struct capref ram; 801 802 size_t objbits_vnode = vnode_objbits(type); 803 err = ram_alloc(&ram, objbits_vnode); 804 if (err_no(err) == LIB_ERR_RAM_ALLOC_WRONG_SIZE && type != ObjType_VNode_ARM_l1) { 805 // can only get 4kB pages, cannot create ARM_l1, and waste 3kB for 806 // ARM_l2 807 err = ram_alloc(&ram, BASE_PAGE_BITS); 808 } 809 if (err_is_fail(err)) { 810 return err_push(err, LIB_ERR_RAM_ALLOC); 811 } 812 813 assert(type_is_vnode(type)); 814 err = cap_retype(dest, ram, 0, type, vnode_objsize(type), 1); 815 if (err_is_fail(err)) { 816 return err_push(err, LIB_ERR_CAP_RETYPE); 817 } 818 819 err = cap_destroy(ram); 820 if (err_is_fail(err)) { 821 return err_push(err, LIB_ERR_CAP_DESTROY); 822 } 823 824 return SYS_ERR_OK; 825} 826 827/** 828 * \brief Create a Frame cap referring to newly-allocated RAM in a given slot 829 * 830 * \param dest Location to place new frame cap 831 * \param bytes Minimum size of frame to create 832 * \param retbytes If non-NULL, filled in with size of created frame 833 * 834 * This function requires that dest refer to an existing but empty slot. 
835 * #ram_alloc is used to allocate memory. After retyping the intermediate 836 * ram cap is destroyed. 837 * 838 * This function will returns a special error code if ram_alloc fails 839 * due to the constrains on the memory server (size of cap or region 840 * of memory). This is to facilitate retrying with different 841 * constraints. 842 */ 843errval_t frame_create(struct capref dest, size_t bytes, size_t *retbytes) 844{ 845 return create_mappable_cap(dest, ObjType_Frame, bytes, retbytes); 846} 847 848/** 849 * \brief Create a Dispatcher in newly-allocated memory 850 * 851 * \param dest location to place new dispatcher cap 852 * 853 * This function requires that dest refer to an existing but empty slot. It does 854 * not map in nor initialise the Dispatcher. 855 * The intermediate ram cap is destroyed. 856 */ 857errval_t dispatcher_create(struct capref dest) 858{ 859 errval_t err; 860 861 struct capref ram; 862 assert(1 << log2ceil(OBJSIZE_DISPATCHER) == OBJSIZE_DISPATCHER); 863 err = ram_alloc(&ram, log2ceil(OBJSIZE_DISPATCHER)); 864 if (err_is_fail(err)) { 865 return err_push(err, LIB_ERR_RAM_ALLOC); 866 } 867 868 err = cap_retype(dest, ram, 0, ObjType_Dispatcher, 0, 1); 869 if (err_is_fail(err)) { 870 return err_push(err, LIB_ERR_CAP_RETYPE); 871 } 872 873 err = cap_destroy(ram); 874 if (err_is_fail(err)) { 875 return err_push(err, LIB_ERR_CAP_DESTROY); 876 } 877 return SYS_ERR_OK; 878} 879 880/** 881 * \brief Create endpoint to caller on current dispatcher. 
882 * 883 * \param buflen Length of incoming LMP buffer, in words 884 * \param retcap Pointer to capref struct, filled-in with location of cap 885 * \param retep Double pointer to LMP endpoint, filled-in with allocated EP 886 */ 887errval_t endpoint_create(size_t buflen, struct capref *retcap, 888 struct lmp_endpoint **retep) 889{ 890 errval_t err = slot_alloc(retcap); 891 if (err_is_fail(err)) { 892 return err_push(err, LIB_ERR_SLOT_ALLOC); 893 } 894 895 return lmp_endpoint_create_in_slot(buflen, *retcap, retep); 896} 897 898 899/** 900 * @brief allocates a memory region for a UMP endpoint 901 * @param cap capability to store the UMP endpoint in 902 * @param bytes size of the endpoint 903 * @param iftype Flounder interface type 904 * @return SYS_ERR_OK on success, errval on failure 905 */ 906errval_t ump_endpoint_create_with_iftype(struct capref dest, size_t bytes, 907 uint16_t iftype) 908{ 909 errval_t err; 910 struct capref tmp; 911 err = slot_alloc(&tmp); 912 if (err_is_fail(err)) { 913 return err; 914 } 915 err = create_mappable_cap(tmp, ObjType_EndPointUMP, bytes, NULL); 916 if (err_is_fail(err)) { 917 return err; 918 } 919 err = cap_mint(dest, tmp, iftype, 0); 920 if (err_is_fail(err)) { 921 return err; 922 } 923 return cap_destroy(tmp); 924} 925 926/** 927 * @brief allocates a memory region for a UMP endpoint 928 * @param cap capability to store the UMP endpoint in 929 * @param bytes size of the endpoint 930 * @return SYS_ERR_OK on success, errval on failure 931 */ 932errval_t ump_endpoint_create(struct capref dest, size_t bytes) 933{ 934 return ump_endpoint_create_with_iftype(dest, bytes, IF_TYPE_DUMMY); 935} 936 937/** 938 * \brief Create a Frame cap referring to newly-allocated RAM in an allocated slot 939 * 940 * \param dest Pointer to capref struct, filled-in with location of new cap 941 * \param bytes Minimum size of frame to create 942 * \param retbytes If non-NULL, filled in with size of created frame 943 */ 944errval_t frame_alloc(struct capref 
*dest, size_t bytes, size_t *retbytes) 945{ 946 errval_t err = slot_alloc(dest); 947 if (err_is_fail(err)) { 948 return err_push(err, LIB_ERR_SLOT_ALLOC); 949 } 950 951 return frame_create(*dest, bytes, retbytes); 952} 953 954/** 955 * \brief Create a DevFrame cap by retyping out of given source PhysAddr cap 956 * 957 * \param dest Pointer to capref struct, filled-in with location of new cap 958 * \param src Cap_info struct for the source PhysAddr cap 959 * \param size_bits Size of created objects as a power of two 960 * (ignored for fixed-size objects) 961 */ 962errval_t devframe_type(struct capref *dest, struct capref src, uint8_t bits) 963{ 964 errval_t err = slot_alloc(dest); 965 if (err_is_fail(err)) { 966 return err_push(err, LIB_ERR_SLOT_ALLOC); 967 } 968 969 return cap_retype(*dest, src, 0, ObjType_DevFrame, 1UL << bits, 1); 970} 971 972/** 973 * \brief Create an ID cap in a newly allocated slot. 974 * 975 * \param dest Pointer to capref struct, filld-in with location of new cap. 976 * 977 * The caller is responsible for revoking the cap after using it. 978 */ 979errval_t idcap_alloc(struct capref *dest) 980{ 981 errval_t err = slot_alloc(dest); 982 983 if (err_is_fail(err)) { 984 return err_push(err, LIB_ERR_SLOT_ALLOC); 985 } 986 987 return idcap_create(*dest); 988} 989 990/** 991 * \brief Create an ID cap in the specified slot. 992 * 993 * \param dest Capref, where ID cap should be created. 994 * 995 * The caller is responsible for revoking the cap after using it. 996 */ 997errval_t idcap_create(struct capref dest) 998{ 999 return cap_create(dest, ObjType_ID, 0); 1000} 1001 1002/** 1003 * \brief Builds a #cnoderef struct from a #capref struct using cap 1004 * identification. 1005 * 1006 * \param cnoder Pointer to a cnoderef struct, fill-in by function. 1007 * \param capr Capref to a CNode capability. 
 */
errval_t cnode_build_cnoderef(struct cnoderef *cnoder, struct capref capr)
{
    // Identify the cap to confirm it really is a CNode.
    struct capability cap;
    errval_t err = cap_direct_identify(capr, &cap);
    if (err_is_fail(err)) {
        return err;
    }

    if (cap.type != ObjType_L1CNode &&
        cap.type != ObjType_L2CNode) {
        return LIB_ERR_NOT_CNODE;
    }

    // Only caprefs whose containing CNode is the root CNode are supported.
    if (!cnodecmp(capr.cnode, cnode_root)) {
        USER_PANIC("cnode_build_cnoderef NYI for non rootcn caprefs");
    }

    cnoder->croot = get_croot_addr(capr);
    // The slot in the root CNode determines the L2 CNode's address prefix.
    cnoder->cnode = capr.slot << L2_CNODE_BITS;
    cnoder->level = CNODE_TYPE_OTHER;

    return SYS_ERR_OK;
}


/// Per-registration state for cap_register_revoke(); freed once the
/// acknowledgement has been sent back to the monitor.
struct cap_notify_st
{
    /// Event Queue node for sending on the monitor binding
    struct event_queue_node qn;

    /// the event closure to be called on an event
    struct event_closure cont;

    /// the capability
    struct capref cap;

    /// the cap addr of the cap
    capaddr_t capaddr;
};


/**
 * \brief Send the cap_revoke_response acknowledgement to the monitor.
 *
 * \param arg the struct cap_notify_st for this registration (owned here;
 *            freed once the send succeeds or permanently fails)
 *
 * Runs while holding the monitor binding's event mutex; re-registers itself
 * if the channel is busy.
 */
static void cap_revoke_ack_sender(void *arg)
{
    struct monitor_binding *mb = get_monitor_binding();
    errval_t err;

    /* Send request to the monitor on our existing binding */
    err = mb->tx_vtbl.cap_revoke_response(mb, NOP_CONT, (uintptr_t)arg);
    if (err_is_ok(err)) {
        event_mutex_unlock(&mb->mutex);
        free(arg);
    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        // channel busy: retry from the waitset when it can send again
        err = mb->register_send(mb, mb->waitset,
                                MKCONT(cap_revoke_ack_sender,arg));
        assert(err_is_ok(err)); // shouldn't fail, as we have the mutex
    } else { // permanent error
        event_mutex_unlock(&mb->mutex);
        free(arg);
    }
}


/**
 * \brief Handler for an incoming cap_revoke_request from the monitor.
 *
 * \param mb  the monitor binding the request arrived on
 * \param cap monitor-supplied cap argument (unused here)
 * \param id  opaque id previously passed to the monitor; interpreted as a
 *            pointer to our struct cap_notify_st
 *
 * Invokes the user's registered closure, then queues the acknowledgement.
 */
static void cap_revoke_request(struct monitor_binding *mb, uintptr_t cap, uintptr_t id)
{
    /* XXX: this trusts the monitor to send us the right ID back */
    struct cap_notify_st *st = (struct cap_notify_st *)id;

    if (st->cont.handler) {
        st->cont.handler(st->cont.arg);
    }

    /* Wait to use the monitor binding */
    event_mutex_enqueue_lock(&mb->mutex, &st->qn,
                             MKCLOSURE(cap_revoke_ack_sender,st));
}


/**
 * \brief Register a closure to be called when the given cap is revoked.
 *
 * \param cap  capability to watch
 * \param cont closure invoked when the monitor announces the revocation
 *
 * Installs the cap_revoke_request handler on the monitor binding and informs
 * the monitor via a blocking RPC. The allocated state is freed on failure,
 * or by cap_revoke_ack_sender() after the notification round-trip completes.
 */
errval_t cap_register_revoke(struct capref cap, struct event_closure cont)
{
    errval_t err;

    struct cap_notify_st *st = calloc(1, sizeof(*st));
    if (st == NULL) {
        return LIB_ERR_MALLOC_FAIL;
    }

    // Install (or re-install) the receive handler for revoke notifications.
    struct monitor_binding *mb = get_monitor_binding();
    mb->rx_vtbl.cap_revoke_request = cap_revoke_request;

    st->cont = cont;
    st->cap = cap;
    st->capaddr = get_cap_addr(cap);

    struct monitor_blocking_binding *mcb = get_monitor_blocking_binding();
    assert(mcb);

    errval_t msgerr;
    err = mcb->rpc_tx_vtbl.cap_needs_revoke_agreement(mcb, cap, (uintptr_t)st,
                                                      &msgerr);
    if (err_is_fail(err)) {
        free(st);
        return err;
    }

    if (err_is_fail(msgerr)) {
        free(st);
        return msgerr;
    }

    return SYS_ERR_OK;
}