1/** 2 * \file 3 * \brief Kernel capability management implementation. 4 */ 5 6/* 7 * Copyright (c) 2007-2012,2015,2016 ETH Zurich. 8 * Copyright (c) 2015, 2016 Hewlett Packard Enterprise Development LP. 9 * All rights reserved. 10 * 11 * This file is distributed under the terms in the attached LICENSE file. 12 * If you do not find this file, copies can be found by writing to: 13 * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group. 14 */ 15 16#include <stdio.h> 17#include <string.h> 18#include <kernel.h> 19#include <barrelfish_kpi/syscalls.h> 20#include <barrelfish_kpi/paging_arch.h> 21#include <barrelfish_kpi/lmp.h> 22#include <offsets.h> 23#include <capabilities.h> 24#include <cap_predicates.h> 25#include <distcaps.h> 26#include <dispatch.h> 27#include <kcb.h> 28#include <paging_kernel_arch.h> 29#include <mdb/mdb.h> 30#include <mdb/mdb_tree.h> 31#include <trace/trace.h> 32#include <trace_definitions/trace_defs.h> 33#include <wakeup.h> 34#include <bitmacros.h> 35 36// XXX: remove 37#pragma GCC diagnostic ignored "-Wsuggest-attribute=noreturn" 38 39#ifdef TRACE_PMEM_CAPS 40uint64_t trace_types_enabled = TRACE_TYPES_ENABLED_INITIAL; 41genpaddr_t TRACE_PMEM_BEGIN = TRACE_PMEM_BEGIN_INITIAL; 42gensize_t TRACE_PMEM_SIZE = TRACE_PMEM_SIZE_INITIAL; 43 44void caps_trace_ctrl(uint64_t types, genpaddr_t start, gensize_t size) 45{ 46 if (types) { 47 trace_types_enabled = types; 48 TRACE_PMEM_BEGIN = start; 49 TRACE_PMEM_SIZE = size; 50 } else { 51 trace_types_enabled = 0; 52 } 53} 54#endif 55 56struct capability monitor_ep; 57 58STATIC_ASSERT(50 == ObjType_Num, "Knowledge of all cap types"); 59int sprint_cap(char *buf, size_t len, struct capability *cap) 60{ 61 char *mappingtype; 62 switch (cap->type) { 63 case ObjType_PhysAddr: 64 return snprintf(buf, len, 65 "physical address range cap (0x%" PRIxGENPADDR ":0x%" PRIxGENSIZE ")", 66 cap->u.physaddr.base, cap->u.physaddr.bytes); 67 68 case ObjType_RAM: 69 return snprintf(buf, len, "RAM cap (0x%" 
PRIxGENPADDR ":0x%" PRIxGENSIZE ")", 70 cap->u.ram.base, cap->u.ram.bytes); 71 72 case ObjType_L1CNode: { 73 int ret = snprintf(buf, len, "L1 CNode cap " 74 "(base=%#"PRIxGENPADDR", allocated bytes %#"PRIxGENSIZE 75 ", rights mask %#"PRIxCAPRIGHTS")", 76 get_address(cap), get_size(cap), 77 cap->u.l1cnode.rightsmask); 78 return ret; 79 } 80 81 case ObjType_L2CNode: { 82 int ret = snprintf(buf, len, "L2 CNode cap " 83 "(base=%#"PRIxGENPADDR", rights mask %#"PRIxCAPRIGHTS")", 84 get_address(cap), cap->u.l1cnode.rightsmask); 85 return ret; 86 } 87 88 case ObjType_Dispatcher: 89 return snprintf(buf, len, "Dispatcher cap %p", cap->u.dispatcher.dcb); 90 91 case ObjType_Frame: 92 return snprintf(buf, len, "Frame cap (0x%" PRIxGENPADDR ":0x%" PRIxGENSIZE ")", 93 cap->u.frame.base, cap->u.frame.bytes); 94 95 case ObjType_DevFrame: 96 return snprintf(buf, len, "Device Frame cap (0x%" PRIxGENPADDR ":0x%" PRIxGENSIZE ")", 97 cap->u.devframe.base, cap->u.devframe.bytes); 98 99 case ObjType_VNode_ARM_l1: 100 return snprintf(buf, len, "ARM L1 table at 0x%" PRIxGENPADDR, 101 cap->u.vnode_arm_l1.base); 102 103 case ObjType_VNode_ARM_l2: 104 return snprintf(buf, len, "ARM L2 table at 0x%" PRIxGENPADDR, 105 cap->u.vnode_arm_l2.base); 106 107 case ObjType_VNode_AARCH64_l0: 108 return snprintf(buf, len, "AARCH64 L0 table at 0x%" PRIxGENPADDR, 109 cap->u.vnode_aarch64_l0.base); 110 111 case ObjType_VNode_AARCH64_l1: 112 return snprintf(buf, len, "AARCH64 L1 table at 0x%" PRIxGENPADDR, 113 cap->u.vnode_aarch64_l1.base); 114 115 case ObjType_VNode_AARCH64_l2: 116 return snprintf(buf, len, "AARCH64 L2 table at 0x%" PRIxGENPADDR, 117 cap->u.vnode_aarch64_l2.base); 118 119 case ObjType_VNode_AARCH64_l3: 120 return snprintf(buf, len, "AARCH64 L3 table at 0x%" PRIxGENPADDR, 121 cap->u.vnode_aarch64_l3.base); 122 123 case ObjType_VNode_x86_32_ptable: 124 return snprintf(buf, len, "x86_32 Page table at 0x%" PRIxGENPADDR, 125 cap->u.vnode_x86_32_ptable.base); 126 127 case 
ObjType_VNode_x86_32_pdir: 128 return snprintf(buf, len, "x86_32 Page directory at 0x%" PRIxGENPADDR, 129 cap->u.vnode_x86_32_pdir.base); 130 131 case ObjType_VNode_x86_32_pdpt: 132 return snprintf(buf, len, "x86_32 PDPT at 0x%" PRIxGENPADDR, 133 cap->u.vnode_x86_32_pdpt.base); 134 135 case ObjType_VNode_x86_64_ptable: 136 return snprintf(buf, len, "x86_64 Page table at 0x%" PRIxGENPADDR, 137 cap->u.vnode_x86_64_ptable.base); 138 139 case ObjType_VNode_x86_64_pdir: 140 return snprintf(buf, len, "x86_64 Page directory at 0x%" PRIxGENPADDR, 141 cap->u.vnode_x86_64_pdir.base); 142 143 case ObjType_VNode_x86_64_pdpt: 144 return snprintf(buf, len, "x86_64 PDPT at 0x%" PRIxGENPADDR, 145 cap->u.vnode_x86_64_pdpt.base); 146 147 case ObjType_VNode_x86_64_pml4: 148 return snprintf(buf, len, "x86_64 PML4 at 0x%" PRIxGENPADDR, 149 cap->u.vnode_x86_64_pml4.base); 150 151 case ObjType_Frame_Mapping: 152 mappingtype = "Frame"; 153 goto ObjType_Mapping; 154 case ObjType_DevFrame_Mapping: 155 mappingtype = "DevFrame"; 156 goto ObjType_Mapping; 157 158 case ObjType_VNode_x86_64_pml4_Mapping: 159 mappingtype = "x86_64 PML4"; 160 goto ObjType_Mapping; 161 case ObjType_VNode_x86_64_pdpt_Mapping: 162 mappingtype = "x86_64 PDPT"; 163 goto ObjType_Mapping; 164 case ObjType_VNode_x86_64_pdir_Mapping: 165 mappingtype = "x86_64 PDIR"; 166 goto ObjType_Mapping; 167 case ObjType_VNode_x86_64_ptable_Mapping: 168 mappingtype = "x86_64 PTABLE"; 169 goto ObjType_Mapping; 170 171 case ObjType_VNode_x86_32_pdpt_Mapping: 172 mappingtype = "x86_32 PDPT"; 173 goto ObjType_Mapping; 174 case ObjType_VNode_x86_32_pdir_Mapping: 175 mappingtype = "x86_32 PDIR"; 176 goto ObjType_Mapping; 177 case ObjType_VNode_x86_32_ptable_Mapping: 178 mappingtype = "x86_32 PTABLE"; 179 goto ObjType_Mapping; 180 181 case ObjType_VNode_ARM_l1_Mapping: 182 mappingtype = "ARM l1"; 183 goto ObjType_Mapping; 184 case ObjType_VNode_ARM_l2_Mapping: 185 mappingtype = "ARM l2"; 186 goto ObjType_Mapping; 187 188 case 
ObjType_VNode_AARCH64_l0_Mapping: 189 mappingtype = "AARCH64 l0"; 190 goto ObjType_Mapping; 191 case ObjType_VNode_AARCH64_l1_Mapping: 192 mappingtype = "AARCH64 l1"; 193 goto ObjType_Mapping; 194 case ObjType_VNode_AARCH64_l2_Mapping: 195 mappingtype = "AARCH64 l2"; 196 goto ObjType_Mapping; 197 case ObjType_VNode_AARCH64_l3_Mapping: 198 mappingtype = "AARCH64 l3"; 199 goto ObjType_Mapping; 200 201ObjType_Mapping: 202 return snprintf(buf, len, "%s Mapping (%s cap @%p, " 203 "ptable cap @0x%p, entry=%hu, pte_count=%hu)", 204 mappingtype, mappingtype, 205 cap->u.frame_mapping.cap, 206 cap->u.frame_mapping.ptable, 207 cap->u.frame_mapping.entry, 208 cap->u.frame_mapping.pte_count); 209 210 case ObjType_IRQTable: 211 return snprintf(buf, len, "IRQTable cap"); 212 213 case ObjType_IRQDest: 214 return snprintf(buf, len, "IRQDest cap (vec: %"PRIu64", cpu: %"PRIu64")", 215 cap->u.irqdest.vector, cap->u.irqdest.cpu); 216 217 case ObjType_EndPoint: 218 return snprintf(buf, len, "EndPoint cap (disp %p offset 0x%" PRIxLVADDR ")", 219 cap->u.endpoint.listener, cap->u.endpoint.epoffset); 220 221 case ObjType_IO: 222 return snprintf(buf, len, "IO cap (0x%hx-0x%hx)", 223 cap->u.io.start, cap->u.io.end); 224 225 case ObjType_Kernel: 226 return snprintf(buf, len, "Kernel cap"); 227 228 case ObjType_KernelControlBlock: 229 return snprintf(buf, len, "Kernel control block"); 230 231 case ObjType_ID: 232 return snprintf(buf, len, "ID capability (coreid 0x%" PRIxCOREID 233 " core_local_id 0x%" PRIx32 ")", cap->u.id.coreid, 234 cap->u.id.core_local_id); 235 case ObjType_ProcessManager: 236 return snprintf(buf, len, "Process manager capability"); 237 238 case ObjType_Domain: 239 return snprintf(buf, len, "Domain capability (coreid 0x%" PRIxCOREID 240 " core_local_id 0x%" PRIx32 ")", cap->u.domain.coreid, 241 cap->u.domain.core_local_id); 242 243 case ObjType_PerfMon: 244 return snprintf(buf, len, "PerfMon cap"); 245 246 case ObjType_Null: 247 return snprintf(buf, len, "Null capability 
(empty slot)"); 248 249 case ObjType_IPI: 250 return snprintf(buf, len, "IPI cap"); 251 252 default: 253 return snprintf(buf, len, "UNKNOWN TYPE! (%d)", cap->type); 254 } 255} 256 257void caps_trace(const char *func, int line, struct cte *cte, const char *msg) 258{ 259 char cap_buf[512]; 260 sprint_cap(cap_buf, 512, &cte->cap); 261 262 char disp_buf[64]; 263 if (dcb_current) { 264 dispatcher_handle_t handle = dcb_current->disp; 265 struct dispatcher_shared_generic *disp = 266 get_dispatcher_shared_generic(handle); 267 snprintf(disp_buf, 64, "from %.*s", DISP_NAME_LEN, disp->name); 268 } 269 else { 270 strcpy(disp_buf, "no disp"); 271 } 272 273 printk(LOG_WARN, "%s: %s:%d: %s %p %s" 274 " (owner:%" PRIuCOREID ", rc:%d/ra:%d/rd:%d)\n", 275 disp_buf, func, line, (msg ? : ""), cte, cap_buf, cte->mdbnode.owner, 276 cte->mdbnode.remote_copies, cte->mdbnode.remote_ancs, 277 cte->mdbnode.remote_descs); 278} 279 280/** 281 * ID capability core_local_id counter. 282 */ 283static uint32_t id_cap_counter = 1; 284 285/** 286 * Domain capability core_local_id counter. 287 */ 288static uint32_t domain_cap_counter = 1; 289 290/** 291 * Sets #dest equal to #src 292 * 293 * #dest cannot be in use. 294 */ 295static errval_t set_cap(struct capability *dest, struct capability *src) 296{ 297 /* Parameter checking */ 298 assert(src != NULL); 299 assert(dest != NULL); 300 301 debug(SUBSYS_CAPS, "Copying cap from %#"PRIxLPADDR" to %#"PRIxLPADDR"\n", 302 mem_to_local_phys((lvaddr_t)cte_for_cap(src)), 303 mem_to_local_phys((lvaddr_t)cte_for_cap(dest))); 304 305 // Reserved object bits must always be greater/equal to actual object size 306 assert((1UL << OBJBITS_CTE) >= sizeof(struct cte)); 307 308 // Cannot overwrite an already existing cap 309 if (dest->type != ObjType_Null) { 310 return SYS_ERR_SLOT_IN_USE; 311 } 312 313 memcpy(dest, src, sizeof(struct capability)); 314 return SYS_ERR_OK; 315} 316 317/** 318 * \brief Determine how many objects can be created in a specified region. 
319 * 320 * This function computes the number of objects that can be created by a call 321 * to caps_create(). 322 * 323 * \param type Type of objects to create. 324 * \param srcsize Size of memory area in bytes 325 * \param objsize For variable-sized objects, size multiplier 326 * 327 * \return Number of objects to be created, or zero on error 328 */ 329 330// If you create more capability types you need to deal with them 331// in the table below. 332STATIC_ASSERT(50 == ObjType_Num, "Knowledge of all cap types"); 333static size_t caps_max_numobjs(enum objtype type, gensize_t srcsize, gensize_t objsize) 334{ 335 switch(type) { 336 case ObjType_PhysAddr: 337 case ObjType_RAM: 338 case ObjType_Frame: 339 case ObjType_DevFrame: 340 if (objsize > srcsize) { 341 return 0; 342 } else { 343 return srcsize / objsize; 344 } 345 346 case ObjType_L1CNode: 347 if (srcsize < OBJSIZE_L2CNODE || objsize < OBJSIZE_L2CNODE) { 348 // disallow L1 CNode to be smaller than 16kB. 349 return 0; 350 } else { 351 return srcsize / objsize; 352 } 353 354 case ObjType_L2CNode: 355 if (srcsize < OBJSIZE_L2CNODE || objsize != OBJSIZE_L2CNODE) { 356 // disallow L2 CNode creation if source too small or objsize wrong 357 return 0; 358 } else { 359 return srcsize / objsize; 360 } 361 362 363 case ObjType_VNode_x86_64_pml4: 364 case ObjType_VNode_x86_64_pdpt: 365 case ObjType_VNode_x86_64_pdir: 366 case ObjType_VNode_x86_64_ptable: 367 case ObjType_VNode_x86_32_pdpt: 368 case ObjType_VNode_x86_32_pdir: 369 case ObjType_VNode_x86_32_ptable: 370 case ObjType_VNode_ARM_l1: 371 case ObjType_VNode_ARM_l2: 372 case ObjType_VNode_AARCH64_l0: 373 case ObjType_VNode_AARCH64_l1: 374 case ObjType_VNode_AARCH64_l2: 375 case ObjType_VNode_AARCH64_l3: 376 { 377 if (srcsize < vnode_objsize(type)) { 378 return 0; 379 } else { 380 return srcsize / vnode_objsize(type); 381 } 382 } 383 384 case ObjType_Dispatcher: 385 if (srcsize < OBJSIZE_DISPATCHER) { 386 return 0; 387 } else { 388 return srcsize / 
OBJSIZE_DISPATCHER; 389 } 390 391 case ObjType_KernelControlBlock: 392 if (srcsize < OBJSIZE_KCB) { 393 return 0; 394 } else { 395 return srcsize / OBJSIZE_KCB; 396 } 397 398 case ObjType_Domain: 399 return L2_CNODE_SLOTS; 400 401 case ObjType_Kernel: 402 case ObjType_IRQTable: 403 case ObjType_IRQDest: 404 case ObjType_IRQSrc: 405 case ObjType_IO: 406 case ObjType_EndPoint: 407 case ObjType_ID: 408 case ObjType_Notify_IPI: 409 case ObjType_PerfMon: 410 case ObjType_IPI: 411 case ObjType_ProcessManager: 412 case ObjType_VNode_ARM_l1_Mapping: 413 case ObjType_VNode_ARM_l2_Mapping: 414 case ObjType_VNode_AARCH64_l0_Mapping: 415 case ObjType_VNode_AARCH64_l1_Mapping: 416 case ObjType_VNode_AARCH64_l2_Mapping: 417 case ObjType_VNode_AARCH64_l3_Mapping: 418 case ObjType_VNode_x86_64_pml4_Mapping: 419 case ObjType_VNode_x86_64_pdpt_Mapping: 420 case ObjType_VNode_x86_64_pdir_Mapping: 421 case ObjType_VNode_x86_64_ptable_Mapping: 422 case ObjType_VNode_x86_32_pdpt_Mapping: 423 case ObjType_VNode_x86_32_pdir_Mapping: 424 case ObjType_VNode_x86_32_ptable_Mapping: 425 case ObjType_DevFrame_Mapping: 426 case ObjType_Frame_Mapping: 427 return 1; 428 429 default: 430 panic("invalid type"); 431 return 0; 432 } 433} 434 435/** 436 * \brief Initialize the objects for which local caps are about to be created. 437 * 438 * For the meaning of the parameters, see the 'caps_create' function. 439 */ 440STATIC_ASSERT(50 == ObjType_Num, "Knowledge of all cap types"); 441 442static errval_t caps_zero_objects(enum objtype type, lpaddr_t lpaddr, 443 gensize_t objsize, size_t count) 444{ 445 assert(type < ObjType_Num); 446 447 // Virtual address of the memory the kernel object resides in 448 // XXX: A better of doing this, 449 // this is creating caps that the kernel cannot address. 450 // It assumes that the cap is not of the type which will have to zeroed out. 
    lvaddr_t lvaddr;
    if(lpaddr < PADDR_SPACE_LIMIT) {
        lvaddr = local_phys_to_mem(lpaddr);
    } else {
        // Memory above the kernel-addressable window: no virtual mapping.
        // NOTE(review): lvaddr == 0 here means any memset below would write to
        // address 0 — presumably callers never request zeroing for such memory;
        // TODO confirm.
        lvaddr = 0;
    }

    switch (type) {

    case ObjType_Frame:
        debug(SUBSYS_CAPS, "Frame: zeroing %zu bytes @%#"PRIxLPADDR"\n",
              (size_t)objsize * count, lpaddr);
        TRACE(KERNEL, BZERO, 1);
        memset((void*)lvaddr, 0, objsize * count);
        TRACE(KERNEL, BZERO, 0);
        break;

    case ObjType_L1CNode:
    case ObjType_L2CNode:
        debug(SUBSYS_CAPS, "L%dCNode: zeroing %zu bytes @%#"PRIxLPADDR"\n",
              type == ObjType_L1CNode ? 1 : 2, (size_t)objsize * count,
              lpaddr);
        TRACE(KERNEL, BZERO, 1);
        memset((void*)lvaddr, 0, objsize * count);
        TRACE(KERNEL, BZERO, 0);
        break;

    case ObjType_VNode_ARM_l1:
    case ObjType_VNode_ARM_l2:
    case ObjType_VNode_AARCH64_l0:
    case ObjType_VNode_AARCH64_l1:
    case ObjType_VNode_AARCH64_l2:
    case ObjType_VNode_AARCH64_l3:
    case ObjType_VNode_x86_32_ptable:
    case ObjType_VNode_x86_32_pdir:
    case ObjType_VNode_x86_32_pdpt:
    case ObjType_VNode_x86_64_ptable:
    case ObjType_VNode_x86_64_pdir:
    case ObjType_VNode_x86_64_pdpt:
    case ObjType_VNode_x86_64_pml4:
        // objsize is size of VNode; but not given as such by the caller, so
        // derive it from the type.
        objsize = vnode_objsize(type);
        debug(SUBSYS_CAPS, "VNode: zeroing %zu bytes @%#"PRIxLPADDR"\n",
              (size_t)objsize * count, lpaddr);
        TRACE(KERNEL, BZERO, 1);
        memset((void*)lvaddr, 0, objsize * count);
        TRACE(KERNEL, BZERO, 0);
        break;

    case ObjType_Dispatcher:
        debug(SUBSYS_CAPS, "Dispatcher: zeroing %zu bytes @%#"PRIxLPADDR"\n",
              ((size_t) OBJSIZE_DISPATCHER) * count, lpaddr);
        TRACE(KERNEL, BZERO, 1);
        memset((void*)lvaddr, 0, OBJSIZE_DISPATCHER * count);
        TRACE(KERNEL, BZERO, 0);
        break;

    case ObjType_KernelControlBlock:
        debug(SUBSYS_CAPS, "KCB: zeroing %zu bytes @%#"PRIxLPADDR"\n",
              ((size_t) OBJSIZE_KCB) * count, lpaddr);
        TRACE(KERNEL, BZERO, 1);
        memset((void*)lvaddr, 0, OBJSIZE_KCB * count);
        TRACE(KERNEL, BZERO, 0);
        break;

    default:
        // Remaining types carry no backing memory that needs clearing.
        debug(SUBSYS_CAPS, "Not zeroing %zu bytes @%#"PRIxLPADDR" for type %d\n",
              (size_t)objsize * count, lpaddr, (int)type);
        break;

    }

    return SYS_ERR_OK;
}

/**
 * \brief Create capabilities to kernel objects.
 *
 * This function creates 'count' kernel objects of 'type' into the memory
 * area, based at 'addr' and of size 'objsize'. For each created kernel
 * object, a capability is created to it and put consecutively into the array
 * of CTEs pointed to by 'caps'. The array needs to have the appropriate size
 * to hold all created caps. Some kernel objects can have a variable size. In
 * that case, 'objsize' should be non-zero and give the size multiplier.
 *
 * \param type      Type of objects to create.
 * \param lpaddr    Base address in the local address space.
 * \param size      Size of memory area as bytes.
 * \param objsize   For variable-sized objects, size in bytes.
 * \param count     Number of objects to be created
 *                  (count <= caps_max_numobjs(type, size, objsize))
 * \param dest_caps Pointer to array of CTEs to hold created caps.
 *
 * \return Error code
 */
// If you create more capability types you need to deal with them
// in the table below.
STATIC_ASSERT(50 == ObjType_Num, "Knowledge of all cap types");

static errval_t caps_create(enum objtype type, lpaddr_t lpaddr, gensize_t size,
                            gensize_t objsize, size_t count, coreid_t owner,
                            struct cte *dest_caps)
{
    errval_t err;

    /* Parameter checking */
    assert(dest_caps != NULL);
    assert(type != ObjType_Null);
    assert(type < ObjType_Num);
    assert(count > 0);
    // objsize is 0 for non-sized types (e.g. VNodes)
    // TODO cleanup semantics for type == CNode
    //assert(objsize % BASE_PAGE_SIZE == 0);
    assert(!type_is_mapping(type));

    genpaddr_t genpaddr = local_phys_to_gen_phys(lpaddr);

    debug(SUBSYS_CAPS, "creating caps for %#"PRIxGENPADDR
                       ", %" PRIuGENSIZE " bytes, objsize=%"PRIuGENSIZE
                       ", count=%zu, owner=%d, type=%d\n",
          genpaddr, size, objsize, count, (int)owner, (int)type);

    // Virtual address of the memory the kernel object resides in
    // XXX: A better of doing this,
    // this is creating caps that the kernel cannot address.
    // It assumes that the cap is not of the type which will have to zeroed out.
    lvaddr_t lvaddr;
    if(lpaddr < PADDR_SPACE_LIMIT) {
        lvaddr = local_phys_to_mem(lpaddr);
    } else {
        lvaddr = 0;
    }

    /* Initialize the created capability */
    struct capability temp_cap;
    memset(&temp_cap, 0, sizeof(struct capability));
    temp_cap.type = type;
    // XXX: Handle rights!
    temp_cap.rights = CAPRIGHTS_ALLRIGHTS;

    debug(SUBSYS_CAPS, "owner = %d, my_core_id = %d\n", owner, my_core_id);
    if (owner == my_core_id) {
        // If we're creating new local objects, they need to be cleared
        err = caps_zero_objects(type, lpaddr, objsize, count);
        if (err_is_fail(err)) {
            return err;
        }
    }

    // dest_i tracks how many slots were filled: used both to revert on
    // failure and to stamp the owner on success.
    size_t dest_i = 0;
    err = SYS_ERR_OK;

    /* Set the type specific fields and insert into #dest_caps */
    switch(type) {
    case ObjType_Frame:
        for(dest_i = 0; dest_i < count; dest_i++) {
            // Initialize type specific fields
            temp_cap.u.frame.base = genpaddr + dest_i * objsize;
            temp_cap.u.frame.bytes = objsize;
            // Frames must be page-aligned in size.
            assert((get_size(&temp_cap) & BASE_PAGE_MASK) == 0);
            // Insert the capability
            err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
            if (err_is_fail(err)) {
                break;
            }
        }
        break;

    case ObjType_PhysAddr:
        for(dest_i = 0; dest_i < count; dest_i++) {
            // Initialize type specific fields
            temp_cap.u.physaddr.base = genpaddr + dest_i * objsize;
            temp_cap.u.physaddr.bytes = objsize;
            // Insert the capability
            err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
            if (err_is_fail(err)) {
                break;
            }
        }
        break;

    case ObjType_RAM:
        for(dest_i = 0; dest_i < count; dest_i++) {
            // Initialize type specific fields
            temp_cap.u.ram.base = genpaddr + dest_i * objsize;
            temp_cap.u.ram.bytes = objsize;
            // Insert the capabilities
            err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
            if (err_is_fail(err)) {
                break;
            }
        }
        break;

    case ObjType_DevFrame:
        for(dest_i = 0; dest_i < count; dest_i++) {
            // Initialize type specific fields
            temp_cap.u.devframe.base = genpaddr + dest_i * objsize;
            temp_cap.u.devframe.bytes = objsize;
            // Insert the capabilities
            err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
            if (err_is_fail(err)) {
                break;
            }
        }
        break;

    case ObjType_L1CNode:
        for (dest_i = 0; dest_i < count; dest_i++) {
            assert(objsize >= OBJSIZE_L2CNODE);
            assert(objsize % OBJSIZE_L2CNODE == 0);
            // CNode caps store the local physical address of their slots.
            temp_cap.u.l1cnode.cnode = lpaddr + dest_i * objsize;
            temp_cap.u.l1cnode.allocated_bytes = objsize;
            // XXX: implement CNode cap rights
            temp_cap.u.l1cnode.rightsmask = CAPRIGHTS_ALLRIGHTS;
            err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
            if (err_is_fail(err)) {
                break;
            }
        }
        break;

    case ObjType_L2CNode:
        for (dest_i = 0; dest_i < count; dest_i++) {
            temp_cap.u.l2cnode.cnode = lpaddr + dest_i * objsize;
            // XXX: implement CNode cap rights
            temp_cap.u.l2cnode.rightsmask = CAPRIGHTS_ALLRIGHTS;
            err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
            if (err_is_fail(err)) {
                break;
            }
        }
        break;

    case ObjType_VNode_ARM_l1:
    {
        size_t objsize_vnode = vnode_objsize(type);

        for(dest_i = 0; dest_i < count; dest_i++) {
            // Initialize type specific fields
            temp_cap.u.vnode_arm_l1.base =
                genpaddr + dest_i * objsize_vnode;

            // Insert the capability
            err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
            if (err_is_fail(err)) {
                break;
            }
        }

        break;
    }

    case ObjType_VNode_ARM_l2:
    {
        size_t objsize_vnode = vnode_objsize(type);

        for(dest_i = 0; dest_i < count; dest_i++) {
            // Initialize type specific fields
            temp_cap.u.vnode_arm_l2.base =
                genpaddr + dest_i * objsize_vnode;

            // Insert the capability
            err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
            if (err_is_fail(err)) {
                break;
            }
        }
        break;
    }

    case ObjType_VNode_AARCH64_l0:
    {
        size_t objsize_vnode = vnode_objsize(type);

        for(dest_i = 0; dest_i < count; dest_i++) {
            // Initialize type specific fields
            temp_cap.u.vnode_aarch64_l0.base =
                genpaddr + dest_i * objsize_vnode;

            // Insert the capability
            err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
            if (err_is_fail(err)) {
                break;
            }
        }

        break;
    }

    case ObjType_VNode_AARCH64_l1:
    {
        size_t objsize_vnode = vnode_objsize(type);

        for(dest_i = 0; dest_i < count; dest_i++) {
            // Initialize type specific fields
            temp_cap.u.vnode_aarch64_l1.base =
                genpaddr + dest_i * objsize_vnode;

            // Insert the capability
            err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
            if (err_is_fail(err)) {
                break;
            }
        }

        break;
    }

    case ObjType_VNode_AARCH64_l2:
    {
        size_t objsize_vnode = vnode_objsize(type);

        for(dest_i = 0; dest_i < count; dest_i++) {
            // Initialize type specific fields
            temp_cap.u.vnode_aarch64_l2.base =
                genpaddr + dest_i * objsize_vnode;

            // Insert the capability
            err = set_cap(&dest_caps[dest_i].cap, &temp_cap);

            if (err_is_fail(err)) {
                break;
            }
        }
        break;
    }

    case ObjType_VNode_AARCH64_l3:
    {
        size_t objsize_vnode = vnode_objsize(type);

        for(dest_i = 0; dest_i < count; dest_i++) {
            // Initialize type specific fields
            temp_cap.u.vnode_aarch64_l3.base =
                genpaddr + dest_i * objsize_vnode;

            // Insert the capability
            err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
            if (err_is_fail(err)) {
                break;
            }
        }
        break;
    }

    case ObjType_VNode_x86_32_ptable:
    {
        size_t objsize_vnode = vnode_objsize(type);

        for(dest_i = 0; dest_i < count; dest_i++) {
            // Initialize type specific fields
            temp_cap.u.vnode_x86_32_ptable.base =
                genpaddr + dest_i * objsize_vnode;

            // Insert the capability
            err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
            if (err_is_fail(err)) {
                break;
            }
        }
        break;
    }

    case ObjType_VNode_x86_32_pdir:
    {
        size_t objsize_vnode = vnode_objsize(type);

        for(dest_i = 0; dest_i < count; dest_i++) {
            // Initialize type specific fields
            temp_cap.u.vnode_x86_32_pdir.base =
                genpaddr + dest_i * objsize_vnode;

#if defined(__i386__) && !defined(CONFIG_PAE)
            // Make it a good PDE by inserting kernel/mem VSpaces
            lpaddr = gen_phys_to_local_phys(temp_cap.u.vnode_x86_32_pdir.base);
            paging_x86_32_make_good_pdir(lpaddr);
#endif

            // Insert the capability
            err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
            if (err_is_fail(err)) {
                break;
            }
        }
        break;
    }

    case ObjType_VNode_x86_32_pdpt:
    {
        size_t objsize_vnode = vnode_objsize(type);

        for(dest_i = 0; dest_i < count; dest_i++) {
            // Initialize type specific fields
            // NOTE(review): this writes u.vnode_x86_32_pdir.base in the pdpt
            // case — presumably harmless because the union members overlay
            // identically, but it looks like a copy-paste; confirm and use
            // u.vnode_x86_32_pdpt.base.
            temp_cap.u.vnode_x86_32_pdir.base =
                genpaddr + dest_i * objsize_vnode;

#if defined(__i386__) && defined(CONFIG_PAE)
            // Make it a good PDPTE by inserting kernel/mem VSpaces
            lpaddr_t var =
                gen_phys_to_local_phys(temp_cap.u.vnode_x86_32_pdpt.base);
            paging_x86_32_make_good_pdpte(var);
#endif

            // Insert the capability
            err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
            if (err_is_fail(err)) {
                break;
            }
        }
        break;
    }

    case ObjType_VNode_x86_64_ptable:
    {
        size_t objsize_vnode = vnode_objsize(type);

        for(dest_i = 0; dest_i < count; dest_i++) {
            // Initialize type specific fields
            temp_cap.u.vnode_x86_64_ptable.base =
                genpaddr + dest_i * objsize_vnode;

            // Insert the capability
            err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
            if (err_is_fail(err)) {
                break;
            }
        }
        break;
    }

    case ObjType_VNode_x86_64_pdir:
    {
        size_t objsize_vnode = vnode_objsize(type);

        for(dest_i = 0; dest_i < count; dest_i++) {
            // Initialize type specific fields
            temp_cap.u.vnode_x86_64_pdir.base =
                genpaddr + dest_i * objsize_vnode;

            // Insert the capability
            err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
            if (err_is_fail(err)) {
                break;
            }
        }
        break;
    }

    case ObjType_VNode_x86_64_pdpt:
    {
        size_t objsize_vnode = vnode_objsize(type);

        for(dest_i = 0; dest_i < count; dest_i++) {
            // Initialize type specific fields
            temp_cap.u.vnode_x86_64_pdpt.base =
                genpaddr + dest_i * objsize_vnode;

            // Insert the capability
            err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
            if (err_is_fail(err)) {
                break;
            }
        }
        break;
    }

    case ObjType_VNode_x86_64_pml4:
    {
        size_t objsize_vnode = vnode_objsize(type);

        for(dest_i = 0; dest_i < count; dest_i++) {
            // Initialize type specific fields
            temp_cap.u.vnode_x86_64_pml4.base =
                genpaddr + dest_i * objsize_vnode;

#if defined(__x86_64__) || defined(__k1om__)
            // Make it a good PML4 by inserting kernel/mem VSpaces
            lpaddr_t var = gen_phys_to_local_phys(get_address(&temp_cap));
            paging_x86_64_make_good_pml4(var);
#endif

            // Insert the capability
            err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
            if (err_is_fail(err)) {
                break;
            }
        }

        break;
    }

    case ObjType_Dispatcher:
        assert(OBJSIZE_DISPATCHER >= sizeof(struct dcb));

        for(dest_i = 0; dest_i < count; dest_i++) {
            // Initialize type specific fields
            temp_cap.u.dispatcher.dcb = (struct dcb *)
                (lvaddr + dest_i * OBJSIZE_DISPATCHER);
            // Insert the capability
            err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
            if (err_is_fail(err)) {
                break;
            }
        }
        break;

    case ObjType_ID:
        // ID type does not refer to a kernel object
        assert(lpaddr  == 0);
        assert(size    == 0);
        assert(objsize == 0);
        assert(count   == 1);

        // Prevent wrap around
        if (id_cap_counter >= UINT32_MAX) {
            return SYS_ERR_ID_SPACE_EXHAUSTED;
        }

        // Generate a new ID, core_local_id monotonically increases
        temp_cap.u.id.coreid = my_core_id;
        temp_cap.u.id.core_local_id = id_cap_counter++;

        // Insert the capability
        // NOTE(review): dest_i stays 0 here, so on success the owner-stamping
        // loop below never runs for ID caps — confirm whether that is intended.
        err = set_cap(&dest_caps->cap, &temp_cap);
        break;

    case ObjType_Domain:
        // Domain type does not refer to a kernel object
        assert(lpaddr  == 0);
        assert(size    == 0);
        assert(objsize == 0);
        assert(count   <= L2_CNODE_SLOTS);

        // Prevent wrap around
        if (domain_cap_counter + count >= UINT32_MAX) {
            return SYS_ERR_DOMAIN_SPACE_EXHAUSTED;
        }

        for(size_t i = 0; i < count; i++) {
            // Initialize type specific fields
            temp_cap.u.domain.coreid = my_core_id;
            temp_cap.u.domain.core_local_id = domain_cap_counter++;
            // Insert the capability
            // NOTE(review): loop uses i, not dest_i — so the revert-on-failure
            // and owner-stamping loops below see dest_i == 0; verify.
            err = set_cap(&dest_caps[i].cap, &temp_cap);
            if (err_is_fail(err)) {
                break;
            }
        }
        break;

    case ObjType_IO:
        // Full x86 I/O port range.
        temp_cap.u.io.start = 0;
        temp_cap.u.io.end   = 65535;
        /* fall through */

    case ObjType_IRQSrc:
        /* Caller has to set vec_start and vec_end */
    case ObjType_Kernel:
    case ObjType_IPI:
    case ObjType_IRQTable:
    case ObjType_IRQDest:
    case ObjType_EndPoint:
    case ObjType_Notify_IPI:
    case ObjType_PerfMon:
    case ObjType_ProcessManager:
        // These types do not refer to a kernel object
        assert(lpaddr  == 0);
        assert(size    == 0);
        assert(objsize == 0);
        assert(count   == 1);

        // Insert the capability
        err = set_cap(&dest_caps->cap, &temp_cap);
        if (err_is_ok(err)) {
            dest_i = 1;
        }
        break;

    case ObjType_KernelControlBlock:
        assert(OBJSIZE_KCB >= sizeof(struct kcb));

        for(size_t i = 0; i < count; i++) {
            // Initialize type specific fields
            temp_cap.u.kernelcontrolblock.kcb = (struct kcb *)
                (lvaddr + i * OBJSIZE_KCB);
            // Insert the capability
            err = set_cap(&dest_caps[i].cap, &temp_cap);
            if (err_is_fail(err)) {
                return err;
            }
        }
        // NOTE(review): early return skips the owner-stamping loop below, so
        // KCB caps keep owner == 0 — confirm whether that is intended.
        return SYS_ERR_OK;

    default:
        panic("Unhandled capability type or capability of this type cannot"
              " be created");
    }

    if (err_is_fail(err)) {
        // Revert the partially initialized caps to zero
        for (size_t i = 0; i < dest_i; i++) {
            memset(&dest_caps[i], 0, sizeof(dest_caps[i]));
        }
        return err;
    }
    else {
        // Set the owner for all the new caps
        for (size_t i = 0; i < dest_i; i++) {
            dest_caps[i].mdbnode.owner = owner;
        }
    }

    return SYS_ERR_OK;
}
/**
 * Look up a capability in two-level cspace rooted at `rootcn`.
 *
 * \param rootcn L1 CNode capability the lookup is rooted at (level > 0).
 * \param cptr   Capability address: high bits select the L1 slot, low
 *               L2_CNODE_BITS select the L2 slot.
 * \param level  Resolution depth: 0 returns the root cte itself, 1 stops at
 *               the L1 slot, 2 resolves through the L2 CNode.
 * \param ret    Out-parameter for the located cte.
 * \param rights Rights that must be present on every CNode traversed.
 */
errval_t caps_lookup_slot(struct capability *rootcn, capaddr_t cptr,
                          uint8_t level, struct cte **ret, CapRights rights)
{
    TRACE(KERNEL, CAP_LOOKUP_SLOT, 0);

    // Split the cptr into L1 and L2 indices.
    cslot_t l1index, l2index;
    l1index = (cptr >> L2_CNODE_BITS) & MASK(CPTR_BITS-L2_CNODE_BITS);
    l2index = cptr & MASK(L2_CNODE_BITS);

    assert(ret != NULL);
    assert(rootcn != NULL);

    if (level > 2) {
        debug(SUBSYS_CAPS, "%s called with level=%hhu, from %p\n",
              __FUNCTION__, level,
              (void*)kernel_virt_to_elf_addr(__builtin_return_address(0)));
        TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
        return SYS_ERR_CAP_LOOKUP_DEPTH;
    }
    assert(level <= 2);

    // level 0 means that we do not do any resolution and just return the cte
    // for rootcn.
    if (level == 0) {
        *ret = cte_for_cap(rootcn);
        TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
        return SYS_ERR_OK;
    }

    if (rootcn->type != ObjType_L1CNode) {
        debug(SUBSYS_CAPS, "%s: rootcn->type = %d, called from %p\n",
              __FUNCTION__, rootcn->type,
              (void*)kernel_virt_to_elf_addr(__builtin_return_address(0)));
        TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
        // XXX: think about errors
        return SYS_ERR_CNODE_TYPE;
    }
    assert(rootcn->type == ObjType_L1CNode);

    // L1 CNodes can be larger than L2_CNODE_SLOTS; bounds-check the index.
    if (l1index >= cnode_get_slots(rootcn)) {
        TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
        debug(SUBSYS_CAPS, "%s: l1index = %"PRIuCSLOT", slots= %zu\n",
              __FUNCTION__, l1index, cnode_get_slots(rootcn));
        return SYS_ERR_L1_CNODE_INDEX;
    }

    /* Apply rights to L1 CNode */
    if ((rootcn->rights & rights) != rights) {
        debug(SUBSYS_CAPS, "caps_lookup_slot: Rights mismatch\n"
              "Passed rights = %u, cnode_cap->rights = %u\n",
              rights, rootcn->rights);
        TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
        return SYS_ERR_CNODE_RIGHTS;
    }

    struct cte *l2cnode = caps_locate_slot(get_address(rootcn), l1index);

    // level == 1 means that we terminate after looking up the slot in the L1
    // cnode.
    if (level == 1) {
        if (l2cnode->cap.type == ObjType_Null) {
            TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
            return SYS_ERR_CAP_NOT_FOUND;
        }
        *ret = l2cnode;
        TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
        return SYS_ERR_OK;
    }

    // L2 CNode in given L1 slot does not exist
    if (l2cnode->cap.type == ObjType_Null) {
        TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
        debug(SUBSYS_CAPS, "%s: l2cnode is NULL\n", __FUNCTION__);
        return SYS_ERR_CNODE_NOT_FOUND;
    }
    if (l2cnode->cap.type != ObjType_L2CNode) {
        TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
        debug(SUBSYS_CAPS, "%s: l2cnode->type = %d\n", __FUNCTION__,
              l2cnode->cap.type);
        return SYS_ERR_CNODE_TYPE;
    }
    assert(l2cnode->cap.type == ObjType_L2CNode);

    // l2index was masked to L2_CNODE_BITS above, so it is always in range.
    assert(l2index < L2_CNODE_SLOTS);

    /* Apply rights to L2 CNode */
    if ((l2cnode->cap.rights & rights) != rights) {
        debug(SUBSYS_CAPS, "caps_lookup_slot: Rights mismatch\n"
              "Passed rights = %u, cnode_cap->rights = %u\n",
              rights, l2cnode->cap.rights);
        TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
        return SYS_ERR_CNODE_RIGHTS;
    }

    struct cte *cte = caps_locate_slot(get_address(&l2cnode->cap), l2index);
    if (cte->cap.type == ObjType_Null) {
        TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
        return SYS_ERR_CAP_NOT_FOUND;
    }

    *ret = cte;

    TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
    return SYS_ERR_OK;
}

/**
 * Wrapper for caps_lookup_slot returning capability instead of cte.
 */
errval_t caps_lookup_cap(struct capability *cnode_cap, capaddr_t cptr,
                         uint8_t level, struct capability **ret, CapRights rights)
{
    TRACE(KERNEL, CAP_LOOKUP_CAP, 0);

    struct cte *ret_cte;
    errval_t err = caps_lookup_slot(cnode_cap, cptr, level, &ret_cte, rights);
    if (err_is_fail(err)) {
        return err;
    }
    // Strip the cte wrapper; errors from caps_lookup_slot pass through as-is.
    *ret = &ret_cte->cap;
    TRACE(KERNEL, CAP_LOOKUP_CAP, 1);
    return SYS_ERR_OK;
}

/**
 * \brief Create a capability from an existing capability metadata.
 *
 * Used when sending capabilities across cores. The metadata is sent across
 * cores and the receiving monitor can create the new capability on its core.
 *
 * \param root        Root CNode to resolve \p cnode_cptr in.
 * \param cnode_cptr  Address of the destination CNode.
 * \param cnode_level Resolution depth for the destination CNode lookup.
 * \param dest_slot   Slot in the destination CNode for the new capability.
 * \param owner       Core id recorded as the owner of the new capability.
 * \param src         Capability metadata to instantiate locally.
 *
 * \bug Does not check that supplied owner matches existing copies of cap.
 */
errval_t caps_create_from_existing(struct capability *root, capaddr_t cnode_cptr,
                                   int cnode_level, cslot_t dest_slot, coreid_t owner,
                                   struct capability *src)
{
    TRACE(KERNEL, CAP_CREATE_FROM_EXISTING, 0);
    errval_t err;
    struct capability *cnode;
    err = caps_lookup_cap(root, cnode_cptr, cnode_level, &cnode,
                          CAPRIGHTS_READ_WRITE);
    if (err_is_fail(err)) {
        return err_push(err, SYS_ERR_SLOT_LOOKUP_FAIL);
    }
    if (cnode->type != ObjType_L1CNode &&
        cnode->type != ObjType_L2CNode)
    {
        return SYS_ERR_CNODE_TYPE;
    }

    struct cte *dest = caps_locate_slot(get_address(cnode), dest_slot);

    err = set_cap(&dest->cap, src);
    if (err_is_fail(err)) {
        return err;
    }

    dest->mdbnode.owner = owner;

    err = mdb_insert(dest);
    assert(err_is_ok(err));

    // Look for an MDB neighbour (predecessor or successor) that is a copy of
    // the new cap, so we can inherit its MDB attributes below.
    struct cte *neighbour = NULL;
    if (!neighbour
        && (neighbour = mdb_predecessor(dest))
        && !is_copy(&dest->cap, &neighbour->cap))
    {
        neighbour = NULL;
    }
    if (!neighbour
        && (neighbour = mdb_successor(dest))
        && !is_copy(&dest->cap, &neighbour->cap))
    {
        neighbour = NULL;
    }

    if (neighbour) {
        // An existing copy is present locally: mirror its per-copy MDB state.
        assert(!neighbour->mdbnode.in_delete);
        assert(neighbour->mdbnode.owner == owner);
#define CP_ATTR(a) dest->mdbnode.a = neighbour->mdbnode.a
        CP_ATTR(locked);
        CP_ATTR(remote_copies);
        CP_ATTR(remote_ancs);
        CP_ATTR(remote_descs);
#undef CP_ATTR
    }
    else {
        dest->mdbnode.locked = false;
        if (owner != my_core_id) {
            // For foreign caps it does not really matter if ancestors or
            // descendants exist
            dest->mdbnode.remote_copies = true;
            dest->mdbnode.remote_ancs = false;
            dest->mdbnode.remote_descs = false;
        }
        else {
            // We just created a new copy of a owned capability from nothing.
            // This is either caused by a retype, or by sharing a capability
            // that does not care about locality.
            // XXX: This should probably be done more explicitly -MN
            if (distcap_needs_locality(dest->cap.type)) {
                // Retype, so have ancestors and no descendants
                dest->mdbnode.remote_copies = false;
                dest->mdbnode.remote_ancs = true;
                dest->mdbnode.remote_descs = false;
            }
            else {
                dest->mdbnode.remote_copies = false;
                dest->mdbnode.remote_ancs = false;
                dest->mdbnode.remote_descs = false;
            }
        }
    }

    TRACE_CAP_MSG("created", dest);

    TRACE(KERNEL, CAP_CREATE_FROM_EXISTING, 1);
    return SYS_ERR_OK;
}

//{{{1 Capability creation

/// check arguments, return true iff ok
STATIC_ASSERT(50 == ObjType_Num, "Knowledge of all cap types");
#ifndef NDEBUG
/**
 * \brief Validate (bytes, objsize) for creating caps of \p type.
 *
 * \param type    Object type being created.
 * \param bytes   Size of the backing memory region.
 * \param objsize Size of each individual object; 0 is accepted for some types.
 * \param exact   If true, additionally require that bytes divides evenly
 *                into objsize-sized chunks.
 *
 * \return true iff the sizes satisfy the alignment/divisibility rules for
 *         \p type. Debug-build only; compiled out under NDEBUG (see #else).
 */
static bool check_caps_create_arguments(enum objtype type,
                                        size_t bytes, size_t objsize,
                                        bool exact)
{
    // For VNodes the required alignment is the VNode object size, otherwise
    // the base page size.
    gensize_t base_mask = BASE_PAGE_MASK;
    if (type_is_vnode(type)) {
        base_mask = vnode_objsize(type) - 1;
    }
    /* mappable types need to be at least BASE_PAGE_SIZEd */
    if (type_is_mappable(type)) {
        /* source size not multiple of or not aligned to BASE_PAGE_SIZE */
        if (bytes & base_mask) {
            debug(SUBSYS_CAPS, "source size not multiple of BASE_PAGE_SIZE\n");
            return false;
        }
        /* objsize > 0 and not multiple of BASE_PAGE_SIZE */
        if (objsize > 0 && objsize & base_mask) {
            debug(SUBSYS_CAPS, "object size not multiple of BASE_PAGE_SIZE\n");
            return false;
        }

        /* check that bytes can be evenly divided into objsize sized chunks */
        if (exact && bytes > 0 && objsize > 0) {
            if (bytes % objsize) {
                debug(SUBSYS_CAPS, "source size cannot be evenly divided into object size-sized chunks\n");
            }
            return bytes % objsize == 0;
        }

        return true;
    }

    if (type == ObjType_L1CNode) {
        /* L1 CNode minimum size is OBJSIZE_L2CNODE */
        if (bytes < OBJSIZE_L2CNODE || objsize < OBJSIZE_L2CNODE) {
            debug(SUBSYS_CAPS, "source size or L1 CNode objsize < OBJSIZE_L2CNODE\n");
            return false;
        }
        /* check that bytes can be evenly divided into L1 CNodes of objsize */
        if (exact && (bytes % objsize != 0)) {
            debug(SUBSYS_CAPS, "source not evenly divisible into L1 CNodes of objsize\n");
            return false;
        }
        /* L1 CNode size must be multiple of 1UL << OBJBITS_CTE */
        return objsize % (1UL << OBJBITS_CTE) == 0;
    }

    if (type == ObjType_L2CNode) {
        /* L2 CNode size must be OBJSIZE_L2CNODE */
        if (bytes < OBJSIZE_L2CNODE || objsize != OBJSIZE_L2CNODE) {
            debug(SUBSYS_CAPS, "source size < or L2 CNode objsize != OBJSIZE_L2CNODE\n");
            return false;
        }
        if (exact && (bytes % objsize != 0)) {
            debug(SUBSYS_CAPS, "source not evenly divisible into L2 CNodes of objsize\n");
            return false;
        }
        return true;
    }

    /* special case Dispatcher which is 1kB right now */
    if (type == ObjType_Dispatcher) {
        if (bytes & (OBJSIZE_DISPATCHER - 1)) {
            return false;
        }
        if (objsize > 0 && objsize != OBJSIZE_DISPATCHER) {
            return false;
        }

        return true;
    }

    // All other types do not need special alignments/offsets
    return true;
}
#else
// Release builds: the checks above are only ever used inside assert(),
// which NDEBUG compiles away, so the expansion value is never evaluated.
#define check_caps_create_arguments(a,b,c,d) 0
#endif

/** Create caps to new kernel objects.
 * This takes the size of the memory region in bytes, and the size of
 * individual objects in bytes. The following needs to hold:
 *     bytes % objsize == 0
 *
 * \param type    Object type for the new capabilities.
 * \param addr    Physical base address of the backing memory (0 for types
 *                without backing memory).
 * \param bytes   Size of the backing memory region.
 * \param objsize Size of each created object.
 * \param owner   Core id set as owner on every created capability.
 * \param caps    Destination cte array; receives one cap per created object.
 */
errval_t caps_create_new(enum objtype type, lpaddr_t addr, size_t bytes,
                         size_t objsize, coreid_t owner, struct cte *caps)
{
    TRACE(KERNEL, CAP_CREATE_NEW, 0);
    /* Parameter checking */
    assert(type != ObjType_EndPoint); // Cap of this type cannot be created
    debug(SUBSYS_CAPS, "caps_create_new: type = %d, addr = %#"PRIxLPADDR
          ", bytes=%zu, objsize=%zu\n", type, addr, bytes, objsize);

    // Exact divisibility is only enforced when a real address is supplied.
    assert(check_caps_create_arguments(type, bytes, objsize, false));
    assert(addr == 0 || check_caps_create_arguments(type, bytes, objsize, true));

    size_t numobjs = caps_max_numobjs(type, bytes, objsize);
    assert(numobjs > 0);
    // XXX: Dispatcher creation is kind of hacky right now :(
    // Consider allowing non-mappable types to be < BASE_PAGE_SIZE
    //if (type == ObjType_Dispatcher) {
    //    numobjs = 1;
    //}

    /* Create the new capabilities */
    errval_t err = caps_create(type, addr, bytes, objsize, numobjs, owner, caps);
    if (err_is_fail(err)) {
        return err;
    }

    // Handle the mapping database
    set_init_mapping(caps, numobjs);

    TRACE_CAP_MSG("created", &caps[0]);

    TRACE(KERNEL, CAP_CREATE_NEW, 1);
    return SYS_ERR_OK;
}

STATIC_ASSERT(50 == ObjType_Num, "Knowledge of all cap types");
/// Retype caps
/// Create `count` new caps of `type` from `offset` in src, and put them in
/// `dest_cnode` starting at `dest_slot`.
/**
 * \brief Retype `count` objects of `type`/`objsize` from `offset` in the
 *        source cap into `dest_cnode` starting at `dest_slot`.
 *
 * \param type         Destination object type (must not be Null or a Mapping).
 * \param objsize      Size of each destination object in bytes. For IRQSrc
 *                     sources this is reinterpreted as the new vec_end.
 * \param count        Number of objects to create; must be >= 1.
 * \param dest_cnode   CNode capability receiving the new caps.
 * \param dest_slot    First destination slot; all `count` slots must be empty.
 * \param src_cte      Source capability's cte.
 * \param offset       Byte offset into the source region. For IRQSrc sources
 *                     this is reinterpreted as the new vec_start.
 * \param from_monitor True when invoked via the monitor, which relaxes the
 *                     locality checks in is_retypeable().
 */
errval_t caps_retype(enum objtype type, gensize_t objsize, size_t count,
                     struct capability *dest_cnode, cslot_t dest_slot,
                     struct cte *src_cte, gensize_t offset,
                     bool from_monitor)
{
    TRACE(KERNEL, CAP_RETYPE, 0);
    size_t maxobjs;
    genpaddr_t base = 0;
    gensize_t size = 0;
    errval_t err;
    bool do_range_check = false;
    struct capability *src_cap = &src_cte->cap;

    /* Parameter checking */
    assert(type != ObjType_Null);
    assert(type < ObjType_Num);
    if (type == ObjType_Null || type >= ObjType_Num) {
        return SYS_ERR_INVALID_RETYPE;
    }

    debug(SUBSYS_CAPS, "%s: Retyping to type=%d, from offset=%" PRIuGENSIZE
          ", objsize=%" PRIuGENSIZE ", count=%zu\n",
          __FUNCTION__, type, offset, objsize, count);

    /*
     * check that offset into source cap is multiple of destination object
     * size, or base page size, whichever is smaller.
     */
    gensize_t dest_obj_alignment = BASE_PAGE_SIZE;
    if (type_is_vnode(type) && vnode_objsize(type) < BASE_PAGE_SIZE) {
        dest_obj_alignment = vnode_objsize(type);
    } else if (type == ObjType_Dispatcher) {
        dest_obj_alignment = OBJSIZE_DISPATCHER;
    }
    // IRQSrc sources are exempt: offset carries a vector number, not bytes.
    if (src_cap->type != ObjType_IRQSrc && offset % dest_obj_alignment != 0) {
        return SYS_ERR_RETYPE_INVALID_OFFSET;
    }
    assert(offset % dest_obj_alignment == 0 || src_cap->type == ObjType_IRQSrc);

    // check that size is multiple of BASE_PAGE_SIZE for mappable types
    gensize_t base_size = BASE_PAGE_SIZE;
    if (type_is_vnode(type)) {
        base_size = vnode_objsize(type);
    }
    if (type_is_mappable(type) && objsize % base_size != 0) {
        debug(SUBSYS_CAPS, "%s: objsize = %"PRIuGENSIZE"\n", __FUNCTION__, objsize);
        return SYS_ERR_INVALID_SIZE;
    }
    else if (type == ObjType_L1CNode && objsize % OBJSIZE_L2CNODE != 0)
    {
        printk(LOG_WARN, "%s: CNode: objsize = %" PRIuGENSIZE "\n", __FUNCTION__, objsize);
        return SYS_ERR_INVALID_SIZE;
    }
    else if (type == ObjType_L2CNode && objsize != OBJSIZE_L2CNODE)
    {
        printk(LOG_WARN, "%s: L2CNode: objsize = %"PRIuGENSIZE"\n", __FUNCTION__, objsize);
        return SYS_ERR_INVALID_SIZE;
    }
    assert((type_is_mappable(type) && objsize % base_size == 0) ||
           (type == ObjType_L1CNode && objsize % OBJSIZE_L2CNODE == 0 &&
            objsize >= OBJSIZE_L2CNODE) ||
           (type == ObjType_L2CNode && objsize == OBJSIZE_L2CNODE) ||
           !type_is_mappable(type));

    /* No explicit retypes to Mapping allowed */
    if (type_is_mapping(type)) {
        return SYS_ERR_RETYPE_MAPPING_EXPLICIT;
    }


    TRACE_CAP_MSG("retyping", src_cte);

    /* Check retypability */
    err = is_retypeable(src_cte, src_cap->type, type, from_monitor);
    if (err_is_fail(err)) {
        if (err_no(err) != SYS_ERR_REVOKE_FIRST) {
            printk(LOG_NOTE, "caps_retype: is_retypeable failed: %"PRIuERRV"\n", err);
            debug(SUBSYS_CAPS, "caps_retype: is_retypeable failed\n");
            return err;
        } else {
            debug(SUBSYS_CAPS,
                  "caps_retype: is_retypeable() returned SYS_ERR_REVOKE_FIRST, doing range check\n");
            // We handle err_revoke_first fine-grained checking below, as it
            // might happen for non-overlapping regions.

            // TODO: move the range checking into is_retypeable() or even
            // is_revoked_first(), -SG 2016-04-18
            do_range_check = true;
        }
    }
    // from here: src cap type is one of these.
    assert(src_cap->type == ObjType_PhysAddr ||
           src_cap->type == ObjType_RAM ||
           src_cap->type == ObjType_Dispatcher ||
           src_cap->type == ObjType_Frame ||
           src_cap->type == ObjType_DevFrame ||
           src_cap->type == ObjType_IRQSrc ||
           src_cap->type == ObjType_ProcessManager);

    // Dispatcher and IRQSrc caps are not backed by an address range.
    if (src_cap->type != ObjType_Dispatcher && src_cap->type != ObjType_IRQSrc) {
        base = get_address(src_cap);
        size = get_size(src_cap);
    }

    maxobjs = caps_max_numobjs(type, get_size(src_cap), objsize);
    debug(SUBSYS_CAPS, "maximum possible new object count: %zu\n", maxobjs);

    if (maxobjs == 0) {
        debug(SUBSYS_CAPS, "caps_retype: maxobjs == 0\n");
        return SYS_ERR_INVALID_SIZE;
    }

    if (count > maxobjs) {
        debug(SUBSYS_CAPS, "caps_retype: maxobjs = %zu, count = %zu\n", maxobjs, count);
        return SYS_ERR_RETYPE_INVALID_COUNT;
    }
    // from here: count <= maxobjs
    assert(count <= maxobjs);
    // make sure nobody calls with the old behaviour
    if (count == 0) {
        return SYS_ERR_RETYPE_INVALID_COUNT;
    }
    assert(count > 0);

    /* check that we can create `count` objs from `offset` in source, and
     * update base accordingly */
    if (src_cap->type != ObjType_Dispatcher && src_cap->type != ObjType_IRQSrc
        && src_cap->type != ObjType_Domain) {
        // TODO: convince ourselves that this is the only condition on offset
        if (offset + count * objsize > get_size(src_cap)) {
            debug(SUBSYS_CAPS, "caps_retype: cannot create all %zu objects"
                  " of size 0x%" PRIxGENSIZE " from offset 0x%" PRIxGENSIZE "\n",
                  count, objsize, offset);
            return SYS_ERR_RETYPE_INVALID_OFFSET;
        }
        // adjust base address for new objects
        base += offset;

        // Check whether we got SYS_ERR_REVOKE_FIRST because of
        // non-overlapping child
        if (do_range_check) {
            int find_range_result = 0;
            struct cte *found_cte = NULL;
            err = mdb_find_range(get_type_root(src_cap->type), base, objsize * count,
                                 MDB_RANGE_FOUND_SURROUNDING, &found_cte, &find_range_result);
            // this should never return an error unless we mess up the
            // non-user supplied arguments
            if (err_is_fail(err)) {
                printk(LOG_WARN, "mdb_find_range returned: %"PRIuERRV"\n", err);
            }
            assert(err_is_ok(err));
            // return REVOKE_FIRST, if we found a cap inside the region
            // (FOUND_INNER == 2) or overlapping the region (FOUND_PARTIAL == 3)
            if (find_range_result >= MDB_RANGE_FOUND_INNER) {
                debug(SUBSYS_CAPS,
                      "%s: found existing region inside, or overlapping requested region:\n",
                      __FUNCTION__);
                debug(SUBSYS_CAPS, "%s: our region: %#"PRIxGENPADDR"--%#"PRIxGENPADDR"\n",
                      __FUNCTION__, base, base+objsize*count);
                if (found_cte && kernel_loglevel >= LOG_DEBUG &&
                    kernel_log_subsystem_mask & SUBSYS_CAPS)
                {
                    char capbuf[128];
                    sprint_cap(capbuf, 128, &found_cte->cap);
                    printk(LOG_NOTE, "%s: cap=%s\n", __FUNCTION__, capbuf);
                    if (type_is_mapping(found_cte->cap.type)) {
                        sprint_cap(capbuf, 128, found_cte->cap.u.frame_mapping.cap);
                        printk(LOG_NOTE, "%s: ... is mapping for cap=%s\n",
                               __FUNCTION__, capbuf);
                    }
                    assert(get_address(&found_cte->cap) >= base &&
                           get_address(&found_cte->cap) < base+objsize*count);
                }
                return SYS_ERR_REVOKE_FIRST;
            }
            // return REVOKE_FIRST, if we found a cap that isn't our source
            // (or a copy of our source) covering the whole requested region.
            else if (find_range_result == MDB_RANGE_FOUND_SURROUNDING &&
                     !is_copy(&found_cte->cap, src_cap))
            {
                debug(SUBSYS_CAPS,
                      "%s: found non source region fully covering requested region\n",
                      __FUNCTION__);
                return SYS_ERR_REVOKE_FIRST;
            }
        }
    }

    /* check that destination slots all fit within target cnode */
    if (dest_slot + count > cnode_get_slots(dest_cnode)) {
        debug(SUBSYS_CAPS, "caps_retype: dest slots don't fit in cnode\n");
        return SYS_ERR_SLOTS_INVALID;
    }

    /* check that destination slots are all empty */
    debug(SUBSYS_CAPS, "caps_retype: dest cnode is %#" PRIxLPADDR
          " dest_slot %d\n",
          get_address(dest_cnode), (int)dest_slot);
    for (cslot_t i = 0; i < count; i++) {
        if (caps_locate_slot(get_address(dest_cnode), dest_slot + i)->cap.type
            != ObjType_Null) {
            debug(SUBSYS_CAPS, "caps_retype: dest slot %d in use\n",
                  (int)(dest_slot + i));
            return SYS_ERR_SLOTS_IN_USE;
        }
    }

    /* Check that L1 CNode is destination when creating L2 CNode */
    if (type == ObjType_L2CNode) {
        debug(SUBSYS_CAPS, "caps_retype: check that dest cnode is L1"
              " when creating L2 CNodes\n");
        // NOTE(review): the check actually admits L1 or L2 destinations,
        // matching the panic message below, not the debug text above.
        if (dest_cnode->type != ObjType_L1CNode &&
            dest_cnode->type != ObjType_L2CNode)
        {
            panic("L2 CNode can only be created in L1 or L2 CNode\n");
        }
    }

    // IRQSrc specific checks: offset/objsize are reused as the new interrupt
    // vector range [vec_start_new, vec_end_new].
    uint64_t vec_start_new = offset;
    uint64_t vec_end_new = objsize;
    if(src_cap->type == ObjType_IRQSrc){

        // Check new range is valid
        if(vec_start_new > vec_end_new){
            return SYS_ERR_RETYPE_INVALID_OFFSET;
        }

        // Check vec_start_new in range
        if(!(src_cap->u.irqsrc.vec_start <= vec_start_new &&
             vec_start_new <= src_cap->u.irqsrc.vec_end)){
            return SYS_ERR_RETYPE_INVALID_OFFSET;
        }

        // Check vec_end_new in range
        if(!(src_cap->u.irqsrc.vec_start <= vec_end_new &&
             vec_end_new <= src_cap->u.irqsrc.vec_end)){
            return SYS_ERR_RETYPE_INVALID_OBJSIZE;
        }
    }


    /* create new caps */
    struct cte *dest_cte =
        caps_locate_slot(get_address(dest_cnode), dest_slot);
    if(type == ObjType_IRQSrc){
        // Pass special arguments
        err = caps_create(type, 0, 0, 0, 1, my_core_id, dest_cte);
        if(err_is_ok(err)){
            dest_cte->cap.u.irqsrc.vec_start = vec_start_new;
            dest_cte->cap.u.irqsrc.vec_end = vec_end_new;
        }
    } else {
        err = caps_create(type, base, size, objsize, count, my_core_id, dest_cte);
    }
    if (err_is_fail(err)) {
        debug(SUBSYS_CAPS, "caps_retype: failed to create a dest cap\n");
        return err_push(err, SYS_ERR_RETYPE_CREATE);
    }

    /* special initialisation for endpoint caps */
    if (type == ObjType_EndPoint) {
        assert(src_cap->type == ObjType_Dispatcher);
        assert(count == 1);
        struct capability *dest_cap = &dest_cte->cap;
        dest_cap->u.endpoint.listener = src_cap->u.dispatcher.dcb;
    }

    // XXX: Treat full object retypes to same type as copies as calling
    // is_copy(dst, src) will return true for such retypes.
    if (count == 1 && objsize == get_size(src_cap) && type == src_cap->type) {
        // sanity check: is_copy() really returns true for the two caps
        assert(is_copy(&dest_cte[0].cap, src_cap));
        // If we're not owner, and type needs locality
        if (src_cte->mdbnode.owner != my_core_id &&
            distcap_needs_locality(dest_cte[0].cap.type))
        {
            // fix owner for new cap and set remote_copies bit
            dest_cte[0].mdbnode.owner = src_cte->mdbnode.owner;
            dest_cte[0].mdbnode.remote_copies = true;
        }
    }

    /* Handle mapping: insert every new cap into the mapping database */
    for (size_t i = 0; i < count; i++) {
        mdb_insert(&dest_cte[i]);
    }

#ifdef TRACE_PMEM_CAPS
    for (size_t i = 0; i < count; i++) {
        TRACE_CAP_MSG("created", &dest_cte[i]);
    }
#endif

    TRACE(KERNEL, CAP_RETYPE, 1);
    return SYS_ERR_OK;
}

/// Check the validity of a retype operation
/// \return SYS_ERR_OK when the retype may proceed locally;
///         SYS_ERR_INVALID_RETYPE / SYS_ERR_REVOKE_FIRST /
///         SYS_ERR_RETRY_THROUGH_MONITOR otherwise.
errval_t is_retypeable(struct cte *src_cte, enum objtype src_type,
                       enum objtype dest_type, bool from_monitor)
{
    if (!is_well_founded(src_type, dest_type)) {
        return SYS_ERR_INVALID_RETYPE;
    } else if (!is_revoked_first(src_cte, src_type)){
        //printf("err_revoke_first: (%p, %d, %d)\n", src_cte, src_type, dest_type);
        return SYS_ERR_REVOKE_FIRST;
    } else if (dest_type == ObjType_EndPoint && src_cte->mdbnode.owner == my_core_id) {
        // XXX: because of the current "multi-retype" hack for endpoints, a
        // dispatcher->endpoint retype can happen irrespective of the existence
        // of descendants on any core.
        // However, we only do this for locally owned caps as the owner should
        // be notified that the cap has remote descendants
        return SYS_ERR_OK;
    } else if (!from_monitor && (src_cte->mdbnode.owner != my_core_id
                                 || src_cte->mdbnode.remote_descs)) {
        // Non-monitor callers must go through the monitor for caps that are
        // foreign-owned or have remote descendants.
        return SYS_ERR_RETRY_THROUGH_MONITOR;
    } else {
        return SYS_ERR_OK;
    }
}

/// Create copies to a slot within a cnode
errval_t caps_copy_to_cnode(struct cte *dest_cnode_cte, cslot_t dest_slot,
                            struct cte *src_cte, bool mint, uintptr_t param1,
                            uintptr_t param2)
{
    /* Parameter Checking */
    assert(dest_cnode_cte->cap.type == ObjType_L1CNode ||
           dest_cnode_cte->cap.type == ObjType_L2CNode);

    // only allow L2 CNodes and BSP KCB in L1 CNode
    // XXX: BSPKCB should not be in rootcn...
    if (dest_cnode_cte->cap.type == ObjType_L1CNode &&
        src_cte->cap.type != ObjType_L2CNode &&
        src_cte->cap.type != ObjType_KernelControlBlock)
    {
        printk(LOG_WARN, "trying to copy cap type %d into cap type %d\n",
               src_cte->cap.type, dest_cnode_cte->cap.type);
        return SYS_ERR_DEST_TYPE_INVALID;
    }

    struct cte *dest_cte;
    dest_cte = caps_locate_slot(get_address(&dest_cnode_cte->cap), dest_slot);
    return caps_copy_to_cte(dest_cte, src_cte, mint, param1, param2);

}

/// Create copies to a cte
/// For mint operations (mint == true), param1/param2 carry type-specific
/// mint arguments (EndPoint: buffer offset/length; IO: port range bounds).
STATIC_ASSERT(50 == ObjType_Num, "Knowledge of all cap types");
errval_t caps_copy_to_cte(struct cte *dest_cte, struct cte *src_cte, bool mint,
                          uintptr_t param1, uintptr_t param2)
{
    errval_t err;
    /* Parameter checking */
    // Null checking
    assert(dest_cte != NULL);
    assert(src_cte != NULL);

    struct capability *src_cap = &src_cte->cap;
    struct capability *dest_cap = &dest_cte->cap;
    // NULL caps cannot be copied/minted
    if (src_cap->type == ObjType_Null) {
        return SYS_ERR_CAP_NOT_FOUND;
    }
    // Parameters should be 0 if not minting
    if (!mint) {
        assert(param1 == 0);
        assert(param2 == 0);
    }

    assert(!src_cte->mdbnode.in_delete);

    /* Insert #source_cap into #dest_cap */
    err = set_cap(dest_cap, src_cap);
    if (err_is_fail(err)) {
        return err;
    }

    /* Transfer MDB attributes that must be equal for all copies */
#define CP_ATTR(at) dest_cte->mdbnode.at = src_cte->mdbnode.at
    CP_ATTR(owner);
    CP_ATTR(locked);
    CP_ATTR(remote_copies);
    CP_ATTR(remote_ancs);
    CP_ATTR(remote_descs);
#undef CP_ATTR

    /* Copy is done */
    if(!mint) {
        TRACE_CAP_MSG("copied to", dest_cte);
        // Handle mapping here only for non-mint operations
        // (mint can change eq fields which would make the early insertion
        // invalid in some cases)
        mdb_insert(dest_cte);
        return SYS_ERR_OK;
    }
    else {
        TRACE_CAP_MSG("minting to", dest_cte);
    }

    /* For minting, set the specified parameters */
    // Process source-specific parameters for minting
    // XXX: If failure, revert the insertion
    switch(src_cap->type) {
    case ObjType_EndPoint:
        // XXX: FIXME: check that buffer offset lies wholly within the disp frame
        // can't easily enforce this here, because the dispatcher frame may not
        // yet be setup
/*        if (param1 < sizeof(struct dispatcher) ||
            dest_cap->u.endpoint.listener->disp == NULL ||
            param2 < IDC_RECV_LENGTH ||
            param1 + sizeof(struct idc_endpoint) + param2 * sizeof(uintptr_t) >
            (1UL << dest_cap->u.endpoint.listener->disp_cte.cap.u.frame.bits)) {
            return SYS_ERR_INVALID_EPBUF;
        }*/
        if (param2 < LMP_RECV_HEADER_LENGTH) {
            return SYS_ERR_INVALID_EPLEN;
        }
        dest_cap->u.endpoint.epoffset = param1;
        dest_cap->u.endpoint.epbuflen = param2;
        break;

    case ObjType_IO:
        // Minting an IO cap may only narrow the port range, never widen it.
        if(src_cap->u.io.start <= param1) {
            dest_cap->u.io.start = param1;
        }
        if(src_cap->u.io.end >= param2) {
            dest_cap->u.io.end = param2;
        }
        break;

    default:
        // Unhandled source type for mint
        return SYS_ERR_INVALID_SOURCE_TYPE;
    }

    // Insert after doing minting operation
    mdb_insert(dest_cte);

    return SYS_ERR_OK;
}