/*
 * Copyright (c) 2012 ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */

/*
 * Distributed capability "delete" operation.
 *
 * Deleting the last copy of a capability may require cross-core work: either
 * migrating ownership to another core that still holds a copy (moveable cap
 * types) or deleting all foreign copies first (non-moveable types). The
 * protocol below runs in the monitor and is driven by continuations: each
 * step either completes the delete, queues a retry on a caplock event, or
 * sends an inter-monitor message and resumes in the corresponding __rx
 * handler.
 */

#include <barrelfish/barrelfish.h>
#include <barrelfish/core_state.h>
#include "monitor.h"
#include "capops.h"
#include "capsend.h"
#include "caplock.h"
#include "capqueue.h"
#include "dom_invocations.h"
#include "delete_int.h"
#include "internal.h"
#include "ram_alloc.h"
#include <if/mem_defs.h>

// Aggregation state for the "delete all remote copies" multicast; one
// allocation per broadcast, freed when the last reply has arrived.
struct delete_remote_mc_st {
    struct capsend_mc_st mc_st;     // generic multicast bookkeeping (reply counting)
    struct delete_st *del_st;       // the delete operation this broadcast belongs to
    errval_t status;                // sticky first-failure status across replies
};

// Queued outgoing delete_remote_result message (per-destination send state).
struct delete_remote_result_msg_st {
    struct intermon_msg_queue_elem queue_elem;
    errval_t status;                // outcome of the remote delete on this core
    genvaddr_t st;                  // opaque sender-side state, echoed back verbatim
};

// Sequence number used only for TRACE correlation of delete operations.
static uint64_t delete_seqnum = 0;

static void delete_trylock_cont(void *st);

/**
 * \brief Finish a delete operation and report its outcome.
 *
 * Unlocks the cap if the caller still holds the lock, releases the scratch
 * slot used for RAM reclamation, destroys our copy of the target domain's
 * root CNode cap, frees the operation state, and finally invokes the client's
 * result handler with \p status.
 *
 * \param locked  true if the target cap is still locked and must be unlocked
 *                here before reporting.
 */
static void
delete_result__rx(errval_t status, struct delete_st *del_st, bool locked)
{
    DEBUG_CAPOPS("%s: status=%s, locked=%d\n", __FUNCTION__, err_getcode(status), locked);
    errval_t err;

    if (locked) {
        caplock_unlock(del_st->capref);
    }

    // newcap may never have been filled in; an unallocated slot is not an error
    err = slot_free(del_st->newcap);
    if (err_is_fail(err) && err_no(err) != LIB_ERR_SLOT_UNALLOCATED) {
        DEBUG_ERR(err, "freeing reclamation slot, will leak");
    }

    // Delete our copy of domain's rootcn
    err = cap_destroy(del_st->capref.croot);
    PANIC_IF_ERR(err, "cleaning up domain's rootcn");

    // Capture handler/st before freeing the state they live in
    delete_result_handler_t handler = del_st->result_handler;
    void *st = del_st->st;
    free(del_st);
    TRACE(CAPOPS, DELETE_DONE, delete_seqnum);
    handler(status, st);
}

/**
 * \brief Return a RAM cap reclaimed by a delete to the memory server.
 *
 * If we are the monitor holding the mem_serv binding (monitor.0), the cap is
 * handed over via the free_monitor RPC; otherwise the request is forwarded to
 * monitor.0 and the local copy is deleted afterwards.
 *
 * NOTE(review): failures here are handled by assert only — presumably losing
 * reclaimed RAM is considered fatal for the monitor.
 */
void
send_new_ram_cap(struct capref cap)
{
    DEBUG_CAPOPS("%s\n", __FUNCTION__);
    errval_t err, result;

    struct capability cap_data;
    err = monitor_cap_identify(cap, &cap_data);
    assert(err_is_ok(err));
    assert(cap_data.type == ObjType_RAM);
    struct RAM ram = cap_data.u.ram;

    // serialize against other users of the ram_alloc channel
    struct ram_alloc_state *ram_alloc_state = get_ram_alloc_state();
    thread_mutex_lock(&ram_alloc_state->ram_alloc_lock);

    struct mem_binding *b = get_mem_client();
    if (!b) {
        DEBUG_CAPOPS("%s: forwarding to monitor.0\n", __FUNCTION__);
        // we're not on core 0, so forward free_monitor msg to monitor.0
        err = mon_ram_free(&cap_data, ram.base, log2ceil(ram.bytes));
        assert(err_is_ok(err));
    } else {
        DEBUG_CAPOPS("%s: we are monitor.0\n", __FUNCTION__);
        // XXX: This should not be an RPC! It could stall the monitor, but
        // we trust mem_serv for the moment.
        err = b->rpc_tx_vtbl.free_monitor(b, cap, ram.base, log2ceil(ram.bytes), &result);
        assert(err_is_ok(err));
        assert(err_is_ok(result));
    }

    thread_mutex_unlock(&ram_alloc_state->ram_alloc_lock);

    // XXX: this seems to happen during the lmp transfer anyway -SG
    if (!b) {
        DEBUG_CAPOPS("%s: not monitor.0, deleting local copy\n", __FUNCTION__);
        // should we do this if not on core 0? -SG
        err = cap_delete(cap);
        assert(err_is_ok(err));
    }
    DEBUG_CAPOPS("%s: finished\n", __FUNCTION__);
}

// Continuation invoked once the delete/clear queue has drained; the delete
// that was waiting can now be reported as successful.
static void delete_wait__fin(void *st_)
{
    DEBUG_CAPOPS("%s\n", __FUNCTION__);
    struct delete_st *st = (struct delete_st*)st_;
    TRACE(CAPOPS, DELETE_CALL_RX, 5);
    delete_result__rx(SYS_ERR_OK, st, false);
}

/**
 * \brief Delete the last (local) copy of the target cap.
 *
 * Invokes the kernel's delete-last operation; the kernel may turn the freed
 * memory into a fresh RAM cap (placed in del_st->newcap and signalled via
 * SYS_ERR_RAM_CAP_CREATED), which is then returned to the memory server.
 * If the caller asked to wait (del_st->wait), completion is deferred until
 * the delete queue drains; otherwise the (OK) status is reported immediately
 * through the shared report_error path.
 */
static void delete_last(struct delete_st* del_st)
{
    DEBUG_CAPOPS("%s\n", __FUNCTION__);
    TRACE(CAPOPS, DELETE_LAST, 0);
    errval_t err;
    bool locked = true;

    err = monitor_delete_last(del_st->capref.croot, del_st->capref.cptr,
                              del_st->capref.level, del_st->newcap);
    GOTO_IF_ERR(err, report_error);
    // SYS_ERR_RAM_CAP_CREATED is a success code carrying extra information
    if (err_no(err) == SYS_ERR_RAM_CAP_CREATED) {
        DEBUG_CAPOPS("%s: sending reclaimed RAM to memserv.\n", __FUNCTION__);
        send_new_ram_cap(del_st->newcap);
        err = SYS_ERR_OK;
    }

    DEBUG_CAPOPS("%s: deleted last copy\n", __FUNCTION__);
    // at this point the cap has become "unlocked" because it is either deleted
    // or in a clear/delete queue
    locked = false;

    if (!del_st->wait) {
        // not waiting: report success (err is OK here) via the common exit
        goto report_error;
    }

    TRACE(CAPOPS, DELETE_QUEUE_FIN, 0);
    DEBUG_CAPOPS("%s: waiting on delete queue\n", __FUNCTION__);
    delete_queue_wait(&del_st->qn, MKCLOSURE(delete_wait__fin, del_st));

    return;

report_error:
    DEBUG_CAPOPS("%s: reporting error: %s\n", __FUNCTION__,
                 err_getstring(err));
    TRACE(CAPOPS, DELETE_CALL_RX, 1);
    delete_result__rx(err, del_st, locked);
}

/*
 * Non-moveable cap types: deleting all foreign copies when last owned copy of
 * cap is deleted
 */

// Per-binding send function for the delete_remote broadcast; the multicast
// state pointer is passed to the remote side as an opaque genvaddr_t.
static errval_t
delete_remote__send(struct intermon_binding *b, intermon_caprep_t *caprep,
                    struct capsend_mc_st *st)
{
    return intermon_capops_delete_remote__tx(b, NOP_CONT, *caprep,
                                             (lvaddr_t)st);
}

/**
 * \brief Broadcast a delete_remote request to all cores holding copies.
 *
 * Allocates the multicast aggregation state and starts the capsend_copies
 * broadcast; replies arrive in delete_remote_result__rx. On setup failure the
 * delete is aborted with the cap still locked.
 */
static void
delete_remote__enq(struct capability *cap, struct delete_st *st)
{
    DEBUG_CAPOPS("%s\n", __FUNCTION__);
    TRACE(CAPOPS, DELETE_REMOTE_ENQ, 0);
    errval_t err;
    struct delete_remote_mc_st *mc_st;

    err = malloce(sizeof(*mc_st), &mc_st);
    GOTO_IF_ERR(err, report_error);
    mc_st->del_st = st;
    mc_st->status = SYS_ERR_OK;

    err = capsend_copies(cap, delete_remote__send,
                         (struct capsend_mc_st*)mc_st);
    GOTO_IF_ERR(err, report_error);

    return;

report_error:
    TRACE(CAPOPS, DELETE_CALL_RX, 6);
    delete_result__rx(err, st, true);
}

// Queued-message send function for a delete_remote result. On TX_BUSY the
// message is requeued at the front of the binding's send queue; otherwise
// execution falls through to handle_err (a no-op when err is OK) and the
// message state is freed.
static void
delete_remote_result__send(struct intermon_binding *b, struct intermon_msg_queue_elem *e)
{
    errval_t err;
    struct delete_remote_result_msg_st *msg_st = (struct delete_remote_result_msg_st*)e;
    err = intermon_capops_delete_remote_result__tx(b, NOP_CONT, msg_st->status, msg_st->st);

    if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        DEBUG_CAPOPS("%s: got FLOUNDER_ERR_TX_BUSY; requeueing msg.\n", __FUNCTION__);
        struct intermon_state *inter_st = (struct intermon_state *)b->st;
        // requeue send request at front and return
        err = intermon_enqueue_send_at_front(b, &inter_st->queue, b->waitset,
                                             (struct msg_queue_elem *)e);
        GOTO_IF_ERR(err, handle_err);
        return;
    }

handle_err:
    PANIC_IF_ERR(err, "failed to send delete_remote_result msg");
    free(msg_st);
}

/**
 * \brief Enqueue a delete_remote result message back to the requesting core.
 *
 * \param st  the requester's opaque multicast-state pointer, echoed verbatim.
 */
static void
delete_remote_result__enq(coreid_t dest, errval_t status, genvaddr_t st)
{
    DEBUG_CAPOPS("%s: dest=%d, status=%s\n", __FUNCTION__, dest, err_getcode(status));
    errval_t err;

    struct delete_remote_result_msg_st *msg_st;
    err = calloce(1, sizeof(*msg_st), &msg_st);
    PANIC_IF_ERR(err, "allocating delete_remote_result st");

    msg_st->queue_elem.cont = delete_remote_result__send;
    msg_st->status = status;
    msg_st->st = st;

    err = capsend_target(dest, (struct msg_queue_elem*)msg_st);
    PANIC_IF_ERR(err, "failed to send delete_remote result");
}

/**
 * \brief Handle an incoming delete_remote request.
 *
 * Materializes a local copy of the described cap (if one exists here) into a
 * temporary slot, deletes all local foreign copies via
 * monitor_delete_foreigns, then frees the temporary slot and reports the
 * outcome to the sender. A cap that no longer exists locally counts as
 * success (no copies to delete).
 */
void
delete_remote__rx(struct intermon_binding *b, intermon_caprep_t caprep,
                  genvaddr_t st)
{
    DEBUG_CAPOPS("%s\n", __FUNCTION__);
    TRACE(CAPOPS, DELETE_REMOTE_RX, 0);
    errval_t err, err2;
    struct capability cap;
    struct intermon_state *inter_st = (struct intermon_state*)b->st;
    coreid_t from = inter_st->core_id;
    caprep_to_capability(&caprep, &cap);
    struct capref capref;

    err = slot_alloc(&capref);
    GOTO_IF_ERR(err, send_err);

    err = monitor_copy_if_exists(&cap, capref);
    if (err_is_fail(err)) {
        DEBUG_CAPOPS("%s: monitor_copy_if_exists: %s\n", __FUNCTION__, err_getcode(err));
        if (err_no(err) == SYS_ERR_CAP_NOT_FOUND) {
            // not found implies there were no copies, so everything is OK
            err = SYS_ERR_OK;
        }
        goto free_slot;
    }

    err = monitor_delete_foreigns(capref);
    DEBUG_CAPOPS("%s: monitor_delete_foreigns: %s\n", __FUNCTION__, err_getcode(err));
    //err = monitor_delete_copies(capref);
    //err2 = cap_delete(capref);
    //DEBUG_IF_ERR(err2, "deleting temp delete_remote cap");
    //if (err_is_ok(err) && err_is_fail(err2)) {
    //    err = err2;
    //}

free_slot:
    err2 = slot_free(capref);
    DEBUG_IF_ERR(err2, "freeing temp delete_remote cap, will leak");

send_err:
    delete_remote_result__enq(from, err, st);
}

/**
 * \brief Handle one reply of the delete_remote multicast.
 *
 * Records the first failure, and once all replies are in: frees the multicast
 * state, unlocks the cap, clears the remote-copies relation bit, and deletes
 * the now-last local copy. If that local delete itself reports "last owned",
 * delete_last() takes over (reclaiming RAM, waiting on the delete queue);
 * otherwise the result is reported directly.
 */
void
delete_remote_result__rx(struct intermon_binding *b, errval_t status,
                         genvaddr_t st)
{
    DEBUG_CAPOPS("%s\n", __FUNCTION__);
    TRACE(CAPOPS, DELETE_REMOTE_RESULT_RX, 0);
    errval_t err;
    struct delete_remote_mc_st *mc_st = (struct delete_remote_mc_st*)(lvaddr_t)st;
    struct delete_st *del_st = mc_st->del_st;

    // XXX: do something with received errors?
    // sticky-record any failure; later replies cannot clear it back to OK
    if (err_is_fail(status)) {
        mc_st->status = status;
    }
    status = mc_st->status;

    if (!capsend_handle_mc_reply(&mc_st->mc_st)) {
        // multicast not complete
        return;
    }

    // multicast is complete, free state
    free(mc_st);

    // unlock cap so it can be deleted
    caplock_unlock(del_st->capref);

    if (err_is_ok(status)) {
        // remote copies have been deleted, reset corresponding relations bit
        err = monitor_domcap_remote_relations(del_st->capref.croot,
                                              del_st->capref.cptr,
                                              del_st->capref.level,
                                              0, RRELS_COPY_BIT, NULL);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "clearing remote descs bit after remote delete");
        }

        // All remote copies deleted, delete local copy; can be last
        err = dom_cnode_delete(del_st->capref);
        // NB: comparison below is against the exact two-level error stack,
        // not just err_no() — both levels must match.
        errval_t last_owned = err_push(SYS_ERR_DELETE_LAST_OWNED,
                                       SYS_ERR_RETRY_THROUGH_MONITOR);
        // We got DELETE_LAST_OWNED from cpu driver, do delete_last()
        if (err == last_owned) {
            delete_last(del_st);
            // We just assume that delete_last() succeeds
            err = SYS_ERR_OK;
        }
        else if (err_no(err) == SYS_ERR_CAP_NOT_FOUND) {
            // this shouldn't really happen either, but isn't a problem
            err = SYS_ERR_OK;
        }
        else if (err_is_fail(err)) {
            // other than DELETE_LAST_OWNED, the simple delete should not fail
            // here.
            USER_PANIC_ERR(err, "this really should not happen");
        }
    }
    else {
        err = status;
    }

    TRACE(CAPOPS, DELETE_CALL_RX, 7);
    delete_result__rx(err, del_st, false);
}

/*
 * Moveable cap type: try to migrate ownership elsewhere
 */

static void move_result_cont(errval_t status, void *st);

/**
 * \brief Continuation for capsend_find_cap when deleting a moveable cap.
 *
 * If no other core holds a copy, the local copy is the last one: clear the
 * remote-copies bit and run delete_last(). If a core was found, attempt to
 * move ownership there (move_result_cont handles the outcome). Any other
 * failure aborts the delete.
 */
static void
find_core_cont(errval_t status, coreid_t core, void *st)
{
    DEBUG_CAPOPS("%s\n", __FUNCTION__);
    TRACE(CAPOPS, DELETE_FIND_CORE_CONT, 0);
    // called with the result of "find core with cap" when trying to move the
    // last cap
    errval_t err = status;
    struct delete_st *del_st = (struct delete_st*)st;

    // unlock cap so it can be manipulated
    caplock_unlock(del_st->capref);

    if (err_no(status) == SYS_ERR_CAP_NOT_FOUND) {
        // no core with cap exists, delete local cap with cleanup
        err = monitor_domcap_remote_relations(del_st->capref.croot,
                                              del_st->capref.cptr,
                                              del_st->capref.level,
                                              0, RRELS_COPY_BIT, NULL);
        if (err_is_fail(err)) {
            if (err_no(err) == SYS_ERR_CAP_NOT_FOUND) {
                // cap vanished concurrently: treat as success
                err = SYS_ERR_OK;
            }
            goto report_error;
        }

        delete_last(del_st);
    }
    else if (err_is_fail(status)) {
        // an error occured
        goto report_error;
    }
    else {
        // core found, attempt move
        err = capops_move(del_st->capref, core, move_result_cont, st);
        GOTO_IF_ERR(err, report_error);
    }

    return;

report_error:
    TRACE(CAPOPS, DELETE_CALL_RX, 2);
    delete_result__rx(err, del_st, false);
}

/**
 * \brief Continuation for capops_move during a moveable-cap delete.
 *
 * If the chosen target lost its copy in the meantime, restart the whole
 * trylock/find cycle. On success the cap is now foreign here and a plain
 * delete suffices (CAP_NOT_FOUND at that point also counts as done).
 */
static void
move_result_cont(errval_t status, void *st)
{
    DEBUG_CAPOPS("%s\n", __FUNCTION__);
    TRACE(CAPOPS, DELETE_MOVE_RESULT_CONT, 0);
    errval_t err = status;
    struct delete_st *del_st = (struct delete_st*)st;
    assert(distcap_is_moveable(del_st->cap.type));

    if (err_no(err) == SYS_ERR_CAP_NOT_FOUND) {
        // the found remote copy has disappeared, restart move process
        delete_trylock_cont(del_st);
    }
    else if (err_is_fail(err)) {
        TRACE(CAPOPS, DELETE_CALL_RX, 3);
        delete_result__rx(err, del_st, false);
    }
    else {
        // move succeeded, cap is now foreign
        err = dom_cnode_delete(del_st->capref);
        if (err_no(err) == SYS_ERR_CAP_NOT_FOUND) {
            err = SYS_ERR_OK;
        }
        TRACE(CAPOPS, DELETE_CALL_RX, 4);
        delete_result__rx(err, del_st, false);
    }
}

/*
 * Delete operation
 */

/**
 * \brief Core retry step of the delete operation.
 *
 * Attempts a simple delete; if the kernel demands monitor involvement,
 * locks the cap and dispatches on its remote relations:
 *  - no remote copies          -> delete_last()
 *  - moveable type             -> find a new owner, then move (find_core_cont)
 *  - non-moveable with copies  -> broadcast delete_remote (delete_remote__enq)
 * A locked cap (at either step) queues this same function as the caplock
 * wakeup continuation; a concurrently-deleted cap completes with OK
 * (err_push(SYS_ERR_OK, err) keeps the history while signalling success).
 */
static void
delete_trylock_cont(void *st)
{
    DEBUG_CAPOPS("%s\n", __FUNCTION__);
    TRACE(CAPOPS, DELETE_LOCK, 0);
    errval_t err;
    bool locked = false;
    struct delete_st *del_st = (struct delete_st*)st;

    // try a simple delete
    // NOTE: on the first pass, this is done twice (once in the capops_delete
    // entry), but only this function is executed on every unlock event
    err = dom_cnode_delete(del_st->capref);
    if (err_no(err) != SYS_ERR_RETRY_THROUGH_MONITOR) {
        // If cap is already locked, just enqueue for retry
        if (err_no(err) == SYS_ERR_CAP_LOCKED) {
            DEBUG_CAPOPS("%s: from cnode_delete(): cap already locked, queuing retry\n", __FUNCTION__);
            TRACE(CAPOPS, DELETE_QUEUE_RETRY, 0);
            caplock_wait(del_st->capref, &del_st->lock_qn,
                         MKCLOSURE(delete_trylock_cont, del_st));
            return;
        }
        // If cap not found, it has been deleted elsewhere, return OK
        if (err_no(err) == SYS_ERR_CAP_NOT_FOUND) {
            DEBUG_CAPOPS("%s: from cnode_delete(): cap not found, got deleted from elsewhere\n", __FUNCTION__);
            err = err_push(SYS_ERR_OK, err);
        }
        goto report_error;
    }

    err = monitor_lock_cap(del_st->capref.croot, del_st->capref.cptr,
                           del_st->capref.level);
    if (err_no(err) == SYS_ERR_CAP_LOCKED) {
        DEBUG_CAPOPS("%s: from lock(): cap already locked, queuing retry\n", __FUNCTION__);
        TRACE(CAPOPS, DELETE_QUEUE_RETRY, 1);
        caplock_wait(del_st->capref, &del_st->lock_qn,
                     MKCLOSURE(delete_trylock_cont, del_st));
        return;
    }
    else if (err_no(err) == SYS_ERR_CAP_NOT_FOUND) {
        DEBUG_CAPOPS("%s: from lock(): cap not found, got deleted from elsewhere\n", __FUNCTION__);
        // Some other operation (another delete or a revoke) has deleted the
        // target cap. This is OK.
        err = err_push(SYS_ERR_OK, err);
        goto report_error;
    }
    else if (err_is_fail(err)) {
        DEBUG_ERR(err, "locking cap for delete");
        goto report_error;
    }
    else {
        locked = true;
    }

    TRACE(CAPOPS, DELETE_DO_WORK, 0);
    // check if there could be any remote relations
    uint8_t relations;
    err = monitor_domcap_remote_relations(del_st->capref.croot,
                                          del_st->capref.cptr,
                                          del_st->capref.level,
                                          0, 0, &relations);
    GOTO_IF_ERR(err, report_error);

    if (!(relations & RRELS_COPY_BIT)) {
        // no remote relations, proceed with final delete
        DEBUG_CAPOPS("%s: deleting last copy\n", __FUNCTION__);
        delete_last(del_st);
    }
    else if (distcap_is_moveable(del_st->cap.type)) {
        // if cap is moveable, move ownership so cap can then be deleted
        DEBUG_CAPOPS("%s: move ownership\n", __FUNCTION__);
        TRACE(CAPOPS, DELETE_FIND_NEW_OWNER, 0);
        err = capsend_find_cap(&del_st->cap, find_core_cont, del_st);
        GOTO_IF_ERR(err, report_error);
    }
    else {
        DEBUG_CAPOPS("%s: cap type %d not moveable, delete all copies\n",
                     __FUNCTION__, del_st->cap.type);
        // otherwise delete all remote copies and then delete last copy
        delete_remote__enq(&del_st->cap, del_st);
    }

    return;

report_error:
    DEBUG_CAPOPS("%s: reporting error: %s\n", __FUNCTION__, err_getcode(err));
    TRACE(CAPOPS, DELETE_CALL_RX, 0);
    delete_result__rx(err, del_st, locked);
}

// Internal entry point for a delete whose state has already been set up
// (e.g. restarted from another capops operation).
void
capops_delete_int(struct delete_st *del_st)
{
    DEBUG_CAPOPS("%s\n", __FUNCTION__);
    delete_trylock_cont(del_st);
}

/**
 * \brief Public entry point: delete a capability on behalf of a domain.
 *
 * Fast path: a plain dom_cnode_delete; only RETRY_THROUGH_MONITOR or
 * CAP_LOCKED escalate into the full monitor-mediated protocol, for which the
 * per-operation state (cap identity, scratch slot, client handler) is set up
 * before entering delete_trylock_cont().
 *
 * \param result_handler  invoked exactly once with the final status and \p st.
 */
void
capops_delete(struct domcapref cap,
              delete_result_handler_t result_handler,
              void *st)
{
    errval_t err;
    DEBUG_CAPOPS("%s\n", __FUNCTION__);
    TRACE(CAPOPS, DELETE_ENTER, ++delete_seqnum);

    // try a simple delete
    DEBUG_CAPOPS("%s: trying simple delete\n", __FUNCTION__);
    err = dom_cnode_delete(cap);
    // We can also continue here if we get SYS_ERR_CAP_LOCKED, as we're going
    // to handle already locked caps correctly in delete_trylock_cont().
    // -SG, 2017-05-02
    if (err_no(err) != SYS_ERR_RETRY_THROUGH_MONITOR &&
        err_no(err) != SYS_ERR_CAP_LOCKED)
    {
        DEBUG_CAPOPS("%s: err != RETRY && err != LOCKED\n", __FUNCTION__);
        goto err_cont;
    }

    // simple delete was not able to delete cap as:
    // * it was last copy and:
    //   - may have remote copies, need to move or revoke cap
    //   - contains further slots which need to be cleared
    // * currently locked

    struct delete_st *del_st;
    err = calloce(1, sizeof(*del_st), &del_st);
    GOTO_IF_ERR(err, err_cont);

    err = monitor_domains_cap_identify(cap.croot, cap.cptr, cap.level,
                                       &del_st->cap);
    GOTO_IF_ERR(err, free_st);

    err = slot_alloc(&del_st->newcap);
    GOTO_IF_ERR(err, free_st);

    del_st->capref = cap;
    del_st->wait = true;
    del_st->result_handler = result_handler;
    del_st->st = st;

    // after this setup is complete, nothing less than a catastrophic failure
    // should stop the delete
    delete_trylock_cont(del_st);
    return;

free_st:
    free(del_st);

err_cont:
    DEBUG_CAPOPS("%s: calling result handler with err=%"PRIuERRV"\n", __FUNCTION__, err);
    TRACE(CAPOPS, DELETE_DONE, delete_seqnum);
    result_handler(err, st);
}