/*
 * Copyright 2009, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


#include "VMAddressSpaceLocking.h"

#include <AutoDeleter.h>

#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMArea.h>
#include <vm/VMCache.h>


// #pragma mark - AddressSpaceLockerBase


/*!	Returns the address space owning the area with the given ID, with a
	reference acquired on it, or \c NULL if no area with that ID exists.
	The areas hash is only read-locked for the duration of the lookup, so
	the area itself may be deleted again by the time the caller uses the
	result -- only the returned address space reference stays valid.
*/
/*static*/ VMAddressSpace*
AddressSpaceLockerBase::GetAddressSpaceByAreaID(area_id id)
{
	VMAddressSpace* addressSpace = NULL;

	VMAreas::ReadLock();

	VMArea* area = VMAreas::LookupLocked(id);
	if (area != NULL) {
		addressSpace = area->address_space;
		addressSpace->Get();
			// take the reference while the hash lock still protects the area
	}

	VMAreas::ReadUnlock();

	return addressSpace;
}


// #pragma mark - AddressSpaceReadLocker


/*!	Gets a reference to the given team's address space and read-locks it
	(via SetTo()). On failure (unknown team) the locker stays unset.
*/
AddressSpaceReadLocker::AddressSpaceReadLocker(team_id team)
	:
	fSpace(NULL),
	fLocked(false)
{
	SetTo(team);
}


/*! Takes over the reference of the address space, if \a getNewReference is
	\c false.
*/
AddressSpaceReadLocker::AddressSpaceReadLocker(VMAddressSpace* space,
	bool getNewReference)
	:
	fSpace(NULL),
	fLocked(false)
{
	SetTo(space, getNewReference);
}


/*!	Creates an unset locker; use one of the SetTo()/SetFromArea() methods
	to associate it with an address space.
*/
AddressSpaceReadLocker::AddressSpaceReadLocker()
	:
	fSpace(NULL),
	fLocked(false)
{
}


AddressSpaceReadLocker::~AddressSpaceReadLocker()
{
	Unset();
}


/*!	Unlocks the address space (if locked) and releases the reference to it.
*/
void
AddressSpaceReadLocker::Unset()
{
	Unlock();
	if (fSpace != NULL)
		fSpace->Put();
}


/*!	Gets a reference to the given team's address space and read-locks it.
	\return \c B_OK on success, \c B_BAD_TEAM_ID if the team does not exist
		or has no address space.
*/
status_t
AddressSpaceReadLocker::SetTo(team_id team)
{
	fSpace = VMAddressSpace::Get(team);
	if (fSpace == NULL)
		return B_BAD_TEAM_ID;

	fSpace->ReadLock();
	fLocked = true;
	return B_OK;
}


/*! Takes over the reference of the address space, if \a getNewReference is
	\c false.
*/
void
AddressSpaceReadLocker::SetTo(VMAddressSpace* space, bool getNewReference)
{
	fSpace = space;

	if (getNewReference)
		fSpace->Get();

	fSpace->ReadLock();
	fLocked = true;
}


/*!	Looks up the area with the given ID, read-locks its address space, and
	returns the area in \a area. The area is looked up again with the lock
	held to make sure it still exists and still belongs to the locked
	address space.
	\return \c B_OK on success, \c B_BAD_TEAM_ID if the area's address space
		could not be resolved, \c B_BAD_VALUE if the area vanished or moved
		in the meantime.
*/
status_t
AddressSpaceReadLocker::SetFromArea(area_id areaID, VMArea*& area)
{
	fSpace = GetAddressSpaceByAreaID(areaID);
	if (fSpace == NULL)
		return B_BAD_TEAM_ID;

	fSpace->ReadLock();

	area = VMAreas::Lookup(areaID);

	if (area == NULL || area->address_space != fSpace) {
		// the area was deleted (or its ID reused elsewhere) before we got
		// the address space lock
		fSpace->ReadUnlock();
		return B_BAD_VALUE;
	}

	fLocked = true;
	return B_OK;
}


/*!	(Re-)acquires the read lock on the set address space.
	\return \c true if the address space is (now) locked, \c false if no
		address space is set.
*/
bool
AddressSpaceReadLocker::Lock()
{
	if (fLocked)
		return true;
	if (fSpace == NULL)
		return false;

	fSpace->ReadLock();
	fLocked = true;

	return true;
}


/*!	Releases the read lock, if held. The reference to the address space is
	kept (see Unset()).
*/
void
AddressSpaceReadLocker::Unlock()
{
	if (fLocked) {
		fSpace->ReadUnlock();
		fLocked = false;
	}
}


// #pragma mark - AddressSpaceWriteLocker


/*!	Gets a reference to the given team's address space and write-locks it
	(via SetTo()). On failure (unknown team) the locker stays unset.
*/
AddressSpaceWriteLocker::AddressSpaceWriteLocker(team_id team)
	:
	fSpace(NULL),
	fLocked(false),
	fDegraded(false)
{
	SetTo(team);
}


/*!	Takes over the reference of the address space, if \a getNewReference is
	\c false.
*/
AddressSpaceWriteLocker::AddressSpaceWriteLocker(VMAddressSpace* space,
	bool getNewReference)
	:
	fSpace(NULL),
	fLocked(false),
	fDegraded(false)
{
	SetTo(space, getNewReference);
}


/*!	Creates an unset locker; use one of the SetTo()/SetFromArea() methods
	to associate it with an address space.
*/
AddressSpaceWriteLocker::AddressSpaceWriteLocker()
	:
	fSpace(NULL),
	fLocked(false),
	fDegraded(false)
{
}


AddressSpaceWriteLocker::~AddressSpaceWriteLocker()
{
	Unset();
}


/*!	Unlocks the address space (if locked) and releases the reference to it.
*/
void
AddressSpaceWriteLocker::Unset()
{
	Unlock();
	if (fSpace != NULL)
		fSpace->Put();
}


/*!	Gets a reference to the given team's address space and write-locks it.
	\return \c B_OK on success, \c B_BAD_TEAM_ID if the team does not exist
		or has no address space.
*/
status_t
AddressSpaceWriteLocker::SetTo(team_id team)
{
	fSpace = VMAddressSpace::Get(team);
	if (fSpace == NULL)
		return B_BAD_TEAM_ID;

	fSpace->WriteLock();
	fLocked = true;
	return B_OK;
}
223 224void 225AddressSpaceWriteLocker::SetTo(VMAddressSpace* space, bool getNewReference) 226{ 227 fSpace = space; 228 229 if (getNewReference) 230 fSpace->Get(); 231 232 fSpace->WriteLock(); 233 fLocked = true; 234} 235 236 237status_t 238AddressSpaceWriteLocker::SetFromArea(area_id areaID, VMArea*& area) 239{ 240 fSpace = GetAddressSpaceByAreaID(areaID); 241 if (fSpace == NULL) 242 return B_BAD_VALUE; 243 244 fSpace->WriteLock(); 245 246 area = VMAreas::Lookup(areaID); 247 248 if (area == NULL || area->address_space != fSpace) { 249 fSpace->WriteUnlock(); 250 return B_BAD_VALUE; 251 } 252 253 fLocked = true; 254 return B_OK; 255} 256 257 258status_t 259AddressSpaceWriteLocker::SetFromArea(team_id team, area_id areaID, 260 bool allowKernel, VMArea*& area) 261{ 262 VMAreas::ReadLock(); 263 264 area = VMAreas::LookupLocked(areaID); 265 if (area != NULL 266 && (area->address_space->ID() == team 267 || (allowKernel && team == VMAddressSpace::KernelID()))) { 268 fSpace = area->address_space; 269 fSpace->Get(); 270 } 271 272 VMAreas::ReadUnlock(); 273 274 if (fSpace == NULL) 275 return B_BAD_VALUE; 276 277 // Second try to get the area -- this time with the address space 278 // write lock held 279 280 fSpace->WriteLock(); 281 282 area = VMAreas::Lookup(areaID); 283 284 if (area == NULL) { 285 fSpace->WriteUnlock(); 286 return B_BAD_VALUE; 287 } 288 289 fLocked = true; 290 return B_OK; 291} 292 293 294status_t 295AddressSpaceWriteLocker::SetFromArea(team_id team, area_id areaID, 296 VMArea*& area) 297{ 298 return SetFromArea(team, areaID, false, area); 299} 300 301 302void 303AddressSpaceWriteLocker::Unlock() 304{ 305 if (fLocked) { 306 if (fDegraded) 307 fSpace->ReadUnlock(); 308 else 309 fSpace->WriteUnlock(); 310 fLocked = false; 311 fDegraded = false; 312 } 313} 314 315 316void 317AddressSpaceWriteLocker::DegradeToReadLock() 318{ 319 fSpace->ReadLock(); 320 fSpace->WriteUnlock(); 321 fDegraded = true; 322} 323 324 325// #pragma mark - MultiAddressSpaceLocker 326 327 
/*!	Creates an empty locker. Address spaces are added via the Add*()
	methods and locked all at once with Lock().
*/
MultiAddressSpaceLocker::MultiAddressSpaceLocker()
	:
	fItems(NULL),
	fCapacity(0),
	fCount(0),
	fLocked(false)
{
}


MultiAddressSpaceLocker::~MultiAddressSpaceLocker()
{
	Unset();
	free(fItems);
		// fItems is (re)allocated with realloc() in _ResizeIfNeeded()
}


/*!	qsort() comparison function ordering lock items by address space ID.
*/
/*static*/ int
MultiAddressSpaceLocker::_CompareItems(const void* _a, const void* _b)
{
	lock_item* a = (lock_item*)_a;
	lock_item* b = (lock_item*)_b;
	return b->space->ID() - a->space->ID();
		// descending order, i.e. kernel address space last
}


/*!	Grows the item array (in steps of 4) when it is full.
	\return \c true on success, \c false if memory is short. On failure the
		existing array is left untouched.
*/
bool
MultiAddressSpaceLocker::_ResizeIfNeeded()
{
	if (fCount == fCapacity) {
		lock_item* items = (lock_item*)realloc(fItems,
			(fCapacity + 4) * sizeof(lock_item));
		if (items == NULL)
			return false;

		fCapacity += 4;
		fItems = items;
	}

	return true;
}


/*!	Returns the index of the item referring to the given address space, or
	-1 if the address space has not been added yet.
*/
int32
MultiAddressSpaceLocker::_IndexOfAddressSpace(VMAddressSpace* space) const
{
	for (int32 i = 0; i < fCount; i++) {
		if (fItems[i].space == space)
			return i;
	}

	return -1;
}


/*!	Adds the given address space to the set to be locked. The caller passes
	in a reference to \a space, which this method consumes on every path:
	it is either kept by the new item, or Put() again if the space was
	already added or an error occurs.
	If the space was already added, a requested write lock upgrades the
	stored lock mode. On success \a _space, if not \c NULL, is set to the
	added address space.
	\return \c B_OK on success, \c B_BAD_VALUE if \a space is \c NULL,
		\c B_NO_MEMORY if growing the item array failed.
*/
status_t
MultiAddressSpaceLocker::_AddAddressSpace(VMAddressSpace* space,
	bool writeLock, VMAddressSpace** _space)
{
	if (!space)
		return B_BAD_VALUE;

	int32 index = _IndexOfAddressSpace(space);
	if (index < 0) {
		if (!_ResizeIfNeeded()) {
			space->Put();
			return B_NO_MEMORY;
		}

		lock_item& item = fItems[fCount++];
		item.space = space;
		item.write_lock = writeLock;
	} else {

		// one reference is enough
		space->Put();

		fItems[index].write_lock |= writeLock;
	}

	if (_space != NULL)
		*_space = space;

	return B_OK;
}


/*!	Unlocks all address spaces (if locked) and releases the references to
	them. The item array itself is kept for reuse.
*/
void
MultiAddressSpaceLocker::Unset()
{
	Unlock();

	for (int32 i = 0; i < fCount; i++)
		fItems[i].space->Put();

	fCount = 0;
}


/*!	Locks all added address spaces (write or read lock, as requested per
	item). The items are sorted by descending address space ID first, so
	all MultiAddressSpaceLockers acquire the locks in the same global order,
	preventing lock-order deadlocks between them.
	\return \c B_OK on success; on error, all locks acquired so far are
		released again and the error is returned.
*/
status_t
MultiAddressSpaceLocker::Lock()
{
	ASSERT(!fLocked);

	qsort(fItems, fCount, sizeof(lock_item), &_CompareItems);

	for (int32 i = 0; i < fCount; i++) {
		status_t status;
		if (fItems[i].write_lock)
			status = fItems[i].space->WriteLock();
		else
			status = fItems[i].space->ReadLock();

		if (status < B_OK) {
			// roll back: unlock everything locked so far, in reverse order
			while (--i >= 0) {
				if (fItems[i].write_lock)
					fItems[i].space->WriteUnlock();
				else
					fItems[i].space->ReadUnlock();
			}
			return status;
		}
	}

	fLocked = true;
	return B_OK;
}


/*!	Unlocks all address spaces, if they are currently locked. The references
	to them are kept (see Unset()).
*/
void
MultiAddressSpaceLocker::Unlock()
{
	if (!fLocked)
		return;

	for (int32 i = 0; i < fCount; i++) {
		if (fItems[i].write_lock)
			fItems[i].space->WriteUnlock();
		else
			fItems[i].space->ReadUnlock();
	}

	fLocked = false;
}


/*! Adds all address spaces of the areas associated with the given area's cache,
	locks them, and locks the cache (including a reference to it). It retries
	until the situation is stable (i.e. neither the cache nor the cache's area
	list changed) or an error occurs.
	Since the cache must be unlocked while the address space locks are
	acquired, the cache/area situation can change in between; the loop
	detects that and restores the locker's original item set before trying
	again.
	On success \a _area is set to the (re-looked-up) area and, if \a _cache
	is not \c NULL, \a *_cache to the locked cache.
*/
status_t
MultiAddressSpaceLocker::AddAreaCacheAndLock(area_id areaID,
	bool writeLockThisOne, bool writeLockOthers, VMArea*& _area,
	VMCache** _cache)
{
	// remember the original state
	int originalCount = fCount;
	lock_item* originalItems = NULL;
	if (fCount > 0) {
		originalItems = new(nothrow) lock_item[fCount];
		if (originalItems == NULL)
			return B_NO_MEMORY;
		memcpy(originalItems, fItems, fCount * sizeof(lock_item));
	}
	ArrayDeleter<lock_item> _(originalItems);

	// get the cache
	VMCache* cache;
	VMArea* area;
	status_t error;
	{
		// the read locker is only needed to safely get the locked cache;
		// it is released again at the end of this scope
		AddressSpaceReadLocker locker;
		error = locker.SetFromArea(areaID, area);
		if (error != B_OK)
			return error;

		cache = vm_area_get_locked_cache(area);
	}

	while (true) {
		// add all areas
		VMArea* firstArea = cache->areas;
		for (VMArea* current = firstArea; current;
				current = current->cache_next) {
			error = AddArea(current,
				current == area ? writeLockThisOne : writeLockOthers);
			if (error != B_OK) {
				vm_area_put_locked_cache(cache);
				return error;
			}
		}

		// unlock the cache and attempt to lock the address spaces
		vm_area_put_locked_cache(cache);

		error = Lock();
		if (error != B_OK)
			return error;

		// lock the cache again and check whether anything has changed

		// check whether the area is gone in the meantime
		area = VMAreas::Lookup(areaID);

		if (area == NULL) {
			Unlock();
			return B_BAD_VALUE;
		}

		// lock the cache
		VMCache* oldCache = cache;
		cache = vm_area_get_locked_cache(area);

		// If neither the area's cache has changed nor its area list we're
		// done.
		if (cache == oldCache && firstArea == cache->areas) {
			_area = area;
			if (_cache != NULL)
				*_cache = cache;
			return B_OK;
		}

		// Restore the original state and try again.

		// Unlock the address spaces, but keep the cache locked for the next
		// iteration.
		Unlock();

		// Get an additional reference to the original address spaces.
		for (int32 i = 0; i < originalCount; i++)
			originalItems[i].space->Get();

		// Release all references to the current address spaces.
		for (int32 i = 0; i < fCount; i++)
			fItems[i].space->Put();

		// Copy over the original state.
		fCount = originalCount;
		if (originalItems != NULL)
			memcpy(fItems, originalItems, fCount * sizeof(lock_item));
	}
}