Lines Matching refs:area

100 	inline AreaCacheLocker(VMArea* area)
103 SetTo(area);
111 inline void SetTo(VMArea* area)
114 area != NULL ? vm_area_get_locked_cache(area) : NULL, true, true);
249 static rw_lock sAreaCacheLock = RW_LOCK_INITIALIZER("area->cache");
273 static void delete_area(VMAddressSpace* addressSpace, VMArea* area,
332 PageFaultError(area_id area, status_t error)
334 fArea(area),
344 out.Print("page fault error: no area");
347 out.Print("page fault error: area: %ld, kernel only", fArea);
350 out.Print("page fault error: area: %ld, write protected",
354 out.Print("page fault error: area: %ld, read protected", fArea);
357 out.Print("page fault error: area: %ld, execute protected",
367 out.Print("page fault error: area: %ld, error: %s", fArea,
381 PageFaultDone(area_id area, VMCache* topCache, VMCache* cache,
384 fArea(area),
394 out.Print("page fault done: area: %ld, top cache: %p, cache: %p, "
439 virtual_page_address(VMArea* area, vm_page* page)
441 return area->Base()
442 + ((page->cache_offset << PAGE_SHIFT) - area->cache_offset);
447 is_page_in_area(VMArea* area, vm_page* page)
450 return pageCacheOffsetBytes >= area->cache_offset
451 && pageCacheOffsetBytes < area->cache_offset + (off_t)area->Size();
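The virtual_page_address() and is_page_in_area() hits above turn a page's cache offset into the address it is mapped at inside the area, and test whether the page falls into the window of the cache that the area maps. A minimal standalone sketch of the same arithmetic, using simplified stand-in types (not the kernel's VMArea/vm_page) and assuming a 4 KiB page size:

```cpp
// Standalone illustration of the offset arithmetic above; the types are
// simplified stand-ins, not the kernel's VMArea/vm_page definitions.
#include <cstdint>
#include <cstdio>

constexpr int kPageShift = 12;                  // assumed PAGE_SHIFT (4 KiB pages)
constexpr uint64_t kPageSize = 1 << kPageShift; // assumed B_PAGE_SIZE

struct Area {
    uint64_t base;          // virtual base address (Base())
    uint64_t size;          // size in bytes (Size())
    int64_t cache_offset;   // byte offset of the area into its cache
};

struct Page {
    uint64_t cache_offset;  // offset into the cache, in pages
};

// virtual address = area base + (page's byte offset in the cache
//                                - area's byte offset in the cache)
uint64_t virtual_page_address(const Area& area, const Page& page)
{
    return area.base + ((page.cache_offset << kPageShift) - area.cache_offset);
}

// The page lies in the area iff its cache offset falls inside the
// [cache_offset, cache_offset + size) window the area maps.
bool is_page_in_area(const Area& area, const Page& page)
{
    int64_t pageOffsetBytes = (int64_t)(page.cache_offset << kPageShift);
    return pageOffsetBytes >= area.cache_offset
        && pageOffsetBytes < area.cache_offset + (int64_t)area.size;
}

int main()
{
    Area area{0x100000, 16 * kPageSize, (int64_t)(2 * kPageSize)};
    Page page{3};  // third cache page -> second page of the area
    printf("in area: %d, address: %#llx\n", is_page_in_area(area, page),
        (unsigned long long)virtual_page_address(area, page));
    return 0;
}
```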
461 VMArea* area = VMAreas::LookupLocked(id);
462 if (area != NULL && area->address_space != addressSpace)
463 area = NULL;
467 return area;
481 allocate_area_page_protections(VMArea* area)
483 size_t bytes = area_page_protections_size(area->Size());
484 area->page_protections = (uint8*)malloc_etc(bytes,
485 area->address_space == VMAddressSpace::Kernel()
487 if (area->page_protections == NULL)
490 // init the page protections for all pages to that of the area
491 uint32 areaProtection = area->protection
493 memset(area->page_protections, areaProtection | (areaProtection << 4),
500 set_area_page_protection(VMArea* area, addr_t pageAddress, uint32 protection)
503 addr_t pageIndex = (pageAddress - area->Base()) / B_PAGE_SIZE;
504 uint8& entry = area->page_protections[pageIndex / 2];
513 get_area_page_protection(VMArea* area, addr_t pageAddress)
515 if (area->page_protections == NULL)
516 return area->protection;
518 uint32 pageIndex = (pageAddress - area->Base()) / B_PAGE_SIZE;
519 uint32 protection = area->page_protections[pageIndex / 2];
531 // If this is a kernel area we return only the kernel flags.
532 if (area->address_space == VMAddressSpace::Kernel())
553 map_page(VMArea* area, vm_page* page, addr_t address, uint32 protection,
556 VMTranslationMap* map = area->address_space->TranslationMap();
560 if (area->wiring == B_NO_LOCK) {
563 bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
572 mapping->area = area;
577 area->MemoryType(), reservation);
584 area->mappings.Add(mapping);
592 area->MemoryType(), reservation);
618 unmap_page(VMArea* area, addr_t virtualAddress)
620 return area->address_space->TranslationMap()->UnmapPage(area,
629 unmap_pages(VMArea* area, addr_t base, size_t size)
631 area->address_space->TranslationMap()->UnmapPages(area, base, size, true);
636 intersect_area(VMArea* area, addr_t& address, addr_t& size, addr_t& offset)
638 if (address < area->Base()) {
639 offset = area->Base() - address;
643 address = area->Base();
646 if (size > area->Size())
647 size = area->Size();
652 offset = address - area->Base();
653 if (offset >= area->Size())
656 if (size >= area->Size() - offset)
657 size = area->Size() - offset;
663 /*! Cuts a piece out of an area. If the given cut range covers the complete
664 area, it is deleted. If it covers the beginning or the end, the area is
666 area, it is split in two; in this case the second area is returned via
672 cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
676 if (!intersect_area(area, address, size, offset))
679 // Is the area fully covered?
680 if (address == area->Base() && size == area->Size()) {
681 delete_area(addressSpace, area, false);
696 VMCache* cache = vm_area_get_locked_cache(area);
700 // If no one else uses the area's cache and it's an anonymous cache, we can
702 bool onlyCacheUser = cache->areas == area && area->cache_next == NULL
703 && cache->consumers.IsEmpty() && area->cache_type == CACHE_TYPE_RAM;
705 const addr_t oldSize = area->Size();
708 if (offset > 0 && size == area->Size() - offset) {
709 status_t error = addressSpace->ShrinkAreaTail(area, offset,
714 if (area->page_protections != NULL) {
716 area->page_protections, area->Size(), allocationFlags);
719 addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags);
723 area->page_protections = newProtections;
727 unmap_pages(area, address, size);
741 if (area->Base() == address) {
743 if (area->page_protections != NULL) {
746 newProtections = realloc_page_protections(NULL, area->Size(),
753 // resize the area
754 status_t error = addressSpace->ShrinkAreaHead(area, area->Size() - size,
762 if (area->page_protections != NULL) {
764 ssize_t pagesShifted = (oldSize - area->Size()) / B_PAGE_SIZE;
765 bitmap_shift<uint8>(area->page_protections, oldBytes * 8, -(pagesShifted * 4));
767 size_t bytes = area_page_protections_size(area->Size());
768 memcpy(newProtections, area->page_protections, bytes);
769 free_etc(area->page_protections, allocationFlags);
770 area->page_protections = newProtections;
774 unmap_pages(area, address, size);
783 area->cache_offset += size;
788 // The tough part -- cut a piece out of the middle of the area.
789 // We do that by shrinking the area to the begin section and creating a
790 // new area for the end section.
793 addr_t secondSize = area->Size() - offset - size;
796 unmap_pages(area, address, area->Size() - firstNewSize);
798 // resize the area
799 status_t error = addressSpace->ShrinkAreaTail(area, firstNewSize,
809 if (area->page_protections != NULL) {
810 areaNewProtections = realloc_page_protections(NULL, area->Size(),
816 addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags);
829 // Create a new cache for the second area.
832 area->protection & B_OVERCOMMITTING_AREA, 0, 0,
835 addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags);
843 secondCache->virtual_base = area->cache_offset;
844 secondCache->virtual_end = area->cache_offset + secondSize;
847 off_t adoptOffset = area->cache_offset + secondBase - area->Base();
849 area->cache_offset);
859 // Map the second area.
861 area->cache_offset, area->name, secondSize, area->wiring,
862 area->protection, area->protection_max, REGION_NO_PRIVATE_MAP, 0,
872 area->cache_offset, secondSize, adoptOffset);
885 addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags);
895 error = map_backing_store(addressSpace, cache, area->cache_offset
896 + (secondBase - area->Base()),
897 area->name, secondSize, area->wiring, area->protection,
898 area->protection_max, REGION_NO_PRIVATE_MAP, 0,
901 addressSpace->ShrinkAreaTail(area, oldSize, allocationFlags);
906 // We need a cache reference for the new area.
910 if (area->page_protections != NULL) {
911 // Copy the protection bits of the first area.
912 size_t areaBytes = area_page_protections_size(area->Size());
913 memcpy(areaNewProtections, area->page_protections, areaBytes);
914 uint8* areaOldProtections = area->page_protections;
915 area->page_protections = areaNewProtections;
917 // Shift the protection bits of the second area to the start of
920 addr_t secondAreaOffset = secondBase - area->Base();
924 // Copy the protection bits of the second area.
932 // Set the correct page protections for the second area.
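The cut_area() hits above follow the case analysis described in its doc comment: a cut covering the whole area deletes it, a cut at the head or tail shrinks it, and a cut in the middle splits it, with the second piece returned to the caller. A simplified standalone sketch of just that case analysis over plain address ranges; the cache adoption, page-protection copying, and address-space bookkeeping the real function performs are omitted, and all names here are illustrative:

```cpp
// Case analysis of cutting [address, address + size) out of an area,
// reduced to plain ranges.
#include <cstdint>
#include <cstdio>
#include <optional>

struct Range {
    uint64_t base;
    uint64_t size;
    uint64_t End() const { return base + size; }
};

enum class CutKind { None, DeleteWhole, ShrinkHead, ShrinkTail, Split };

struct CutResult {
    CutKind kind = CutKind::None;
    Range first{};                 // the (possibly resized) original area
    std::optional<Range> second;   // only set for the Split case
};

CutResult cut_range(Range area, uint64_t address, uint64_t size)
{
    // Clamp the cut to the area (the intersect_area() step).
    uint64_t start = address < area.base ? area.base : address;
    uint64_t end = address + size > area.End() ? area.End() : address + size;
    if (start >= end)
        return {};  // no intersection, nothing to do

    if (start == area.base && end == area.End())
        return {CutKind::DeleteWhole, area, std::nullopt};

    if (start == area.base)        // cut at the head: the area keeps its tail
        return {CutKind::ShrinkHead, {end, area.End() - end}, std::nullopt};

    if (end == area.End())         // cut at the tail: the area keeps its head
        return {CutKind::ShrinkTail, {area.base, start - area.base}, std::nullopt};

    // Cut in the middle: shrink the area to the first piece and create a
    // second area for the remainder after the cut.
    return {CutKind::Split, {area.base, start - area.base},
        Range{end, area.End() - end}};
}

int main()
{
    CutResult r = cut_range({0x1000, 0x8000}, 0x3000, 0x2000);
    printf("kind=%d first=[%#llx,+%#llx] second=[%#llx,+%#llx]\n", (int)r.kind,
        (unsigned long long)r.first.base, (unsigned long long)r.first.size,
        (unsigned long long)r.second->base, (unsigned long long)r.second->size);
    return 0;
}
```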
969 VMArea* area = it.Next();) {
971 if ((area->protection & B_KERNEL_AREA) != 0) {
973 "unmap range of kernel area %" B_PRId32 " (%s)\n",
974 team_get_current_team_id(), area->id, area->name);
982 VMArea* area = it.Next();) {
984 status_t error = cut_area(addressSpace, area, address, size, NULL,
997 discard_area_range(VMArea* area, addr_t address, addr_t size)
1000 if (!intersect_area(area, address, size, offset))
1003 // If someone else uses the area's cache or it's not an anonymous cache, we
1005 VMCache* cache = vm_area_get_locked_cache(area);
1006 if (cache->areas != area || area->cache_next != NULL
1014 unmap_pages(area, address, size);
1032 VMArea* area = it.Next();) {
1033 status_t error = discard_area_range(area, address, size);
1059 ", protection %d, protectionMax %d, area %p, areaName '%s'\n",
1067 panic("map_backing_store(): called with size=0 for area '%s'!",
1086 VMArea* area = addressSpace->CreateArea(areaName, wiring, protection,
1089 area->protection_max = protectionMax & B_USER_PROTECTION;
1090 if (area == NULL)
1128 // insert the area, so back out
1136 // some existing area, and unmap_address_range also needs to lock that
1137 // cache to delete the area.
1146 status = addressSpace->InsertArea(area, size, addressRestrictions,
1157 // attach the cache to the area
1158 area->cache = cache;
1159 area->cache_offset = offset;
1161 // point the cache back to the area
1162 cache->InsertAreaLocked(area);
1166 // insert the area in the global areas map
1167 VMAreas::Insert(area);
1169 // grab a ref to the address space (the area holds this)
1173 // cache, sourceCache, areaName, area);
1175 *_area = area;
1188 addressSpace->DeleteArea(area, allocationFlags);
1193 /*! Equivalent to wait_if_area_range_is_wired(area, area->Base(), area->Size(),
1198 wait_if_area_is_wired(VMArea* area, LockerType1* locker1, LockerType2* locker2)
1200 area->cache->AssertLocked();
1203 if (!area->AddWaiterIfWired(&waiter))
1218 /*! Checks whether the given area has any wired ranges intersecting with the
1223 The area's top cache must be locked and must be unlocked as a side effect
1229 \param area The area to be checked.
1240 wait_if_area_range_is_wired(VMArea* area, addr_t base, size_t size,
1243 area->cache->AssertLocked();
1246 if (!area->AddWaiterIfWired(&waiter, base, size))
1279 VMArea* area = it.Next();) {
1281 AreaCacheLocker cacheLocker(vm_area_get_locked_cache(area));
1283 if (wait_if_area_range_is_wired(area, base, size, locker, &cacheLocker))
1291 /*! Prepares an area to be used for vm_set_kernel_area_debug_protection().
1299 VMArea* area;
1300 status_t status = locker.SetFromArea(id, area);
1304 if (area->page_protections == NULL) {
1305 status = allocate_area_page_protections(area);
1310 *cookie = (void*)area;
1321 additional calls to this function. For this to work the area must be
1350 VMArea* area = (VMArea*)cookie;
1352 addr_t offset = address - area->Base();
1353 if (area->Size() - offset < size) {
1354 panic("protect range not fully within supplied area");
1358 if (area->page_protections == NULL) {
1359 panic("area has no page protections");
1373 set_area_page_protection(area, pageAddress, protection);
1404 VMArea* area;
1410 true, &area, NULL);
1417 area->cache_type = CACHE_TYPE_RAM;
1418 return area->id;
1465 VMArea* area;
1680 virtualAddressRestrictions, kernel, &area, _address);
1697 // Allocate and map all pages for this area
1700 for (addr_t address = area->Base();
1701 address < area->Base() + (area->Size() - 1);
1705 if (isStack && address < area->Base()
1708 if (isStack && address >= area->Base() + area->Size()
1716 map_page(area, page, address, protection, &reservation);
1737 for (addr_t virtualAddress = area->Base();
1738 virtualAddress < area->Base() + (area->Size() - 1);
1774 addr_t virtualAddress = area->Base();
1779 for (virtualAddress = area->Base(); virtualAddress < area->Base()
1780 + (area->Size() - 1); virtualAddress += B_PAGE_SIZE,
1787 area->MemoryType(), &reservation);
1812 area->cache_type = CACHE_TYPE_RAM;
1813 return area->id;
1817 // we had reserved the area space upfront...
1844 VMArea* area;
1861 // move the actual area down to align on a page boundary
1882 true, &area, _address);
1895 area->SetMemoryType(memoryType);
1897 status = arch_vm_set_memory_type(area, physicalAddress, memoryType);
1899 delete_area(locker.AddressSpace(), area, false);
1908 // The area is already mapped, but possibly not with the right
1911 map->ProtectArea(area, area->protection);
1914 // Map the area completely.
1917 size_t reservePages = map->MaxPagesNeededToMap(area->Base(),
1918 area->Base() + (size - 1));
1927 map->Map(area->Base() + offset, physicalAddress + offset,
1928 protection, area->MemoryType(), &reservation);
1936 // modify the pointer returned to be offset back into the new area
1940 area->cache_type = CACHE_TYPE_DEVICE;
1941 return area->id;
1992 VMArea* area;
1998 &addressRestrictions, true, &area, _address);
2009 size_t reservePages = map->MaxPagesNeededToMap(area->Base(),
2010 area->Base() + (size - 1));
2029 map->Map(area->Base() + offset, vecs[vecIndex].base + vecOffset,
2030 protection, area->MemoryType(), &reservation);
2041 area->cache_type = CACHE_TYPE_DEVICE;
2042 return area->id;
2077 VMArea* area;
2084 &addressRestrictions, true, &area, address);
2093 area->cache_type = CACHE_TYPE_NULL;
2094 return area->id;
2108 /*! \a cache must be locked. The area's address space must be read-locked.
2111 pre_map_area_pages(VMArea* area, VMCache* cache,
2114 addr_t baseAddress = area->Base();
2115 addr_t cacheOffset = area->cache_offset;
2117 page_num_t endPage = firstPage + area->Size() / B_PAGE_SIZE;
2130 map_page(area, page,
2138 /*! Will map the file specified by \a fd to an area in memory.
2266 VMArea* area;
2272 &addressRestrictions, kernel, &area, _address);
2280 pre_map_area_pages(area, cache, &reservation);
2294 area->cache_type = CACHE_TYPE_VNODE;
2295 return area->id;
2313 vm_area_get_locked_cache(VMArea* area)
2318 VMCache* cache = area->cache;
2328 if (cache == area->cache) {
2355 // Check whether the source area exists and is cloneable. If so, mark it
2400 dprintf("team \"%s\" (%" B_PRId32 ") attempted to clone area \"%s\" (%"
2426 // we don't have actual pages to map but a physical area
2494 /*! Deletes the specified area of the given address space.
2497 The caller must ensure that the area does not have any wired ranges.
2499 \param addressSpace The address space containing the area.
2500 \param area The area to be deleted.
2505 delete_area(VMAddressSpace* addressSpace, VMArea* area,
2508 ASSERT(!area->IsWired());
2510 VMAreas::Remove(area);
2512 // At this point the area is removed from the global hash table, but
2513 // still exists in the area list.
2515 // Unmap the virtual address space the area occupied.
2518 VMCache* topCache = vm_area_get_locked_cache(area);
2522 // If the area's top cache is a temporary cache and the area is the only
2529 area->address_space->TranslationMap()->UnmapArea(area,
2533 if (!area->cache->temporary)
2534 area->cache->WriteModified();
2539 arch_vm_unset_memory_type(area);
2540 addressSpace->RemoveArea(area, allocationFlags);
2543 area->cache->RemoveArea(area);
2544 area->cache->ReleaseRef();
2546 addressSpace->DeleteArea(area, allocationFlags);
2553 TRACE(("vm_delete_area(team = 0x%" B_PRIx32 ", area = 0x%" B_PRIx32 ")\n",
2556 // lock the address space and make sure the area isn't wired
2558 VMArea* area;
2562 status_t status = locker.SetFromArea(team, id, area);
2566 cacheLocker.SetTo(area);
2567 } while (wait_if_area_is_wired(area, &locker, &cacheLocker));
2571 if (!kernel && (area->protection & B_KERNEL_AREA) != 0)
2574 delete_area(locker.AddressSpace(), area, false);
2657 // The area must be readable in the same way it was
2682 // Change the protection of all pages in this area.
2690 // The area must be readable in the same way it was
2705 // The area must be readable in the same way it was previously
2763 // If the source area isn't shared, count the number of wired pages in
2833 // First, create a cache on top of the source area, respectively use the
2834 // existing one, if this is a shared area.
2855 // The new area uses the old area's cache, but map_backing_store()
2860 // If the source area is writable, we need to move it one layer up as well
2872 // we return the ID of the newly created area
2883 TRACE(("vm_set_area_protection(team = %#" B_PRIx32 ", area = %#" B_PRIx32
2895 VMArea* area;
2905 status = locker.AddAreaCacheAndLock(areaID, true, false, area, &cache);
2911 if (!kernel && (area->address_space == VMAddressSpace::Kernel()
2912 || (area->protection & B_KERNEL_AREA) != 0)) {
2914 "set protection %#" B_PRIx32 " on kernel area %" B_PRId32
2915 " (%s)\n", team, newProtection, areaID, area->name);
2918 if (!kernel && area->protection_max != 0
2919 && (newProtection & area->protection_max)
2923 "area %" B_PRId32 " (%s)\n", team, newProtection,
2924 area->protection_max, areaID, area->name);
2929 && area->address_space->ID() != team) {
2935 if (area->protection == newProtection)
2939 = (area->protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0;
2941 // Make sure the area (respectively, if we're going to call
2953 if (wait_if_area_is_wired(area, &locker, &cacheLocker))
2965 if (cache->CountWritableAreas(area) == 0) {
2982 // is significantly smaller than the number of pages in the area,
2985 == (area->protection & ~(B_WRITE_AREA | B_KERNEL_WRITE_AREA))
2986 && cache->page_count * 2 < area->Size() / B_PAGE_SIZE) {
3020 VMTranslationMap* map = area->address_space->TranslationMap();
3024 page_num_t firstPageOffset = area->cache_offset / B_PAGE_SIZE;
3026 = firstPageOffset + area->Size() / B_PAGE_SIZE;
3031 addr_t address = virtual_page_address(area, page);
3032 map->ProtectPage(area, address, newProtection);
3036 map->ProtectArea(area, newProtection);
3041 area->protection = newProtection;
3078 VMArea* area = mapping->area;
3079 VMTranslationMap* map = area->address_space->TranslationMap();
3084 map->Query(virtual_page_address(area, page), &physicalAddress, &flags);
3108 VMArea* area = mapping->area;
3109 VMTranslationMap* map = area->address_space->TranslationMap();
3112 map->ClearFlags(virtual_page_address(area, page), flags);
3128 VMArea* area = mapping->area;
3129 VMTranslationMap* map = area->address_space->TranslationMap();
3130 addr_t address = virtual_page_address(area, page);
3131 map->UnmapPage(area, address, false);
3144 VMArea* area = mapping->area;
3145 VMTranslationMap* map = area->address_space->TranslationMap();
3148 if (map->ClearAccessedAndModified(area,
3149 virtual_page_address(area, page), false, modified)) {
3186 VMArea* area = mapping->area;
3187 VMTranslationMap* map = area->address_space->TranslationMap();
3188 addr_t address = virtual_page_address(area, page);
3190 if (map->ClearAccessedAndModified(area, address, true, modified)) {
3499 VMArea* area = cache->areas;
3500 kprintf(", areas: %" B_PRId32 " (%s, team: %" B_PRId32 ")", area->id,
3501 area->name, area->address_space->ID());
3503 while (area->cache_next != NULL) {
3504 area = area->cache_next;
3505 kprintf(", %" B_PRId32, area->id);
3631 dump_area_struct(VMArea* area, bool mappings)
3633 kprintf("AREA: %p\n", area);
3634 kprintf("name:\t\t'%s'\n", area->name);
3635 kprintf("owner:\t\t0x%" B_PRIx32 "\n", area->address_space->ID());
3636 kprintf("id:\t\t0x%" B_PRIx32 "\n", area->id);
3637 kprintf("base:\t\t0x%lx\n", area->Base());
3638 kprintf("size:\t\t0x%lx\n", area->Size());
3639 kprintf("protection:\t0x%" B_PRIx32 "\n", area->protection);
3640 kprintf("page_protection:%p\n", area->page_protections);
3641 kprintf("wiring:\t\t0x%x\n", area->wiring);
3642 kprintf("memory_type:\t%#" B_PRIx32 "\n", area->MemoryType());
3643 kprintf("cache:\t\t%p\n", area->cache);
3644 kprintf("cache_type:\t%s\n", vm_cache_type_to_string(area->cache_type));
3645 kprintf("cache_offset:\t0x%" B_PRIx64 "\n", area->cache_offset);
3646 kprintf("cache_next:\t%p\n", area->cache_next);
3647 kprintf("cache_prev:\t%p\n", area->cache_prev);
3649 VMAreaMappings::Iterator iterator = area->mappings.GetIterator();
3673 VMArea* area;
3677 kprintf("usage: area [-m] [id|contains|address|name] <id|address|name>\n"
3681 "-m shows the area's mappings as well.\n");
3703 kprintf("No area specifier given.\n");
3712 // walk through the area list, looking for the arguments as a name
3715 while ((area = it.Next()) != NULL) {
3717 && !strcmp(argv[index], area->name))
3718 || (num != 0 && (((mode & 1) != 0 && (addr_t)area->id == num)
3719 || (((mode & 2) != 0 && area->Base() <= num
3720 && area->Base() + area->Size() > num))))) {
3721 dump_area_struct(area, mappings);
3727 kprintf("could not find area %s (%ld)\n", argv[index], num);
3737 VMArea* area;
3752 while ((area = it.Next()) != NULL) {
3753 if ((id != 0 && area->address_space->ID() != id)
3754 || (name != NULL && strstr(area->name, name) == NULL))
3757 kprintf("%p %5" B_PRIx32 " %p %p %4" B_PRIx32 " %4d %s\n", area,
3758 area->id, (void*)area->Base(), (void*)area->Size(),
3759 area->protection, area->wiring, area->name);
3849 if (VMArea* area = fAddressSpace->LookupArea(virtualAddress))
3850 kprintf(" %8" B_PRId32 " %s\n", area->id, area->name);
3933 while (VMArea* area = addressSpace->FirstArea()) {
3934 ASSERT(!area->IsWired());
3935 delete_area(addressSpace, area, deletingAddressSpace);
3958 VMArea* area = locker.AddressSpace()->LookupArea(address);
3959 if (area != NULL) {
3960 if (!kernel && (area->protection & (B_READ_AREA | B_WRITE_AREA)) == 0
3961 && (area->protection & B_KERNEL_AREA) != 0)
3964 return area->id;
4012 // into the area we should dispose
4018 VMArea* area = it.Next();) {
4019 addr_t areaStart = area->Base();
4020 addr_t areaEnd = areaStart + (area->Size() - 1);
4026 // we are done, the area is already beyond of what we have to free
4047 // we can also get rid of some space at the end of the area
4065 // use file name to create a good area name
4106 area_id area = area_for((void*)(addr_t)args->kernel_args_range[i].start);
4107 if (area >= B_OK)
4108 delete_area(area);
4440 add_debugger_command("area", &dump_area,
4441 "Dump info about a particular area");
4615 VMArea* area = NULL;
4618 area = addressSpace->LookupArea(faultAddress);
4626 faultAddress, area ? area->name : "???", faultAddress - (area ?
4627 area->Base() : 0x0));
4709 /*! Gets the page that should be mapped into the area.
4790 // Since we needed to unlock everything temporarily, the area
4889 // get the area the fault was in
4890 VMArea* area = addressSpace->LookupArea(address);
4891 if (area == NULL) {
4892 dprintf("vm_soft_fault: va 0x%lx not covered by area in address "
4901 uint32 protection = get_area_page_protection(area, address);
4903 && (area->protection & B_KERNEL_AREA) != 0) {
4904 dprintf("user access on kernel area 0x%" B_PRIx32 " at %p\n",
4905 area->id, (void*)originalAddress);
4906 TPF(PageFaultError(area->id,
4913 dprintf("write access attempted on write-protected area 0x%"
4914 B_PRIx32 " at %p\n", area->id, (void*)originalAddress);
4915 TPF(PageFaultError(area->id,
4921 dprintf("instruction fetch attempted on execute-protected area 0x%"
4922 B_PRIx32 " at %p\n", area->id, (void*)originalAddress);
4923 TPF(PageFaultError(area->id,
4929 dprintf("read access attempted on read-protected area 0x%" B_PRIx32
4930 " at %p\n", area->id, (void*)originalAddress);
4931 TPF(PageFaultError(area->id,
4937 // We have the area, it was a valid access, so let's try to resolve the
4939 // At first, the top most cache from the area is investigated.
4941 context.Prepare(vm_area_get_locked_cache(area),
4942 address - area->Base() + area->cache_offset);
4960 TPF(PageFaultError(area->id, status));
4969 TPF(PageFaultDone(area->id, context.topCache, context.page->Cache(),
4972 // If the page doesn't reside in the area's cache, we need to make sure
4995 context.map->ProtectPage(area, address, newProtection);
5014 if (area->AddWaiterIfWired(&waiter, address, B_PAGE_SIZE,
5043 unmap_page(area, address);
5048 if (map_page(area, context.page, address, newProtection,
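The vm_soft_fault() hits above fetch the per-page protection for the faulting address and reject the access before any cache lookup: user faults on B_KERNEL_AREA areas, writes without write permission, instruction fetches without execute permission, and reads without read permission. A condensed sketch of that gatekeeping step, with placeholder flag values and a simplified Area type (the real check also distinguishes user and kernel protection bits):

```cpp
// Condensed sketch of the access check performed before a fault is
// resolved against the cache chain. Flags and types are placeholders.
#include <cstdint>
#include <cstdio>

enum : uint32_t {
    kReadArea    = 1 << 0,
    kWriteArea   = 1 << 1,
    kExecuteArea = 1 << 2,
    kKernelArea  = 1 << 3,
};

struct Area {
    uint32_t protection;
};

// Returns true if the faulting access may proceed to the cache lookup.
bool access_permitted(const Area& area, uint32_t pageProtection,
    bool isWrite, bool isExecute, bool isUser)
{
    if (isUser && (area.protection & kKernelArea) != 0)
        return false;                               // user access to kernel-only area
    if (isWrite && (pageProtection & kWriteArea) == 0)
        return false;                               // write to write-protected page
    if (isExecute && (pageProtection & kExecuteArea) == 0)
        return false;                               // fetch from execute-protected page
    if (!isWrite && !isExecute && (pageProtection & kReadArea) == 0)
        return false;                               // read from read-protected page
    return true;
}

int main()
{
    Area area{kReadArea | kWriteArea};
    printf("%d\n", access_permitted(area, area.protection,
        /*isWrite*/ true, /*isExecute*/ false, /*isUser*/ true));   // allowed
    printf("%d\n", access_permitted(area, kReadArea,
        /*isWrite*/ true, /*isExecute*/ false, /*isUser*/ true));   // rejected
    return 0;
}
```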
5247 VMArea* area;
5248 status_t status = locker.SetFromArea(id, area);
5253 uint32 oldType = area->MemoryType();
5257 // set the memory type of the area and the mapped pages
5258 VMTranslationMap* map = area->address_space->TranslationMap();
5260 area->SetMemoryType(type);
5261 map->ProtectArea(area, area->protection);
5265 status_t error = arch_vm_set_memory_type(area, physicalBase, type);
5267 // reset the memory type of the area and the mapped pages
5269 area->SetMemoryType(oldType);
5270 map->ProtectArea(area, area->protection);
5304 fill_area_info(struct VMArea* area, area_info* info, size_t size)
5306 strlcpy(info->name, area->name, B_OS_NAME_LENGTH);
5307 info->area = area->id;
5308 info->address = (void*)area->Base();
5309 info->size = area->Size();
5310 info->protection = area->protection;
5311 info->lock = area->wiring;
5312 info->team = area->address_space->ID();
5318 VMCache* cache = vm_area_get_locked_cache(area);
5320 // Note, this is a simplification; the cache could be larger than this area
5335 VMArea* area;
5351 status = locker.AddAreaCacheAndLock(areaID, true, true, area, &cache);
5357 if (!kernel && (area->address_space == VMAddressSpace::Kernel()
5358 || (area->protection & B_KERNEL_AREA) != 0)) {
5360 "resize kernel area %" B_PRId32 " (%s)\n",
5361 team_get_current_team_id(), areaID, area->name);
5366 oldSize = area->Size();
5421 // We also need to unmap all pages beyond the new size, if the area has
5436 if (area->page_protections != NULL) {
5439 = (uint8*)realloc(area->page_protections, bytes);
5443 area->page_protections = newProtections;
5446 // init the additional page protections to that of the area
5448 uint32 areaProtection = area->protection
5450 memset(area->page_protections + offset,
5453 uint8& entry = area->page_protections[offset - 1];
5480 // TODO: we must honour the lock restrictions of this area
5518 walks through the respective area's cache chain to find the physical page
5560 // get the area
5561 VMArea* area = addressSpace->LookupArea((addr_t)unsafeMemory);
5562 if (area == NULL)
5566 off_t cacheOffset = (addr_t)unsafeMemory - area->Base()
5567 + area->cache_offset;
5568 VMCache* cache = area->cache;
5591 if (page->Cache() != area->cache)
5728 // get the area
5729 VMArea* area = addressSpace->LookupArea(pageAddress);
5730 if (area == NULL) {
5735 // Lock the area's top cache. This is a requirement for VMArea::Wire().
5736 VMCacheChainLocker cacheChainLocker(vm_area_get_locked_cache(area));
5738 // mark the area range wired
5739 area->Wire(&info->range);
5741 // Lock the area's cache chain and the translation map. Needed to look
5763 // wired the area itself, nothing disturbing will happen with it
5774 VMCache* cache = vm_area_get_locked_cache(area);
5775 area->Unwire(&info->range);
5799 VMArea* area = info->range.area;
5800 AddressSpaceReadLocker addressSpaceLocker(area->address_space, false);
5804 VMCache* cache = vm_area_get_locked_cache(area);
5816 area->Unwire(&info->range);
5887 // get the next area
5888 VMArea* area = addressSpace->LookupArea(nextAddress);
5889 if (area == NULL) {
5895 addr_t areaEnd = std::min(lockEndAddress, area->Base() + area->Size());
5906 // Lock the area's top cache. This is a requirement for VMArea::Wire().
5907 VMCacheChainLocker cacheChainLocker(vm_area_get_locked_cache(area));
5909 // mark the area range wired
5910 area->Wire(range);
5912 // Depending on the area cache type and the wiring, we may not need to
5914 if (area->cache_type == CACHE_TYPE_NULL
5915 || area->cache_type == CACHE_TYPE_DEVICE
5916 || area->wiring == B_FULL_LOCK
5917 || area->wiring == B_CONTIGUOUS) {
5922 // Lock the area's cache chain and the translation map. Needed to look
5943 // wired the area itself, nothing disturbing will happen with it
5953 cacheChainLocker.SetTo(vm_area_get_locked_cache(area));
5968 // is the first in this area, unwire the area, since we won't get
5971 area->Unwire(range);
6046 // get the next area
6047 VMArea* area = addressSpace->LookupArea(nextAddress);
6048 if (area == NULL) {
6054 addr_t areaEnd = std::min(lockEndAddress, area->Base() + area->Size());
6056 // Lock the area's top cache. This is a requirement for
6058 VMCacheChainLocker cacheChainLocker(vm_area_get_locked_cache(area));
6060 // Depending on the area cache type and the wiring, we may not need to
6062 if (area->cache_type == CACHE_TYPE_NULL
6063 || area->cache_type == CACHE_TYPE_DEVICE
6064 || area->wiring == B_FULL_LOCK
6065 || area->wiring == B_CONTIGUOUS) {
6069 VMAreaWiredRange* range = area->Unwire(areaStart,
6079 // Lock the area's cache chain and the translation map. Needed to look
6108 // All pages are unwired. Remove the area's wired range as well (to
6110 VMAreaWiredRange* range = area->Unwire(areaStart,
6292 VMArea* area;
6293 status_t status = locker.SetFromArea(id, area);
6297 fill_area_info(area, info, size);
6318 VMArea* area = locker.AddressSpace()->FindClosestArea(nextBase, false);
6319 if (area == NULL) {
6324 fill_area_info(area, info, size);
6325 *cookie = (ssize_t)(area->Base() + 1);
6332 set_area_protection(area_id area, uint32 newProtection)
6334 return vm_set_area_protection(VMAddressSpace::KernelID(), area,
6346 /*! Transfers the specified area to a new team. The caller must be the owner
6347 of the area.
6361 // We need to mark the area cloneable so the following operations work.
6446 delete_area(area_id area)
6448 return vm_delete_area(VMAddressSpace::KernelID(), area, true);
6517 _user_get_area_info(area_id area, area_info* userInfo)
6523 status_t status = get_area_info(area, &info);
6564 _user_set_area_protection(area_id area, uint32 newProtection)
6569 return vm_set_area_protection(VMAddressSpace::CurrentID(), area,
6575 _user_resize_area(area_id area, size_t newSize)
6579 return vm_resize_area(area, newSize, false);
6584 _user_transfer_area(area_id area, void** userAddress, uint32 addressSpec,
6599 area_id newArea = transfer_area(area, &address, addressSpec, target, false);
6686 area_id area = vm_create_anonymous_area(VMAddressSpace::CurrentID(), name,
6690 if (area >= B_OK
6692 delete_area(area);
6696 return area;
6701 _user_delete_area(area_id area)
6707 return vm_delete_area(VMAddressSpace::CurrentID(), area, false);
6720 area_id area;
6743 area = _vm_map_file(VMAddressSpace::CurrentID(), name, &address,
6746 if (area < B_OK)
6747 return area;
6752 return area;
6824 VMArea* area = locker.AddressSpace()->LookupArea(currentAddress);
6825 if (area == NULL)
6828 if ((area->protection & B_KERNEL_AREA) != 0)
6830 if (area->protection_max != 0
6831 && (protection & area->protection_max) != (protection & B_USER_PROTECTION)) {
6835 addr_t offset = currentAddress - area->Base();
6836 size_t rangeSize = min_c(area->Size() - offset, sizeLeft);
6838 AreaCacheLocker cacheLocker(area);
6840 if (wait_if_area_range_is_wired(area, currentAddress, rangeSize,
6853 // Second round: If the protections differ from that of the area, create a
6859 VMArea* area = locker.AddressSpace()->LookupArea(currentAddress);
6860 if (area == NULL)
6863 addr_t offset = currentAddress - area->Base();
6864 size_t rangeSize = min_c(area->Size() - offset, sizeLeft);
6869 if (area->page_protections == NULL) {
6870 if (area->protection == protection)
6872 if (offset == 0 && rangeSize == area->Size()) {
6873 // The whole area is covered: let set_area_protection handle it.
6874 status_t status = vm_set_area_protection(area->address_space->ID(),
6875 area->id, protection, false);
6881 status_t status = allocate_area_page_protections(area);
6888 VMCache* topCache = vm_area_get_locked_cache(area);
6896 for (addr_t pageAddress = area->Base() + offset;
6904 = (get_area_page_protection(area, pageAddress) & B_WRITE_AREA) != 0;
6921 for (addr_t pageAddress = area->Base() + offset;
6925 set_area_page_protection(area, pageAddress, protection);
6938 panic("area %p looking up page failed for pa %#" B_PRIxPHYSADDR
6939 "\n", area, physicalAddress);
6951 map->ProtectPage(area, pageAddress, protection);
6957 unmap_page(area, pageAddress);
6997 // get the first area
6998 VMArea* area = locker.AddressSpace()->LookupArea(address);
6999 if (area == NULL)
7002 uint32 offset = address - area->Base();
7003 size_t rangeSize = min_c(area->Size() - offset, size);
7004 offset += area->cache_offset;
7007 AreaCacheLocker cacheLocker(area);
7010 VMCache* cache = area->cache;
7102 VMArea* area = locker.AddressSpace()->LookupArea((addr_t)address);
7103 if (area == NULL)
7106 uint32 protection = get_area_page_protection(area, (addr_t)address);
7107 uint32 wiring = area->wiring;
7145 // get the next area
7146 VMArea* area = addressSpace->LookupArea(nextAddress);
7147 if (area == NULL) {
7153 const addr_t areaEnd = std::min(endAddress, area->Base() + area->Size());
7162 VMCacheChainLocker cacheChainLocker(vm_area_get_locked_cache(area));
7164 if (dynamic_cast<VMAnonymousNoSwapCache*>(area->cache) != NULL) {
7166 } else if ((anonCache = dynamic_cast<VMAnonymousCache*>(area->cache)) != NULL) {
7167 error = anonCache->SetCanSwapPages(areaStart - area->Base(),
7200 // if multiple clones of an area had mlock() called on them,