Lines matching refs:size_t (source line number, then the matching line):

240 static const size_t kMemoryReserveForPriority[] = {
471 static inline size_t
472 area_page_protections_size(size_t areaSize)
483 size_t bytes = area_page_protections_size(area->Size());
540 realloc_page_protections(uint8* pageProtections, size_t areaSize,
543 size_t bytes = area_page_protections_size(areaSize);
629 unmap_pages(VMArea* area, addr_t base, size_t size)
763 size_t oldBytes = area_page_protections_size(oldSize);
767 size_t bytes = area_page_protections_size(area->Size());
912 size_t areaBytes = area_page_protections_size(area->Size());
919 size_t oldBytes = area_page_protections_size(oldSize);
925 size_t secondAreaBytes = area_page_protections_size(secondSize);
1240 wait_if_area_range_is_wired(VMArea* area, addr_t base, size_t size,
1275 size_t size, LockerType* locker)
1328 vm_set_kernel_area_debug_protection(void* cookie, void* _address, size_t size,
1917 size_t reservePages = map->MaxPagesNeededToMap(area->Base(),
2009 size_t reservePages = map->MaxPagesNeededToMap(area->Base(),
2019 size_t vecOffset = 0;
2144 uint32 addressSpec, size_t size, uint32 protection, uint32 mapping,
2438 size_t reservePages = map->MaxPagesNeededToMap(newArea->Base(),
2457 size_t reservePages = map->MaxPagesNeededToMap(
2807 size_t bytes = area_page_protections_size(source->Size());
2818 for (size_t i = 0; i < bytes; i++) {
4166 allocate_early_virtual(kernel_args* args, size_t size, addr_t alignment)
4287 vm_allocate_early(kernel_args* args, size_t virtualSize, size_t physicalSize,
4879 size_t reservePages = 2 + context.map->MaxPagesNeededToMap(originalAddress,
5176 size_t
5184 vm_unreserve_memory(size_t amount)
5195 vm_try_reserve_memory(size_t amount, int priority, bigtime_t timeout)
5197 size_t reserve = kMemoryReserveForPriority[priority];
5304 fill_area_info(struct VMArea* area, area_info* info, size_t size)
5328 vm_resize_area(area_id areaID, size_t newSize, bool kernel)
5342 size_t oldSize;
5437 size_t bytes = area_page_protections_size(newSize);
5493 vm_memcpy_from_physical(void* to, phys_addr_t from, size_t length, bool user)
5500 vm_memcpy_to_physical(phys_addr_t to, const void* _from, size_t length,
5537 size_t size, bool copyToUnsafe)
5604 validate_memory_range(const void* addr, size_t size)
5621 user_memcpy(void* to, const void* from, size_t size)
5643 user_strlcpy(char* to, const char* from, size_t size)
5651 size_t maxSize = size;
5665 if ((size_t)result >= maxSize && maxSize < size)
5673 user_memset(void* s, char c, size_t count)
5849 lock_memory_etc(team_id team, void* address, size_t numBytes, uint32 flags)
5996 lock_memory(void* address, size_t numBytes, uint32 flags)
6008 unlock_memory_etc(team_id team, void* address, size_t numBytes, uint32 flags)
6132 unlock_memory(void* address, size_t numBytes, uint32 flags)
6147 get_memory_map_etc(team_id team, const void* address, size_t numBytes,
6246 __get_memory_map_haiku(const void* address, size_t numBytes,
6286 _get_area_info(area_id id, area_info* info, size_t size)
6303 _get_next_area_info(team_id team, ssize_t* cookie, area_info* info, size_t size)
6340 resize_area(area_id areaID, size_t newSize)
6388 size_t numBytes, uint32 addressSpec, uint32 protection,
6415 create_area_etc(team_id team, const char* name, size_t size, uint32 lock,
6431 size_t size, uint32 lock, uint32 protection)
6575 _user_resize_area(area_id area, size_t newSize)
6651 size_t size, uint32 lock, uint32 protection)
6715 size_t size, uint32 protection, uint32 mapping, bool unmapAddressRange,
6757 _user_unmap_memory(void* _address, size_t size)
6787 _user_set_memory_protection(void* _address, size_t size, uint32 protection)
6822 size_t sizeLeft = size;
6836 size_t rangeSize = min_c(area->Size() - offset, sizeLeft);
6857 size_t sizeLeft = size;
6864 size_t rangeSize = min_c(area->Size() - offset, sizeLeft);
6968 _user_sync_memory(void* _address, size_t size, uint32 flags)
7003 size_t rangeSize = min_c(area->Size() - offset, size);
7047 _user_memory_advice(void* _address, size_t size, uint32 advice)
7122 user_set_memory_swappable(const void* _address, size_t size, bool swappable)
7190 _user_mlock(const void* _address, size_t size)
7197 _user_munlock(const void* _address, size_t size)
7224 __get_memory_map_beos(const void* _address, size_t numBytes,
7268 size_t numBytes, uint32 addressSpec, uint32 protection,
7281 size_t size, uint32 lock, uint32 protection)
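
The helper that dominates this listing, area_page_protections_size(), and its callers size the per-area page-protection array. The implementation itself is not reproduced above, so the following is only a hedged sketch: it assumes 4 protection bits are kept per page (so two pages share one byte) and uses B_PAGE_SIZE as a stand-in for the kernel page size.

	#include <stddef.h>

	#ifndef B_PAGE_SIZE
	#define B_PAGE_SIZE 4096	// placeholder; the kernel normally provides this
	#endif

	// Sketch: one nibble of protection flags per page, two pages packed per
	// byte, rounded up so a trailing odd page still gets storage.
	static inline size_t
	area_page_protections_size(size_t areaSize)
	{
		return (areaSize / B_PAGE_SIZE + 1) / 2;
	}

Under that assumption, an 8 KiB area (two pages) needs a single byte of protection storage, and the resize and cut paths listed above (e.g. realloc_page_protections() at line 540) presumably only recompute this byte count when an area grows or is split.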