Lines matching refs:size (references to size) in VmAddressRegion; the leading number on each excerpt is its line number in the source file.

24 VmAddressRegion::VmAddressRegion(VmAspace& aspace, vaddr_t base, size_t size, uint32_t vmar_flags)
25 : VmAddressRegionOrMapping(base, size, vmar_flags | VMAR_CAN_RWX_FLAGS,
35 VmAddressRegion::VmAddressRegion(VmAddressRegion& parent, vaddr_t base, size_t size,
37 : VmAddressRegionOrMapping(base, size, vmar_flags, parent.aspace_.get(),
45 : VmAddressRegion(kernel_aspace, kernel_aspace.base(), kernel_aspace.size(),
64 auto vmar = new (&ac) VmAddressRegion(aspace, aspace.base(), aspace.size(), vmar_flags);
74 zx_status_t VmAddressRegion::CreateSubVmarInternal(size_t offset, size_t size, uint8_t align_pow2,
86 if (size == 0) {
129 if (offset >= size_ || size > size_ - offset) {
142 if (!IsRangeAvailableLocked(new_base, size)) {
144 return OverwriteVmMapping(new_base, size, vmar_flags,
151 zx_status_t status = AllocSpotLocked(size, align_pow2, arch_mmu_flags, &new_base);
167 VmMapping(*this, new_base, size, vmar_flags,
171 VmAddressRegion(*this, new_base, size, vmar_flags, name));
182 !VDso::valid_code_mapping(vmo_offset, size)) {
193 zx_status_t VmAddressRegion::CreateSubVmar(size_t offset, size_t size, uint8_t align_pow2,
198 if (!IS_PAGE_ALIGNED(size)) {
208 zx_status_t status = CreateSubVmarInternal(offset, size, align_pow2, vmar_flags, nullptr, 0,
218 zx_status_t VmAddressRegion::CreateVmMapping(size_t mapping_offset, size_t size, uint8_t align_pow2,
223 LTRACEF("%p %#zx %#zx %x\n", this, mapping_offset, size, vmar_flags);
235 // If size overflows, it'll become 0 and get rejected in
237 size = ROUNDUP(size, PAGE_SIZE);
239 // Make sure that vmo_offset is aligned and that a mapping of this size
241 if (!IS_PAGE_ALIGNED(vmo_offset) || vmo_offset + size < vmo_offset) {
259 CreateSubVmarInternal(mapping_offset, size, align_pow2, vmar_flags, fbl::move(vmo),
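
The checks at lines 235-241 guard CreateVmMapping against arithmetic wrap: ROUNDUP to PAGE_SIZE can overflow size to 0 (which the later size == 0 check rejects), and vmo_offset + size < vmo_offset catches a wrapping end offset. A minimal standalone sketch of the same pattern, with illustrative names and a hard-coded 4 KiB page size rather than the kernel's macros:

#include <cstdint>
#include <cstdio>

constexpr uint64_t kPageSize = 4096;  // illustrative stand-in for PAGE_SIZE

// Round size up to a page boundary; on overflow the result wraps to 0,
// which the caller then rejects via its size == 0 check.
constexpr uint64_t RoundUpToPage(uint64_t size) {
    return (size + kPageSize - 1) & ~(kPageSize - 1);
}

// Hypothetical validator mirroring the checks quoted above.
bool ValidateMappingArgs(uint64_t vmo_offset, uint64_t size) {
    size = RoundUpToPage(size);
    if (size == 0) {
        return false;  // zero-length request, or size wrapped while rounding
    }
    // vmo_offset must be page-aligned, and vmo_offset + size must not wrap.
    if ((vmo_offset & (kPageSize - 1)) != 0 || vmo_offset + size < vmo_offset) {
        return false;
    }
    return true;
}

int main() {
    printf("%d\n", ValidateMappingArgs(0, UINT64_MAX));                 // 0: size wraps to 0
    printf("%d\n", ValidateMappingArgs(UINT64_MAX - 4095, kPageSize));  // 0: end address wraps
    printf("%d\n", ValidateMappingArgs(kPageSize, 100));                // 1: rounds up to one page
}
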
270 vaddr_t base, size_t size, uint32_t vmar_flags,
282 VmMapping(*this, base, size, vmar_flags,
288 zx_status_t status = UnmapInternalLocked(base, size, false /* can_destroy_regions */,
364 if (!itr.IsValid() || itr->base() > addr || addr > itr->base() + itr->size() - 1) {
401 bool VmAddressRegion::IsRangeAvailableLocked(vaddr_t base, size_t size) {
403 DEBUG_ASSERT(size > 0);
414 if (add_overflow(prev->base(), prev->size() - 1, &prev_last_byte)) {
424 if (add_overflow(base, size - 1, &last_byte)) {
450 if (add_overflow(prev->base(), prev->size(), &gap_beg) ||
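
IsRangeAvailableLocked (lines 401-424) compares last bytes rather than one-past-the-end addresses, using add_overflow so a region that ends at the top of the address space does not wrap. A hedged sketch of that comparison style, substituting __builtin_add_overflow for the kernel's add_overflow helper:

#include <cstdint>

// Returns true when [base, base + size - 1] and [other_base,
// other_base + other_size - 1] do not overlap. Both sizes are assumed
// non-zero, matching the DEBUG_ASSERT(size > 0) above. Comparing last
// bytes instead of one-past-the-end addresses keeps a range ending at the
// very top of the address space from wrapping; a range whose last byte
// cannot be computed is treated as conflicting.
bool RangesDisjoint(uint64_t base, uint64_t size,
                    uint64_t other_base, uint64_t other_size) {
    uint64_t last, other_last;
    if (__builtin_add_overflow(base, size - 1, &last) ||
        __builtin_add_overflow(other_base, other_size - 1, &other_last)) {
        return false;
    }
    return last < other_base || other_last < base;
}
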
518 zx_status_t VmAddressRegion::AllocSpotLocked(size_t size, uint8_t align_pow2, uint arch_mmu_flags,
521 DEBUG_ASSERT(size > 0 && IS_PAGE_ALIGNED(size));
524 LTRACEF_LEVEL(2, "aspace %p size 0x%zx align %hhu\n", this, size,
529 return CompactRandomizedRegionAllocatorLocked(size, align_pow2, arch_mmu_flags, spot);
531 return NonCompactRandomizedRegionAllocatorLocked(size, align_pow2, arch_mmu_flags,
535 return LinearRegionAllocatorLocked(size, align_pow2, arch_mmu_flags, spot);
615 zx_status_t VmAddressRegion::Unmap(vaddr_t base, size_t size) {
618 size = ROUNDUP(size, PAGE_SIZE);
619 if (size == 0 || !IS_PAGE_ALIGNED(base)) {
628 return UnmapInternalLocked(base, size, true /* can_destroy_regions */,
632 zx_status_t VmAddressRegion::UnmapAllowPartial(vaddr_t base, size_t size) {
635 size = ROUNDUP(size, PAGE_SIZE);
636 if (size == 0 || !IS_PAGE_ALIGNED(base)) {
645 return UnmapInternalLocked(base, size, true /* can_destroy_regions */,
655 } else if (base >= itr->base() + itr->size()) {
662 zx_status_t VmAddressRegion::UnmapInternalLocked(vaddr_t base, size_t size,
667 if (!is_in_range(base, size)) {
678 aspace_->vdso_code_mapping_->base() - base < size) {
682 const vaddr_t end_addr = base + size;
690 const vaddr_t itr_end = itr->base() + itr->size();
705 const vaddr_t curr_end = curr->base() + curr->size();
710 if (unmap_base == curr->base() && unmap_size == curr->size()) {
733 __UNUSED bool intersects = GetIntersect(base, size, curr->base(), curr->size(),
745 } else if (unmap_base == curr->base() && unmap_size == curr->size()) {
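
UnmapInternalLocked walks every region touching [base, base + size): a mapping fully covered by the request (lines 710 and 745) is removed outright, while a partially covered one is trimmed to the intersection computed by GetIntersect (line 733). A standalone sketch of that intersection step, assuming the ranges were already validated not to wrap; the kernel helper's exact signature may differ:

#include <algorithm>
#include <cstdint>

// Overlap of [base, base + size) with [region_base, region_base +
// region_size). Returns false when the ranges are disjoint. Assumes both
// ranges were already checked against wrap-around, as UnmapInternalLocked
// does via is_in_range before computing end_addr.
bool Intersect(uint64_t base, uint64_t size,
               uint64_t region_base, uint64_t region_size,
               uint64_t* out_base, uint64_t* out_size) {
    const uint64_t begin = std::max(base, region_base);
    const uint64_t end = std::min(base + size, region_base + region_size);
    if (begin >= end) {
        return false;
    }
    *out_base = begin;
    *out_size = end - begin;
    return true;
}
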
775 zx_status_t VmAddressRegion::Protect(vaddr_t base, size_t size, uint new_arch_mmu_flags) {
778 size = ROUNDUP(size, PAGE_SIZE);
779 if (size == 0 || !IS_PAGE_ALIGNED(base)) {
788 if (!is_in_range(base, size)) {
796 const vaddr_t end_addr = base + size;
803 if (!begin.IsValid() || begin->base() + begin->size() <= base) {
824 last_mapped = itr->base() + itr->size();
826 if (last_mapped < base + size) {
836 const vaddr_t curr_end = itr->base() + itr->size();
855 zx_status_t VmAddressRegion::LinearRegionAllocatorLocked(size_t size, uint8_t align_pow2,
867 // requested size.
872 if (CheckGapLocked(before_iter, after_iter, spot, base, align, size, 0, arch_mmu_flags)) {
902 prev_region_end = ROUNDUP(region.base() + region.size(), align);
917 // given range size, for a range that has a base that satisfies the alignment.
927 zx_status_t VmAddressRegion::NonCompactRandomizedRegionAllocatorLocked(size_t size, uint8_t align_pow2,
938 ForEachGap([align, align_pow2, size, &candidate_spaces](vaddr_t gap_base, size_t gap_len) -> bool {
940 if (gap_len >= size) {
941 candidate_spaces += AllocationSpotsInRange(gap_len, size, align_pow2);
957 ForEachGap([align_pow2, size, &alloc_spot, &selected_index](vaddr_t gap_base,
959 if (gap_len < size) {
963 const size_t spots = AllocationSpotsInRange(gap_len, size, align_pow2);
976 auto after_iter = subregions_.upper_bound(alloc_spot + size - 1);
979 if (after_iter == subregions_.begin() || subregions_.size() == 0) {
987 if (CheckGapLocked(before_iter, after_iter, spot, alloc_spot, align, size, 0,
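
The comment at line 917 describes AllocationSpotsInRange: how many aligned bases inside a gap can hold the requested size. For a gap of gap_len bytes and an alignment of 1 << align_pow2, that count is ((gap_len - size) >> align_pow2) + 1 when the gap base is itself aligned. A small sketch of that arithmetic (the kernel's helper may additionally align the gap base first):

#include <cstddef>
#include <cstdint>

// Number of candidate bases, spaced 1 << align_pow2 apart, at which an
// allocation of alloc_size bytes fits inside a gap of gap_len bytes.
// Assumes gap_len >= alloc_size and an aligned gap base.
size_t SpotsInGap(size_t gap_len, size_t alloc_size, uint8_t align_pow2) {
    return ((gap_len - alloc_size) >> align_pow2) + 1;
}

// Example: a 64 KiB gap, a 16 KiB allocation, 4 KiB (2^12) alignment:
// (65536 - 16384) >> 12 == 12, so there are 13 possible bases.
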
998 zx_status_t VmAddressRegion::CompactRandomizedRegionAllocatorLocked(size_t size, uint8_t align_pow2,
1006 if (unlikely(subregions_.size() == 0)) {
1007 return NonCompactRandomizedRegionAllocatorLocked(size, align_pow2, arch_mmu_flags, spot);
1033 if (sub_overflow(after_iter->base(), size, &base) ||
1045 if (add_overflow(before_iter->base(), before_iter->size(), &base) ||
1053 if (CheckGapLocked(before_iter, after_iter, spot, chosen_base, align, size, 0,
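
The compact randomized allocator does not search the whole address space: it places the new region directly adjacent to an existing neighbor, either just below the following region (after_iter->base() - size, guarded by sub_overflow at line 1033) or just above the preceding one (before_iter->base() + before_iter->size(), guarded by add_overflow at line 1045), then validates the pick with CheckGapLocked. A sketch of those two candidate-base computations, using compiler built-ins in place of the kernel's overflow helpers:

#include <cstdint>

// Candidate base immediately below a neighboring region, i.e.
// neighbor_base - size; fails on underflow (cf. sub_overflow, line 1033).
bool BaseBelowNeighbor(uint64_t neighbor_base, uint64_t size, uint64_t* out) {
    return !__builtin_sub_overflow(neighbor_base, size, out);
}

// Candidate base immediately above a neighboring region, i.e.
// neighbor_base + neighbor_size; fails on overflow (cf. add_overflow,
// line 1045).
bool BaseAboveNeighbor(uint64_t neighbor_base, uint64_t neighbor_size,
                       uint64_t* out) {
    return !__builtin_add_overflow(neighbor_base, neighbor_size, out);
}
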