Lines Matching refs:region

100 // When we know the size class (the region base) we can represent a pointer
101 // as a 4-byte integer (offset from the region start shifted right by 4).
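The two comment lines above (source lines 100-101) describe the compact pointer encoding: once the region base for a size class is known, a chunk pointer can be stored as a 32-bit offset scaled down by the minimum 16-byte alignment. A minimal sketch of that round trip, using CompactPtrT and kCompactPtrScale as illustrative names (with a 4-bit shift, a 32-bit compact pointer can address a 64 GiB region):

    #include <cassert>
    #include <cstdint>

    typedef uint32_t CompactPtrT;                 // illustrative name
    static const uintptr_t kCompactPtrScale = 4;  // chunks are >= 16-byte aligned

    static CompactPtrT PointerToCompact(uintptr_t region_beg, uintptr_t ptr) {
      return static_cast<CompactPtrT>((ptr - region_beg) >> kCompactPtrScale);
    }

    static uintptr_t CompactToPointer(uintptr_t region_beg, CompactPtrT cptr) {
      return region_beg + (static_cast<uintptr_t>(cptr) << kCompactPtrScale);
    }

    int main() {
      const uintptr_t region_beg = 0x600000000000;  // hypothetical region base
      const uintptr_t p = region_beg + 0x12340;     // some 16-byte-aligned chunk
      assert(CompactToPointer(region_beg, PointerToCompact(region_beg, p)) == p);
      return 0;
    }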
126 "SizeClassAllocator: region info"));
145 "SizeClassAllocator: region info");
177 RegionInfo *region = GetRegionInfo(class_id);
181 Lock l(&region->mutex);
182 uptr old_num_chunks = region->num_freed_chunks;
186 if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg,
195 region->num_freed_chunks = new_num_freed_chunks;
196 region->stats.n_freed += n_chunks;
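Source lines 177-196 are fragments of the path that returns a batch of freed chunks to a region: take the region mutex, make sure the free array has space, append the chunks, and bump the freed counter. A heavily simplified, hedged sketch, with std::mutex and std::vector standing in for the allocator's own lock and its mmap-backed FreeArray:

    #include <cstddef>
    #include <cstdint>
    #include <mutex>
    #include <vector>

    typedef uint32_t CompactPtrT;

    struct RegionInfo {                     // heavily simplified stand-in
      std::mutex mutex;
      std::vector<CompactPtrT> free_array;  // the real FreeArray is mmap-backed
      uint64_t n_freed = 0;
    };

    // Push a batch of freed compact pointers onto the region's free array.
    // The real code first grows the mapping via EnsureFreeArraySpace().
    void ReturnFreedChunks(RegionInfo *region, const CompactPtrT *chunks,
                           std::size_t n_chunks) {
      std::lock_guard<std::mutex> l(region->mutex);
      region->free_array.insert(region->free_array.end(), chunks,
                                chunks + n_chunks);
      region->n_freed += n_chunks;
    }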
203 RegionInfo *region = GetRegionInfo(class_id);
207 Lock l(&region->mutex);
212 if (region->rtoi.last_released_bytes > 0) {
213 MmapFixedOrDie(region_beg, region->mapped_user,
214 "SizeClassAllocator: region data");
215 region->rtoi.n_freed_at_last_release = 0;
216 region->rtoi.last_released_bytes = 0;
219 if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
220 if (UNLIKELY(!PopulateFreeArray(stat, class_id, region,
221 n_chunks - region->num_freed_chunks)))
223 CHECK_GE(region->num_freed_chunks, n_chunks);
225 region->num_freed_chunks -= n_chunks;
226 uptr base_idx = region->num_freed_chunks;
229 region->stats.n_allocated += n_chunks;
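The mirror image at source lines 203-229 pops n_chunks off the free array under the same lock, first remapping user memory that was previously released to the OS (lines 212-216) and calling PopulateFreeArray when the array runs short. A simplified sketch of that pop-and-refill shape; RefillFreeArray is a hypothetical placeholder for PopulateFreeArray, and the remap step is omitted:

    #include <cstddef>
    #include <cstdint>
    #include <mutex>
    #include <vector>

    typedef uint32_t CompactPtrT;

    struct RegionInfo {                     // heavily simplified stand-in
      std::mutex mutex;
      std::vector<CompactPtrT> free_array;
      uint64_t n_allocated = 0;
    };

    // Hypothetical placeholder for PopulateFreeArray(): carve `missing` more
    // chunks out of the region. Stubbed to always fail here.
    static bool RefillFreeArray(RegionInfo *, std::size_t missing) {
      (void)missing;
      return false;
    }

    // Pop n_chunks compact pointers; fails only if the region is exhausted.
    bool GetFreedChunks(RegionInfo *region, CompactPtrT *chunks,
                        std::size_t n_chunks) {
      std::lock_guard<std::mutex> l(region->mutex);
      if (region->free_array.size() < n_chunks &&
          !RefillFreeArray(region, n_chunks - region->free_array.size()))
        return false;
      const std::size_t base = region->free_array.size() - n_chunks;
      for (std::size_t i = 0; i < n_chunks; i++)
        chunks[i] = region->free_array[base + i];
      region->free_array.resize(base);
      region->n_allocated += n_chunks;
      return true;
    }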
268 const RegionInfo *region = AddressSpaceView::Load(GetRegionInfo(class_id));
269 if (region->mapped_user >= next_beg)
312 RegionInfo *region = GetRegionInfo(class_id);
313 if (region->mapped_user == 0) return;
314 uptr in_use = region->stats.n_allocated - region->stats.n_freed;
315 uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
319 "last released: %6lldK region: 0x%zx\n",
320 region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id),
321 region->mapped_user >> 10, region->stats.n_allocated,
322 region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks,
323 rss >> 10, region->rtoi.num_releases,
324 region->rtoi.last_released_bytes >> 10,
339 RegionInfo *region = GetRegionInfo(class_id);
340 if (region->mapped_user != 0) {
341 total_mapped += region->mapped_user;
344 n_allocated += region->stats.n_allocated;
345 n_freed += region->stats.n_freed;
373 RegionInfo *region = GetRegionInfo(class_id);
377 AddressSpaceView::Load(region)->allocated_user;
629 // dedicate 1/8 of the region's virtual space to FreeArray.
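To make the 1/8 figure concrete: with 4-byte compact pointers, reserving an eighth of the region's virtual space bounds how many freed chunks the FreeArray can ever track. A back-of-the-envelope check, with an illustrative (not authoritative) region size:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t kRegionSize = 1ULL << 32;           // illustrative: 4 GiB region
      const uint64_t kFreeArraySize = kRegionSize / 8;   // 1/8 reserved: 512 MiB
      const uint64_t kCompactPtrSize = sizeof(uint32_t); // 4-byte compact pointers
      std::printf("FreeArray can track %llu freed chunks\n",
                  (unsigned long long)(kFreeArraySize / kCompactPtrSize));
      return 0;
    }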
678 bool exhausted; // Whether region is out of space for new chunks.
739 bool EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
742 if (region->mapped_free_array < needed_space) {
746 region->mapped_free_array;
747 uptr new_map_size = new_mapped_free_array - region->mapped_free_array;
751 region->mapped_free_array = new_mapped_free_array;
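Read together, source lines 739-751 suggest EnsureFreeArraySpace lazily extends the mapped prefix of the FreeArray in fixed-granularity steps, mapping only when the needed space exceeds what is already mapped. A hedged reconstruction with the map call stubbed out (kFreeArrayMapSize and MapFixed are illustrative names):

    #include <cstdint>

    typedef uintptr_t uptr;

    // Round up to a power-of-two boundary.
    static uptr RoundUpTo(uptr size, uptr boundary) {
      return (size + boundary - 1) & ~(boundary - 1);
    }

    // Stand-in for the allocator's map-with-callback primitive.
    static bool MapFixed(uptr addr, uptr size) {
      (void)addr; (void)size;
      return true;
    }

    struct RegionInfo { uptr mapped_free_array = 0; };

    static const uptr kFreeArrayMapSize = 1 << 16;  // illustrative granularity

    // Grow the mapped prefix of the FreeArray so it can hold num_freed_chunks
    // 4-byte compact pointers; a no-op when it is already large enough.
    bool EnsureFreeArraySpace(RegionInfo *region, uptr free_array_beg,
                              uptr num_freed_chunks) {
      const uptr needed_space = num_freed_chunks * sizeof(uint32_t);
      if (region->mapped_free_array < needed_space) {
        const uptr new_mapped = RoundUpTo(needed_space, kFreeArrayMapSize);
        if (!MapFixed(free_array_beg + region->mapped_free_array,
                      new_mapped - region->mapped_free_array))
          return false;
        region->mapped_free_array = new_mapped;
      }
      return true;
    }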
757 bool IsRegionExhausted(RegionInfo *region, uptr class_id,
759 if (LIKELY(region->mapped_user + region->mapped_meta +
762 if (!region->exhausted) {
763 region->exhausted = true;
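Source lines 757-763 show IsRegionExhausted checking whether an additional mapping would collide with the space reserved for the FreeArray, and flipping the exhausted flag (with a one-time warning) when it would. A simplified sketch under the same illustrative constants as above:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    typedef uintptr_t uptr;

    struct RegionInfo {
      uptr mapped_user = 0, mapped_meta = 0;
      bool exhausted = false;
    };

    static const uptr kRegionSize = 1ULL << 32;          // illustrative, 64-bit target
    static const uptr kFreeArraySize = kRegionSize / 8;  // reserved for the FreeArray

    // True when mapping additional_map_size more bytes of user data or metadata
    // would run into the space reserved for the FreeArray; warn only once.
    bool IsRegionExhausted(RegionInfo *region, uptr class_id,
                           uptr additional_map_size) {
      if (region->mapped_user + region->mapped_meta + additional_map_size <=
          kRegionSize - kFreeArraySize)
        return false;
      if (!region->exhausted) {
        region->exhausted = true;
        std::fprintf(stderr, "out of space in size class %zu\n",
                     static_cast<std::size_t>(class_id));
      }
      return true;
    }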
772 RegionInfo *region, uptr requested_count) {
773 // region->mutex is held.
778 region->allocated_user + requested_count * size;
780 if (LIKELY(total_user_bytes > region->mapped_user)) {
781 if (UNLIKELY(region->mapped_user == 0)) {
784 region->rand_state = static_cast<u32>(region_beg >> 12);
792 region->rtoi.last_release_at_ns = MonotonicNanoTime();
796 RoundUpTo(total_user_bytes - region->mapped_user, kUserMapSize);
797 if (UNLIKELY(IsRegionExhausted(region, class_id, user_map_size)))
799 if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user,
801 "SizeClassAllocator: region data")))
804 region->mapped_user += user_map_size;
807 (region->mapped_user - region->allocated_user) / size;
812 region->allocated_meta + new_chunks_count * kMetadataSize;
813 const uptr meta_map_size = (total_meta_bytes > region->mapped_meta) ?
814 RoundUpTo(total_meta_bytes - region->mapped_meta, kMetaMapSize) : 0;
817 if (UNLIKELY(IsRegionExhausted(region, class_id, meta_map_size)))
820 GetMetadataEnd(region_beg) - region->mapped_meta - meta_map_size,
821 meta_map_size, "SizeClassAllocator: region metadata")))
823 region->mapped_meta += meta_map_size;
829 const uptr total_freed_chunks = region->num_freed_chunks + new_chunks_count;
830 if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, total_freed_chunks)))
833 for (uptr i = 0, chunk = region->allocated_user; i < new_chunks_count;
837 RandomShuffle(&free_array[region->num_freed_chunks], new_chunks_count,
838 &region->rand_state);
842 region->num_freed_chunks += new_chunks_count;
843 region->allocated_user += new_chunks_count * size;
844 CHECK_LE(region->allocated_user, region->mapped_user);
845 region->allocated_meta += new_chunks_count * kMetadataSize;
846 CHECK_LE(region->allocated_meta, region->mapped_meta);
847 region->exhausted = false;
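Source lines 772-847 outline PopulateFreeArray, the refill path: map more user memory (and metadata) when the request outruns what is mapped, carve the newly mapped space into chunks, append them to the free array in randomized order, and advance the allocated counters. A much-condensed, hedged sketch; metadata handling and the actual mmap calls are elided, and std::vector again stands in for the FreeArray:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <random>
    #include <vector>

    typedef uintptr_t uptr;
    typedef uint32_t CompactPtrT;

    struct RegionInfo {
      std::vector<CompactPtrT> free_array;  // stands in for the mapped FreeArray
      uptr allocated_user = 0;              // bytes already carved into chunks
      uptr mapped_user = 0;                 // bytes of user space actually mapped
      uint32_t rand_state = 1;
      bool exhausted = false;
    };

    static const uptr kUserMapSize = 1 << 16;    // illustrative map granularity
    static const uptr kRegionSize = 1ULL << 32;  // illustrative region size

    static uptr RoundUpTo(uptr size, uptr boundary) {
      return (size + boundary - 1) & ~(boundary - 1);
    }

    // Carve at least requested_count new chunks of `size` bytes out of the
    // region, notionally mapping more user memory first if needed. Returns
    // false when the region has no room left.
    bool PopulateFreeArray(RegionInfo *region, uptr size, uptr requested_count) {
      const uptr total_user_bytes =
          region->allocated_user + requested_count * size;
      if (total_user_bytes > region->mapped_user) {
        const uptr user_map_size =
            RoundUpTo(total_user_bytes - region->mapped_user, kUserMapSize);
        if (region->mapped_user + user_map_size > kRegionSize) {
          region->exhausted = true;  // the real code also warns once
          return false;
        }
        // The real code mmaps [region_beg + mapped_user, +user_map_size) here,
        // along with a matching slice of metadata space.
        region->mapped_user += user_map_size;
      }
      const uptr new_chunks_count =
          (region->mapped_user - region->allocated_user) / size;
      // Append the new chunks as compact (>>4) offsets, then shuffle just the
      // new tail so allocation order is randomized.
      const std::size_t old_size = region->free_array.size();
      for (uptr i = 0, chunk = region->allocated_user; i < new_chunks_count;
           i++, chunk += size)
        region->free_array.push_back(static_cast<CompactPtrT>(chunk >> 4));
      std::minstd_rand rng(region->rand_state++);
      std::shuffle(region->free_array.begin() + old_size,
                   region->free_array.end(), rng);
      region->allocated_user += new_chunks_count * size;
      region->exhausted = false;
      return true;
    }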
856 // Attempts to release RAM occupied by freed chunks back to OS. The region is
863 RegionInfo *region = GetRegionInfo(class_id);
867 uptr n = region->num_freed_chunks;
870 if ((region->stats.n_freed -
871 region->rtoi.n_freed_at_last_release) * chunk_size < page_size) {
880 if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL >
888 RoundUpTo(region->allocated_user, page_size) / page_size, memory_mapper,
893 region->rtoi.n_freed_at_last_release = region->stats.n_freed;
894 region->rtoi.num_releases += ranges;
895 region->rtoi.last_released_bytes = bytes;
897 region->rtoi.last_release_at_ns = MonotonicNanoTime();
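Finally, source lines 856-897 show the release-to-OS path gated by two throttles: skip if less than a page's worth of chunks has been freed since the last release, and skip if the configured interval has not yet elapsed; otherwise release pages and update the rtoi counters. A sketch of that gating logic, with hypothetical stand-ins GetMonotonicNs and ReleaseFreedPages for the platform layer:

    #include <chrono>
    #include <cstdint>

    typedef uintptr_t uptr;
    typedef uint64_t u64;

    struct ReleaseToOsInfo {
      uptr n_freed_at_last_release = 0;
      uptr num_releases = 0;
      uptr last_released_bytes = 0;
      u64 last_release_at_ns = 0;
    };

    struct RegionInfo {
      uptr n_freed = 0;  // total chunks ever freed in this region
      ReleaseToOsInfo rtoi;
    };

    static u64 GetMonotonicNs() {  // stand-in for MonotonicNanoTime()
      return std::chrono::duration_cast<std::chrono::nanoseconds>(
                 std::chrono::steady_clock::now().time_since_epoch())
          .count();
    }

    // Stand-in for the actual page-release machinery; returns bytes released
    // and reports how many ranges were released via *ranges.
    static uptr ReleaseFreedPages(RegionInfo *region, uptr *ranges) {
      (void)region;
      *ranges = 0;
      return 0;
    }

    // Release freed memory back to the OS unless too little has been freed
    // since the last release or the interval has not yet elapsed.
    void MaybeReleaseToOS(RegionInfo *region, uptr chunk_size, uptr page_size,
                          u64 interval_ms) {
      // Throttle 1: not even one page's worth of newly freed chunks.
      if ((region->n_freed - region->rtoi.n_freed_at_last_release) * chunk_size <
          page_size)
        return;
      // Throttle 2: released too recently.
      if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL >
          GetMonotonicNs())
        return;
      uptr ranges = 0;
      const uptr bytes = ReleaseFreedPages(region, &ranges);
      if (bytes > 0) {
        region->rtoi.n_freed_at_last_release = region->n_freed;
        region->rtoi.num_releases += ranges;
        region->rtoi.last_released_bytes = bytes;
      }
      region->rtoi.last_release_at_ns = GetMonotonicNs();
    }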