// Copyright 2016 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <fbl/algorithm.h>
#include <fbl/alloc_checker.h>
#include <fbl/auto_lock.h>
#include <region-alloc/region-alloc.h>
#include <string.h>

// Support for Pool allocated bookkeeping
RegionAllocator::RegionPool::RefPtr RegionAllocator::RegionPool::Create(size_t max_memory) {
    // Sanity check our allocation arguments.
    if (SLAB_SIZE > max_memory) {
        return nullptr;
    }

    fbl::AllocChecker ac;
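    // Note: the argument passed to the RegionPool constructor below is the
    // number of whole SLAB_SIZE slabs which fit in max_memory; the check above
    // guarantees that this count is at least one.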
    auto ret = fbl::AdoptRef(new (&ac) RegionPool(max_memory / SLAB_SIZE));

    if (!ac.check()) {
        return nullptr;
    }

    return ret;
}

RegionAllocator::~RegionAllocator() {
    // No one should be destroying us while we have allocations in flight.
    ZX_DEBUG_ASSERT(allocated_regions_by_base_.is_empty());

    // We should have the same number of regions sorted by base address and by
    // size.
    ZX_DEBUG_ASSERT(avail_regions_by_base_.size() == avail_regions_by_size_.size());

    // We have to have a region pool assigned to us, or our available regions
    // need to be empty.
    ZX_DEBUG_ASSERT((region_pool_ != nullptr) ||
                    (avail_regions_by_base_.is_empty() && avail_regions_by_size_.is_empty()));

    // Return all of our bookkeeping to our region pool.
    avail_regions_by_base_.clear();
    while (!avail_regions_by_size_.is_empty()) {
        region_pool_->Delete(avail_regions_by_size_.pop_front());
    }
}

void RegionAllocator::Reset() {
    fbl::AutoLock alloc_lock(&alloc_lock_);

    ZX_DEBUG_ASSERT((region_pool_ != nullptr) || avail_regions_by_base_.is_empty());

    Region* removed;
    while ((removed = avail_regions_by_base_.pop_front()) != nullptr) {
        avail_regions_by_size_.erase(*removed);
        region_pool_->Delete(removed);
    }

    ZX_DEBUG_ASSERT(avail_regions_by_base_.is_empty());
    ZX_DEBUG_ASSERT(avail_regions_by_size_.is_empty());
}

zx_status_t RegionAllocator::SetRegionPool(const RegionPool::RefPtr& region_pool) {
    fbl::AutoLock alloc_lock(&alloc_lock_);

    if (!allocated_regions_by_base_.is_empty() || !avail_regions_by_base_.is_empty())
        return ZX_ERR_BAD_STATE;

    region_pool_ = region_pool;
    return ZX_OK;
}

zx_status_t RegionAllocator::AddRegion(const ralloc_region_t& region, bool allow_overlap) {
    fbl::AutoLock alloc_lock(&alloc_lock_);

    // Start with sanity checks
    zx_status_t ret = AddSubtractSanityCheckLocked(region);
    if (ret != ZX_OK)
        return ret;

    // Make sure that we do not intersect with the available regions if we do
    // not allow overlaps.
    if (!allow_overlap && IntersectsLocked(avail_regions_by_base_, region))
        return ZX_ERR_INVALID_ARGS;

    // All sanity checks passed.  Grab a piece of free bookkeeping from our pool,
    // fill it out, then add it to the sets of available regions (indexed by
    // base address as well as size).
    Region* to_add = region_pool_->New(this);
    if (to_add == nullptr)
        return ZX_ERR_NO_MEMORY;

    to_add->base = region.base;
    to_add->size = region.size;

    AddRegionToAvailLocked(to_add, allow_overlap);

    return ZX_OK;
}

zx_status_t RegionAllocator::SubtractRegion(const ralloc_region_t& to_subtract,
                                            bool allow_incomplete) {
    fbl::AutoLock alloc_lock(&alloc_lock_);

    // Start with sanity checks
    zx_status_t ret = AddSubtractSanityCheckLocked(to_subtract);
    if (ret != ZX_OK)
        return ret;

    // Make a copy of the region to subtract.  We may need to modify the region
    // as part of the subtraction algorithm.
    ralloc_region_t region = to_subtract;

    // Find the available region with the largest base address which is <= the
    // base of the specified region (if any).  If we do not allow incomplete
    // subtraction, this is the region which must entirely contain the
    // subtracted region.
    //
    // Additionally, the only time we should ever need any extra bookkeeping
    // allocation is if this region (if present) needs to be split into two
    // regions.
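    // Note: 'after' below is the first available region whose base is strictly
    // greater than region.base (if any); post-decrementing leaves 'before'
    // pointing at the last available region whose base is <= region.base, or
    // invalid if no such region exists.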
    auto before = avail_regions_by_base_.upper_bound(region.base);
    auto after  = before--;
    uint64_t region_end = region.base + region.size;  // exclusive end
    uint64_t before_end = 0;

    if (before.IsValid()) {
        before_end = before->base + before->size;  // exclusive end
        if ((region.base >= before->base) && (region_end <= before_end)) {
            // Looks like we found an available region which completely contains the
            // region to be subtracted.  Handle the 4 possible cases.

            // Case 1: The regions are the same.  This one is easy.
            if ((region.base == before->base) && (region_end == before_end)) {
                Region* removed = avail_regions_by_base_.erase(before);
                avail_regions_by_size_.erase(*removed);

                ZX_DEBUG_ASSERT(region_pool_ != nullptr);
                region_pool_->Delete(removed);

                return ZX_OK;
            }

            // Case 2: before completely contains region.  The before region needs
            // to be split into two regions.  If we are out of bookkeeping space, we
            // are out of luck.
            if ((region.base != before->base) && (region_end != before_end)) {
                Region* second = region_pool_->New(this);
                if (second == nullptr)
                    return ZX_ERR_NO_MEMORY;

                // Looks like we have the memory we need.  Compute the base/size of
                // the two regions which will be left over, then update the first
                // region's position in the size index, and add the second region to
                // the set of available regions.
                Region* first = avail_regions_by_size_.erase(*before);
                first->size  = region.base - first->base;
                second->base = region_end;
                second->size = before_end - region_end;

                avail_regions_by_size_.insert(first);
                avail_regions_by_base_.insert(second);
                avail_regions_by_size_.insert(second);
                return ZX_OK;
            }

            // Case 3: region trims the front of before.  Update before's base and
            // size, and recompute its position in the size index.  Note: there is
            // no need to recompute its position in the base index; that position
            // has not changed.
            if (region.base == before->base) {
                ZX_DEBUG_ASSERT(region_end < before_end);

                Region* bptr = avail_regions_by_size_.erase(*before);
                bptr->base += region.size;
                bptr->size -= region.size;
                avail_regions_by_size_.insert(bptr);

                return ZX_OK;
            }

            // Case 4: region trims the end of before.  Update before's size and
            // recompute its position in the size index.
            ZX_DEBUG_ASSERT(region.base != before->base);
            ZX_DEBUG_ASSERT(region_end == before_end);

            Region* bptr = avail_regions_by_size_.erase(*before);
            bptr->size -= region.size;
            avail_regions_by_size_.insert(bptr);

            return ZX_OK;
        }
    }

    // If we have gotten this far, then there is no single region in the
    // available set which completely contains the subtraction region.  We
    // cannot continue unless allow_incomplete is true.
    if (!allow_incomplete)
        return ZX_ERR_INVALID_ARGS;

    // Great!  At this point we know that we are going to succeed; we just need
    // to go about updating all of the bookkeeping.  We may need to trim the end
    // of the region which comes before us, and then consume some of the regions
    // which come after us, finishing by trimming the front of (at most) one of
    // the regions which comes after us.  At no point in time do we need to
    // allocate any more bookkeeping; success is guaranteed.  Start by
    // considering the before region.
    if (before.IsValid()) {
        ZX_DEBUG_ASSERT(region.base >= before->base);
        ZX_DEBUG_ASSERT(region_end  >  before_end);
        if (before_end > region.base) {
            // No matter what, 'before' needs to be removed from the size index.
            Region* bptr = avail_regions_by_size_.erase(*before);

            // If before's base is the same as the region's base, then we are
            // subtracting out all of before.  Otherwise, we are trimming the back
            // of before and need to recompute its size and position in the size
            // index.
            if (bptr->base == region.base) {
                avail_regions_by_base_.erase(*bptr);

                ZX_DEBUG_ASSERT(region_pool_ != nullptr);
                region_pool_->Delete(bptr);
            } else {
                bptr->size = region.base - bptr->base;
                avail_regions_by_size_.insert(bptr);
            }

            // Either way, the region we are subtracting now starts where before
            // used to end.
            region.base = before_end;
            region.size = region_end - region.base;
            ZX_DEBUG_ASSERT(region.size > 0);
        }
    }

    // While there are regions whose base address comes after the base address
    // of what we want to subtract, we need to do one of three things...
    //
    // 1) Consume entire regions which are contained entirely within our
    //    subtraction region.
    // 2) Trim the front of a region which is clipped by our subtraction region.
    // 3) Stop because all remaining regions start after the end of our
    //    subtraction region.
    while (after.IsValid()) {
        ZX_DEBUG_ASSERT(after->base > region.base);

        // Case #3
        if (after->base >= region_end)
            break;

        // Cases #1 and #2.  No matter what, we need to...
        // 1) Advance after, renaming the old 'after' to 'trim' in the process.
        // 2) Remove trim from the size index.
        auto     trim_iter = after++;
        Region*  trim      = avail_regions_by_size_.erase(*trim_iter);
        uint64_t trim_end  = trim->base + trim->size;

        if (trim_end > region_end) {
            // Case #2.  We are guaranteed to be done at this point.
            trim->base = region_end;
            trim->size = trim_end - trim->base;
            avail_regions_by_size_.insert(trim);
            break;
        }

        // Case #1.  Advance the subtraction region to the end of the region we
        // just trimmed into oblivion.  If we have run out of region to trim,
        // then we know we are done.
        avail_regions_by_base_.erase(*trim);
        region.base = trim_end;
        region.size = region_end - region.base;

        ZX_DEBUG_ASSERT(region_pool_ != nullptr);
        region_pool_->Delete(trim);

        if (!region.size)
            break;
    }

    // Sanity check.  The number of elements in the base index should match the
    // number of elements in the size index.
    ZX_DEBUG_ASSERT(avail_regions_by_base_.size() == avail_regions_by_size_.size());
    return ZX_OK;
}

zx_status_t RegionAllocator::GetRegion(uint64_t size,
                                       uint64_t alignment,
                                       Region::UPtr& out_region) {
    fbl::AutoLock alloc_lock(&alloc_lock_);

    // Check our RegionPool
    if (region_pool_ == nullptr)
        return ZX_ERR_BAD_STATE;

    // Sanity check the arguments.
    out_region = nullptr;
    if (!size || !alignment || !fbl::is_pow2(alignment))
        return ZX_ERR_INVALID_ARGS;

    // Compute the values we will need to round base addresses up to the
    // requested alignment.
    uint64_t mask     = alignment - 1;
    uint64_t inv_mask = ~mask;

    // Start by using our size index to look up the first available region which
    // is large enough to hold this allocation (if any).
    auto iter = avail_regions_by_size_.lower_bound({ .base = 0, .size = size });

    // Consider all of the regions which are large enough to hold our
    // allocation.  Stop as soon as we find one which can satisfy the alignment
    // restrictions.
    uint64_t aligned_base = 0;
    while (iter.IsValid()) {
        ZX_DEBUG_ASSERT(iter->size >= size);
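        // Round the candidate region's base up to the requested alignment.  For
        // example, with alignment == 0x1000 (4 KiB), mask == 0xFFF, and the
        // expression below rounds iter->base up to the nearest 4 KiB boundary.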
        aligned_base = (iter->base + mask) & inv_mask;
        uint64_t overhead = aligned_base - iter->base;
        uint64_t leftover = iter->size - size;

        // We have a usable region if the aligned base address has not wrapped
        // the address space, and if the overhead required to align the allocation
        // is not larger than what is left over in the region after performing
        // the allocation.
        if ((aligned_base >= iter->base) && (overhead <= leftover))
            break;

        ++iter;
    }

    if (!iter.IsValid())
        return ZX_ERR_NOT_FOUND;

    return AllocFromAvailLocked(iter, out_region, aligned_base, size);
}

zx_status_t RegionAllocator::GetRegion(const ralloc_region_t& requested_region,
                                       Region::UPtr& out_region) {
    fbl::AutoLock alloc_lock(&alloc_lock_);

    // Check our RegionPool
    if (region_pool_ == nullptr)
        return ZX_ERR_BAD_STATE;

    uint64_t base = requested_region.base;
    uint64_t size = requested_region.size;

    // Sanity check the arguments.
    out_region = nullptr;
    if (!size || ((base + size) < base))
        return ZX_ERR_INVALID_ARGS;

    // Find the first available region whose base address is strictly greater
    // than the one we are looking for, then back up one.
    auto iter = avail_regions_by_base_.upper_bound(base);
    --iter;

    // If the iterator is invalid, then we cannot satisfy this request.  If it
    // is valid, then we can satisfy this request if and only if the region we
    // found completely contains the requested region.
    if (!iter.IsValid())
        return ZX_ERR_NOT_FOUND;

    // We know that base must be >= iter->base
    // We know that iter->size is non-zero.
    // Therefore, we know that base is in the range [iter.base, iter.end]
    // We know that request.end is > base
    // Therefore request.end is > iter.base
    //
    // So, if request.end <= iter.end, we know that request is completely
    // contained within iter.  It does not matter if we use the inclusive or
    // exclusive end to check, as long as we are consistent.
    ZX_DEBUG_ASSERT(iter->size > 0);
    ZX_DEBUG_ASSERT(iter->base <= base);
    uint64_t req_end  = base + size - 1;
    uint64_t iter_end = iter->base + iter->size - 1;
    if (req_end > iter_end)
        return ZX_ERR_NOT_FOUND;

    // Great, we have found a region which should be able to satisfy our
    // allocation request.  Get an iterator for the by-size index, then use the
    // common AllocFromAvailLocked method to handle the bookkeeping involved.
    auto by_size_iter = avail_regions_by_size_.make_iterator(*iter);
    return AllocFromAvailLocked(by_size_iter, out_region, base, size);
}

zx_status_t RegionAllocator::AddSubtractSanityCheckLocked(const ralloc_region_t& region) {
    // Check our RegionPool
    if (region_pool_ == nullptr)
        return ZX_ERR_BAD_STATE;

    // Sanity check the region to make sure that it is well formed.  We do not
    // allow a region which is of size zero, or which wraps around the
    // allocation space.
    if ((region.base + region.size) <= region.base)
        return ZX_ERR_INVALID_ARGS;

    // Next, make sure the region we are adding or subtracting does not
    // intersect any region which is currently allocated.
    if (IntersectsLocked(allocated_regions_by_base_, region))
        return ZX_ERR_INVALID_ARGS;

    return ZX_OK;
}

void RegionAllocator::ReleaseRegion(Region* region) {
    fbl::AutoLock alloc_lock(&alloc_lock_);

    ZX_DEBUG_ASSERT(region != nullptr);

    // When a region comes back from a user, it should be in the
    // allocated_regions_by_base tree, but not in either of the avail_regions
    // trees, and not in any free list.  Remove it from the allocated_regions
    // bookkeeping and add it back to the available regions.
    ZX_DEBUG_ASSERT(region->ns_tree_sort_by_base_.InContainer());
    ZX_DEBUG_ASSERT(!region->ns_tree_sort_by_size_.InContainer());

    allocated_regions_by_base_.erase(*region);
    AddRegionToAvailLocked(region);
}

zx_status_t RegionAllocator::AllocFromAvailLocked(Region::WAVLTreeSortBySize::iterator source,
                                                  Region::UPtr& out_region,
                                                  uint64_t base,
                                                  uint64_t size) {
    ZX_DEBUG_ASSERT(out_region == nullptr);
    ZX_DEBUG_ASSERT(source.IsValid());
    ZX_DEBUG_ASSERT(base >= source->base);
    ZX_DEBUG_ASSERT(size <= source->size);

    uint64_t overhead = base - source->base;
    ZX_DEBUG_ASSERT(overhead < source->size);

    uint64_t leftover = source->size - size;
    ZX_DEBUG_ASSERT(leftover >= overhead);

    // Great, we found a region.  We may have to split the available region into
    // up to 2 sub-regions depending on where the aligned allocation lies in the
    // region.  Figure out how much splitting we need to do and attempt to
    // allocate the bookkeeping.
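    // A split in front is needed when alignment overhead leaves a non-empty
    // chunk before the allocation; a split after is needed when the allocation
    // stops short of the end of the source region (i.e. leftover > overhead).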
    bool split_before = base != source->base;
    bool split_after  = overhead < leftover;

    if (!split_before && !split_after) {
        // If no splits are required, then this should be easy.  Take the region
        // out of the avail bookkeeping, add it to the allocated bookkeeping and
        // we are finished.
        Region* region = avail_regions_by_size_.erase(source);
        avail_regions_by_base_.erase(*region);
        allocated_regions_by_base_.insert(region);
        out_region.reset(region);
    } else if (!split_before) {
        // If we only have to split after, then this region is aligned with what
        // we want to allocate, but we will not use all of it.  Break it into
        // two pieces and return the one which comes first.
        Region* before_region = region_pool_->New(this);
        if (before_region == nullptr)
            return ZX_ERR_NO_MEMORY;

        Region* after_region = avail_regions_by_size_.erase(source);

        before_region->base = after_region->base;
        before_region->size = size;
        after_region->base += size;
        after_region->size -= size;

        avail_regions_by_size_.insert(after_region);
        allocated_regions_by_base_.insert(before_region);

        out_region.reset(before_region);
    } else if (!split_after) {
        // If we only have to split before, then this region is not aligned
        // properly with what we want to allocate, but we will use the entire
        // region (after aligning).  Break it into two pieces and return the one
        // which comes after.
        Region* after_region = region_pool_->New(this);
        if (after_region == nullptr)
            return ZX_ERR_NO_MEMORY;

        Region* before_region = avail_regions_by_size_.erase(source);

        after_region->base   = base;
        after_region->size   = size;
        before_region->size -= size;

        avail_regions_by_size_.insert(before_region);
        allocated_regions_by_base_.insert(after_region);

        out_region.reset(after_region);
    } else {
        // Looks like we need to break our region into 3 chunks and return the
        // middle chunk.  Start by grabbing the bookkeeping we require.
        Region* region = region_pool_->New(this);
        if (region == nullptr)
            return ZX_ERR_NO_MEMORY;

        Region* after_region = region_pool_->New(this);
        if (after_region == nullptr) {
            ZX_DEBUG_ASSERT(region_pool_ != nullptr);
            region_pool_->Delete(region);
            return ZX_ERR_NO_MEMORY;
        }

        Region* before_region = avail_regions_by_size_.erase(source);

        region->base        = before_region->base + overhead;
        region->size        = size;
        after_region->base  = region->base + region->size;
        after_region->size  = before_region->size - size - overhead;
        before_region->size = overhead;

        avail_regions_by_size_.insert(before_region);
        avail_regions_by_size_.insert(after_region);
        avail_regions_by_base_.insert(after_region);
        allocated_regions_by_base_.insert(region);

        out_region.reset(region);
    }
    return ZX_OK;
}

void RegionAllocator::AddRegionToAvailLocked(Region* region, bool allow_overlap) {
    // Sanity checks.  This region should not exist in any bookkeeping, and
    // should not overlap with any of the regions we are currently tracking.
    ZX_DEBUG_ASSERT(!region->ns_tree_sort_by_base_.InContainer());
    ZX_DEBUG_ASSERT(!region->ns_tree_sort_by_size_.InContainer());
    ZX_DEBUG_ASSERT(!IntersectsLocked(allocated_regions_by_base_, *region));
    ZX_DEBUG_ASSERT(allow_overlap || !IntersectsLocked(avail_regions_by_base_, *region));

    // Find the region which comes before us and the region which comes after us
    // in the tree.
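    // ('after' will be the first region whose base is strictly greater than
    // region->base; post-decrementing leaves 'before' at the last region whose
    // base is <= region->base, or invalid if no such region exists.)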
    auto before = avail_regions_by_base_.upper_bound(region->base);
    auto after  = before--;

    // Merge with the region which comes before us if we can.
    uint64_t region_end = (region->base + region->size);    // exclusive end
    if (before.IsValid()) {
        ZX_DEBUG_ASSERT(before->base <= region->base);

        uint64_t before_end = (before->base + before->size);    // exclusive end
        if (allow_overlap ? (before_end >= region->base) : (before_end == region->base)) {
            region_end   = fbl::max(region_end, before_end);
            region->base = before->base;

            auto removed = avail_regions_by_base_.erase(before);
            avail_regions_by_size_.erase(*removed);
            ZX_DEBUG_ASSERT(region_pool_ != nullptr);
            region_pool_->Delete(removed);
        }
    }

    // Merge with the region which comes after us if we can; keep merging as
    // long as overlaps are allowed.
    while (after.IsValid()) {
        ZX_DEBUG_ASSERT(region->base < after->base);

        if (!(allow_overlap ? (region_end >= after->base) : (region_end == after->base)))
            break;

        uint64_t after_end = (after->base + after->size);
        region_end = fbl::max(region_end, after_end);

        auto remove_me = after++;
        auto removed  = avail_regions_by_base_.erase(remove_me);
        avail_regions_by_size_.erase(*removed);
        ZX_DEBUG_ASSERT(region_pool_ != nullptr);
        region_pool_->Delete(removed);

        if (!allow_overlap)
            break;
    }

    // Update the region's size to reflect any mergers which may have taken
    // place, then add the region to the two indexes.
    region->size = region_end - region->base;
    avail_regions_by_base_.insert(region);
    avail_regions_by_size_.insert(region);
}

bool RegionAllocator::IntersectsLocked(const Region::WAVLTreeSortByBase& tree,
                                       const ralloc_region_t& region) {
    // Find the first entry in the tree whose base is >= region.base.  If this
    // element exists, and its base is < the exclusive end of region, then
    // we have an intersection.
    auto iter = tree.lower_bound(region.base);
    if (iter.IsValid() && (iter->base < (region.base + region.size)))
        return true;

    // Check the element before us in the tree.  If it exists, we know that its
    // base is < region.base.  If its exclusive end is > region.base, then we
    // have an intersection.
    --iter;
    if (iter.IsValid() && (region.base < (iter->base + iter->size)))
        return true;

    return false;
}