// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <stdlib.h>
#include <string.h>

#include <bitmap/raw-bitmap.h>

#include <minfs/allocator.h>
#include <minfs/block-txn.h>

namespace minfs {
namespace {

// Returns the number of blocks necessary to store a pool containing
// |size| bits.
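// For example, assuming kMinfsBlockSize is 8192 bytes (so kMinfsBlockBits is
// 65536), a pool of 100,000 bits requires two bitmap blocks.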
blk_t BitmapBlocksForSize(size_t size) {
    // Round up in size_t before narrowing, so large |size| values are not
    // truncated by the cast.
    return static_cast<blk_t>((size + kMinfsBlockBits - 1) / kMinfsBlockBits);
}

}  // namespace

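// Upon destruction, return any remaining reserved (but unallocated) elements
// to the allocator.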
AllocatorPromise::~AllocatorPromise() {
    if (reserved_ > 0) {
        ZX_DEBUG_ASSERT(allocator_ != nullptr);
        allocator_->Unreserve(reserved_);
    }
}

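// Consumes a single reserved element and allocates it within |txn|, returning
// the allocated index.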
size_t AllocatorPromise::Allocate(WriteTxn* txn) {
    ZX_DEBUG_ASSERT(allocator_ != nullptr);
    ZX_DEBUG_ASSERT(reserved_ > 0);
    reserved_--;
    return allocator_->Allocate(txn);
}

AllocatorFvmMetadata::AllocatorFvmMetadata() = default;
AllocatorFvmMetadata::AllocatorFvmMetadata(uint32_t* data_slices,
                                           uint32_t* metadata_slices,
                                           uint64_t slice_size) :
        data_slices_(data_slices), metadata_slices_(metadata_slices),
        slice_size_(slice_size) {}
AllocatorFvmMetadata::AllocatorFvmMetadata(AllocatorFvmMetadata&&) = default;
AllocatorFvmMetadata& AllocatorFvmMetadata::operator=(AllocatorFvmMetadata&&) = default;
AllocatorFvmMetadata::~AllocatorFvmMetadata() = default;

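// Returns the number of |unit_size|-byte units which fit within |slices|
// slices.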
uint32_t AllocatorFvmMetadata::UnitsPerSlices(uint32_t slices, uint32_t unit_size) const {
    uint64_t units = (slice_size_ * slices) / unit_size;
    ZX_DEBUG_ASSERT(units <= fbl::numeric_limits<uint32_t>::max());
    return static_cast<uint32_t>(units);
}

// NOTE: This helper must only be called with values of |blocks| which are
// known to be convertible to slices without loss; this is enforced by a
// debug assertion.
uint32_t AllocatorFvmMetadata::BlocksToSlices(uint32_t blocks) const {
    const size_t kBlocksPerSlice = slice_size_ / kMinfsBlockSize;
    uint32_t slices = static_cast<uint32_t>(blocks / kBlocksPerSlice);
    ZX_DEBUG_ASSERT(UnitsPerSlices(slices, kMinfsBlockSize) == blocks);
    return slices;
}

AllocatorMetadata::AllocatorMetadata() = default;
AllocatorMetadata::AllocatorMetadata(blk_t data_start_block,
                                     blk_t metadata_start_block, bool using_fvm,
                                     AllocatorFvmMetadata fvm,
                                     uint32_t* pool_used, uint32_t* pool_total) :
    data_start_block_(data_start_block), metadata_start_block_(metadata_start_block),
    using_fvm_(using_fvm), fvm_(fbl::move(fvm)),
    pool_used_(pool_used), pool_total_(pool_total) {}
AllocatorMetadata::AllocatorMetadata(AllocatorMetadata&&) = default;
AllocatorMetadata& AllocatorMetadata::operator=(AllocatorMetadata&&) = default;
AllocatorMetadata::~AllocatorMetadata() = default;

Allocator::Allocator(Bcache* bc, Superblock* sb, size_t unit_size, GrowHandler grow_cb,
                     AllocatorMetadata metadata) :
    bc_(bc), sb_(sb), unit_size_(unit_size), grow_cb_(fbl::move(grow_cb)),
    metadata_(fbl::move(metadata)), reserved_(0), hint_(0) {}
Allocator::~Allocator() = default;

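// Creates an allocator and enqueues reads on |txn| to populate its in-memory
// block bitmap from disk.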
zx_status_t Allocator::Create(Bcache* bc, Superblock* sb, fs::ReadTxn* txn, size_t unit_size,
                              GrowHandler grow_cb, AllocatorMetadata metadata,
                              fbl::unique_ptr<Allocator>* out) {
    auto allocator = fbl::unique_ptr<Allocator>(new Allocator(bc, sb, unit_size,
                                                              fbl::move(grow_cb),
                                                              fbl::move(metadata)));
    blk_t pool_blocks = BitmapBlocksForSize(allocator->metadata_.PoolTotal());

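    // Allocate the bitmap in units of whole bitmap blocks, then shrink its
    // valid range to the exact number of elements in the pool.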
    zx_status_t status;
    if ((status = allocator->map_.Reset(pool_blocks * kMinfsBlockBits)) != ZX_OK) {
        return status;
    }
    if ((status = allocator->map_.Shrink(allocator->metadata_.PoolTotal())) != ZX_OK) {
        return status;
    }
#ifdef __Fuchsia__
    vmoid_t map_vmoid;
    if ((status = bc->AttachVmo(allocator->map_.StorageUnsafe()->GetVmo(), &map_vmoid)) != ZX_OK) {
        return status;
    }
    auto data = map_vmoid;
#else
    auto data = allocator->map_.StorageUnsafe()->GetData();
#endif
    txn->Enqueue(data, 0, allocator->metadata_.MetadataStartBlock(), pool_blocks);
    *out = fbl::move(allocator);
    return ZX_OK;
}

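// Reserves |count| elements so that subsequent calls to Allocate() cannot
// fail, extending the underlying storage if necessary.
//
// Usage sketch (hypothetical caller; transaction setup omitted):
//
//   fbl::unique_ptr<AllocatorPromise> promise;
//   if (allocator->Reserve(txn, 3, &promise) == ZX_OK) {
//       size_t index = promise->Allocate(txn); // May be invoked three times.
//   }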
zx_status_t Allocator::Reserve(WriteTxn* txn, size_t count,
                               fbl::unique_ptr<AllocatorPromise>* out_promise) {
    if (GetAvailable() < count) {
        // If we do not have enough free elements, attempt to extend the partition.
        zx_status_t status;
        // TODO(planders): Allow Extend to take in |count|.
        if ((status = Extend(txn)) != ZX_OK) {
            return status;
        }

        ZX_DEBUG_ASSERT(GetAvailable() >= count);
    }

    reserved_ += count;
    (*out_promise).reset(new AllocatorPromise(this, count));
    return ZX_OK;
}

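// Returns |count| previously reserved elements to the allocator without
// allocating them.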
void Allocator::Unreserve(size_t count) {
    ZX_DEBUG_ASSERT(reserved_ >= count);
    reserved_ -= count;
}

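// Allocates a single element from the bitmap, preferring offsets at or after
// |hint_|. The caller must hold a reservation for at least one element.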
size_t Allocator::Allocate(WriteTxn* txn) {
    ZX_DEBUG_ASSERT(reserved_ > 0);
    size_t bitoff_start;
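    // Search from the hint to the end of the bitmap; if no free element is
    // found there, wrap around and search from the beginning. A reservation
    // guarantees that one of these searches must succeed.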
    if (map_.Find(false, hint_, map_.size(), 1, &bitoff_start) != ZX_OK) {
        ZX_ASSERT(map_.Find(false, 0, hint_, 1, &bitoff_start) == ZX_OK);
    }

    ZX_ASSERT(map_.Set(bitoff_start, bitoff_start + 1) == ZX_OK);

    Persist(txn, bitoff_start, 1);
    metadata_.PoolAllocate(1);
    reserved_ -= 1;
    sb_->Write(txn);
    hint_ = bitoff_start + 1;
    return bitoff_start;
}

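// Frees the element at |index|, persisting the updated bitmap within |txn|.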
void Allocator::Free(WriteTxn* txn, size_t index) {
    ZX_DEBUG_ASSERT(map_.Get(index, index + 1));
    map_.Clear(index, index + 1);
    Persist(txn, index, 1);
    metadata_.PoolRelease(1);
    sb_->Write(txn);

    if (index < hint_) {
        hint_ = index;
    }
}

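// Grows the allocation pool by a single FVM data slice, provided the existing
// bitmap slices can already track the enlarged pool.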
zx_status_t Allocator::Extend(WriteTxn* txn) {
#ifdef __Fuchsia__
    TRACE_DURATION("minfs", "Minfs::Allocator::Extend");
    if (!metadata_.UsingFvm()) {
        return ZX_ERR_NO_SPACE;
    }
    uint32_t data_slices_diff = 1;

    // Determine if we will have enough space in the bitmap slice
    // to grow |data_slices_diff| data slices.

    // How large is the bitmap right now?
    uint32_t bitmap_slices = metadata_.Fvm().MetadataSlices();
    uint32_t bitmap_blocks = metadata_.Fvm().UnitsPerSlices(bitmap_slices, kMinfsBlockSize);

    // How large does the bitmap need to be?
    uint32_t data_slices = metadata_.Fvm().DataSlices();
    uint32_t data_slices_new = data_slices + data_slices_diff;

    uint32_t pool_size = metadata_.Fvm().UnitsPerSlices(data_slices_new,
                                                        static_cast<uint32_t>(unit_size_));
    uint32_t bitmap_blocks_new = BitmapBlocksForSize(pool_size);

    if (bitmap_blocks_new > bitmap_blocks) {
        // TODO(smklein): Grow the bitmap another slice.
        fprintf(stderr, "Minfs allocator needs to increase bitmap size\n");
        return ZX_ERR_NO_SPACE;
    }

    // Make the request to the FVM.
    extend_request_t request;
    request.length = data_slices_diff;
    request.offset = metadata_.Fvm().BlocksToSlices(metadata_.DataStartBlock()) + data_slices;

    zx_status_t status;
    if ((status = bc_->FVMExtend(&request)) != ZX_OK) {
        fprintf(stderr, "minfs::Allocator::Extend failed to grow (on disk): %d\n", status);
        return status;
    }

    if (grow_cb_) {
        if ((status = grow_cb_(pool_size)) != ZX_OK) {
            fprintf(stderr, "minfs::Allocator grow callback failure: %d\n", status);
            return status;
        }
    }

    // Extend the in-memory representation of our allocation pool -- it grew!
    ZX_DEBUG_ASSERT(pool_size >= map_.size());
    size_t old_pool_size = map_.size();
    if ((status = map_.Grow(fbl::round_up(pool_size, kMinfsBlockBits))) != ZX_OK) {
        fprintf(stderr, "minfs::Allocator failed to Grow (in memory): %d\n", status);
        return ZX_ERR_NO_SPACE;
    }
    // Grow before shrinking to ensure the underlying storage is a multiple
    // of kMinfsBlockSize.
    map_.Shrink(pool_size);

    metadata_.Fvm().SetDataSlices(data_slices_new);
    metadata_.SetPoolTotal(pool_size);
    sb_->Write(txn);

    // Update the block bitmap.
    Persist(txn, old_pool_size, pool_size - old_pool_size);
    return ZX_OK;
#else
    return ZX_ERR_NO_SPACE;
#endif
}

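// Writes the blocks of the bitmap covering bits [index, index + count) out to
// disk via |txn|.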
void Allocator::Persist(WriteTxn* txn, size_t index, size_t count) {
    blk_t rel_block = static_cast<blk_t>(index / kMinfsBlockBits);
    blk_t abs_block = metadata_.MetadataStartBlock() + rel_block;
    // The range may begin partway through a bitmap block, so compute the block
    // count from the first and last bits touched rather than from |count| alone.
    blk_t last_rel_block = static_cast<blk_t>((index + count - 1) / kMinfsBlockBits);
    blk_t blk_count = last_rel_block - rel_block + 1;

#ifdef __Fuchsia__
    auto data = map_.StorageUnsafe()->GetVmo();
#else
    auto data = map_.StorageUnsafe()->GetData();
#endif
    txn->Enqueue(data, rel_block, abs_block, blk_count);
}

} // namespace minfs