Lines matching refs:fbl (format: source line number, then the matching line)

15 #include <fbl/alloc_checker.h>
16 #include <fbl/auto_call.h>
17 #include <fbl/limits.h>
18 #include <fbl/ref_ptr.h>
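
The four headers above (lines 15-18) supply the pieces used throughout this file: fallible allocation checks, scope-exit callbacks, numeric limits, and intrusive reference counting. A minimal sketch of the fbl::AllocChecker idiom, assuming fbl::move lives in fbl/type_support.h in this era and using a hypothetical Foo type:

    #include <fbl/alloc_checker.h>
    #include <fbl/type_support.h>  // fbl::move (header assumed for this era)
    #include <fbl/unique_ptr.h>
    #include <zircon/errors.h>
    #include <zircon/types.h>

    struct Foo { int value = 0; };  // hypothetical payload, for illustration only

    // Placement new reports failure through the AllocChecker rather than
    // throwing; this code builds without C++ exceptions.
    zx_status_t MakeFoo(fbl::unique_ptr<Foo>* out) {
        fbl::AllocChecker ac;
        fbl::unique_ptr<Foo> foo(new (&ac) Foo());
        if (!ac.check()) {
            return ZX_ERR_NO_MEMORY;
        }
        *out = fbl::move(foo);
        return ZX_OK;
    }
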
119 zx_status_t EnqueuePaginated(fbl::unique_ptr<WritebackWork>* work, Blobfs* blobfs, VnodeBlob* vn,
123 uint64_t delta_blocks = fbl::min(nblocks, kMaxChunkBlocks);
129 delta_blocks = fbl::min(nblocks, kMaxChunkBlocks);
131 fbl::unique_ptr<WritebackWork> tmp;
136 blobfs->EnqueueWork(fbl::move(*work));
137 *work = fbl::move(tmp);
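
Lines 119-137 are the heart of EnqueuePaginated: a large write is split into chunks of at most kMaxChunkBlocks, and each iteration flushes the current WritebackWork and rotates in a fresh one. A sketch of that rotation under stand-in types (the real WritebackWork and Blobfs carry transaction state; the kMaxChunkBlocks value here is illustrative):

    #include <stdint.h>
    #include <fbl/algorithm.h>     // fbl::min (header assumed for this era)
    #include <fbl/type_support.h>  // fbl::move
    #include <fbl/unique_ptr.h>

    constexpr uint64_t kMaxChunkBlocks = 64;  // illustrative value

    struct WritebackWork {};  // stand-in for the real writeback unit

    struct Queue {  // stand-in for the Blobfs writeback queue
        void EnqueueWork(fbl::unique_ptr<WritebackWork> work) { /* submit */ }
    };

    void EnqueueChunked(Queue* q, fbl::unique_ptr<WritebackWork>* work,
                        uint64_t nblocks) {
        while (nblocks > 0) {
            uint64_t delta_blocks = fbl::min(nblocks, kMaxChunkBlocks);
            // ... stage delta_blocks worth of requests onto *work ...
            fbl::unique_ptr<WritebackWork> tmp(new WritebackWork());
            q->EnqueueWork(fbl::move(*work));  // flush the filled chunk
            *work = fbl::move(tmp);            // continue on a fresh unit
            nblocks -= delta_blocks;
        }
    }
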
184 auto cleanup = fbl::MakeAutoCall([this]() { BlobCloseHandles(); });
229 fbl::unique_ptr<fzl::MappedVmo> compressed_blob;
249 auto detach = fbl::MakeAutoCall([this, &compressed_vmoid]() {
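
Lines 184 and 249 rely on fbl::MakeAutoCall for scope-exit cleanup: the returned guard runs its lambda when destroyed, unless cancelled on the success path. A self-contained sketch:

    #include <fbl/auto_call.h>

    void DetachOnError() {
        bool attached = true;
        auto detach = fbl::MakeAutoCall([&attached]() { attached = false; });
        // ... work that may return early on error; every early return
        //     triggers the lambda automatically ...
        detach.cancel();  // success path: keep the resource attached
    }
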
359 fbl::unique_ptr<WritebackWork> wb;
362 } else if ((status = WriteMetadata(fbl::move(wb))) != ZX_OK) {
384 write_info_ = fbl::make_unique<WritebackInfo>();
417 zx_status_t VnodeBlob::WriteMetadata(fbl::unique_ptr<WritebackWork> wb) {
443 blobfs_->EnqueueWork(fbl::move(wb));
461 size_t to_write = fbl::min(len, inode_.blob_size - write_info_->bytes_written);
485 fbl::unique_ptr<WritebackWork> wb;
499 uint64_t blocks = fbl::round_up(write_info_->compressor.Size(),
513 uint64_t blocks = fbl::round_up(inode_.blob_size, kBlobfsBlockSize) / kBlobfsBlockSize;
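
Lines 499 and 513 convert byte sizes into block counts: fbl::round_up rounds to the next multiple of its second argument, so dividing by the block size afterward is a ceiling division. A sketch, assuming fbl/algorithm.h provides round_up in this era and using a stand-in block size:

    #include <stdint.h>
    #include <fbl/algorithm.h>  // fbl::round_up (header assumed for this era)

    constexpr uint64_t kBlockSize = 8192;  // stand-in for kBlobfsBlockSize

    // Ceiling division via round-up, as at line 513.
    uint64_t BytesToBlocks(uint64_t bytes) {
        return fbl::round_up(bytes, kBlockSize) / kBlockSize;
    }
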
554 if ((status = WriteMetadata(fbl::move(wb))) != ZX_OK) {
634 clone_ref_ = fbl::RefPtr<VnodeBlob>(this);
695 fbl::AllocChecker ac;
696 fbl::RefPtr<VnodeBlob> vn =
697 fbl::AdoptRef(new (&ac) VnodeBlob(bs, digest));
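
Lines 695-697 show the standard construction of a ref-counted vnode: placement new through an AllocChecker, then fbl::AdoptRef to take ownership of the object's initial reference. The same pattern recurs at lines 1003-1004, 1465-1467, and 1528-1530. A sketch with Node standing in for VnodeBlob:

    #include <fbl/alloc_checker.h>
    #include <fbl/ref_counted.h>
    #include <fbl/ref_ptr.h>
    #include <fbl/type_support.h>  // fbl::move
    #include <zircon/errors.h>
    #include <zircon/types.h>

    class Node : public fbl::RefCounted<Node> {};  // stand-in for VnodeBlob

    zx_status_t MakeNode(fbl::RefPtr<Node>* out) {
        fbl::AllocChecker ac;
        fbl::RefPtr<Node> node = fbl::AdoptRef(new (&ac) Node());
        if (!ac.check()) {
            return ZX_ERR_NO_MEMORY;
        }
        *out = fbl::move(node);  // the initial reference passes to the caller
        return ZX_OK;
    }
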
753 size_t hint = block_map_.size() - fbl::min(num_blocks, block_map_.size());
908 fbl::unique_ptr<fzl::MappedVmo> buffer;
913 if ((status = WritebackBuffer::Create(this, fbl::move(buffer), &writeback_)) != ZX_OK) {
925 ManagedVfs::Shutdown([this, cb = fbl::move(cb)](zx_status_t status) mutable {
929 fbl::Vector<fbl::RefPtr<VnodeBlob>> internal_references;
931 fbl::AutoLock lock(&hash_lock_);
935 internal_references.push_back(fbl::move(vn));
942 Sync([this, cb = fbl::move(cb)](zx_status_t status) mutable {
943 async::PostTask(dispatcher(), [this, cb = fbl::move(cb)]() mutable {
955 auto on_unmount = fbl::move(on_unmount_);
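
The shutdown path (lines 925-955) threads the completion callback through nested lambdas with fbl::move, and takes hash_lock_ via an RAII fbl::AutoLock while it collects vnode references. A sketch of the locking half (the hash table is replaced by a counter for brevity):

    #include <stddef.h>
    #include <fbl/auto_lock.h>
    #include <fbl/mutex.h>

    class Table {
    public:
        void Insert() {
            fbl::AutoLock lock(&lock_);  // released when `lock` leaves scope
            count_++;
        }
        size_t size() {
            fbl::AutoLock lock(&lock_);
            return count_;
        }
    private:
        fbl::Mutex lock_;
        size_t count_ = 0;
    };
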
979 uint64_t bbm_end_block = fbl::round_up(start_block + nblocks,
994 zx_status_t Blobfs::NewBlob(const Digest& digest, fbl::RefPtr<VnodeBlob>* out) {
1003 fbl::AllocChecker ac;
1004 *out = fbl::AdoptRef(new (&ac) VnodeBlob(this, digest));
1009 fbl::AutoLock lock(&hash_lock_);
1036 fbl::unique_ptr<WritebackWork> wb;
1044 EnqueueWork(fbl::move(wb));
1100 if ((r = df.Next(fbl::StringPiece(name, Digest::kLength * 2),
1112 zx_status_t Blobfs::LookupBlob(const Digest& digest, fbl::RefPtr<VnodeBlob>* out) {
1115 auto release = fbl::MakeAutoCall([&digest]() {
1120 fbl::RefPtr<VnodeBlob> vn;
1123 fbl::AutoLock lock(&hash_lock_);
1126 vn = fbl::internal::MakeRefPtrUpgradeFromRaw(raw_vn, hash_lock_);
1156 *out = fbl::move(vn);
1204 ZX_DEBUG_ASSERT(inodes64 <= fbl::numeric_limits<uint32_t>::max());
1207 ZX_DEBUG_ASSERT(info_.inode_count <= fbl::numeric_limits<uint32_t>::max());
1225 fbl::unique_ptr<WritebackWork> wb;
1233 EnqueueWork(fbl::move(wb));
1251 ZX_DEBUG_ASSERT(blocks64 <= fbl::numeric_limits<uint32_t>::max());
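
Lines 1204, 1207, and 1251 guard 64-bit counts before they are narrowed to 32 bits; fbl::numeric_limits mirrors std::numeric_limits for code that avoids the C++ standard library. A sketch of the guard:

    #include <stdint.h>
    #include <fbl/limits.h>
    #include <zircon/assert.h>

    // Assert the value fits before narrowing, as at lines 1204-1251.
    uint32_t NarrowCount(uint64_t count64) {
        ZX_DEBUG_ASSERT(count64 <= fbl::numeric_limits<uint32_t>::max());
        return static_cast<uint32_t>(count64);
    }
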
1269 if (block_map_.Grow(fbl::round_up(blocks, kBlobfsBlockBits)) != ZX_OK) {
1277 fbl::unique_ptr<WritebackWork> wb;
1296 EnqueueWork(fbl::move(wb));
1302 fbl::unique_ptr<WritebackWork> wb;
1308 wb->SetClosure(fbl::move(closure));
1309 EnqueueWork(fbl::move(wb));
1374 Blobfs::Blobfs(fbl::unique_fd fd, const blobfs_info_t* info)
1375 : blockfd_(fbl::move(fd)) {
1390 zx_status_t Blobfs::Create(fbl::unique_fd fd, const blobfs_info_t* info,
1391 fbl::unique_ptr<Blobfs>* out) {
1399 fbl::AllocChecker ac;
1400 auto fs = fbl::unique_ptr<Blobfs>(new Blobfs(fbl::move(fd), info));
1413 if ((status = block_client::Client::Create(fbl::move(fifo), &fs->fifo_client_)) != ZX_OK) {
1427 ZX_DEBUG_ASSERT(fbl::round_up(nodemap_size, kBlobfsBlockSize) == nodemap_size);
1456 *out = fbl::move(fs);
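
Blobfs::Create (lines 1390-1456) follows the fallible static-factory pattern: a private constructor takes ownership of the fd, the AllocChecker guards the allocation, and the object is moved to the out-parameter only after every setup step succeeds. A sketch with Widget standing in for Blobfs (the real Create also wires up the FIFO client and checks superblock invariants):

    #include <fbl/alloc_checker.h>
    #include <fbl/type_support.h>  // fbl::move
    #include <fbl/unique_fd.h>
    #include <fbl/unique_ptr.h>
    #include <zircon/errors.h>
    #include <zircon/types.h>

    class Widget {  // stand-in for Blobfs
    public:
        static zx_status_t Create(fbl::unique_fd fd, fbl::unique_ptr<Widget>* out) {
            fbl::AllocChecker ac;
            auto w = fbl::unique_ptr<Widget>(new (&ac) Widget(fbl::move(fd)));
            if (!ac.check()) {
                return ZX_ERR_NO_MEMORY;
            }
            // ... fallible initialization steps would go here ...
            *out = fbl::move(w);
            return ZX_OK;
        }
    private:
        explicit Widget(fbl::unique_fd fd) : fd_(fbl::move(fd)) {}
        fbl::unique_fd fd_;
    };
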
1461 fbl::AutoLock lock(&hash_lock_);
1465 fbl::AllocChecker ac;
1467 fbl::RefPtr<VnodeBlob> vn = fbl::AdoptRef(new (&ac) VnodeBlob(this, digest));
1476 zx_status_t status = VnodeInsertClosedLocked(fbl::move(vn));
1491 fbl::AutoLock lock(&hash_lock_);
1496 fbl::AutoLock lock(&hash_lock_);
1498 fbl::RefPtr<VnodeBlob> vn = fbl::internal::MakeRefPtrNoAdopt(raw_vn);
1500 ZX_ASSERT(VnodeInsertClosedLocked(fbl::move(vn)) == ZX_OK);
1503 zx_status_t Blobfs::VnodeInsertClosedLocked(fbl::RefPtr<VnodeBlob> vn) {
1515 fbl::RefPtr<VnodeBlob> Blobfs::VnodeUpgradeLocked(const uint8_t* key) {
1524 return fbl::internal::MakeRefPtrNoAdopt(raw_vn);
1527 zx_status_t Blobfs::OpenRootNode(fbl::RefPtr<VnodeBlob>* out) {
1528 fbl::AllocChecker ac;
1529 fbl::RefPtr<VnodeBlob> vn =
1530 fbl::AdoptRef(new (&ac) VnodeBlob(this));
1541 *out = fbl::move(vn);
1554 zx_status_t blobfs_create(fbl::unique_ptr<Blobfs>* out, fbl::unique_fd blockfd) {
1581 if ((status = Blobfs::Create(fbl::move(blockfd), info, out)) != ZX_OK) {
1589 zx_status_t blobfs_mount(async_dispatcher_t* dispatcher, fbl::unique_fd blockfd,
1591 fbl::Closure on_unmount) {
1593 fbl::unique_ptr<Blobfs> fs;
1595 if ((status = blobfs_create(&fs, fbl::move(blockfd))) != ZX_OK) {
1608 fs->SetUnmountCallback(fbl::move(on_unmount));
1610 fbl::RefPtr<VnodeBlob> vn;
1616 if ((status = fs->ServeDirectory(fbl::move(vn), fbl::move(root))) != ZX_OK) {
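
blobfs_mount (lines 1589-1616) stores the caller's unmount callback on the filesystem (line 1608), and the teardown path later steals it with fbl::move before invoking it (line 955), guaranteeing a single call. A sketch of that hand-off, assuming fbl::Closure (fbl::Function<void()>) comes from fbl/function.h in this era:

    #include <fbl/function.h>      // fbl::Closure (header assumed for this era)
    #include <fbl/type_support.h>  // fbl::move

    class Unmounter {  // stand-in for the ManagedVfs/Blobfs teardown path
    public:
        void SetUnmountCallback(fbl::Closure cb) { on_unmount_ = fbl::move(cb); }
        void Finish() {
            auto on_unmount = fbl::move(on_unmount_);  // steal before running
            if (on_unmount) {
                on_unmount();  // runs at most once
            }
        }
    private:
        fbl::Closure on_unmount_;
    };
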