/*
 * Copyright 2001-2008, Axel Dörfler, axeld@pinc-software.de. All Rights Reserved.
 * This file may be used under the terms of the MIT License.
 */
#ifndef VOLUME_H
#define VOLUME_H


#include <KernelExport.h>
#include "fsproto.h"

#include "cache.h"

#include "bfs.h"
#include "BlockAllocator.h"
#include "BufferPool.h"
#include "Chain.h"

class Journal;
class Inode;
class Query;

// Runtime state bits kept in Volume::fFlags.
enum volume_flags {
	VOLUME_READ_ONLY	= 0x0001
};

// Flags accepted by Volume::Initialize().
enum volume_initialize_flags {
	VOLUME_NO_INDICES	= 0x0001,
		// create the volume without the attribute index directory
};

// Represents a single mounted (or freshly initialized) BFS volume.
// Owns the in-memory copy of the on-disk super block and the per-volume
// services built on top of it: the block allocator, the journal, the
// buffer pool, and the list of live queries. It also provides the
// conversions between block_runs, absolute block numbers, byte offsets,
// and vnode IDs that the rest of the file system uses.
class Volume {
	public:
		Volume(dev_t id);
		~Volume();

		// Mounting/unmounting and creating a fresh file system on "device".
		status_t Mount(const char *device, uint32 flags);
		status_t Unmount();
		status_t Initialize(const char *device, const char *name,
			uint32 blockSize, uint32 flags);
			// "flags" takes volume_initialize_flags values

		bool IsValidSuperBlock();
		bool IsReadOnly() const;
		void Panic();
		RecursiveLock &Lock();

		// Accessors for the well-known on-disk locations and journal bounds.
		block_run Root() const { return fSuperBlock.root_dir; }
		Inode *RootNode() const { return fRootNode; }
		block_run Indices() const { return fSuperBlock.indices; }
		Inode *IndicesNode() const { return fIndicesNode; }
		block_run Log() const { return fSuperBlock.log_blocks; }
		vint32 &LogStart() { return fLogStart; }
		vint32 &LogEnd() { return fLogEnd; }
		int Device() const { return fDevice; }

		dev_t ID() const { return fID; }
		const char *Name() const { return fSuperBlock.name; }

		// Volume geometry, all derived from the super block.
		off_t NumBlocks() const { return fSuperBlock.NumBlocks(); }
		off_t UsedBlocks() const { return fSuperBlock.UsedBlocks(); }
		off_t FreeBlocks() const { return NumBlocks() - UsedBlocks(); }

		uint32 BlockSize() const { return fBlockSize; }
		uint32 BlockShift() const { return fBlockShift; }
			// log2 of the block size; cached so conversions can shift
			// instead of multiply/divide
		uint32 InodeSize() const { return fSuperBlock.InodeSize(); }
		uint32 AllocationGroups() const { return fSuperBlock.AllocationGroups(); }
		uint32 AllocationGroupShift() const { return fAllocationGroupShift; }
		disk_super_block &SuperBlock() { return fSuperBlock; }

		// Conversions between a block_run (allocation group + start offset),
		// an absolute block number, and a byte offset on the device.
		off_t ToOffset(block_run run) const { return ToBlock(run) << BlockShift(); }
		off_t ToBlock(block_run run) const { return ((((off_t)run.AllocationGroup()) << AllocationGroupShift()) | (off_t)run.Start()); }
			// the allocation group occupies the high bits, the start
			// block within the group the low AllocationGroupShift() bits
		block_run ToBlockRun(off_t block) const;
		status_t ValidateBlockRun(block_run run);

		// BFS uses an inode's absolute block number as its vnode ID,
		// so these conversions are essentially identities.
		off_t ToVnode(block_run run) const { return ToBlock(run); }
		off_t ToVnode(off_t block) const { return block; }
		off_t VnodeToBlock(vnode_id id) const { return (off_t)id; }

		status_t CreateIndicesRoot(Transaction *transaction);

		// block bitmap
		BlockAllocator &Allocator();
		status_t AllocateForInode(Transaction *transaction, const Inode *parent,
			mode_t type, block_run &run);
		status_t AllocateForInode(Transaction *transaction, const block_run *parent,
			mode_t type, block_run &run);
		status_t Allocate(Transaction *transaction, const Inode *inode,
			off_t numBlocks, block_run &run, uint16 minimum = 1);
		status_t Free(Transaction *transaction, block_run run);

		// cache access
		status_t WriteSuperBlock();
		status_t WriteBlocks(off_t blockNumber, const uint8 *block, uint32 numBlocks);
		void WriteCachedBlocksIfNecessary();
		status_t FlushDevice();

		// queries
		void UpdateLiveQueries(Inode *inode, const char *attribute, int32 type,
			const uint8 *oldKey, size_t oldLength,
			const uint8 *newKey, size_t newLength);
			// notifies all live queries about an attribute change
			// (old and new key may each be NULL/empty)
		bool CheckForLiveQuery(const char *attribute);
		void AddQuery(Query *query);
		void RemoveQuery(Query *query);

		status_t Sync();
		Journal *GetJournal(off_t refBlock) const;

		BufferPool &Pool();

		uint32 GetUniqueID();

		// Checks whether the device contains a valid BFS super block;
		// on success the super block is copied to "superBlock".
		static status_t Identify(int fd, disk_super_block *superBlock);

	protected:
		dev_t fID;						// volume ID handed in at construction
		int fDevice;					// file descriptor of the underlying device
		disk_super_block fSuperBlock;	// in-memory copy of the on-disk super block

		uint32 fBlockSize;
		uint32 fBlockShift;
		uint32 fAllocationGroupShift;

		BlockAllocator fBlockAllocator;
		RecursiveLock fLock;
		Journal *fJournal;
		vint32 fLogStart, fLogEnd;		// current journal bounds (volatile)

		Inode *fRootNode;				// root directory of the volume
		Inode *fIndicesNode;			// index directory (may be absent)

		vint32 fDirtyCachedBlocks;		// heuristic count, see WriteBlocks()

		SimpleLock fQueryLock;			// guards fQueries
		Chain<Query> fQueries;			// all live queries on this volume

		int32 fUniqueID;				// counter backing GetUniqueID()
		uint32 fFlags;					// volume_flags bits

		BufferPool fBufferPool;
};


// inline functions

inline bool
Volume::IsReadOnly() const
{
	return fFlags & VOLUME_READ_ONLY;
}


inline RecursiveLock &
Volume::Lock()
{
	return fLock;
}


inline BlockAllocator &
Volume::Allocator()
{
	return fBlockAllocator;
}


inline status_t
Volume::AllocateForInode(Transaction *transaction, const block_run *parent, mode_t type, block_run &run)
{
	return fBlockAllocator.AllocateForInode(transaction, parent, type, run);
}


inline status_t
Volume::Allocate(Transaction *transaction, const Inode *inode, off_t numBlocks, block_run &run, uint16 minimum)
{
	return fBlockAllocator.Allocate(transaction, inode, numBlocks, run, minimum);
}


inline status_t
Volume::Free(Transaction *transaction, block_run run)
{
	return fBlockAllocator.Free(transaction, run);
}


inline status_t
Volume::WriteBlocks(off_t blockNumber, const uint8 *block, uint32 numBlocks)
{
	// count the blocks as dirty so WriteCachedBlocksIfNecessary() can
	// decide when to push the cache to disk
	atomic_add(&fDirtyCachedBlocks, numBlocks);
	return cached_write(fDevice, blockNumber, block, numBlocks, fSuperBlock.block_size);
}


inline void
Volume::WriteCachedBlocksIfNecessary()
{
	// the specific values are only valid for the current BeOS cache
	// (128 dirty blocks triggers a flush; the counter is only reduced
	// by 64, keeping it an approximation rather than an exact count)
	if (fDirtyCachedBlocks > 128) {
		force_cache_flush(fDevice, false);
		atomic_add(&fDirtyCachedBlocks, -64);
	}
}


inline status_t
Volume::FlushDevice()
{
	// everything is written back, so reset the dirty-block heuristic
	fDirtyCachedBlocks = 0;
	return flush_device(fDevice, 0);
}


inline Journal *
Volume::GetJournal(off_t /*refBlock*/) const
{
	// there is only a single journal per volume, hence "refBlock"
	// is currently ignored
	return fJournal;
}


inline BufferPool &
Volume::Pool()
{
	return fBufferPool;
}


inline uint32
Volume::GetUniqueID()
{
	// atomic_add() returns the previous value, so concurrent callers
	// each receive a distinct ID
	return atomic_add(&fUniqueID, 1);
}

#endif	/* VOLUME_H */