// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#pragma once

#include <optional>

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>

#include <bitmap/raw-bitmap.h>
#include <bitmap/rle-bitmap.h>
#include <blobfs/common.h>
#include <blobfs/extent-reserver.h>
#include <blobfs/format.h>
#include <blobfs/iterator/extent-iterator.h>
#include <blobfs/node-reserver.h>
#include <fbl/algorithm.h>
#include <fbl/function.h>
#include <fbl/vector.h>
#include <fs/trace.h>
#include <lib/fzl/resizeable-vmo-mapper.h>
#include <lib/zx/vmo.h>
#include <zircon/types.h>

namespace blobfs {

// An interface which controls actual access to the underlying storage.
class SpaceManager {
public:
    virtual ~SpaceManager() = default;

    virtual const Superblock& Info() const = 0;

    // Adds any number of nodes to |node_map|, extending the volume if necessary.
    virtual zx_status_t AddInodes(fzl::ResizeableVmoMapper* node_map) = 0;

    // Adds space for |nblocks| blocks to |map|, extending the volume if necessary.
    virtual zx_status_t AddBlocks(uint64_t nblocks, RawBitmap* map) = 0;

    // Allocates a vmoid by registering a VMO with the underlying block device.
    virtual zx_status_t AttachVmo(const zx::vmo& vmo, vmoid_t* out) = 0;

    // Releases an allocated vmoid.
    virtual zx_status_t DetachVmo(vmoid_t vmoid) = 0;
};

// Allocates and frees both block and node entries.
//
// Also maintains reservation mappings, to prevent in-progress allocations from
// being persisted too early.
class Allocator : private ExtentReserver, private NodeReserver, public NodeFinder {
public:
    Allocator(SpaceManager* space_manager, RawBitmap block_map, fzl::ResizeableVmoMapper node_map);
    ~Allocator();

    using ExtentReserver::ReservedBlockCount;
    using NodeReserver::ReservedNodeCount;

    ////////////////
    // blobfs::NodeFinder interface.
    //
    // TODO(smklein): It may be possible to convert NodeFinder from an interface
    // to a concrete base class if we can reconcile the differences with host.

    Inode* GetNode(uint32_t node_index) final;

    ////////////////
    // Other interfaces.

    void SetLogging(bool enable) {
        log_allocation_failure_ = enable;
    }

    // Returns true if [start_block, end_block) is allocated.
    //
    // If any blocks are unallocated, will set the optional output parameter
    // |out_first_unset| to the first unallocated block within this range.
    bool CheckBlocksAllocated(uint64_t start_block, uint64_t end_block,
                              uint64_t* out_first_unset = nullptr) const;

    // Resets the size of the block map based on |Info().data_block_count|.
    //
    // It is unsafe to call this method while any blocks are reserved.
    zx_status_t ResetBlockMapSize();

    // Resets the size of the node map based on |Info().inode_count|.
    //
    // It is unsafe to call this method while any nodes are reserved.
    zx_status_t ResetNodeMapSize();

    // Reads the block map and node map from underlying storage, using a
    // blocking read transaction.
    //
    // It is unsafe to call this method while any nodes or blocks are reserved.
    zx_status_t ResetFromStorage(fs::ReadTxn txn);

    // Provides a read-only view into the block map.
    const zx::vmo& GetBlockMapVmo() const;

    // Provides a read-only view into the node map.
    const zx::vmo& GetNodeMapVmo() const;

    // Reserves space for blocks in memory. Does not update disk.
    //
    // On success, appends the (possibly non-contiguous) region of allocated
    // blocks to |out_extents|.
    // On failure, |out_extents| is cleared.
    zx_status_t ReserveBlocks(uint64_t num_blocks, fbl::Vector<ReservedExtent>* out_extents);

    // Marks previously reserved blocks as allocated in the block map.
    void MarkBlocksAllocated(const ReservedExtent& reserved_extent);

    // Frees blocks which have already been allocated (in-memory).
    //
    // |extent| must represent a portion of the block map which has already been
    // allocated.
    void FreeBlocks(const Extent& extent);

    // Reserves space for nodes in memory. Does not update disk.
    //
    // On success, appends the (possibly non-contiguous) nodes to |out_nodes|.
    // On failure, |out_nodes| is cleared.
    zx_status_t ReserveNodes(uint64_t num_nodes, fbl::Vector<ReservedNode>* out_nodes);

    // Reserves a node for allocation (in-memory).
    std::optional<ReservedNode> ReserveNode();

    // Marks a reserved node by updating the node map to indicate it is an
    // allocated inode.
    void MarkInodeAllocated(const ReservedNode& node);

    // Marks a reserved node by updating the node map to indicate it is an
    // allocated extent container.
    void MarkContainerNodeAllocated(const ReservedNode& node, uint32_t previous_node);

    // Frees a node which has already been committed.
    void FreeNode(uint32_t node_index);

private:
    // Returns true if [start_block, end_block) is unallocated.
    bool CheckBlocksUnallocated(uint64_t start_block, uint64_t end_block) const;

    // Avoids collisions with the committed block map by moving the starting
    // location / block length until a region with no collision is found.
    //
    // Returns true if we should restart searching to attempt to maximally munch
    // from the allocation pool.
    bool FindUnallocatedExtent(uint64_t start, uint64_t block_length,
                               uint64_t* out_start, uint64_t* out_block_length);

    // Identifies the subset of blocks which don't collide with pending
    // reservations. If any collisions exist, maximally munches the available
    // free space into newly reserved extents.
    //
    // It is assumed that [start, start + block_length) is unallocated;
    // this is internally asserted. |FindUnallocatedExtent| should be invoked
    // first to provide this guarantee.
    //
    // Returns true if we should restart searching to attempt to maximally munch
    // from the allocation pool. Otherwise, no collisions with pending
    // reservations exist.
    bool MunchUnreservedExtents(bitmap::RleBitmap::const_iterator reserved_iterator,
                                uint64_t remaining_blocks,
                                uint64_t start,
                                uint64_t block_length,
                                fbl::Vector<ReservedExtent>* out_extents,
                                bitmap::RleBitmap::const_iterator* out_reserved_iterator,
                                uint64_t* out_remaining_blocks,
                                uint64_t* out_start,
                                uint64_t* out_block_length);

    // Searches for |num_blocks| free blocks between the block_map_ and reserved_blocks_
    // bitmaps, starting the search at block |start|.
    //
    // Appends the (possibly non-contiguous) region of allocated blocks to |out_extents|.
    //
    // May fail if not enough blocks can be found. In this case, an error will be returned,
    // and the number of found blocks will be returned in |out_actual_blocks|. This result
    // is guaranteed to be less than or equal to |num_blocks|.
    zx_status_t FindBlocks(uint64_t start, uint64_t num_blocks,
                           fbl::Vector<ReservedExtent>* out_extents, uint64_t* out_actual_blocks);

    // Searches for a free (unallocated and unreserved) node, returning its index
    // in |node_index_out| on success.
    zx_status_t FindNode(uint32_t* node_index_out);

    // Logs a failure to allocate |num_blocks| blocks.
    void LogAllocationFailure(uint64_t num_blocks) const;

    SpaceManager* space_manager_;

    RawBitmap block_map_ = {};
    fzl::ResizeableVmoMapper node_map_;

    vmoid_t block_map_vmoid_ = VMOID_INVALID;
    vmoid_t node_map_vmoid_ = VMOID_INVALID;

    bool log_allocation_failure_ = true;
};

} // namespace blobfs
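
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of this header's API surface):
// the reserve-then-commit flow implied by the Allocator interface above.
// Reservations made by ReserveNode() / ReserveBlocks() live only in memory;
// MarkInodeAllocated() and MarkBlocksAllocated() commit them to the in-memory
// node and block maps, and persisting those maps to disk is a separate step
// handled elsewhere. The function name and error handling below are
// hypothetical.
//
//   zx_status_t AllocateBlobResources(blobfs::Allocator* allocator,
//                                     uint64_t block_count) {
//       // Reserve an inode for the new blob (in-memory only).
//       std::optional<blobfs::ReservedNode> node = allocator->ReserveNode();
//       if (!node) {
//           return ZX_ERR_NO_SPACE;
//       }
//
//       // Reserve a (possibly non-contiguous) set of data blocks.
//       fbl::Vector<blobfs::ReservedExtent> extents;
//       zx_status_t status = allocator->ReserveBlocks(block_count, &extents);
//       if (status != ZX_OK) {
//           // |extents| is cleared on failure; |node| presumably releases its
//           // reservation when it goes out of scope.
//           return status;
//       }
//
//       // Commit the reservations to the in-memory node and block maps.
//       allocator->MarkInodeAllocated(*node);
//       for (const blobfs::ReservedExtent& extent : extents) {
//           allocator->MarkBlocksAllocated(extent);
//       }
//       return ZX_OK;
//   }
// ---------------------------------------------------------------------------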