// Copyright 2017 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#pragma once

#include <cstdint>

#include <ddk/device.h>
#include <ddktl/device.h>
#include <ddktl/protocol/block.h>
#include <fbl/intrusive_wavl_tree.h>
#include <fbl/mutex.h>
#include <fbl/unique_ptr.h>
#include <zircon/thread_annotations.h>
#include <zircon/types.h>

#include "slice-extent.h"

namespace fvm {

// Forward declarations
class VPartitionManager;
class VPartition;

using PartitionDeviceType =
    ddk::Device<VPartition, ddk::Ioctlable, ddk::GetSizable, ddk::Unbindable>;

class VPartition : public PartitionDeviceType,
                   public ddk::BlockImplProtocol<VPartition, ddk::base_protocol> {
public:
    using SliceMap = fbl::WAVLTree<size_t, fbl::unique_ptr<SliceExtent>>;

    static zx_status_t Create(VPartitionManager* vpm, size_t entry_index,
                              fbl::unique_ptr<VPartition>* out);

    // Device Protocol
    zx_status_t DdkIoctl(uint32_t op, const void* cmd, size_t cmdlen, void* reply, size_t max,
                         size_t* out_actual);
    zx_off_t DdkGetSize();
    void DdkUnbind();
    void DdkRelease();

    // Block Protocol
    void BlockImplQuery(block_info_t* info_out, size_t* block_op_size_out);
    void BlockImplQueue(block_op_t* txn, block_impl_queue_callback completion_cb, void* cookie);

    // Returns an iterator to the first slice extent in |slice_map_|.
    SliceMap::iterator ExtentBegin() TA_REQ(lock_) { return slice_map_.begin(); }

    // Given a virtual slice, returns the physical slice allocated to it.
    // If no slice is allocated, returns PSLICE_UNALLOCATED.
    uint32_t SliceGetLocked(size_t vslice) const TA_REQ(lock_);

    // Checks slices starting from |vslice_start|.
    // Sets |*count| to the number of contiguous allocated or unallocated slices found.
    // Sets |*allocated| to true if the vslice range is allocated, and false otherwise.
    zx_status_t CheckSlices(size_t vslice_start, size_t* count, bool* allocated) TA_EXCL(lock_);

    // Like SliceSetLocked(), but bypasses thread-safety analysis; the caller
    // must guarantee exclusive access to this partition.
    zx_status_t SliceSetUnsafe(size_t vslice, uint32_t pslice) TA_NO_THREAD_SAFETY_ANALYSIS {
        return SliceSetLocked(vslice, pslice);
    }
    zx_status_t SliceSetLocked(size_t vslice, uint32_t pslice) TA_REQ(lock_);

    // Returns true if |vslice| is currently mapped to a physical slice
    // (and may therefore be freed).
    bool SliceCanFree(size_t vslice) const TA_REQ(lock_) {
        auto extent = --slice_map_.upper_bound(vslice);
        return extent.IsValid() && extent->get(vslice) != PSLICE_UNALLOCATED;
    }

    // Returns true if the slice was freed successfully, false otherwise.
    // If freeing from the back of an extent, guaranteed not to fail.
    bool SliceFreeLocked(size_t vslice) TA_REQ(lock_);

    // Destroys the extent containing |vslice|.
    void ExtentDestroyLocked(size_t vslice) TA_REQ(lock_);

    size_t BlockSize() const TA_NO_THREAD_SAFETY_ANALYSIS { return info_.block_size; }
    // Adjusts the number of blocks in |info_| by |nblocks|.
    void AddBlocksLocked(ssize_t nblocks) TA_REQ(lock_) { info_.block_count += nblocks; }

    size_t GetEntryIndex() const { return entry_index_; }

    // An entry index of zero marks this partition as killed.
    void KillLocked() TA_REQ(lock_) { entry_index_ = 0; }
    bool IsKilledLocked() TA_REQ(lock_) { return entry_index_ == 0; }

    VPartition(VPartitionManager* vpm, size_t entry_index, size_t block_op_size);
    ~VPartition();

    fbl::Mutex lock_;

private:
    DISALLOW_COPY_ASSIGN_AND_MOVE(VPartition);

    zx_device_t* GetParent() const;

    VPartitionManager* mgr_;
    size_t entry_index_;

    // Mapping of virtual slice number (index) to physical slice number (value).
    // Physical slice zero is reserved to mean "unmapped", so a zeroed slice_map_
    // indicates that the vpartition is completely unmapped and uses no
    // physical slices.
    SliceMap slice_map_ TA_GUARDED(lock_);
    block_info_t info_ TA_GUARDED(lock_);
};

} // namespace fvm
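
// Usage sketch (illustrative): a caller resolving a virtual slice before
// issuing I/O might look roughly like the snippet below. |vpart| (a
// VPartition*) and |vslice| are hypothetical names, and the sketch assumes
// fbl::AutoLock from <fbl/auto_lock.h> plus the PSLICE_UNALLOCATED constant
// referenced above.
//
//   fbl::AutoLock guard(&vpart->lock_);
//   uint32_t pslice = vpart->SliceGetLocked(vslice);
//   if (pslice == PSLICE_UNALLOCATED) {
//       // |vslice| has no backing physical slice; one would need to be
//       // allocated (and recorded via SliceSetLocked()) before use.
//   }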