// Copyright 2016 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#pragma once

#include <assert.h>
#include <fbl/array.h>
#include <fbl/canary.h>
#include <fbl/intrusive_double_list.h>
#include <fbl/macros.h>
#include <fbl/name.h>
#include <fbl/ref_counted.h>
#include <fbl/ref_ptr.h>
#include <kernel/lockdep.h>
#include <kernel/mutex.h>
#include <lib/user_copy/user_ptr.h>
#include <list.h>
#include <stdint.h>
#include <vm/page.h>
#include <vm/vm.h>
#include <vm/vm_page_list.h>
#include <zircon/thread_annotations.h>
#include <zircon/types.h>

class VmMapping;

typedef zx_status_t (*vmo_lookup_fn_t)(void* context, size_t offset, size_t index, paddr_t pa);

class VmObjectChildObserver {
public:
    virtual void OnZeroChild() = 0;
    virtual void OnOneChild() = 0;
};
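
// Example (illustrative sketch, not part of the kernel API): a minimal
// observer that tracks child-count transitions. |MyObserver| is a
// hypothetical type; in practice VmObjectDispatcher fills this role and
// registers itself via VmObject::SetChildObserver() below.
//
//   class MyObserver : public VmObjectChildObserver {
//   public:
//       void OnZeroChild() override { has_children_ = false; }
//       void OnOneChild() override { has_children_ = true; }
//   private:
//       bool has_children_ = false;
//   };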

// The base vm object that holds a range of bytes of data
//
// Can be created without mapping and used as a container of data, or mappable
// into an address space via VmAddressRegion::CreateVmMapping
class VmObject : public fbl::RefCounted<VmObject>,
                 public fbl::DoublyLinkedListable<VmObject*> {
public:
    // public API
    virtual zx_status_t Resize(uint64_t size) { return ZX_ERR_NOT_SUPPORTED; }
    virtual zx_status_t ResizeLocked(uint64_t size) TA_REQ(lock_) { return ZX_ERR_NOT_SUPPORTED; }

    virtual uint64_t size() const { return 0; }
    virtual uint32_t create_options() const { return 0; }

    // Returns true if the object is backed by RAM.
    virtual bool is_paged() const { return false; }
    // Returns true if the object is backed by a contiguous range of physical
    // memory.
    virtual bool is_contiguous() const { return false; }
    // Returns true if the object size can be changed.
    virtual bool is_resizable() const { return false; }

    // Returns the number of physical pages currently allocated to the
    // object where (offset <= page_offset < offset+len).
    // |offset| and |len| are in bytes.
    virtual size_t AllocatedPagesInRange(uint64_t offset, uint64_t len) const {
        return 0;
    }
    // Returns the number of physical pages currently allocated to the object.
    size_t AllocatedPages() const {
        return AllocatedPagesInRange(0, size());
    }

    // find physical pages to back the range of the object
    virtual zx_status_t CommitRange(uint64_t offset, uint64_t len) {
        return ZX_ERR_NOT_SUPPORTED;
    }

    // free a range of the vmo back to the default state
    virtual zx_status_t DecommitRange(uint64_t offset, uint64_t len) {
        return ZX_ERR_NOT_SUPPORTED;
    }

    // Pin the given range of the vmo. If any pages are not committed, this
    // returns ZX_ERR_NO_MEMORY.
    virtual zx_status_t Pin(uint64_t offset, uint64_t len) {
        return ZX_ERR_NOT_SUPPORTED;
    }

    // Unpin the given range of the vmo. This asserts if any page in the range
    // is not currently pinned (do not expose this function to usermode).
    virtual void Unpin(uint64_t offset, uint64_t len) {
        panic("Unpin should only be called on a pinned range");
    }
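
    // Example (illustrative sketch, not normative): a typical commit/pin
    // lifecycle for a caller that needs pages to stay resident, e.g. for DMA.
    // Assumes |vmo| refers to a paged VmObject subclass that implements these
    // hooks (the base-class stubs above return ZX_ERR_NOT_SUPPORTED):
    //
    //   zx_status_t status = vmo->CommitRange(0, PAGE_SIZE);
    //   if (status == ZX_OK) {
    //       status = vmo->Pin(0, PAGE_SIZE);
    //   }
    //   if (status == ZX_OK) {
    //       // ... program the device with the pinned pages ...
    //       vmo->Unpin(0, PAGE_SIZE);  // every successful Pin needs a matching Unpin
    //   }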

    // read/write operators against kernel pointers only
    virtual zx_status_t Read(void* ptr, uint64_t offset, size_t len) {
        return ZX_ERR_NOT_SUPPORTED;
    }
    virtual zx_status_t Write(const void* ptr, uint64_t offset, size_t len) {
        return ZX_ERR_NOT_SUPPORTED;
    }

    // execute lookup_fn on a given range of physical addresses within the vmo
    virtual zx_status_t Lookup(uint64_t offset, uint64_t len,
                               vmo_lookup_fn_t lookup_fn, void* context) {
        return ZX_ERR_NOT_SUPPORTED;
    }
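
    // Example (illustrative sketch): a vmo_lookup_fn_t callback that records
    // each physical address into a caller-provided array. |LookupState| and
    // |CollectPage| are hypothetical names introduced here for illustration:
    //
    //   struct LookupState {
    //       paddr_t* pas;
    //       size_t count;
    //   };
    //
    //   static zx_status_t CollectPage(void* context, size_t offset,
    //                                  size_t index, paddr_t pa) {
    //       auto* state = static_cast<LookupState*>(context);
    //       if (index >= state->count) {
    //           return ZX_ERR_OUT_OF_RANGE;  // returning non-ZX_OK aborts the walk
    //       }
    //       state->pas[index] = pa;
    //       return ZX_OK;
    //   }
    //
    //   // LookupState state = {pas, n};
    //   // zx_status_t status = vmo->Lookup(0, n * PAGE_SIZE, CollectPage, &state);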

    // read/write operators against user space pointers only
    virtual zx_status_t ReadUser(user_out_ptr<void> ptr, uint64_t offset, size_t len) {
        return ZX_ERR_NOT_SUPPORTED;
    }
    virtual zx_status_t WriteUser(user_in_ptr<const void> ptr, uint64_t offset, size_t len) {
        return ZX_ERR_NOT_SUPPORTED;
    }

    // The associated VmObjectDispatcher will set an observer to notify user mode.
    void SetChildObserver(VmObjectChildObserver* child_observer);

    // Returns a null-terminated name, or the empty string if set_name() has not
    // been called.
    void get_name(char* out_name, size_t len) const;

    // Sets the name of the object. May truncate internally. |len| is the size
    // of the buffer pointed to by |name|.
    zx_status_t set_name(const char* name, size_t len);
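
    // Example (illustrative): setting and reading back a name. get_name()
    // always null-terminates |out_name|, truncating if the buffer is small:
    //
    //   vmo->set_name("scratch", 8);  // 8 = strlen("scratch") + 1
    //   char buf[ZX_MAX_NAME_LEN];
    //   vmo->get_name(buf, sizeof(buf));  // buf now holds "scratch"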

    // Returns a user ID associated with this VMO, or zero.
    // Typically used to hold a zircon koid for Dispatcher-wrapped VMOs.
    uint64_t user_id() const;

    // Returns the parent's user_id() if this VMO has a parent,
    // otherwise returns zero.
    uint64_t parent_user_id() const;

    // Sets the value returned by |user_id()|. May only be called once.
    void set_user_id(uint64_t user_id);

    virtual void Dump(uint depth, bool verbose) = 0;

    // cache maintenance operations.
    virtual zx_status_t InvalidateCache(const uint64_t offset, const uint64_t len) {
        return ZX_ERR_NOT_SUPPORTED;
    }
    virtual zx_status_t CleanCache(const uint64_t offset, const uint64_t len) {
        return ZX_ERR_NOT_SUPPORTED;
    }
    virtual zx_status_t CleanInvalidateCache(const uint64_t offset, const uint64_t len) {
        return ZX_ERR_NOT_SUPPORTED;
    }
    virtual zx_status_t SyncCache(const uint64_t offset, const uint64_t len) {
        return ZX_ERR_NOT_SUPPORTED;
    }

    virtual uint32_t GetMappingCachePolicy() const = 0;
    virtual zx_status_t SetMappingCachePolicy(const uint32_t cache_policy) {
        return ZX_ERR_NOT_SUPPORTED;
    }

    // create a copy-on-write clone vmo at the page-aligned offset and length
    // note: it's okay to start or extend past the size of the parent
    virtual zx_status_t CloneCOW(bool resizable,
                                 uint64_t offset, uint64_t size, bool copy_name,
                                 fbl::RefPtr<VmObject>* clone_vmo) {
        return ZX_ERR_NOT_SUPPORTED;
    }
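
    // Example (illustrative): taking a resizable copy-on-write clone of the
    // first |size| bytes of a VMO. The clone initially shares the parent's
    // pages; a write through the clone gives it a private copy of the page:
    //
    //   fbl::RefPtr<VmObject> clone;
    //   zx_status_t status =
    //       vmo->CloneCOW(/*resizable=*/true, /*offset=*/0, vmo->size(),
    //                     /*copy_name=*/true, &clone);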

    // Returns true if this VMO was created via CloneCOW().
    // TODO: If more types of clones appear, replace this with a method that
    // returns an enum rather than adding a new method for each clone type.
    bool is_cow_clone() const;

    // get a pointer to the page structure and/or physical address at the specified offset.
    // valid flags are VMM_PF_FLAG_*
    zx_status_t GetPage(uint64_t offset, uint pf_flags, list_node* free_list,
                        vm_page_t** page, paddr_t* pa) {
        Guard<fbl::Mutex> guard{&lock_};
        return GetPageLocked(offset, pf_flags, free_list, page, pa);
    }

    // See VmObject::GetPage
    virtual zx_status_t GetPageLocked(uint64_t offset, uint pf_flags, list_node* free_list,
                                      vm_page_t** page, paddr_t* pa) TA_REQ(lock_) {
        return ZX_ERR_NOT_SUPPORTED;
    }

    Lock<fbl::Mutex>* lock() TA_RET_CAP(lock_) { return &lock_; }
    Lock<fbl::Mutex>& lock_ref() TA_RET_CAP(lock_) { return lock_; }
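
    // Example (illustrative): callers that need several *Locked operations to
    // be atomic can take the lock once via lock() and hold it across the
    // calls, mirroring what GetPage() does internally for a single call:
    //
    //   {
    //       Guard<fbl::Mutex> guard{vmo->lock()};
    //       vm_page_t* page;
    //       paddr_t pa;
    //       zx_status_t status =
    //           vmo->GetPageLocked(0, VMM_PF_FLAG_SW_FAULT, nullptr, &page, &pa);
    //   }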

    void AddMappingLocked(VmMapping* r) TA_REQ(lock_);
    void RemoveMappingLocked(VmMapping* r) TA_REQ(lock_);
    uint32_t num_mappings() const;

    // Returns true if this VMO is mapped into any VmAspace whose is_user()
    // returns true.
    bool IsMappedByUser() const;

    // Returns an estimate of the number of unique VmAspaces that this object
    // is mapped into.
    uint32_t share_count() const;

    void AddChildLocked(VmObject* r) TA_REQ(lock_);
    void RemoveChildLocked(VmObject* r) TA_REQ(lock_);
    uint32_t num_children() const;

    // Calls the provided |func(const VmObject&)| on every VMO in the system,
    // from oldest to newest. Stops if |func| returns an error, returning the
    // error value.
    template <typename T>
    static zx_status_t ForEach(T func) {
        Guard<fbl::Mutex> guard{AllVmosLock::Get()};
        for (const auto& iter : all_vmos_) {
            zx_status_t s = func(iter);
            if (s != ZX_OK) {
                return s;
            }
        }
        return ZX_OK;
    }
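
    // Example (illustrative): counting every VMO in the system with a lambda.
    // Note that |func| runs with the global AllVmosLock held:
    //
    //   size_t count = 0;
    //   VmObject::ForEach([&count](const VmObject& vmo) {
    //       count++;
    //       return ZX_OK;
    //   });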

protected:
    // protected constructor (use Create())
    explicit VmObject(fbl::RefPtr<VmObject> parent);
    VmObject()
        : VmObject(nullptr) {}

    // protected destructor, only called from refptr
    virtual ~VmObject();
    friend fbl::RefPtr<VmObject>;

    DISALLOW_COPY_ASSIGN_AND_MOVE(VmObject);

    // inform all mappings and children that pages in the given range of this
    // vmo were added or removed.
    void RangeChangeUpdateLocked(uint64_t offset, uint64_t len) TA_REQ(lock_);

    // above call but called from a parent
    virtual void RangeChangeUpdateFromParentLocked(uint64_t offset, uint64_t len)
        // Called under the parent's lock, which confuses analysis.
        TA_NO_THREAD_SAFETY_ANALYSIS { RangeChangeUpdateLocked(offset, len); }

    // magic value
    fbl::Canary<fbl::magic("VMO_")> canary_;

    // members

    // declare a local mutex and default to pointing at it
    // if constructed with a parent vmo, point lock_ at the parent's lock
private:
    DECLARE_MUTEX(VmObject) local_lock_;

protected:
    Lock<fbl::Mutex>& lock_;

    // list of every mapping
    fbl::DoublyLinkedList<VmMapping*> mapping_list_ TA_GUARDED(lock_);

    // list of every child
    fbl::DoublyLinkedList<VmObject*> children_list_ TA_GUARDED(lock_);

    // parent pointer (may be null)
    fbl::RefPtr<VmObject> parent_ TA_GUARDED(lock_);

    // lengths of corresponding lists
    uint32_t mapping_list_len_ TA_GUARDED(lock_) = 0;
    uint32_t children_list_len_ TA_GUARDED(lock_) = 0;

    uint64_t user_id_ TA_GUARDED(lock_) = 0;

    // The user-friendly VMO name. For debug purposes only. That
    // is, there is no mechanism to get access to a VMO via this name.
    fbl::Name<ZX_MAX_NAME_LEN> name_;

private:
    // This member, if not null, is used to signal the user facing Dispatcher.
    VmObjectChildObserver* child_observer_ TA_GUARDED(lock_) = nullptr;

    // Per-node state for the global VMO list.
    using NodeState = fbl::DoublyLinkedListNodeState<VmObject*>;
    NodeState global_list_state_;

    // The global VMO list.
    struct GlobalListTraits {
        static NodeState& node_state(VmObject& vmo) {
            return vmo.global_list_state_;
        }
    };
    using GlobalList = fbl::DoublyLinkedList<VmObject*, GlobalListTraits>;
    DECLARE_SINGLETON_MUTEX(AllVmosLock);
    static GlobalList all_vmos_ TA_GUARDED(AllVmosLock::Get());
};