// Copyright 2016 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include "vm/vm_object.h"

#include "vm_priv.h"

#include <assert.h>
#include <err.h>
#include <fbl/auto_lock.h>
#include <fbl/mutex.h>
#include <fbl/ref_ptr.h>
#include <inttypes.h>
#include <ktl/move.h>
#include <lib/console.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>

#include <vm/vm.h>
#include <vm/vm_address_region.h>

#include <zircon/types.h>

#define LOCAL_TRACE MAX(VM_GLOBAL_TRACE, 0)

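// Global list of all live VmObjects, protected by AllVmosLock. Every VMO
// adds itself in the constructor (newest at the end) and removes itself in
// the destructor.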
VmObject::GlobalList VmObject::all_vmos_ = {};

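// If a parent is supplied, this VMO is a clone and shares the parent's
// lock, so an entire clone tree is serialized by a single lock; a VMO with
// no parent uses its own local_lock_.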
VmObject::VmObject(fbl::RefPtr<VmObject> parent)
    : lock_(parent ? parent->lock_ref() : local_lock_),
      parent_(ktl::move(parent)) {
    LTRACEF("%p\n", this);

    // Add ourself to the global VMO list, newer VMOs at the end.
    {
        Guard<fbl::Mutex> guard{AllVmosLock::Get()};
        all_vmos_.push_back(this);
    }
}

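// Destruction order matters: detach from the parent first, then drop out
// of the global VMO list. All mappings and children must already be gone,
// as asserted below.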
VmObject::~VmObject() {
    canary_.Assert();
    LTRACEF("%p\n", this);

    // remove ourself from our parent (if present)
    if (parent_) {
        LTRACEF("removing ourself from our parent %p\n", parent_.get());

        // conditionally grab our shared lock with the parent, but only if it's
        // not already held; some destruction paths may tear down the object
        // with the parent's lock held
        const bool need_lock = !lock_.lock().IsHeld();
        if (need_lock) {
            Guard<fbl::Mutex> guard{&lock_};
            parent_->RemoveChildLocked(this);
        } else {
            parent_->RemoveChildLocked(this);
        }
    }

    DEBUG_ASSERT(mapping_list_.is_empty());
    DEBUG_ASSERT(children_list_.is_empty());

    // Remove ourself from the global VMO list.
    {
        Guard<fbl::Mutex> guard{AllVmosLock::Get()};
        DEBUG_ASSERT(global_list_state_.InContainer());
        all_vmos_.erase(*this);
    }
}

void VmObject::get_name(char* out_name, size_t len) const {
    canary_.Assert();
    name_.get(len, out_name);
}

zx_status_t VmObject::set_name(const char* name, size_t len) {
    canary_.Assert();
    return name_.set(name, len);
}

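// Records the id userspace knows this VMO by. It may only be set once,
// while the stored id is still zero (asserted below).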
void VmObject::set_user_id(uint64_t user_id) {
    canary_.Assert();
    Guard<fbl::Mutex> guard{&lock_};
    DEBUG_ASSERT(user_id_ == 0);
    user_id_ = user_id;
}

uint64_t VmObject::user_id() const {
    canary_.Assert();
    Guard<fbl::Mutex> guard{&lock_};
    return user_id_;
}

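// Returns the parent's user id, or 0 if there is no parent. Note the
// two-step locking below: a clone shares its parent's lock, so calling
// parent->user_id() while still holding our own lock would re-acquire the
// same mutex.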
uint64_t VmObject::parent_user_id() const {
    canary_.Assert();
    // Don't hold both our lock and our parent's lock at the same time, because
    // it's probably the same lock.
    fbl::RefPtr<VmObject> parent;
    {
        Guard<fbl::Mutex> guard{&lock_};
        if (parent_ == nullptr) {
            return 0u;
        }
        parent = parent_;
    }
    return parent->user_id();
}

bool VmObject::is_cow_clone() const {
    canary_.Assert();
    Guard<fbl::Mutex> guard{&lock_};
    return parent_ != nullptr;
}

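// Mapping list bookkeeping. The *Locked suffix means callers must already
// hold the VMO's lock; this is asserted on entry.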
void VmObject::AddMappingLocked(VmMapping* r) {
    canary_.Assert();
    DEBUG_ASSERT(lock_.lock().IsHeld());
    mapping_list_.push_front(r);
    mapping_list_len_++;
}

void VmObject::RemoveMappingLocked(VmMapping* r) {
    canary_.Assert();
    DEBUG_ASSERT(lock_.lock().IsHeld());
    mapping_list_.erase(*r);
    DEBUG_ASSERT(mapping_list_len_ > 0);
    mapping_list_len_--;
}

uint32_t VmObject::num_mappings() const {
    canary_.Assert();
    Guard<fbl::Mutex> guard{&lock_};
    return mapping_list_len_;
}

bool VmObject::IsMappedByUser() const {
    canary_.Assert();
    Guard<fbl::Mutex> guard{&lock_};
    for (const auto& m : mapping_list_) {
        if (m.aspace()->is_user()) {
            return true;
        }
    }
    return false;
}

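// Estimates the number of distinct address spaces this VMO is mapped into.
// The count is exact while the unique aspaces fit in the fixed 64-entry
// buffer below; beyond that the remainder is extrapolated from the
// unique-aspace ratio seen so far, so the result is approximate.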
uint32_t VmObject::share_count() const {
    canary_.Assert();

    Guard<fbl::Mutex> guard{&lock_};
    if (mapping_list_len_ < 2) {
        return 1;
    }

    // Find the number of unique VmAspaces that we're mapped into.
    // Use this buffer to hold VmAspace pointers.
    static constexpr int kAspaceBuckets = 64;
    uintptr_t aspaces[kAspaceBuckets];
    unsigned int num_mappings = 0; // Number of mappings we've visited
    unsigned int num_aspaces = 0;  // Unique aspaces we've seen
    for (const auto& m : mapping_list_) {
        uintptr_t as = reinterpret_cast<uintptr_t>(m.aspace().get());
        // Simple O(n^2) should be fine.
        for (unsigned int i = 0; i < num_aspaces; i++) {
            if (aspaces[i] == as) {
                goto found;
            }
        }
        if (num_aspaces < kAspaceBuckets) {
            aspaces[num_aspaces++] = as;
        } else {
            // Maxed out the buffer. Estimate the remaining number of aspaces.
            num_aspaces +=
                // The number of mappings we haven't visited yet
                (mapping_list_len_ - num_mappings)
                // Scaled down by the ratio of unique aspaces we've seen so far.
                * num_aspaces / num_mappings;
            break;
        }
    found:
        num_mappings++;
    }
    DEBUG_ASSERT_MSG(num_aspaces <= mapping_list_len_,
                     "num_aspaces %u should be <= mapping_list_len_ %" PRIu32,
                     num_aspaces, mapping_list_len_);

    // TODO: Cache this value as long as the set of mappings doesn't change.
    // Or calculate it when adding/removing a new mapping under an aspace
    // not in the list.
    return num_aspaces;
}

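// Child list bookkeeping and observer signaling: the registered
// VmObjectChildObserver is notified only on the 0 -> 1 (OnOneChild) and
// 1 -> 0 (OnZeroChild) transitions of the child count.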
void VmObject::SetChildObserver(VmObjectChildObserver* child_observer) {
    Guard<fbl::Mutex> guard{&lock_};
    child_observer_ = child_observer;
}

void VmObject::AddChildLocked(VmObject* o) {
    canary_.Assert();
    DEBUG_ASSERT(lock_.lock().IsHeld());
    children_list_.push_front(o);
    children_list_len_++;

    // Signal the dispatcher that there are child VMOs.
    if ((child_observer_ != nullptr) && (children_list_len_ == 1)) {
        child_observer_->OnOneChild();
    }
}

void VmObject::RemoveChildLocked(VmObject* o) {
    canary_.Assert();
    DEBUG_ASSERT(lock_.lock().IsHeld());
    children_list_.erase(*o);
    DEBUG_ASSERT(children_list_len_ > 0);
    children_list_len_--;

    // Signal the dispatcher that there are no more child VMOs.
    if ((child_observer_ != nullptr) && (children_list_len_ == 0)) {
        child_observer_->OnZeroChild();
    }
}

uint32_t VmObject::num_children() const {
    canary_.Assert();
    Guard<fbl::Mutex> guard{&lock_};
    return children_list_len_;
}

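// Called (with the lock held) after the backing pages for the byte range
// [offset, offset + len) change. Unmaps the page-aligned covering range
// from every mapping of this VMO and forwards the unaligned range to all
// children so they can do the same.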
void VmObject::RangeChangeUpdateLocked(uint64_t offset, uint64_t len) {
    canary_.Assert();
    DEBUG_ASSERT(lock_.lock().IsHeld());

    // offsets for vmos needn't be aligned, but vmars use aligned offsets
    const uint64_t aligned_offset = ROUNDDOWN(offset, PAGE_SIZE);
    const uint64_t aligned_len = ROUNDUP(offset + len, PAGE_SIZE) - aligned_offset;

    // other mappings may have covered this offset into the vmo, so unmap those ranges
    for (auto& m : mapping_list_) {
        m.UnmapVmoRangeLocked(aligned_offset, aligned_len);
    }

    // inform all our children of this as well, so they can inform their mappings
    for (auto& child : children_list_) {
        child.RangeChangeUpdateFromParentLocked(offset, len);
    }
}

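// Kernel console debug command. It trusts a raw VmObject pointer supplied
// as an argument, so it is only suitable for interactive debugging. A
// typical session (the address below is hypothetical) might look like:
//
//   vm_object dump 0xffffff8012345678
//   vm_object dump_pages 0xffffff8012345678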
static int cmd_vm_object(int argc, const cmd_args* argv, uint32_t flags) {
    if (argc < 2) {
    notenoughargs:
        printf("not enough arguments\n");
    usage:
        printf("usage:\n");
        printf("%s dump <address>\n", argv[0].str);
        printf("%s dump_pages <address>\n", argv[0].str);
        return ZX_ERR_INTERNAL;
    }

    if (!strcmp(argv[1].str, "dump")) {
        // Need the subcommand plus an address before touching argv[2].
        if (argc < 3) {
            goto notenoughargs;
        }

        VmObject* o = reinterpret_cast<VmObject*>(argv[2].u);

        o->Dump(0, false);
    } else if (!strcmp(argv[1].str, "dump_pages")) {
        if (argc < 3) {
            goto notenoughargs;
        }

        VmObject* o = reinterpret_cast<VmObject*>(argv[2].u);

        o->Dump(0, true);
    } else {
        printf("unknown command\n");
        goto usage;
    }

    return ZX_OK;
}

STATIC_COMMAND_START
#if LK_DEBUGLEVEL > 0
STATIC_COMMAND("vm_object", "vm object debug commands", &cmd_vm_object)
#endif
STATIC_COMMAND_END(vm_object);