// Copyright 2016 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <vm/vm_address_region.h>

#include "vm_priv.h"
#include <assert.h>
#include <err.h>
#include <fbl/alloc_checker.h>
#include <fbl/auto_call.h>
#include <inttypes.h>
#include <ktl/move.h>
#include <trace.h>
#include <vm/fault.h>
#include <vm/vm.h>
#include <vm/vm_aspace.h>
#include <vm/vm_object.h>
#include <zircon/types.h>

#define LOCAL_TRACE MAX(VM_GLOBAL_TRACE, 0)

VmMapping::VmMapping(VmAddressRegion& parent, vaddr_t base, size_t size, uint32_t vmar_flags,
                     fbl::RefPtr<VmObject> vmo, uint64_t vmo_offset, uint arch_mmu_flags)
    : VmAddressRegionOrMapping(base, size, vmar_flags,
                               parent.aspace_.get(), &parent),
      object_(ktl::move(vmo)), object_offset_(vmo_offset), arch_mmu_flags_(arch_mmu_flags) {

    LTRACEF("%p aspace %p base %#" PRIxPTR " size %#zx offset %#" PRIx64 "\n",
            this, aspace_.get(), base_, size_, vmo_offset);
}

VmMapping::~VmMapping() {
    canary_.Assert();
    LTRACEF("%p aspace %p base %#" PRIxPTR " size %#zx\n",
            this, aspace_.get(), base_, size_);
}

size_t VmMapping::AllocatedPagesLocked() const {
    canary_.Assert();
    DEBUG_ASSERT(aspace_->lock()->lock().IsHeld());

    if (state_ != LifeCycleState::ALIVE) {
        return 0;
    }
    return object_->AllocatedPagesInRange(object_offset_, size_);
}

void VmMapping::Dump(uint depth, bool verbose) const {
    canary_.Assert();
    for (uint i = 0; i < depth; ++i) {
        printf("  ");
    }
    char vmo_name[32];
    object_->get_name(vmo_name, sizeof(vmo_name));
    printf("map %p [%#" PRIxPTR " %#" PRIxPTR "] sz %#zx mmufl %#x\n",
           this, base_, base_ + size_ - 1, size_, arch_mmu_flags_);
    for (uint i = 0; i < depth + 1; ++i) {
        printf("  ");
    }
    printf("vmo %p/k%" PRIu64 " off %#" PRIx64
           " pages %zu ref %d '%s'\n",
           object_.get(), object_->user_id(), object_offset_,
           // TODO(dbort): Use AllocatedPagesLocked() once Dump() is locked
           // consistently. Currently, Dump() may be called without the aspace
           // lock.
           object_->AllocatedPagesInRange(object_offset_, size_),
           ref_count_debug(), vmo_name);
    if (verbose) {
        object_->Dump(depth + 1, false);
    }
}

zx_status_t VmMapping::Protect(vaddr_t base, size_t size, uint new_arch_mmu_flags) {
    canary_.Assert();
    LTRACEF("%p %#" PRIxPTR " %#x %#x\n", this, base_, flags_, new_arch_mmu_flags);

    if (!IS_PAGE_ALIGNED(base)) {
        return ZX_ERR_INVALID_ARGS;
    }

    size = ROUNDUP(size, PAGE_SIZE);

    Guard<fbl::Mutex> guard{aspace_->lock()};
    if (state_ != LifeCycleState::ALIVE) {
        return ZX_ERR_BAD_STATE;
    }

    if (size == 0 || !is_in_range(base, size)) {
        return ZX_ERR_INVALID_ARGS;
    }

    return ProtectLocked(base, size, new_arch_mmu_flags);
}
namespace {

// Implementation helper for ProtectLocked: protects the range if the new flags
// include any access permission, and unmaps it otherwise.
zx_status_t ProtectOrUnmap(const fbl::RefPtr<VmAspace>& aspace, vaddr_t base, size_t size,
                           uint new_arch_mmu_flags) {
    if (new_arch_mmu_flags & ARCH_MMU_FLAG_PERM_RWX_MASK) {
        return aspace->arch_aspace().Protect(base, size / PAGE_SIZE, new_arch_mmu_flags);
    } else {
        return aspace->arch_aspace().Unmap(base, size / PAGE_SIZE, nullptr);
    }
}

} // namespace

zx_status_t VmMapping::ProtectLocked(vaddr_t base, size_t size, uint new_arch_mmu_flags) {
    DEBUG_ASSERT(aspace_->lock()->lock().IsHeld());
    DEBUG_ASSERT(size != 0 && IS_PAGE_ALIGNED(base) && IS_PAGE_ALIGNED(size));

    // Do not allow changing caching
    if (new_arch_mmu_flags & ARCH_MMU_FLAG_CACHE_MASK) {
        return ZX_ERR_INVALID_ARGS;
    }

    if (!is_valid_mapping_flags(new_arch_mmu_flags)) {
        return ZX_ERR_ACCESS_DENIED;
    }

    DEBUG_ASSERT(object_);
    // grab the lock for the vmo
    Guard<fbl::Mutex> guard{object_->lock()};

    // Persist our current caching mode
    new_arch_mmu_flags |= (arch_mmu_flags_ & ARCH_MMU_FLAG_CACHE_MASK);

    // If we're not actually changing permissions, return fast.
    if (new_arch_mmu_flags == arch_mmu_flags_) {
        return ZX_OK;
    }

    // TODO(teisenbe): deal with error mapping on arch_mmu_protect fail

    // If we're changing the whole mapping, just make the change.
    if (base_ == base && size_ == size) {
        zx_status_t status = ProtectOrUnmap(aspace_, base, size, new_arch_mmu_flags);
        LTRACEF("arch_mmu_protect returns %d\n", status);
        arch_mmu_flags_ = new_arch_mmu_flags;
        return ZX_OK;
    }

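    // Otherwise the new flags apply to only part of this mapping. Split it so
    // that this mapping keeps the piece starting at base_ and new sibling
    // mappings cover the remainder.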
    // Handle changing from the left
    if (base_ == base) {
        // Create a new mapping for the right half (has old perms)
        fbl::AllocChecker ac;
        fbl::RefPtr<VmMapping> mapping(fbl::AdoptRef(
            new (&ac) VmMapping(*parent_, base + size, size_ - size, flags_,
                                object_, object_offset_ + size, arch_mmu_flags_)));
        if (!ac.check()) {
            return ZX_ERR_NO_MEMORY;
        }

        zx_status_t status = ProtectOrUnmap(aspace_, base, size, new_arch_mmu_flags);
        LTRACEF("arch_mmu_protect returns %d\n", status);
        arch_mmu_flags_ = new_arch_mmu_flags;

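        // Shrink this mapping down to the newly protected left piece; the new
        // mapping covers the rest with the old permissions.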
        size_ = size;
        mapping->ActivateLocked();
        return ZX_OK;
    }

    // Handle changing from the right
    if (base_ + size_ == base + size) {
        // Create a new mapping for the right half (has new perms)
        fbl::AllocChecker ac;

        fbl::RefPtr<VmMapping> mapping(fbl::AdoptRef(
            new (&ac) VmMapping(*parent_, base, size, flags_,
                                object_, object_offset_ + base - base_,
                                new_arch_mmu_flags)));
        if (!ac.check()) {
            return ZX_ERR_NO_MEMORY;
        }

        zx_status_t status = ProtectOrUnmap(aspace_, base, size, new_arch_mmu_flags);
        LTRACEF("arch_mmu_protect returns %d\n", status);

        size_ -= size;
        mapping->ActivateLocked();
        return ZX_OK;
    }

    // We're changing permissions in the center, so we need to create two new mappings
    const size_t left_size = base - base_;
    const size_t right_size = (base_ + size_) - (base + size);
    const uint64_t center_vmo_offset = object_offset_ + base - base_;
    const uint64_t right_vmo_offset = center_vmo_offset + size;
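    // This mapping will be trimmed down to the left piece, which keeps the old
    // flags; the center piece gets the new flags and the right piece keeps the
    // old ones.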

    fbl::AllocChecker ac;
    fbl::RefPtr<VmMapping> center_mapping(fbl::AdoptRef(
        new (&ac) VmMapping(*parent_, base, size, flags_,
                            object_, center_vmo_offset, new_arch_mmu_flags)));
    if (!ac.check()) {
        return ZX_ERR_NO_MEMORY;
    }
    fbl::RefPtr<VmMapping> right_mapping(fbl::AdoptRef(
        new (&ac) VmMapping(*parent_, base + size, right_size, flags_,
                            object_, right_vmo_offset, arch_mmu_flags_)));
    if (!ac.check()) {
        return ZX_ERR_NO_MEMORY;
    }

    zx_status_t status = ProtectOrUnmap(aspace_, base, size, new_arch_mmu_flags);
    LTRACEF("arch_mmu_protect returns %d\n", status);

    // Turn us into the left half
    size_ = left_size;

    center_mapping->ActivateLocked();
    right_mapping->ActivateLocked();
    return ZX_OK;
}

zx_status_t VmMapping::Unmap(vaddr_t base, size_t size) {
    LTRACEF("%p %#" PRIxPTR " %zu\n", this, base, size);

    if (!IS_PAGE_ALIGNED(base)) {
        return ZX_ERR_INVALID_ARGS;
    }

    size = ROUNDUP(size, PAGE_SIZE);

    fbl::RefPtr<VmAspace> aspace(aspace_);
    if (!aspace) {
        return ZX_ERR_BAD_STATE;
    }

    Guard<fbl::Mutex> guard{aspace_->lock()};
    if (state_ != LifeCycleState::ALIVE) {
        return ZX_ERR_BAD_STATE;
    }

    if (size == 0 || !is_in_range(base, size)) {
        return ZX_ERR_INVALID_ARGS;
    }

    // If we're unmapping everything, destroy this mapping
    if (base == base_ && size == size_) {
        return DestroyLocked();
    }

    return UnmapLocked(base, size);
}

zx_status_t VmMapping::UnmapLocked(vaddr_t base, size_t size) {
    canary_.Assert();
    DEBUG_ASSERT(aspace_->lock()->lock().IsHeld());
    DEBUG_ASSERT(size != 0 && IS_PAGE_ALIGNED(size) && IS_PAGE_ALIGNED(base));
    DEBUG_ASSERT(base >= base_ && base - base_ < size_);
    DEBUG_ASSERT(size_ - (base - base_) >= size);
    DEBUG_ASSERT(parent_);

    if (state_ != LifeCycleState::ALIVE) {
        return ZX_ERR_BAD_STATE;
    }

    // If our parent VMAR is DEAD, then we can only unmap everything.
    DEBUG_ASSERT(parent_->state_ != LifeCycleState::DEAD || (base == base_ && size == size_));

    LTRACEF("%p\n", this);

    // grab the lock for the vmo
    DEBUG_ASSERT(object_);
    Guard<fbl::Mutex> guard{object_->lock()};

    // Check if unmapping from one of the ends
    if (base_ == base || base + size == base_ + size_) {
        LTRACEF("unmapping base %#lx size %#zx\n", base, size);
        zx_status_t status = aspace_->arch_aspace().Unmap(base, size / PAGE_SIZE, nullptr);
        if (status != ZX_OK) {
            return status;
        }

        if (base_ == base && size_ != size) {
            // We need to remove ourselves from the tree before updating base_,
            // since base_ is the tree key.
            fbl::RefPtr<VmAddressRegionOrMapping> ref(parent_->subregions_.erase(*this));
            base_ += size;
            object_offset_ += size;
            parent_->subregions_.insert(ktl::move(ref));
        }
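        // In either case, trim the unmapped pages off of this mapping.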
        size_ -= size;

        return ZX_OK;
    }

    // We're unmapping from the center, so we need to split the mapping
    DEBUG_ASSERT(parent_->state_ == LifeCycleState::ALIVE);

    const uint64_t vmo_offset = object_offset_ + (base + size) - base_;
    const vaddr_t new_base = base + size;
    const size_t new_size = (base_ + size_) - new_base;
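    // The new mapping covers everything to the right of the unmapped hole;
    // this mapping will be trimmed down to the piece on the left.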

    fbl::AllocChecker ac;
    fbl::RefPtr<VmMapping> mapping(fbl::AdoptRef(
        new (&ac) VmMapping(*parent_, new_base, new_size, flags_, object_, vmo_offset,
                            arch_mmu_flags_)));
    if (!ac.check()) {
        return ZX_ERR_NO_MEMORY;
    }

    // Unmap the middle segment
    LTRACEF("unmapping base %#lx size %#zx\n", base, size);
    zx_status_t status = aspace_->arch_aspace().Unmap(base, size / PAGE_SIZE, nullptr);
    if (status != ZX_OK) {
        return status;
    }

    // Turn us into the left half
    size_ = base - base_;
    mapping->ActivateLocked();
    return ZX_OK;
}

zx_status_t VmMapping::UnmapVmoRangeLocked(uint64_t offset, uint64_t len) const {
    canary_.Assert();

    LTRACEF("region %p obj_offset %#" PRIx64 " size %zu, offset %#" PRIx64 " len %#" PRIx64 "\n",
            this, object_offset_, size_, offset, len);

    // NOTE: This must be called with the vmo lock held, but it doesn't need to
    // take the address space lock, since it will not manipulate its location in
    // the vmar tree. However, the mapping must be held in the ALIVE state across
    // this call.
    //
    // This avoids a race with DestroyLocked(), which removes us from the VMO's
    // mapping list with the VMO lock held before dropping this state to DEAD.
    // The VMO can't call back into us once we're off its list.
    DEBUG_ASSERT(state_ == LifeCycleState::ALIVE);

    DEBUG_ASSERT(object_);
    DEBUG_ASSERT(object_->lock()->lock().IsHeld());

    DEBUG_ASSERT(IS_PAGE_ALIGNED(offset));
    DEBUG_ASSERT(IS_PAGE_ALIGNED(len));
    DEBUG_ASSERT(len > 0);

    // If we're currently faulting and are responsible for the vmo code calling
    // back into us, detect the recursion and abort here.
    // The specific path we're avoiding is the VMO calling back into us via
    // UnmapVmoRangeLocked() during vmo->GetPageLocked(). If this flag is set we
    // short circuit the unmap operation so that we don't do extra work.
    if (unlikely(currently_faulting_)) {
        LTRACEF("recursing to ourself, abort\n");
        return ZX_OK;
    }

    if (len == 0) {
        return ZX_OK;
    }

    // compute the intersection of the passed in vmo range and our mapping
    uint64_t offset_new;
    uint64_t len_new;
    if (!GetIntersect(object_offset_, static_cast<uint64_t>(size_), offset, len,
                      &offset_new, &len_new)) {
        return ZX_OK;
    }

    DEBUG_ASSERT(len_new > 0 && len_new <= SIZE_MAX);
    DEBUG_ASSERT(offset_new >= object_offset_);

    LTRACEF("intersection offset %#" PRIx64 ", len %#" PRIx64 "\n", offset_new, len_new);

    // make sure the base + offset is within our address space
    // should be, according to the range stored in base_ + size_
    vaddr_t unmap_base;
    bool overflowed = add_overflow(base_, offset_new - object_offset_, &unmap_base);
    ASSERT(!overflowed);

    // make sure we're only unmapping within our window
    ASSERT(unmap_base >= base_);
    ASSERT((unmap_base + len_new - 1) <= (base_ + size_ - 1));

    LTRACEF("going to unmap %#" PRIxPTR ", len %#" PRIx64 " aspace %p\n",
            unmap_base, len_new, aspace_.get());

    zx_status_t status = aspace_->arch_aspace().Unmap(unmap_base,
                                                      static_cast<size_t>(len_new) / PAGE_SIZE, nullptr);
    if (status != ZX_OK) {
        return status;
    }

    return ZX_OK;
}

namespace {

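// Batches together a run of pages at contiguous virtual addresses so they can
// be handed to the arch address space in a single Map() call. The pending run
// is flushed whenever it is broken, the buffer fills up, or Flush() is called.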
class VmMappingCoalescer {
public:
    VmMappingCoalescer(VmMapping* mapping, vaddr_t base);
    ~VmMappingCoalescer();

    // Add a page to the mapping run.  If this fails, the VmMappingCoalescer is
    // no longer valid.
    zx_status_t Append(vaddr_t vaddr, paddr_t paddr) {
        DEBUG_ASSERT(!aborted_);
        // If this isn't the expected vaddr, flush the run we have first.
        if (count_ >= fbl::count_of(phys_) || vaddr != base_ + count_ * PAGE_SIZE) {
            zx_status_t status = Flush();
            if (status != ZX_OK) {
                return status;
            }
            base_ = vaddr;
        }
        phys_[count_] = paddr;
        ++count_;
        return ZX_OK;
    }

    // Submit any outstanding mappings to the MMU.  If this fails, the
    // VmMappingCoalescer is no longer valid.
    zx_status_t Flush();

    // Drop the current outstanding mappings without sending them to the MMU.
    // After this call, the VmMappingCoalescer is no longer valid.
    void Abort() {
        aborted_ = true;
    }

private:
    DISALLOW_COPY_ASSIGN_AND_MOVE(VmMappingCoalescer);

    VmMapping* mapping_;
    vaddr_t base_;
    paddr_t phys_[16];
    size_t count_;
    bool aborted_;
};

VmMappingCoalescer::VmMappingCoalescer(VmMapping* mapping, vaddr_t base)
    : mapping_(mapping), base_(base), count_(0), aborted_(false) {}

VmMappingCoalescer::~VmMappingCoalescer() {
    // Make sure we've flushed or aborted
    DEBUG_ASSERT(count_ == 0 || aborted_);
}

zx_status_t VmMappingCoalescer::Flush() {
    if (count_ == 0) {
        return ZX_OK;
    }

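    // A mapping with no access permissions has nothing to install in the MMU;
    // in that case just advance the window past the accumulated pages.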
    uint flags = mapping_->arch_mmu_flags();
    if (flags & ARCH_MMU_FLAG_PERM_RWX_MASK) {
        size_t mapped;
        zx_status_t ret = mapping_->aspace()->arch_aspace().Map(base_, phys_, count_, flags,
                                                                &mapped);
        if (ret != ZX_OK) {
            TRACEF("error %d mapping %zu pages starting at va %#" PRIxPTR "\n", ret, count_, base_);
            aborted_ = true;
            return ret;
        }
        DEBUG_ASSERT(mapped == count_);
    }
    base_ += count_ * PAGE_SIZE;
    count_ = 0;
    return ZX_OK;
}

} // namespace

zx_status_t VmMapping::MapRange(size_t offset, size_t len, bool commit) {
    canary_.Assert();

    len = ROUNDUP(len, PAGE_SIZE);
    if (len == 0) {
        return ZX_ERR_INVALID_ARGS;
    }

    Guard<fbl::Mutex> aspace_guard{aspace_->lock()};
    if (state_ != LifeCycleState::ALIVE) {
        return ZX_ERR_BAD_STATE;
    }

    LTRACEF("region %p, offset %#zx, size %#zx, commit %d\n", this, offset, len, commit);

    DEBUG_ASSERT(object_);
    if (!IS_PAGE_ALIGNED(offset) || !is_in_range(base_ + offset, len)) {
        return ZX_ERR_INVALID_ARGS;
    }

    // precompute the flags we'll pass GetPageLocked
    // if committing, then tell it to soft fault in a page
    uint pf_flags = VMM_PF_FLAG_WRITE;
    if (commit) {
        pf_flags |= VMM_PF_FLAG_SW_FAULT;
    }

    // grab the lock for the vmo
    Guard<fbl::Mutex> object_guard{object_->lock()};

    // set the currently faulting flag for any recursive calls the vmo may make back into us.
    DEBUG_ASSERT(!currently_faulting_);
    currently_faulting_ = true;
    auto ac = fbl::MakeAutoCall([&]() { currently_faulting_ = false; });

    // iterate through the range, grabbing a page from the underlying object and
    // mapping it in
    size_t o;
    VmMappingCoalescer coalescer(this, base_ + offset);
    for (o = offset; o < offset + len; o += PAGE_SIZE) {
        uint64_t vmo_offset = object_offset_ + o;

        zx_status_t status;
        paddr_t pa;
        status = object_->GetPageLocked(vmo_offset, pf_flags, nullptr, nullptr, &pa);
        if (status != ZX_OK) {
            // no page to map
            if (commit) {
                // fail when we can't commit every requested page
                coalescer.Abort();
                return status;
            }

            // skip ahead
            continue;
        }

        vaddr_t va = base_ + o;
        LTRACEF_LEVEL(2, "mapping pa %#" PRIxPTR " to va %#" PRIxPTR "\n", pa, va);
        status = coalescer.Append(va, pa);
        if (status != ZX_OK) {
            return status;
        }
    }
    return coalescer.Flush();
}

zx_status_t VmMapping::DecommitRange(size_t offset, size_t len) {
    canary_.Assert();
    LTRACEF("%p [%#zx+%#zx], offset %#zx, len %#zx\n",
            this, base_, size_, offset, len);

    Guard<fbl::Mutex> guard{aspace_->lock()};
    if (state_ != LifeCycleState::ALIVE) {
        return ZX_ERR_BAD_STATE;
    }
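    // Reject ranges whose end overflows as well as ranges extending past the mapping.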
    if (offset + len < offset || offset + len > size_) {
        return ZX_ERR_OUT_OF_RANGE;
    }
    // VmObject::DecommitRange will typically call back into our instance's
    // VmMapping::UnmapVmoRangeLocked.
    return object_->DecommitRange(object_offset_ + offset, len);
}

zx_status_t VmMapping::DestroyLocked() {
    canary_.Assert();
    DEBUG_ASSERT(aspace_->lock()->lock().IsHeld());
    LTRACEF("%p\n", this);

    // Take a reference to ourself, so that we do not get destructed after
    // dropping our last reference in this method (e.g. when calling
    // subregions_.erase below).
    fbl::RefPtr<VmMapping> self(this);

    // The vDSO code mapping can never be unmapped, not even
    // by VMAR destruction (except for process exit, of course).
    // TODO(mcgrathr): Turn this into a policy-driven process-fatal case
    // at some point.  teisenbe@ wants to eventually make zx_vmar_destroy
    // never fail.
    if (aspace_->vdso_code_mapping_ == self) {
        return ZX_ERR_ACCESS_DENIED;
    }

    // unmap our entire range
    zx_status_t status = UnmapLocked(base_, size_);
    if (status != ZX_OK) {
        return status;
    }

    // Unmap should have reset our size to 0
    DEBUG_ASSERT(size_ == 0);

    // grab the object lock and remove ourself from its list
    {
        Guard<fbl::Mutex> guard{object_->lock()};
        object_->RemoveMappingLocked(this);
    }

    // detach from any object we have mapped
    object_.reset();

    // Detach the now dead region from the parent
    if (parent_) {
        DEBUG_ASSERT(subregion_list_node_.InContainer());
        parent_->RemoveSubregion(this);
    }

    // mark ourself as dead
    parent_ = nullptr;
    state_ = LifeCycleState::DEAD;
    return ZX_OK;
}

zx_status_t VmMapping::PageFault(vaddr_t va, const uint pf_flags) {
    canary_.Assert();
    DEBUG_ASSERT(aspace_->lock()->lock().IsHeld());

    DEBUG_ASSERT(va >= base_ && va <= base_ + size_ - 1);

    va = ROUNDDOWN(va, PAGE_SIZE);
    uint64_t vmo_offset = va - base_ + object_offset_;

    __UNUSED char pf_string[5];
    LTRACEF("%p va %#" PRIxPTR " vmo_offset %#" PRIx64 ", pf_flags %#x (%s)\n",
            this, va, vmo_offset, pf_flags,
            vmm_pf_flags_to_string(pf_flags, pf_string));

    // make sure we have permission to continue
    if ((pf_flags & VMM_PF_FLAG_USER) && !(arch_mmu_flags_ & ARCH_MMU_FLAG_PERM_USER)) {
        // user page fault on non user mapped region
        LTRACEF("permission failure: user fault on non user region\n");
        return ZX_ERR_ACCESS_DENIED;
    }
    if ((pf_flags & VMM_PF_FLAG_WRITE) && !(arch_mmu_flags_ & ARCH_MMU_FLAG_PERM_WRITE)) {
        // write to a non-writeable region
        LTRACEF("permission failure: write fault on non-writable region\n");
        return ZX_ERR_ACCESS_DENIED;
    }
    if (!(pf_flags & VMM_PF_FLAG_WRITE) && !(arch_mmu_flags_ & ARCH_MMU_FLAG_PERM_READ)) {
        // read from a non-readable region
        LTRACEF("permission failure: read fault on non-readable region\n");
        return ZX_ERR_ACCESS_DENIED;
    }
    if ((pf_flags & VMM_PF_FLAG_INSTRUCTION) && !(arch_mmu_flags_ & ARCH_MMU_FLAG_PERM_EXECUTE)) {
        // instruction fetch from a no execute region
        LTRACEF("permission failure: execute fault on no execute region\n");
        return ZX_ERR_ACCESS_DENIED;
    }

    // grab the lock for the vmo
    Guard<fbl::Mutex> guard{object_->lock()};

    // set the currently faulting flag for any recursive calls the vmo may make back into us
    // The specific path we're avoiding is if the VMO calls back into us during vmo->GetPageLocked()
    // via UnmapVmoRangeLocked(). Since we're responsible for that page, signal to ourself to skip
    // the unmap operation.
    DEBUG_ASSERT(!currently_faulting_);
    currently_faulting_ = true;
    auto ac = fbl::MakeAutoCall([&]() { currently_faulting_ = false; });

    // fault in or grab an existing page
    paddr_t new_pa;
    vm_page_t* page;
    zx_status_t status = object_->GetPageLocked(vmo_offset, pf_flags, nullptr, &page, &new_pa);
    if (status != ZX_OK) {
        // TODO(cpu): This trace was originally TRACEF() always on, but it fires if the
        // VMO was resized, rather than just when the system is running out of memory.
        LTRACEF("ERROR: failed to fault in or grab existing page\n");
        LTRACEF("%p vmo_offset %#" PRIx64 ", pf_flags %#x\n", this, vmo_offset, pf_flags);
        return status;
    }

    // if we read faulted, make sure we map or modify the page without any write permissions
    // this ensures we will fault again if a write is attempted so we can potentially
    // replace this page with a copy or a new one
    uint mmu_flags = arch_mmu_flags_;
    if (!(pf_flags & VMM_PF_FLAG_WRITE)) {
        // we read faulted, so only map with read permissions
        mmu_flags &= ~ARCH_MMU_FLAG_PERM_WRITE;
    }

    // see if something is mapped here now
    // this may happen if we are one of multiple threads racing on a single address
    uint page_flags;
    paddr_t pa;
    zx_status_t err = aspace_->arch_aspace().Query(va, &pa, &page_flags);
    if (err >= 0) {
        LTRACEF("queried va, page at pa %#" PRIxPTR ", flags %#x is already there\n", pa,
                page_flags);
        if (pa == new_pa) {
            // page was already mapped, are the permissions compatible?
            // test that the page is already mapped with either the region's mmu flags
            // or the flags that we're about to try to switch it to, which may be read-only
            if (page_flags == arch_mmu_flags_ || page_flags == mmu_flags) {
                return ZX_OK;
            }

            // assert that we're not accidentally marking the zero page writable
            DEBUG_ASSERT((pa != vm_get_zero_page_paddr()) || !(mmu_flags & ARCH_MMU_FLAG_PERM_WRITE));

            // same page, different permission
            status = aspace_->arch_aspace().Protect(va, 1, mmu_flags);
            if (status != ZX_OK) {
                TRACEF("failed to modify permissions on existing mapping\n");
                return ZX_ERR_NO_MEMORY;
            }
        } else {
            // some other page is mapped there already
            LTRACEF("thread %s faulted on va %#" PRIxPTR ", different page was present\n",
                    get_current_thread()->name, va);
            LTRACEF("old pa %#" PRIxPTR " new pa %#" PRIxPTR "\n", pa, new_pa);

            // assert that we're not accidentally mapping the zero page writable
            DEBUG_ASSERT((new_pa != vm_get_zero_page_paddr()) || !(mmu_flags & ARCH_MMU_FLAG_PERM_WRITE));

            // unmap the old one and put the new one in place
            status = aspace_->arch_aspace().Unmap(va, 1, nullptr);
            if (status != ZX_OK) {
                TRACEF("failed to remove old mapping before replacing\n");
                return ZX_ERR_NO_MEMORY;
            }

            size_t mapped;
            status = aspace_->arch_aspace().MapContiguous(va, new_pa, 1, mmu_flags, &mapped);
            if (status != ZX_OK) {
                TRACEF("failed to map replacement page\n");
                return ZX_ERR_NO_MEMORY;
            }
            DEBUG_ASSERT(mapped == 1);

            return ZX_OK;
        }
    } else {
        // nothing was mapped there before, map it now
        LTRACEF("mapping pa %#" PRIxPTR " to va %#" PRIxPTR " is zero page %d\n",
                new_pa, va, (new_pa == vm_get_zero_page_paddr()));

        // assert that we're not accidentally mapping the zero page writable
        DEBUG_ASSERT((new_pa != vm_get_zero_page_paddr()) || !(mmu_flags & ARCH_MMU_FLAG_PERM_WRITE));

        size_t mapped;
        status = aspace_->arch_aspace().MapContiguous(va, new_pa, 1, mmu_flags, &mapped);
        if (status != ZX_OK) {
            TRACEF("failed to map page\n");
            return ZX_ERR_NO_MEMORY;
        }
        DEBUG_ASSERT(mapped == 1);
    }

// TODO: figure out what to do with this
#if ARCH_ARM64
    if (pf_flags & VMM_PF_FLAG_GUEST) {
        // TODO(abdulla): Correctly handle page fault for guest.
    } else if (arch_mmu_flags_ & ARCH_MMU_FLAG_PERM_EXECUTE) {
        arch_sync_cache_range(va, PAGE_SIZE);
    }
#endif
    return ZX_OK;
}

// We disable thread safety analysis here because one of the common uses of this
// function is for splitting one mapping object into several that will be backed
// by the same VmObject.  In that case, object_->lock() gets aliased across all
// of the VmMappings involved, but we have no way of informing the analyzer of
// this, resulting in spurious warnings.  We could disable analysis on the
// splitting functions instead, but they are much more involved, and we'd rather
// have the analysis mostly functioning on those than on this much simpler
// function.
void VmMapping::ActivateLocked() TA_NO_THREAD_SAFETY_ANALYSIS {
    DEBUG_ASSERT(state_ == LifeCycleState::NOT_READY);
    DEBUG_ASSERT(aspace_->lock()->lock().IsHeld());
    DEBUG_ASSERT(object_->lock()->lock().IsHeld());
    DEBUG_ASSERT(parent_);

    state_ = LifeCycleState::ALIVE;
    object_->AddMappingLocked(this);
    parent_->subregions_.insert(fbl::RefPtr<VmAddressRegionOrMapping>(this));
}

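// The caller must already hold the aspace lock; this grabs the VMO lock and
// then finishes activation.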
void VmMapping::Activate() {
    Guard<fbl::Mutex> guard{object_->lock()};
    ActivateLocked();
}