// Copyright 2016 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#pragma once

#include <arch/aspace.h>
#include <arch/mmu.h>
#include <assert.h>
#include <fbl/canary.h>
#include <fbl/intrusive_double_list.h>
#include <fbl/intrusive_wavl_tree.h>
#include <fbl/macros.h>
#include <fbl/ref_counted.h>
#include <fbl/ref_ptr.h>
#include <kernel/lockdep.h>
#include <kernel/mutex.h>
#include <lib/crypto/prng.h>
#include <vm/arch_vm_aspace.h>
#include <vm/vm.h>
#include <vm/vm_address_region.h>
#include <zircon/types.h>

namespace hypervisor {
class GuestPhysicalAddressSpace;
} // namespace hypervisor

class VmObject;

class VmAspace : public fbl::DoublyLinkedListable<VmAspace*>, public fbl::RefCounted<VmAspace> {
public:
    // Create an address space of the type specified in |flags| with name |name|.
    //
    // Although reference counted, the returned VmAspace must be explicitly destroyed via Destroy.
    //
    // Returns null on failure (e.g. due to resource starvation).
    static fbl::RefPtr<VmAspace> Create(uint flags, const char* name);

    // Destroy this address space.
    //
    // Destroy does not free this object, but rather allows it to be freed when the last retaining
    // RefPtr is destroyed.
    zx_status_t Destroy();
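
    // Illustrative lifecycle sketch (not part of the API; names here are
    // examples only): create via the factory, use, then explicitly tear down
    // with Destroy(); the object itself is freed once the last RefPtr drops.
    //
    //   fbl::RefPtr<VmAspace> aspace = VmAspace::Create(VmAspace::TYPE_USER, "example");
    //   if (!aspace) {
    //       return ZX_ERR_NO_MEMORY;  // Create() returns null on resource starvation.
    //   }
    //   // ... map objects, attach to a thread, etc. ...
    //   zx_status_t status = aspace->Destroy();  // unmaps/tears down; freed with last ref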

    void Rename(const char* name);

    // flags
    static const uint32_t TYPE_USER = (0 << 0);
    static const uint32_t TYPE_KERNEL = (1 << 0);
    // You probably do not want to use LOW_KERNEL.  It is primarily
    // used for SMP bootstrap to allow mappings of very low memory using
    // the standard VMM subsystem.
    static const uint32_t TYPE_LOW_KERNEL = (2 << 0);
    static const uint32_t TYPE_GUEST_PHYS = (3 << 0);
    static const uint32_t TYPE_MASK = (3 << 0);
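
    // Sketch of decoding the type bits (mirrors is_user() below; |flags| here
    // stands for whatever was passed to Create()):
    //
    //   uint32_t type = flags & VmAspace::TYPE_MASK;
    //   bool is_guest = (type == VmAspace::TYPE_GUEST_PHYS);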

    // simple accessors
    vaddr_t base() const { return base_; }
    size_t size() const { return size_; }
    const char* name() const { return name_; }
    ArchVmAspace& arch_aspace() { return arch_aspace_; }
    bool is_user() const { return (flags_ & TYPE_MASK) == TYPE_USER; }
    bool is_aslr_enabled() const { return aslr_enabled_; }

    // Get the root VMAR (briefly acquires the aspace lock)
    fbl::RefPtr<VmAddressRegion> RootVmar();

    // Returns true if the address space has been destroyed.
    bool is_destroyed() const;

    // accessor for singleton kernel address space
    static VmAspace* kernel_aspace() { return kernel_aspace_; }

    // given an address, return either the kernel aspace or the current user one
    static VmAspace* vaddr_to_aspace(uintptr_t address);

    // set the per thread aspace pointer to this
    void AttachToThread(thread_t* t);

    void Dump(bool verbose) const;

    // Traverses the VM tree rooted at this node, in depth-first pre-order. If
    // any methods of |ve| return false, the traversal stops and this method
    // returns false. Returns true otherwise.
    bool EnumerateChildren(VmEnumerator* ve);
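
    // Sketch of a visitor (assumes the OnVmMapping hook that VmEnumerator in
    // vm/vm_address_region.h declares; returning true continues the
    // traversal):
    //
    //   class MappingCounter final : public VmEnumerator {
    //   public:
    //       bool OnVmMapping(const VmMapping* map, const VmAddressRegion* vmar,
    //                        uint depth) override {
    //           count++;
    //           return true;  // keep walking
    //       }
    //       size_t count = 0;
    //   };
    //
    //   MappingCounter counter;
    //   bool completed = aspace->EnumerateChildren(&counter);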

    // A collection of memory usage counts.
    struct vm_usage_t {
        // A count of pages covered by VmMapping ranges.
        size_t mapped_pages;

        // For the fields below, a page is considered committed if a VmMapping
        // covers a range of a VmObject that contains that page, and that page
        // has physical memory allocated to it.

        // A count of committed pages that are only mapped into this address
        // space.
        size_t private_pages;

        // A count of committed pages that are mapped into this and at least
        // one other address space.
        size_t shared_pages;

        // A number that estimates the fraction of shared_pages that this
        // address space is responsible for keeping alive.
        //
        // An estimate of:
        //   For each shared, committed page:
        //   scaled_shared_bytes +=
        //       PAGE_SIZE / (number of address spaces mapping this page)
        //
        // This number is strictly smaller than shared_pages * PAGE_SIZE.
        size_t scaled_shared_bytes;
    };

    // Counts memory usage under the VmAspace.
    zx_status_t GetMemoryUsage(vm_usage_t* usage);
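
    // Example reading the counters (a sketch): a committed page shared by four
    // address spaces contributes PAGE_SIZE / 4 to scaled_shared_bytes, so the
    // sum below approximates the bytes this aspace is responsible for keeping
    // alive.
    //
    //   VmAspace::vm_usage_t usage = {};
    //   if (aspace->GetMemoryUsage(&usage) == ZX_OK) {
    //       size_t charged_bytes = usage.private_pages * PAGE_SIZE + usage.scaled_shared_bytes;
    //   }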

    size_t AllocatedPages() const;

    // Convenience method for traversing the tree of VMARs to find the deepest
    // VMAR in the tree that includes *va*.
    fbl::RefPtr<VmAddressRegionOrMapping> FindRegion(vaddr_t va);

    // For region creation routines
    static const uint VMM_FLAG_VALLOC_SPECIFIC = (1u << 0); // allocate at specific address
    static const uint VMM_FLAG_COMMIT = (1u << 1);          // commit memory up front (no demand paging)

    // legacy functions to assist in the transition to VMARs
    // These all assume a flat VMAR structure in which all VMOs are mapped
    // as children of the root.  They will all assert if used on user aspaces.
    // TODO(teisenbe): remove uses of these in favor of new VMAR interfaces
    zx_status_t ReserveSpace(const char* name, size_t size, vaddr_t vaddr);
    zx_status_t AllocPhysical(const char* name, size_t size, void** ptr, uint8_t align_pow2,
                              paddr_t paddr, uint vmm_flags,
                              uint arch_mmu_flags);
    zx_status_t AllocContiguous(const char* name, size_t size, void** ptr, uint8_t align_pow2,
                                uint vmm_flags, uint arch_mmu_flags);
    zx_status_t Alloc(const char* name, size_t size, void** ptr, uint8_t align_pow2,
                      uint vmm_flags, uint arch_mmu_flags);
    zx_status_t FreeRegion(vaddr_t va);
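
    // Sketch of a legacy kernel-aspace allocation (illustrative only; the
    // alignment and ARCH_MMU_FLAG_* permission values here are assumptions
    // for the example, not requirements):
    //
    //   void* ptr = nullptr;
    //   zx_status_t status = VmAspace::kernel_aspace()->Alloc(
    //       "example", PAGE_SIZE, &ptr, /*align_pow2=*/PAGE_SIZE_SHIFT,
    //       VmAspace::VMM_FLAG_COMMIT,
    //       ARCH_MMU_FLAG_PERM_READ | ARCH_MMU_FLAG_PERM_WRITE);
    //   if (status == ZX_OK) {
    //       // ... use ptr ...
    //       VmAspace::kernel_aspace()->FreeRegion(reinterpret_cast<vaddr_t>(ptr));
    //   }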

    // Internal use function for mapping VMOs.  Do not use.  This is exposed in
    // the public API purely for tests.
    zx_status_t MapObjectInternal(fbl::RefPtr<VmObject> vmo, const char* name, uint64_t offset,
                                  size_t size, void** ptr, uint8_t align_pow2, uint vmm_flags,
                                  uint arch_mmu_flags);

    uintptr_t vdso_base_address() const;
    uintptr_t vdso_code_address() const;

protected:
    // Share the aspace lock with VmAddressRegion/VmMapping so they can serialize
    // changes to the aspace.
    friend class VmAddressRegionOrMapping;
    friend class VmAddressRegion;
    friend class VmMapping;
    Lock<fbl::Mutex>* lock() { return &lock_; }

    // Expose the PRNG for ASLR to VmAddressRegion
    crypto::PRNG& AslrPrng() {
        DEBUG_ASSERT(aslr_enabled_);
        return aslr_prng_;
    }

private:
    // can only be constructed via factory
    VmAspace(vaddr_t base, size_t size, uint32_t flags, const char* name);

    DISALLOW_COPY_ASSIGN_AND_MOVE(VmAspace);

    // private destructor that can only be used from the ref ptr
    ~VmAspace();
    friend fbl::RefPtr<VmAspace>;

    // complete initialization, may fail in OOM cases
    zx_status_t Init();

    void InitializeAslr();

    // internal page fault routine, friended to be only called by vmm_page_fault_handler
    zx_status_t PageFault(vaddr_t va, uint flags);
    friend zx_status_t vmm_page_fault_handler(vaddr_t va, uint flags);
    friend class hypervisor::GuestPhysicalAddressSpace;

    // magic
    fbl::Canary<fbl::magic("VMAS")> canary_;

    // members
    vaddr_t base_;
    size_t size_;
    uint32_t flags_;
    char name_[32];
    bool aspace_destroyed_ = false;
    bool aslr_enabled_ = false;

    mutable DECLARE_MUTEX(VmAspace) lock_;

    // root of virtual address space
    // Access to this reference is guarded by lock_.
    fbl::RefPtr<VmAddressRegion> root_vmar_;

    // PRNG used by VMARs for address choices.  We record the seed to enable
    // reproducible debugging.
    crypto::PRNG aslr_prng_;
    uint8_t aslr_seed_[crypto::PRNG::kMinEntropy];

    // architecturally specific part of the aspace
    ArchVmAspace arch_aspace_;

    fbl::RefPtr<VmMapping> vdso_code_mapping_;

    // initialization routines need to construct the singleton kernel address space
    // at particular points in the bootup process
    static void KernelAspaceInitPreHeap();
    static VmAspace* kernel_aspace_;
    friend void vm_init_preheap();
};

void DumpAllAspaces(bool verbose);

// hack to convert from vmm_aspace_t to VmAspace
static VmAspace* vmm_aspace_to_obj(vmm_aspace_t* aspace) {
    return reinterpret_cast<VmAspace*>(aspace);
}

static const VmAspace* vmm_aspace_to_obj(const vmm_aspace_t* aspace) {
    return reinterpret_cast<const VmAspace*>(aspace);
}