// Copyright 2016 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <vm/vm_aspace.h>

#include "vm_priv.h"

#include <assert.h>
#include <err.h>
#include <fbl/alloc_checker.h>
#include <fbl/auto_call.h>
#include <fbl/intrusive_double_list.h>
#include <fbl/mutex.h>
#include <inttypes.h>
#include <kernel/cmdline.h>
#include <kernel/thread.h>
#include <kernel/thread_lock.h>
#include <ktl/move.h>
#include <lib/crypto/global_prng.h>
#include <lib/crypto/prng.h>
#include <lib/vdso.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <vm/fault.h>
#include <vm/vm.h>
#include <vm/vm_address_region.h>
#include <vm/vm_object.h>
#include <vm/vm_object_paged.h>
#include <vm/vm_object_physical.h>
#include <zircon/types.h>

#define LOCAL_TRACE MAX(VM_GLOBAL_TRACE, 0)

#define GUEST_PHYSICAL_ASPACE_BASE 0UL
#define GUEST_PHYSICAL_ASPACE_SIZE (1UL << MMU_GUEST_SIZE_SHIFT)

// pointer to a singleton kernel address space
VmAspace* VmAspace::kernel_aspace_ = nullptr;

// pointer to the dummy root VMAR singleton
static VmAddressRegion* dummy_root_vmar = nullptr;

// list of all address spaces
struct VmAspaceListGlobal {};
static DECLARE_MUTEX(VmAspaceListGlobal) aspace_list_lock;
static fbl::DoublyLinkedList<VmAspace*> aspaces TA_GUARDED(aspace_list_lock);

// Called once at boot to initialize the singleton kernel address
// space. Thread safety analysis is disabled since we don't need to
// lock yet.
void VmAspace::KernelAspaceInitPreHeap() TA_NO_THREAD_SAFETY_ANALYSIS {
    // the singleton kernel address space
    static VmAspace _kernel_aspace(KERNEL_ASPACE_BASE, KERNEL_ASPACE_SIZE, VmAspace::TYPE_KERNEL, "kernel");

    // the singleton dummy root vmar (used to break a reference cycle in
    // Destroy())
    static VmAddressRegionDummy dummy_vmar;
#if LK_DEBUGLEVEL > 1
    _kernel_aspace.Adopt();
    dummy_vmar.Adopt();
#endif

    dummy_root_vmar = &dummy_vmar;

    static VmAddressRegion _kernel_root_vmar(_kernel_aspace);

    _kernel_aspace.root_vmar_ = fbl::AdoptRef(&_kernel_root_vmar);

    auto err = _kernel_aspace.Init();
    ASSERT(err >= 0);

    // save a pointer to the singleton kernel address space
    VmAspace::kernel_aspace_ = &_kernel_aspace;
    aspaces.push_front(kernel_aspace_);
}

// simple test routines
static inline bool is_inside(VmAspace& aspace, vaddr_t vaddr) {
    return (vaddr >= aspace.base() && vaddr <= aspace.base() + aspace.size() - 1);
}

static inline bool is_inside(VmAspace& aspace, VmAddressRegion& r) {
    // is the starting address within the address space
    if (!is_inside(aspace, r.base())) {
        return false;
    }

    if (r.size() == 0) {
        return true;
    }

    // see if the size is large enough to wrap the integer
    if (r.base() + r.size() - 1 < r.base()) {
        return false;
    }

    // check that the end address is within the address space's range
    if (r.base() + r.size() - 1 > aspace.base() + aspace.size() - 1) {
        return false;
    }

    return true;
}

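// Trims [vaddr, vaddr + size) so that it neither overflows nor extends past the
// end of the given address space. vaddr must already lie inside the aspace.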
static inline size_t trim_to_aspace(VmAspace& aspace, vaddr_t vaddr, size_t size) {
    DEBUG_ASSERT(is_inside(aspace, vaddr));

    if (size == 0) {
        return size;
    }

    size_t offset = vaddr - aspace.base();

    // LTRACEF("vaddr 0x%lx size 0x%zx offset 0x%zx aspace base 0x%lx aspace size 0x%zx\n",
    //         vaddr, size, offset, aspace.base(), aspace.size());

    if (offset + size < offset) {
        size = ULONG_MAX - offset - 1;
    }

    // LTRACEF("size now 0x%zx\n", size);

    if (offset + size >= aspace.size() - 1) {
        size = aspace.size() - offset;
    }

    // LTRACEF("size now 0x%zx\n", size);

    return size;
}

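// The constructor only records the base, size, flags and name; the arch aspace
// and the root VMAR are set up later by Init().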
VmAspace::VmAspace(vaddr_t base, size_t size, uint32_t flags, const char* name)
    : base_(base), size_(size), flags_(flags), root_vmar_(nullptr), aslr_prng_(nullptr, 0) {

    DEBUG_ASSERT(size != 0);
    DEBUG_ASSERT(base + size - 1 >= base);

    Rename(name);

    LTRACEF("%p '%s'\n", this, name_);
}

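// Sets up the architecture-specific half of the address space and, for every
// aspace except the kernel one created in KernelAspaceInitPreHeap(), creates
// the root VMAR.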
zx_status_t VmAspace::Init() {
    canary_.Assert();

    LTRACEF("%p '%s'\n", this, name_);

    // initialize the architecturally specific part
    bool is_high_kernel = (flags_ & TYPE_MASK) == TYPE_KERNEL;
    bool is_guest = (flags_ & TYPE_MASK) == TYPE_GUEST_PHYS;
    uint arch_aspace_flags =
        (is_high_kernel ? ARCH_ASPACE_FLAG_KERNEL : 0u) |
        (is_guest ? ARCH_ASPACE_FLAG_GUEST : 0u);
    zx_status_t status = arch_aspace_.Init(base_, size_, arch_aspace_flags);
    if (status != ZX_OK) {
        return status;
    }

    InitializeAslr();

    if (likely(!root_vmar_)) {
        return VmAddressRegion::CreateRoot(*this, VMAR_FLAG_CAN_MAP_SPECIFIC, &root_vmar_);
    }
    return ZX_OK;
}

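// Allocates and initializes a new address space of the requested type, picking
// the base and size from the TYPE_* bits, and registers it in the global aspace
// list. Returns nullptr if allocation or initialization fails.
//
// Illustrative use only (the caller shown here is an assumption, not taken from
// this file):
//
//   fbl::RefPtr<VmAspace> aspace = VmAspace::Create(VmAspace::TYPE_USER, "user");
//   if (!aspace) {
//       // out of memory or arch aspace init failed
//   }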
fbl::RefPtr<VmAspace> VmAspace::Create(uint32_t flags, const char* name) {
    LTRACEF("flags 0x%x, name '%s'\n", flags, name);

    vaddr_t base;
    size_t size;
    switch (flags & TYPE_MASK) {
    case TYPE_USER:
        base = USER_ASPACE_BASE;
        size = USER_ASPACE_SIZE;
        break;
    case TYPE_KERNEL:
        base = KERNEL_ASPACE_BASE;
        size = KERNEL_ASPACE_SIZE;
        break;
    case TYPE_LOW_KERNEL:
        base = 0;
        size = USER_ASPACE_BASE + USER_ASPACE_SIZE;
        break;
    case TYPE_GUEST_PHYS:
        base = GUEST_PHYSICAL_ASPACE_BASE;
        size = GUEST_PHYSICAL_ASPACE_SIZE;
        break;
    default:
        panic("Invalid aspace type");
    }

    fbl::AllocChecker ac;
    auto aspace = fbl::AdoptRef(new (&ac) VmAspace(base, size, flags, name));
    if (!ac.check()) {
        return nullptr;
    }

    // initialize the arch specific component of our address space
    auto err = aspace->Init();
    if (err < 0) {
        zx_status_t status = aspace->Destroy();
        DEBUG_ASSERT(status == ZX_OK);
        return nullptr;
    }

    // add it to the global list
    {
        Guard<fbl::Mutex> guard{&aspace_list_lock};
        aspaces.push_back(aspace.get());
    }

    // return a ref pointer to the aspace
    return ktl::move(aspace);
}

void VmAspace::Rename(const char* name) {
    canary_.Assert();
    strlcpy(name_, name ? name : "unnamed", sizeof(name_));
}

VmAspace::~VmAspace() {
    canary_.Assert();
    LTRACEF("%p '%s'\n", this, name_);

    // we have to have already been destroyed before freeing
    DEBUG_ASSERT(aspace_destroyed_);

    // pop it out of the global aspace list
    {
        Guard<fbl::Mutex> guard{&aspace_list_lock};
        if (this->InContainer()) {
            aspaces.erase(*this);
        }
    }

    // destroy the arch portion of the aspace
    // TODO(teisenbe): Move this to Destroy(). Currently can't move since
    // ProcessDispatcher calls Destroy() from the context of a thread in the
    // aspace.
    zx_status_t status = arch_aspace_.Destroy();
    DEBUG_ASSERT(status == ZX_OK);
}

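// Returns a reference to the root VMAR, taken under the aspace lock.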
fbl::RefPtr<VmAddressRegion> VmAspace::RootVmar() {
    Guard<fbl::Mutex> guard{&lock_};
    fbl::RefPtr<VmAddressRegion> ref(root_vmar_);
    return ktl::move(ref);
}

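// Tears down all of the regions in the address space and breaks the reference
// cycle between the aspace and its root VMAR. The arch aspace itself is torn
// down in the destructor (see the TODO there).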
zx_status_t VmAspace::Destroy() {
    canary_.Assert();
    LTRACEF("%p '%s'\n", this, name_);

    Guard<fbl::Mutex> guard{&lock_};

    // Don't let a vDSO mapping prevent destroying a VMAR
    // when the whole process is being destroyed.
    vdso_code_mapping_.reset();

    // tear down and free all of the regions in our address space
    if (root_vmar_) {
        zx_status_t status = root_vmar_->DestroyLocked();
        if (status != ZX_OK && status != ZX_ERR_BAD_STATE) {
            return status;
        }
    }
    aspace_destroyed_ = true;

    // Break the reference cycle between this aspace and the root VMAR
    root_vmar_.reset(dummy_root_vmar);

    return ZX_OK;
}

bool VmAspace::is_destroyed() const {
    Guard<fbl::Mutex> guard{&lock_};
    return aspace_destroyed_;
}

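// Common mapping path shared by the Alloc*/ReserveSpace helpers below: validates
// the arguments, creates a VmMapping of |vmo| under the root VMAR, optionally
// maps the range up front when VMM_FLAG_COMMIT is set, and returns the chosen
// virtual address through |ptr|.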
zx_status_t VmAspace::MapObjectInternal(fbl::RefPtr<VmObject> vmo, const char* name, uint64_t offset,
                                        size_t size, void** ptr, uint8_t align_pow2, uint vmm_flags,
                                        uint arch_mmu_flags) {

    canary_.Assert();
    LTRACEF("aspace %p name '%s' vmo %p, offset %#" PRIx64 " size %#zx "
            "ptr %p align %hhu vmm_flags %#x arch_mmu_flags %#x\n",
            this, name, vmo.get(), offset, size, ptr ? *ptr : 0, align_pow2, vmm_flags, arch_mmu_flags);

    DEBUG_ASSERT(!is_user() || !(arch_mmu_flags & ARCH_MMU_FLAG_PERM_USER));

    size = ROUNDUP(size, PAGE_SIZE);
    if (size == 0) {
        return ZX_ERR_INVALID_ARGS;
    }
    if (!vmo) {
        return ZX_ERR_INVALID_ARGS;
    }
    if (!IS_PAGE_ALIGNED(offset)) {
        return ZX_ERR_INVALID_ARGS;
    }

    vaddr_t vmar_offset = 0;
    // if they're asking for a specific spot or starting address, copy the address
    if (vmm_flags & VMM_FLAG_VALLOC_SPECIFIC) {
        // can't ask for a specific spot and then not provide one
        if (!ptr) {
            return ZX_ERR_INVALID_ARGS;
        }
        vmar_offset = reinterpret_cast<vaddr_t>(*ptr);

        // check that it's page aligned
        if (!IS_PAGE_ALIGNED(vmar_offset) || vmar_offset < base_) {
            return ZX_ERR_INVALID_ARGS;
        }

        vmar_offset -= base_;
    }

    uint32_t vmar_flags = 0;
    if (vmm_flags & VMM_FLAG_VALLOC_SPECIFIC) {
        vmar_flags |= VMAR_FLAG_SPECIFIC;
    }

    // Create the mappings with all of the CAN_* RWX flags, so that
    // Protect() can transition them arbitrarily. This is not desirable for the
    // long-term.
    vmar_flags |= VMAR_CAN_RWX_FLAGS;

    // allocate a region and put it in the aspace list
    fbl::RefPtr<VmMapping> r(nullptr);
    zx_status_t status = RootVmar()->CreateVmMapping(vmar_offset, size, align_pow2,
                                                     vmar_flags,
                                                     vmo, offset, arch_mmu_flags, name, &r);
    if (status != ZX_OK) {
        return status;
    }

    // if we're committing it, map the region now
    if (vmm_flags & VMM_FLAG_COMMIT) {
        auto err = r->MapRange(0, size, true);
        if (err < 0) {
            return err;
        }
    }

    // return the vaddr if requested
    if (ptr) {
        *ptr = (void*)r->base();
    }

    return ZX_OK;
}

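// Reserves a range of this address space by mapping a zero-length VMO over it,
// preserving whatever arch MMU flags the starting address is already mapped with
// (or falling back to a cached, read-only default).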
zx_status_t VmAspace::ReserveSpace(const char* name, size_t size, vaddr_t vaddr) {
    canary_.Assert();
    LTRACEF("aspace %p name '%s' size %#zx vaddr %#" PRIxPTR "\n", this, name, size, vaddr);

    DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr));
    DEBUG_ASSERT(IS_PAGE_ALIGNED(size));

    size = ROUNDUP_PAGE_SIZE(size);
    if (size == 0) {
        return ZX_OK;
    }
    if (!IS_PAGE_ALIGNED(vaddr)) {
        return ZX_ERR_INVALID_ARGS;
    }
    if (!is_inside(*this, vaddr)) {
        return ZX_ERR_OUT_OF_RANGE;
    }

    // trim the size
    size = trim_to_aspace(*this, vaddr, size);

    // allocate a zero length vm object to back it
    // TODO: decide if a null vmo object is worth it
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, 0, &vmo);
    if (status != ZX_OK) {
        return status;
    }
    vmo->set_name(name, strlen(name));

    // lookup how it's already mapped
    uint arch_mmu_flags = 0;
    auto err = arch_aspace_.Query(vaddr, nullptr, &arch_mmu_flags);
    if (err) {
        // if it wasn't already mapped, use some sort of strict default
        arch_mmu_flags = ARCH_MMU_FLAG_CACHED | ARCH_MMU_FLAG_PERM_READ;
    }

    // map it, creating a new region
    void* ptr = reinterpret_cast<void*>(vaddr);
    return MapObjectInternal(ktl::move(vmo), name, 0, size, &ptr, 0, VMM_FLAG_VALLOC_SPECIFIC,
                             arch_mmu_flags);
}

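// Maps the physically contiguous range starting at |paddr| into this address
// space. The mapping is always committed up front, and the cache-policy bits of
// |arch_mmu_flags| are applied to the backing physical VMO rather than to the
// mapping itself.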
zx_status_t VmAspace::AllocPhysical(const char* name, size_t size, void** ptr, uint8_t align_pow2,
                                    paddr_t paddr, uint vmm_flags, uint arch_mmu_flags) {
    canary_.Assert();
    LTRACEF("aspace %p name '%s' size %#zx ptr %p paddr %#" PRIxPTR " vmm_flags 0x%x arch_mmu_flags 0x%x\n",
            this, name, size, ptr ? *ptr : 0, paddr, vmm_flags, arch_mmu_flags);

    DEBUG_ASSERT(IS_PAGE_ALIGNED(paddr));

    if (size == 0) {
        return ZX_OK;
    }
    if (!IS_PAGE_ALIGNED(paddr)) {
        return ZX_ERR_INVALID_ARGS;
    }

    size = ROUNDUP_PAGE_SIZE(size);

    // create a vm object to back it
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPhysical::Create(paddr, size, &vmo);
    if (status != ZX_OK) {
        return status;
    }
    vmo->set_name(name, strlen(name));

    // force it to be mapped up front
    // TODO: add new flag to precisely mean pre-map
    vmm_flags |= VMM_FLAG_COMMIT;

    // Apply the cache policy
    if (vmo->SetMappingCachePolicy(arch_mmu_flags & ARCH_MMU_FLAG_CACHE_MASK) != ZX_OK) {
        return ZX_ERR_INVALID_ARGS;
    }

    arch_mmu_flags &= ~ARCH_MMU_FLAG_CACHE_MASK;
    return MapObjectInternal(ktl::move(vmo), name, 0, size, ptr, align_pow2, vmm_flags,
                             arch_mmu_flags);
}

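// Allocates physically contiguous pages and maps them into this address space;
// callers must pass VMM_FLAG_COMMIT since the pages are committed at creation
// time.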
zx_status_t VmAspace::AllocContiguous(const char* name, size_t size, void** ptr, uint8_t align_pow2,
                                      uint vmm_flags, uint arch_mmu_flags) {
    canary_.Assert();
    LTRACEF("aspace %p name '%s' size 0x%zx ptr %p align %hhu vmm_flags 0x%x arch_mmu_flags 0x%x\n", this,
            name, size, ptr ? *ptr : 0, align_pow2, vmm_flags, arch_mmu_flags);

    size = ROUNDUP(size, PAGE_SIZE);
    if (size == 0) {
        return ZX_ERR_INVALID_ARGS;
    }

    // test for invalid flags
    if (!(vmm_flags & VMM_FLAG_COMMIT)) {
        return ZX_ERR_INVALID_ARGS;
    }

    // create a vm object to back it
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::CreateContiguous(PMM_ALLOC_FLAG_ANY, size, align_pow2, &vmo);
    if (status != ZX_OK) {
        return status;
    }
    vmo->set_name(name, strlen(name));

    return MapObjectInternal(ktl::move(vmo), name, 0, size, ptr, align_pow2, vmm_flags,
                             arch_mmu_flags);
}

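// Allocates regular (not necessarily physically contiguous) pages and maps them,
// committing the memory up front only when VMM_FLAG_COMMIT is set.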
zx_status_t VmAspace::Alloc(const char* name, size_t size, void** ptr, uint8_t align_pow2,
                            uint vmm_flags, uint arch_mmu_flags) {
    canary_.Assert();
    LTRACEF("aspace %p name '%s' size 0x%zx ptr %p align %hhu vmm_flags 0x%x arch_mmu_flags 0x%x\n", this,
            name, size, ptr ? *ptr : 0, align_pow2, vmm_flags, arch_mmu_flags);

    size = ROUNDUP(size, PAGE_SIZE);
    if (size == 0) {
        return ZX_ERR_INVALID_ARGS;
    }

    // allocate a vm object to back it
    fbl::RefPtr<VmObject> vmo;
    zx_status_t status = VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, size, &vmo);
    if (status != ZX_OK) {
        return status;
    }
    vmo->set_name(name, strlen(name));

    // commit memory up front if requested
    if (vmm_flags & VMM_FLAG_COMMIT) {
        // commit memory to the object
        status = vmo->CommitRange(0, size);
        if (status != ZX_OK) {
            return status;
        }
    }

    // map it, creating a new region
    return MapObjectInternal(ktl::move(vmo), name, 0, size, ptr, align_pow2, vmm_flags,
                             arch_mmu_flags);
}

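// Kernel-aspace-only helper: finds the region containing |va| under the root
// VMAR and destroys it.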
zx_status_t VmAspace::FreeRegion(vaddr_t va) {
    DEBUG_ASSERT(!is_user());

    fbl::RefPtr<VmAddressRegionOrMapping> r = RootVmar()->FindRegion(va);
    if (!r) {
        return ZX_ERR_NOT_FOUND;
    }

    return r->Destroy();
}

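// Walks the VMAR tree from the root and returns the deepest mapping or region
// containing |va|; if no child contains the address, the enclosing VMAR itself
// is returned.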
fbl::RefPtr<VmAddressRegionOrMapping> VmAspace::FindRegion(vaddr_t va) {
    fbl::RefPtr<VmAddressRegion> vmar(RootVmar());
    while (1) {
        fbl::RefPtr<VmAddressRegionOrMapping> next(vmar->FindRegion(va));
        if (!next) {
            return vmar;
        }

        if (next->is_mapping()) {
            return next;
        }

        vmar = next->as_vm_address_region();
    }
}

void VmAspace::AttachToThread(thread_t* t) {
    canary_.Assert();
    DEBUG_ASSERT(t);

    // point the lk thread at our object via the dummy C vmm_aspace_t struct
    Guard<spin_lock_t, IrqSave> thread_lock_guard{ThreadLock::Get()};

    // not prepared to handle setting a new address space or one on a running thread
    DEBUG_ASSERT(!t->aspace);
    DEBUG_ASSERT(t->state != THREAD_RUNNING);

    t->aspace = reinterpret_cast<vmm_aspace_t*>(this);
}

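// Routes a page fault at |va| to the root VMAR while holding the aspace lock.
// For guest-physical aspaces, the fault is tagged as a guest access rather than
// a user access before being dispatched.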
zx_status_t VmAspace::PageFault(vaddr_t va, uint flags) {
    canary_.Assert();
    DEBUG_ASSERT(!aspace_destroyed_);
    LTRACEF("va %#" PRIxPTR ", flags %#x\n", va, flags);

    if ((flags_ & TYPE_MASK) == TYPE_GUEST_PHYS) {
        flags &= ~VMM_PF_FLAG_USER;
        flags |= VMM_PF_FLAG_GUEST;
    }

    // for now, hold the aspace lock across the page fault operation,
    // which stops any other operations on the address space from moving
    // the region out from underneath it
    Guard<fbl::Mutex> guard{&lock_};

    return root_vmar_->PageFault(va, flags);
}

void VmAspace::Dump(bool verbose) const {
    canary_.Assert();
    printf("as %p [%#" PRIxPTR " %#" PRIxPTR "] sz %#zx fl %#x ref %d '%s'\n", this,
           base_, base_ + size_ - 1, size_, flags_, ref_count_debug(), name_);

    Guard<fbl::Mutex> guard{&lock_};

    if (verbose) {
        root_vmar_->Dump(1, verbose);
    }
}

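// Visits the root VMAR and its children with the given VmEnumerator while
// holding the aspace lock; returns false if the enumerator asks to stop early.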
bool VmAspace::EnumerateChildren(VmEnumerator* ve) {
    canary_.Assert();
    DEBUG_ASSERT(ve != nullptr);
    Guard<fbl::Mutex> guard{&lock_};
    if (root_vmar_ == nullptr || aspace_destroyed_) {
        // Aspace hasn't been initialized or has already been destroyed.
        return true;
    }
    DEBUG_ASSERT(root_vmar_->IsAliveLocked());
    if (!ve->OnVmAddressRegion(root_vmar_.get(), 0)) {
        return false;
    }
    return root_vmar_->EnumerateChildrenLocked(ve, 1);
}

void DumpAllAspaces(bool verbose) {
    Guard<fbl::Mutex> guard{&aspace_list_lock};

    for (const auto& a : aspaces) {
        a.Dump(verbose);
    }
}

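// Maps an arbitrary address to the aspace that would translate it: the kernel
// aspace for kernel addresses, the current thread's aspace for user addresses,
// and nullptr for anything else.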
VmAspace* VmAspace::vaddr_to_aspace(uintptr_t address) {
    if (is_kernel_address(address)) {
        return kernel_aspace();
    } else if (is_user_address(address)) {
        return vmm_aspace_to_obj(get_current_thread()->aspace);
    } else {
        return nullptr;
    }
}

// TODO(dbort): Use GetMemoryUsage()
size_t VmAspace::AllocatedPages() const {
    canary_.Assert();

    Guard<fbl::Mutex> guard{&lock_};
    return root_vmar_->AllocatedPagesLocked();
}

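// ASLR is enabled for user aspaces unless the "aslr.disable" kernel command line
// option is set; the per-aspace PRNG is seeded from the global PRNG either way.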
void VmAspace::InitializeAslr() {
    aslr_enabled_ = is_user() && !cmdline_get_bool("aslr.disable", false);

    crypto::GlobalPRNG::GetInstance()->Draw(aslr_seed_, sizeof(aslr_seed_));
    aslr_prng_.AddEntropy(aslr_seed_, sizeof(aslr_seed_));
}

uintptr_t VmAspace::vdso_base_address() const {
    Guard<fbl::Mutex> guard{&lock_};
    return VDso::base_address(vdso_code_mapping_);
}

uintptr_t VmAspace::vdso_code_address() const {
    Guard<fbl::Mutex> guard{&lock_};
    return vdso_code_mapping_ ? vdso_code_mapping_->base() : 0;
}