1 // Copyright 2017 The Fuchsia Authors
2 //
3 // Use of this source code is governed by a MIT-style
4 // license that can be found in the LICENSE file or at
5 // https://opensource.org/licenses/MIT
6
7 #include <hypervisor/guest_physical_address_space.h>
8
9 #include <fbl/alloc_checker.h>
10 #include <kernel/range_check.h>
11 #include <ktl/move.h>
12 #include <vm/fault.h>
13 #include <vm/vm_object_physical.h>
14
// Fault flags used by GetPage when committing a guest page: treat the access
// as a software-initiated write so the page is materialized writable.
static constexpr uint kPfFlags = VMM_PF_FLAG_WRITE | VMM_PF_FLAG_SW_FAULT;

// MMU permissions for the interrupt controller mapping (read/write only;
// cacheability for that mapping is taken from the VMO's cache policy, which
// MapInterruptController sets explicitly).
static constexpr uint kInterruptMmuFlags =
    ARCH_MMU_FLAG_PERM_READ |
    ARCH_MMU_FLAG_PERM_WRITE;

// MMU flags for kernel-side mappings of guest memory created by
// CreateGuestPtr: cached, read/write.
static constexpr uint kGuestMmuFlags =
    ARCH_MMU_FLAG_CACHED |
    ARCH_MMU_FLAG_PERM_READ |
    ARCH_MMU_FLAG_PERM_WRITE;
25
26 namespace hypervisor {
27
Create(uint8_t vmid,ktl::unique_ptr<GuestPhysicalAddressSpace> * _gpas)28 zx_status_t GuestPhysicalAddressSpace::Create(
29 #if ARCH_ARM64
30 uint8_t vmid,
31 #endif
32 ktl::unique_ptr<GuestPhysicalAddressSpace>* _gpas) {
33 fbl::AllocChecker ac;
34 auto gpas = ktl::make_unique<GuestPhysicalAddressSpace>(&ac);
35 if (!ac.check()) {
36 return ZX_ERR_NO_MEMORY;
37 }
38
39 gpas->guest_aspace_ = VmAspace::Create(VmAspace::TYPE_GUEST_PHYS, "guest_paspace");
40 if (!gpas->guest_aspace_) {
41 return ZX_ERR_NO_MEMORY;
42 }
43 #if ARCH_ARM64
44 gpas->arch_aspace()->arch_set_asid(vmid);
45 #endif
46 *_gpas = ktl::move(gpas);
47 return ZX_OK;
48 }
49
~GuestPhysicalAddressSpace()50 GuestPhysicalAddressSpace::~GuestPhysicalAddressSpace() {
51 // VmAspace maintains a circular reference with it's root VMAR. We need to
52 // destroy the VmAspace in order to break that reference and allow the
53 // VmAspace to be destructed.
54 if (guest_aspace_) {
55 guest_aspace_->Destroy();
56 }
57 }
58
MapInterruptController(zx_gpaddr_t guest_paddr,zx_paddr_t host_paddr,size_t len)59 zx_status_t GuestPhysicalAddressSpace::MapInterruptController(zx_gpaddr_t guest_paddr,
60 zx_paddr_t host_paddr, size_t len) {
61 fbl::RefPtr<VmObject> vmo;
62 zx_status_t status = VmObjectPhysical::Create(host_paddr, len, &vmo);
63 if (status != ZX_OK) {
64 return status;
65 }
66
67 status = vmo->SetMappingCachePolicy(ARCH_MMU_FLAG_CACHED);
68 if (status != ZX_OK) {
69 return status;
70 }
71
72 // The root VMAR will maintain a reference to the VmMapping internally so
73 // we don't need to maintain a long-lived reference to the mapping here.
74 fbl::RefPtr<VmMapping> mapping;
75 status = RootVmar()->CreateVmMapping(guest_paddr, vmo->size(), /* align_pow2*/ 0,
76 VMAR_FLAG_SPECIFIC, vmo, /* vmo_offset */ 0,
77 kInterruptMmuFlags, "guest_interrupt_vmo",
78 &mapping);
79 if (status != ZX_OK) {
80 return status;
81 }
82
83 // Write mapping to page table.
84 status = mapping->MapRange(0, vmo->size(), true);
85 if (status != ZX_OK) {
86 mapping->Destroy();
87 return status;
88 }
89 return ZX_OK;
90 }
91
UnmapRange(zx_gpaddr_t guest_paddr,size_t len)92 zx_status_t GuestPhysicalAddressSpace::UnmapRange(zx_gpaddr_t guest_paddr, size_t len) {
93 return RootVmar()->UnmapAllowPartial(guest_paddr, len);
94 }
95
FindMapping(fbl::RefPtr<VmAddressRegion> region,zx_gpaddr_t guest_paddr)96 static fbl::RefPtr<VmMapping> FindMapping(fbl::RefPtr<VmAddressRegion> region,
97 zx_gpaddr_t guest_paddr) {
98 for (fbl::RefPtr<VmAddressRegionOrMapping> next; (next = region->FindRegion(guest_paddr));
99 region = next->as_vm_address_region()) {
100 if (next->is_mapping()) {
101 return next->as_vm_mapping();
102 }
103 }
104 return nullptr;
105 }
106
GetPage(zx_gpaddr_t guest_paddr,zx_paddr_t * host_paddr)107 zx_status_t GuestPhysicalAddressSpace::GetPage(zx_gpaddr_t guest_paddr, zx_paddr_t* host_paddr) {
108 fbl::RefPtr<VmMapping> mapping = FindMapping(RootVmar(), guest_paddr);
109 if (!mapping) {
110 return ZX_ERR_NOT_FOUND;
111 }
112
113 // Lookup the physical address of this page in the VMO.
114 zx_gpaddr_t offset = guest_paddr - mapping->base();
115 return mapping->vmo()->GetPage(offset, kPfFlags, nullptr, nullptr, host_paddr);
116 }
117
PageFault(zx_gpaddr_t guest_paddr)118 zx_status_t GuestPhysicalAddressSpace::PageFault(zx_gpaddr_t guest_paddr) {
119 fbl::RefPtr<VmMapping> mapping = FindMapping(RootVmar(), guest_paddr);
120 if (!mapping) {
121 return ZX_ERR_NOT_FOUND;
122 }
123
124 // In order to avoid re-faulting if the guest changes how it accesses guest
125 // physical memory, and to avoid the need for invalidation of the guest
126 // physical address space on x86 (through the use of INVEPT), we fault the
127 // page with the maximum allowable permissions of the mapping.
128 uint pf_flags = VMM_PF_FLAG_GUEST | VMM_PF_FLAG_HW_FAULT;
129 if (mapping->arch_mmu_flags() & ARCH_MMU_FLAG_PERM_WRITE) {
130 pf_flags |= VMM_PF_FLAG_WRITE;
131 }
132 if (mapping->arch_mmu_flags() & ARCH_MMU_FLAG_PERM_EXECUTE) {
133 pf_flags |= VMM_PF_FLAG_INSTRUCTION;
134 }
135 Guard<fbl::Mutex> guard{guest_aspace_->lock()};
136 return mapping->PageFault(guest_paddr, pf_flags);
137 }
138
CreateGuestPtr(zx_gpaddr_t guest_paddr,size_t len,const char * name,GuestPtr * guest_ptr)139 zx_status_t GuestPhysicalAddressSpace::CreateGuestPtr(zx_gpaddr_t guest_paddr, size_t len,
140 const char* name, GuestPtr* guest_ptr) {
141 const zx_gpaddr_t begin = ROUNDDOWN(guest_paddr, PAGE_SIZE);
142 const zx_gpaddr_t end = ROUNDUP(guest_paddr + len, PAGE_SIZE);
143 const zx_gpaddr_t mapping_len = end - begin;
144 if (begin > end || !InRange(begin, mapping_len, size())) {
145 return ZX_ERR_INVALID_ARGS;
146 }
147 fbl::RefPtr<VmAddressRegionOrMapping> region = RootVmar()->FindRegion(begin);
148 if (!region) {
149 return ZX_ERR_NOT_FOUND;
150 }
151 fbl::RefPtr<VmMapping> guest_mapping = region->as_vm_mapping();
152 if (!guest_mapping) {
153 return ZX_ERR_WRONG_TYPE;
154 }
155 const uint64_t intra_mapping_offset = begin - guest_mapping->base();
156 if (!InRange(intra_mapping_offset, mapping_len, guest_mapping->size())) {
157 // The address range is not contained within a single mapping.
158 return ZX_ERR_OUT_OF_RANGE;
159 }
160
161 fbl::RefPtr<VmMapping> host_mapping;
162 zx_status_t status = VmAspace::kernel_aspace()->RootVmar()->CreateVmMapping(
163 /* mapping_offset */ 0,
164 mapping_len,
165 /* align_pow2 */ false,
166 /* vmar_flags */ 0,
167 guest_mapping->vmo(),
168 guest_mapping->object_offset() + intra_mapping_offset,
169 kGuestMmuFlags,
170 name,
171 &host_mapping);
172 if (status != ZX_OK) {
173 return status;
174 }
175
176 *guest_ptr = GuestPtr(ktl::move(host_mapping), guest_paddr - begin);
177 return ZX_OK;
178 }
179
180 } // namespace hypervisor
181