// Copyright 2016 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

7 #include <object/vm_address_region_dispatcher.h>
8
9 #include <vm/vm_address_region.h>
10 #include <vm/vm_aspace.h>
11 #include <vm/vm_object.h>
12
13 #include <zircon/rights.h>
14
15 #include <fbl/alloc_checker.h>
16
17 #include <assert.h>
18 #include <err.h>
19 #include <inttypes.h>
20 #include <trace.h>
21
22 #define LOCAL_TRACE 0
23
24 namespace {
25
26 // Split out the syscall flags into vmar flags and mmu flags. Note that this
27 // does not validate that the requested protections in *flags* are valid. For
28 // that use is_valid_mapping_protection()
split_syscall_flags(uint32_t flags,uint32_t * vmar_flags,uint * arch_mmu_flags)29 zx_status_t split_syscall_flags(uint32_t flags, uint32_t* vmar_flags, uint* arch_mmu_flags) {
30 // Figure out arch_mmu_flags
31 uint mmu_flags = 0;
32 switch (flags & (ZX_VM_PERM_READ | ZX_VM_PERM_WRITE)) {
33 case ZX_VM_PERM_READ:
34 mmu_flags |= ARCH_MMU_FLAG_PERM_READ;
35 break;
36 case ZX_VM_PERM_READ | ZX_VM_PERM_WRITE:
37 mmu_flags |= ARCH_MMU_FLAG_PERM_READ | ARCH_MMU_FLAG_PERM_WRITE;
38 break;
39 }
40
41 if (flags & ZX_VM_PERM_EXECUTE) {
42 mmu_flags |= ARCH_MMU_FLAG_PERM_EXECUTE;
43 }
44
45 // Mask out arch_mmu_flags options
46 flags &= ~(ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_PERM_EXECUTE);
47
48 // Figure out vmar flags
49 uint32_t vmar = 0;
50 if (flags & ZX_VM_COMPACT) {
51 vmar |= VMAR_FLAG_COMPACT;
52 flags &= ~ZX_VM_COMPACT;
53 }
54 if (flags & ZX_VM_SPECIFIC) {
55 vmar |= VMAR_FLAG_SPECIFIC;
56 flags &= ~ZX_VM_SPECIFIC;
57 }
58 if (flags & ZX_VM_SPECIFIC_OVERWRITE) {
59 vmar |= VMAR_FLAG_SPECIFIC_OVERWRITE;
60 flags &= ~ZX_VM_SPECIFIC_OVERWRITE;
61 }
62 if (flags & ZX_VM_CAN_MAP_SPECIFIC) {
63 vmar |= VMAR_FLAG_CAN_MAP_SPECIFIC;
64 flags &= ~ZX_VM_CAN_MAP_SPECIFIC;
65 }
66 if (flags & ZX_VM_CAN_MAP_READ) {
67 vmar |= VMAR_FLAG_CAN_MAP_READ;
68 flags &= ~ZX_VM_CAN_MAP_READ;
69 }
70 if (flags & ZX_VM_CAN_MAP_WRITE) {
71 vmar |= VMAR_FLAG_CAN_MAP_WRITE;
72 flags &= ~ZX_VM_CAN_MAP_WRITE;
73 }
74 if (flags & ZX_VM_CAN_MAP_EXECUTE) {
75 vmar |= VMAR_FLAG_CAN_MAP_EXECUTE;
76 flags &= ~ZX_VM_CAN_MAP_EXECUTE;
77 }
78 if (flags & ZX_VM_REQUIRE_NON_RESIZABLE) {
79 vmar |= VMAR_FLAG_REQUIRE_NON_RESIZABLE;
80 flags &= ~ZX_VM_REQUIRE_NON_RESIZABLE;
81 }
82
83 if (flags != 0)
84 return ZX_ERR_INVALID_ARGS;
85
86 *vmar_flags = vmar;
87 *arch_mmu_flags |= mmu_flags;
88 return ZX_OK;
89 }
90
91 } // namespace
92
Create(fbl::RefPtr<VmAddressRegion> vmar,uint base_arch_mmu_flags,fbl::RefPtr<Dispatcher> * dispatcher,zx_rights_t * rights)93 zx_status_t VmAddressRegionDispatcher::Create(fbl::RefPtr<VmAddressRegion> vmar,
94 uint base_arch_mmu_flags,
95 fbl::RefPtr<Dispatcher>* dispatcher,
96 zx_rights_t* rights) {
97
98 // The initial rights should match the VMAR's creation permissions
99 zx_rights_t vmar_rights = default_rights();
100 uint32_t vmar_flags = vmar->flags();
101 if (vmar_flags & VMAR_FLAG_CAN_MAP_READ) {
102 vmar_rights |= ZX_RIGHT_READ;
103 }
104 if (vmar_flags & VMAR_FLAG_CAN_MAP_WRITE) {
105 vmar_rights |= ZX_RIGHT_WRITE;
106 }
107 if (vmar_flags & VMAR_FLAG_CAN_MAP_EXECUTE) {
108 vmar_rights |= ZX_RIGHT_EXECUTE;
109 }
110
111 fbl::AllocChecker ac;
112 auto disp = new (&ac) VmAddressRegionDispatcher(ktl::move(vmar), base_arch_mmu_flags);
113 if (!ac.check())
114 return ZX_ERR_NO_MEMORY;
115
116 *rights = vmar_rights;
117 *dispatcher = fbl::AdoptRef<Dispatcher>(disp);
118 return ZX_OK;
119 }
120
// Takes ownership of a reference to |vmar|. |base_arch_mmu_flags| is the seed
// OR'd into the mmu flags of every Map()/Protect() request on this dispatcher.
VmAddressRegionDispatcher::VmAddressRegionDispatcher(fbl::RefPtr<VmAddressRegion> vmar,
                                                     uint base_arch_mmu_flags)
    : vmar_(ktl::move(vmar)), base_arch_mmu_flags_(base_arch_mmu_flags) {}
124
// Default teardown: drops the reference held in vmar_; does not call
// Destroy() on the region.
VmAddressRegionDispatcher::~VmAddressRegionDispatcher() {}
126
Allocate(size_t offset,size_t size,uint32_t flags,fbl::RefPtr<VmAddressRegionDispatcher> * new_dispatcher,zx_rights_t * new_rights)127 zx_status_t VmAddressRegionDispatcher::Allocate(
128 size_t offset, size_t size, uint32_t flags,
129 fbl::RefPtr<VmAddressRegionDispatcher>* new_dispatcher,
130 zx_rights_t* new_rights) {
131
132 canary_.Assert();
133
134 uint32_t vmar_flags;
135 uint arch_mmu_flags = 0;
136 zx_status_t status = split_syscall_flags(flags, &vmar_flags, &arch_mmu_flags);
137 if (status != ZX_OK)
138 return status;
139
140 // Check if any MMU-related flags were requested.
141 if (arch_mmu_flags != 0) {
142 return ZX_ERR_INVALID_ARGS;
143 }
144
145 fbl::RefPtr<VmAddressRegion> new_vmar;
146 status = vmar_->CreateSubVmar(offset, size, /* align_pow2 */ 0 , vmar_flags,
147 "useralloc", &new_vmar);
148 if (status != ZX_OK)
149 return status;
150
151 // Create the dispatcher.
152 fbl::RefPtr<Dispatcher> dispatcher;
153 status = VmAddressRegionDispatcher::Create(ktl::move(new_vmar),
154 base_arch_mmu_flags_,
155 &dispatcher, new_rights);
156 if (status != ZX_OK)
157 return status;
158
159 *new_dispatcher =
160 DownCastDispatcher<VmAddressRegionDispatcher>(&dispatcher);
161 return ZX_OK;
162 }
163
// Forwards to VmAddressRegion::Destroy() on the wrapped region.
zx_status_t VmAddressRegionDispatcher::Destroy() {
    canary_.Assert();

    return vmar_->Destroy();
}
169
Map(size_t vmar_offset,fbl::RefPtr<VmObject> vmo,uint64_t vmo_offset,size_t len,uint32_t flags,fbl::RefPtr<VmMapping> * out)170 zx_status_t VmAddressRegionDispatcher::Map(size_t vmar_offset, fbl::RefPtr<VmObject> vmo,
171 uint64_t vmo_offset, size_t len, uint32_t flags,
172 fbl::RefPtr<VmMapping>* out) {
173 canary_.Assert();
174
175 if (!is_valid_mapping_protection(flags))
176 return ZX_ERR_INVALID_ARGS;
177
178 // Split flags into vmar_flags and arch_mmu_flags
179 uint32_t vmar_flags;
180 uint arch_mmu_flags = base_arch_mmu_flags_;
181 zx_status_t status = split_syscall_flags(flags, &vmar_flags, &arch_mmu_flags);
182 if (status != ZX_OK)
183 return status;
184
185 if (vmar_flags & VMAR_FLAG_REQUIRE_NON_RESIZABLE) {
186 vmar_flags &= ~VMAR_FLAG_REQUIRE_NON_RESIZABLE;
187 if (vmo->is_resizable())
188 return ZX_ERR_NOT_SUPPORTED;
189 }
190
191 fbl::RefPtr<VmMapping> result(nullptr);
192 status = vmar_->CreateVmMapping(vmar_offset, len, /* align_pow2 */ 0,
193 vmar_flags, ktl::move(vmo), vmo_offset,
194 arch_mmu_flags, "useralloc",
195 &result);
196 if (status != ZX_OK) {
197 return status;
198 }
199
200 *out = ktl::move(result);
201 return ZX_OK;
202 }
203
Protect(vaddr_t base,size_t len,uint32_t flags)204 zx_status_t VmAddressRegionDispatcher::Protect(vaddr_t base, size_t len, uint32_t flags) {
205 canary_.Assert();
206
207 if (!IS_PAGE_ALIGNED(base)) {
208 return ZX_ERR_INVALID_ARGS;
209 }
210
211 if (!is_valid_mapping_protection(flags))
212 return ZX_ERR_INVALID_ARGS;
213
214 uint32_t vmar_flags;
215 uint arch_mmu_flags = base_arch_mmu_flags_;
216 zx_status_t status = split_syscall_flags(flags, &vmar_flags, &arch_mmu_flags);
217 if (status != ZX_OK)
218 return status;
219
220 // This request does not allow any VMAR flags to be set
221 if (vmar_flags)
222 return ZX_ERR_INVALID_ARGS;
223
224 return vmar_->Protect(base, len, arch_mmu_flags);
225 }
226
Unmap(vaddr_t base,size_t len)227 zx_status_t VmAddressRegionDispatcher::Unmap(vaddr_t base, size_t len) {
228 canary_.Assert();
229
230 if (!IS_PAGE_ALIGNED(base)) {
231 return ZX_ERR_INVALID_ARGS;
232 }
233
234 return vmar_->Unmap(base, len);
235 }
236
is_valid_mapping_protection(uint32_t flags)237 bool VmAddressRegionDispatcher::is_valid_mapping_protection(uint32_t flags) {
238 if (!(flags & ZX_VM_PERM_READ)) {
239 // No way to express non-readable mappings that are also writeable or
240 // executable.
241 if (flags & (ZX_VM_PERM_WRITE | ZX_VM_PERM_EXECUTE)) {
242 return false;
243 }
244 }
245 return true;
246 }
247