1 // Copyright 2018 The Fuchsia Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <lib/fzl/vmar-manager.h>
6 #include <lib/fzl/vmo-mapper.h>
7 #include <unittest/unittest.h>
8 #include <zircon/limits.h>
9 #include <zircon/rights.h>
10
11 #include <fbl/algorithm.h>
12
13 #include <utility>
14
15 #include "vmo-probe.h"
16
17 namespace {
18
// Size of the top-level sub-VMAR created by the tests; each nested level is
// created at half the size of its parent (see vmar_vmo_core_test).
static constexpr size_t kSubVmarTestSize = 16 << 20; // 16MB
// Size of every VMO created or mapped by the tests.
static constexpr size_t kVmoTestSize = 512 << 10; // 512KB

// Short aliases for the types under test.
template <typename T>
using RefPtr = fbl::RefPtr<T>;
using VmarManager = fzl::VmarManager;
using VmoMapper = fzl::VmoMapper;
using AccessType = vmo_probe::AccessType;
27
// Returns true when the region described by |contained| (via its start()/size()
// accessors) lies entirely within the region described by |container|.  A
// region whose start + size computation wraps the address space is never
// considered contained.
template <typename T, typename U>
bool contained_in(const T& contained, const U& container) {
    const auto inner_begin = reinterpret_cast<uintptr_t>(contained.start());
    const auto inner_end = inner_begin + contained.size();

    // Sanity check: reject a region whose end wrapped around past its start.
    if (inner_end < inner_begin) {
        return false;
    }

    const auto outer_begin = reinterpret_cast<uintptr_t>(container.start());
    const auto outer_end = outer_begin + container.size();

    return (outer_begin <= inner_begin) && (inner_end <= outer_end);
}
39
// Core test routine shared by all of the create-and-map / map test cases.
//
// |vmar_levels| is the number of nested sub-VMARs to create before mapping
// (0 means map directly into the process root VMAR).  It must not exceed the
// capacity of |managers|.  |test_create| selects which VmoMapper entry point
// is exercised: true for CreateAndMap (the mapper creates the VMO itself),
// false for Map (the test creates the VMOs and hands them to the mapper).
bool vmar_vmo_core_test(uint32_t vmar_levels, bool test_create) {
    BEGIN_TEST;

    RefPtr<VmarManager> managers[2];
    RefPtr<VmarManager> target_vmar;

    ASSERT_LE(vmar_levels, fbl::count_of(managers));
    // Create the requested chain of nested sub-VMARs, halving the size at
    // each level, and verify each child lies inside its parent.
    size_t vmar_size = kSubVmarTestSize;
    for (uint32_t i = 0; i < vmar_levels; ++i) {
        managers[i] = VmarManager::Create(vmar_size, i ? managers[i - 1] : nullptr);
        ASSERT_NONNULL(managers[i], "Failed to create VMAR manager");

        if (i) {
            ASSERT_TRUE(contained_in(*managers[i], *managers[i - 1]),
                        "Sub-VMO is not contained within in its parent!");
        }

        vmar_size >>= 1u;
    }

    // All mappings target the deepest sub-VMAR, or the root VMAR when no
    // levels were requested (target_vmar stays null in that case).
    if (vmar_levels) {
        target_vmar = managers[vmar_levels - 1];
    }

    // Table of mapping configurations to exercise.  |test_size| and |start|
    // are updated as the test runs (test_size is overwritten in the
    // CreateAndMap path, and filled in for whole-VMO mappings in the Map
    // path; start records where each VMO ended up mapped).
    struct {
        uint32_t access_flags;  // ZX_VM_PERM_* flags for the mapping.
        zx_rights_t vmo_rights; // Rights reduction requested for the VMO handle.
        size_t test_offset;     // Offset into the VMO at which to map.
        size_t test_size;       // Bytes to map; 0 maps to the end of the VMO.
        void* start;            // Filled in: address of the mapping.
    } kVmoTests[] = {
        { .access_flags = ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
          .vmo_rights = ZX_RIGHT_SAME_RIGHTS,
          .test_offset = 0,
          .test_size = kVmoTestSize >> 1,
          .start = nullptr,
        },
        { .access_flags = ZX_VM_PERM_READ,
          .vmo_rights = ZX_RIGHT_READ | ZX_RIGHT_MAP,
          .test_offset = 0,
          .test_size = kVmoTestSize,
          .start = nullptr,
        },
        // TODO(johngro): We are not allowed to map pages as write-only.  Need
        // to determine if this is WAI or not.
#if 0
        { .access_flags = ZX_VM_PERM_WRITE,
          .vmo_rights = ZX_RIGHT_WRITE | ZX_RIGHT_MAP,
          .test_offset = 0,
          .test_size = 0,
          .start = nullptr,
        },
#endif
        { .access_flags = 0,
          .vmo_rights = 0,
          .test_offset = 0,
          .test_size = 0,
          .start = nullptr,
        },
        { .access_flags = 0,
          .vmo_rights = 0,
          .test_offset = kVmoTestSize >> 1,
          .test_size = 0,
          .start = nullptr,
        },
    };

    // Two outer passes: pass 0 unmaps explicitly via Unmap() before the
    // mappers leave scope; pass 1 relies on the mappers' destructors to do
    // the unmapping.
    for (uint32_t pass = 0; pass < 2; ++pass) {
        {
            VmoMapper mappers[fbl::count_of(kVmoTests)];
            zx::vmo vmo_handles[fbl::count_of(kVmoTests)];
            zx_status_t res;

            for (size_t i = 0; i < fbl::count_of(kVmoTests); ++i) {
                auto& t = kVmoTests[i];

                for (uint32_t create_map_pass = 0; create_map_pass < 2; ++create_map_pass) {
                    // If this is the first create/map pass, the create/map operation should
                    // succeed.  If this is the second pass, it should fail with BAD_STATE (since we
                    // should have already created/mapped already)
                    zx_status_t expected_cm_res = create_map_pass ? ZX_ERR_BAD_STATE : ZX_OK;

                    if (test_create) {
                        // If we are testing CreateAndMap, call it with the mapping
                        // rights and the proper rights reduction for the VMO it hands
                        // back to us.  Hold onto the returned handle in vmo_handles.
                        res = mappers[i].CreateAndMap(kVmoTestSize,
                                                      t.access_flags,
                                                      target_vmar,
                                                      &vmo_handles[i],
                                                      t.vmo_rights);
                        // CreateAndMap always maps the whole VMO.
                        t.test_size = kVmoTestSize;

                        ASSERT_EQ(res, expected_cm_res);
                        ASSERT_TRUE(vmo_handles[i].is_valid());
                    } else {
                        // If we are testing Map, and this is the first pass, create the VMOs we
                        // will pass to map, then do so.
                        if (create_map_pass == 0) {
                            res = zx::vmo::create(kVmoTestSize, 0, &vmo_handles[i]);
                            ASSERT_EQ(res, ZX_OK);
                            ASSERT_TRUE(vmo_handles[i].is_valid());
                        }

                        res = mappers[i].Map(vmo_handles[i],
                                             t.test_offset,
                                             t.test_size,
                                             t.access_flags,
                                             target_vmar);
                        ASSERT_EQ(res, expected_cm_res);

                        // If this was the first VMO we have mapped during this test
                        // run, and we requested only a partial map, and it was mapped
                        // in a sub-vmar, and the end of the VMO is not aligned with the
                        // end of the VMAR, then check to make sure that we cannot read
                        // or write past the end of the partial mapping.
                        //
                        // TODO(johngro): It would be nice to always do these checks,
                        // but we do not have a lot of control of whether or not
                        // something else may have been mapped adjacent to our mapping,
                        // hence all of the restrictions described above.
                        if (!i && !create_map_pass && target_vmar &&
                            t.test_size && (t.test_size < kVmoTestSize)) {
                            uintptr_t vmo_end = reinterpret_cast<uintptr_t>(mappers[i].start());
                            uintptr_t vmar_end = reinterpret_cast<uintptr_t>(target_vmar->start());

                            vmo_end += mappers[i].size();
                            vmar_end += target_vmar->size();
                            if (vmo_end < vmar_end) {
                                void* probe_tgt = reinterpret_cast<void*>(vmo_end);
                                // Both reads and writes just past the mapping must fault.
                                ASSERT_TRUE(vmo_probe::probe_access(probe_tgt, AccessType::Rd, false));
                                ASSERT_TRUE(vmo_probe::probe_access(probe_tgt, AccessType::Wr, false));
                            }
                        }
                    }
                }

                // Stash the address of the mapped VMOs in the test state
                t.start = mappers[i].start();

                // If we mapped inside of a sub-vmar, then the mapping should be contained within
                // the VMAR.
                if (target_vmar != nullptr) {
                    ASSERT_TRUE(contained_in(mappers[i], *target_vmar));
                }

                if (test_create) {
                    // If we created this VMO, make sure that its rights were reduced correctly.
                    // ZX_RIGHT_SAME_RIGHTS means "no reduction", so the handle should carry
                    // the default VMO rights in that case.
                    zx_rights_t expected_rights = t.vmo_rights != ZX_RIGHT_SAME_RIGHTS
                                                      ? t.vmo_rights
                                                      : ZX_DEFAULT_VMO_RIGHTS;
                    zx_info_handle_basic_t info;
                    res = vmo_handles[i].get_info(ZX_INFO_HANDLE_BASIC, &info, sizeof(info),
                                                  nullptr, nullptr);

                    ASSERT_EQ(res, ZX_OK, "Failed to get basic object info");
                    ASSERT_EQ(info.rights, expected_rights, "Rights reduction failure");
                } else {
                    // If we mapped this VMO, and we passed zero for the map size, the Mapper should
                    // have mapped the entire VMO after the offset and its size should reflect that.
                    if (!t.test_size) {
                        ASSERT_EQ(mappers[i].size() + t.test_offset, kVmoTestSize);
                        t.test_size = kVmoTestSize - t.test_offset;
                    }
                }
            }

            // Now that everything has been created and mapped, make sure that
            // everything checks out by probing and looking for seg-faults
            // if/when we violate permissions.
            for (const auto& t : kVmoTests) {
                ASSERT_TRUE(vmo_probe::probe_verify_region(t.start, t.test_size, t.access_flags));
            }

            // Release all of our VMO handles, then verify again.  Releasing
            // these handles should not cause our mapping to go away.
            for (auto& h : vmo_handles) {
                h.reset();
            }

            for (const auto& t : kVmoTests) {
                ASSERT_TRUE(vmo_probe::probe_verify_region(t.start, t.test_size, t.access_flags));
            }

            // If this is the first pass, manually unmap all of the VmoMappers
            // and verify that we can no longer access any of the previously
            // mapped region.
            if (!pass) {
                for (auto& m : mappers) {
                    m.Unmap();
                }

                for (const auto& t : kVmoTests) {
                    ASSERT_TRUE(vmo_probe::probe_verify_region(t.start, t.test_size, 0));
                }
            }
        }

        // If this is the second pass, then we didn't manually call unmap, we
        // just let the mappers go out of scope.  Make sure that everything
        // auto-unmapped as it should.
        if (pass) {
            for (const auto& t : kVmoTests) {
                ASSERT_TRUE(vmo_probe::probe_verify_region(t.start, t.test_size, 0));
            }
        }
    }

    // TODO(johngro) : release all of our VMAR references and then make certain
    // that they were destroyed as they should have been.  Right now this is
    // rather difficult as we cannot fetch mapping/vmar info for our current
    // process, so we are skipping the check.

    END_TEST;
}
255
// Exercise VmoMapper::CreateAndMap targeting the process root VMAR.
bool vmo_create_and_map_root_test() {
    BEGIN_TEST;
    ASSERT_TRUE(vmar_vmo_core_test(0, true));
    END_TEST;
}
261
// Exercise VmoMapper::CreateAndMap targeting one level of sub-VMAR.
bool vmo_create_and_map_sub_vmar_test() {
    BEGIN_TEST;
    ASSERT_TRUE(vmar_vmo_core_test(1, true));
    END_TEST;
}
267
// Exercise VmoMapper::CreateAndMap targeting two nested levels of sub-VMAR.
bool vmo_create_and_map_sub_sub_vmar_test() {
    BEGIN_TEST;
    ASSERT_TRUE(vmar_vmo_core_test(2, true));
    END_TEST;
}
273
// Exercise VmoMapper::Map (caller-supplied VMO) targeting the root VMAR.
bool vmo_map_root_test() {
    BEGIN_TEST;
    ASSERT_TRUE(vmar_vmo_core_test(0, false));
    END_TEST;
}
279
// Exercise VmoMapper::Map (caller-supplied VMO) targeting one sub-VMAR level.
bool vmo_map_sub_vmar_test() {
    BEGIN_TEST;
    ASSERT_TRUE(vmar_vmo_core_test(1, false));
    END_TEST;
}
285
// Exercise VmoMapper::Map (caller-supplied VMO) targeting two nested sub-VMAR
// levels.
bool vmo_map_sub_sub_vmar_test() {
    BEGIN_TEST;
    ASSERT_TRUE(vmar_vmo_core_test(2, false));
    END_TEST;
}
291
// Verify VmoMapper's move semantics: moving (by construction or assignment)
// transfers ownership of an existing mapping without changing its address,
// leaves the moved-from mapper empty, unmaps any mapping the destination
// previously held, and unmaps the final mapping when the owner leaves scope.
bool vmo_mapper_move_test() {
    BEGIN_TEST;

    constexpr uint32_t ACCESS_FLAGS = ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
    // Address/size of the original mapping; checked again after the scope
    // below exits to confirm the mapping was torn down.
    void* addr;
    size_t size;
    {
        // Create two mappers, and make sure neither has mapped anything.
        VmoMapper mapper1, mapper2;

        ASSERT_NULL(mapper1.start());
        ASSERT_EQ(mapper1.size(), 0);
        ASSERT_NULL(mapper2.start());
        ASSERT_EQ(mapper2.size(), 0);

        // Create and map a page in mapper 1, make sure we can probe it.
        zx_status_t res;
        res = mapper1.CreateAndMap(ZX_PAGE_SIZE, ACCESS_FLAGS);
        addr = mapper1.start();
        size = mapper1.size();

        ASSERT_EQ(res, ZX_OK);
        ASSERT_TRUE(vmo_probe::probe_verify_region(addr, size, ACCESS_FLAGS));

        // Move the mapping from mapper1 into mapper2 using assignment.  Make sure
        // the region is still mapped and has not moved in our address space.
        mapper2 = std::move(mapper1);

        ASSERT_NULL(mapper1.start());
        ASSERT_EQ(mapper1.size(), 0);
        ASSERT_EQ(mapper2.start(), addr);
        ASSERT_EQ(mapper2.size(), size);
        ASSERT_TRUE(vmo_probe::probe_verify_region(addr, size, ACCESS_FLAGS));

        // Now do the same thing, but this time move using construction.
        VmoMapper mapper3(std::move(mapper2));

        ASSERT_NULL(mapper2.start());
        ASSERT_EQ(mapper2.size(), 0);
        ASSERT_EQ(mapper3.start(), addr);
        ASSERT_EQ(mapper3.size(), size);
        ASSERT_TRUE(vmo_probe::probe_verify_region(addr, size, ACCESS_FLAGS));

        // Map a new region into mapper1, make sure it is OK.
        res = mapper1.CreateAndMap(ZX_PAGE_SIZE, ACCESS_FLAGS);
        void* second_addr = mapper1.start();
        size_t second_size = mapper1.size();

        ASSERT_EQ(res, ZX_OK);
        ASSERT_TRUE(vmo_probe::probe_verify_region(second_addr, second_size, ACCESS_FLAGS));

        // Now, move mapper3 on top of mapper1 via assignment and make sure that
        // mapper1's old region is properly unmapped while mapper3's contents remain
        // mapped and are properly moved.
        mapper1 = std::move(mapper3);

        ASSERT_NULL(mapper3.start());
        ASSERT_EQ(mapper3.size(), 0);
        ASSERT_EQ(mapper1.start(), addr);
        ASSERT_EQ(mapper1.size(), size);
        ASSERT_TRUE(vmo_probe::probe_verify_region(addr, size, ACCESS_FLAGS));
        ASSERT_TRUE(vmo_probe::probe_verify_region(second_addr, second_size, 0));
    }

    // Finally, now that we have left the scope, the original mapping that we
    // have been moving around should be gone by now.
    ASSERT_NONNULL(addr);
    ASSERT_EQ(size, ZX_PAGE_SIZE);
    ASSERT_TRUE(vmo_probe::probe_verify_region(addr, size, 0));

    END_TEST;
}
364
365 } // namespace
366
// Register all of the tests above with the unittest framework.
BEGIN_TEST_CASE(vmo_mapper_vmar_manager_tests)
RUN_NAMED_TEST("vmo_create_and_map_root", vmo_create_and_map_root_test)
RUN_NAMED_TEST("vmo_create_and_map_sub_vmar", vmo_create_and_map_sub_vmar_test)
RUN_NAMED_TEST("vmo_create_and_map_sub_sub_vmar", vmo_create_and_map_sub_sub_vmar_test)
RUN_NAMED_TEST("vmo_map_root", vmo_map_root_test)
RUN_NAMED_TEST("vmo_map_sub_vmar", vmo_map_sub_vmar_test)
RUN_NAMED_TEST("vmo_map_sub_sub_vmar", vmo_map_sub_sub_vmar_test)
RUN_NAMED_TEST("vmo_mapper_move_test", vmo_mapper_move_test)
END_TEST_CASE(vmo_mapper_vmar_manager_tests)
376