1 // Copyright 2017 The Fuchsia Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

#include <fbl/unique_fd.h>
#include <zircon/compiler.h>
#include <zircon/syscalls.h>
#include <unittest/unittest.h>

#include "filesystems.h"
21
22 namespace {
23
24 // Certain filesystems delay creation of internal structures
25 // until the file is initially accessed. Test that we can
26 // actually mmap properly before the file has otherwise been
27 // accessed.
TestMmapEmpty(void)28 bool TestMmapEmpty(void) {
29 BEGIN_TEST;
30 if (!test_info->supports_mmap) {
31 return true;
32 }
33
34 constexpr char kFilename[] = "::mmap_empty";
35 int fd = open(kFilename, O_RDWR | O_CREAT | O_EXCL);
36 ASSERT_GT(fd, 0);
37
38 char tmp[] = "this is a temporary buffer";
39 void* addr = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd, 0);
40 ASSERT_NE(addr, MAP_FAILED);
41 ASSERT_EQ(write(fd, tmp, sizeof(tmp)), sizeof(tmp));
42 ASSERT_EQ(memcmp(addr, tmp, sizeof(tmp)), 0);
43
44 ASSERT_EQ(munmap(addr, PAGE_SIZE), 0, "munmap failed");
45 ASSERT_EQ(close(fd), 0);
46 ASSERT_EQ(unlink(kFilename), 0);
47 END_TEST;
48 }
49
50 // Test that a file's writes are properly propagated to
51 // a read-only buffer.
TestMmapReadable(void)52 bool TestMmapReadable(void) {
53 BEGIN_TEST;
54 if (!test_info->supports_mmap) {
55 return true;
56 }
57
58 constexpr char kFilename[] = "::mmap_readable";
59 int fd = open(kFilename, O_RDWR | O_CREAT | O_EXCL);
60 ASSERT_GT(fd, 0);
61
62 char tmp1[] = "this is a temporary buffer";
63 char tmp2[] = "and this is a secondary buffer";
64 ASSERT_EQ(write(fd, tmp1, sizeof(tmp1)), sizeof(tmp1));
65
66 // Demonstrate that a simple buffer can be mapped
67 void* addr = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd, 0);
68 ASSERT_NE(addr, MAP_FAILED);
69 ASSERT_EQ(memcmp(addr, tmp1, sizeof(tmp1)), 0);
70
71 // Show that if we keep writing to the file, the mapping
72 // is also updated
73 ASSERT_EQ(write(fd, tmp2, sizeof(tmp2)), sizeof(tmp2));
74 void* addr2 = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) + sizeof(tmp1));
75 ASSERT_EQ(memcmp(addr2, tmp2, sizeof(tmp2)), 0);
76
77 // But the original part of the mapping is unchanged
78 ASSERT_EQ(memcmp(addr, tmp1, sizeof(tmp1)), 0);
79
80 ASSERT_EQ(munmap(addr, PAGE_SIZE), 0, "munmap failed");
81 ASSERT_EQ(close(fd), 0);
82 ASSERT_EQ(unlink(kFilename), 0);
83 END_TEST;
84 }
85
86 // Test that a mapped buffer's writes are properly propagated
87 // to the file.
TestMmapWritable(void)88 bool TestMmapWritable(void) {
89 BEGIN_TEST;
90 if (!test_info->supports_mmap) {
91 return true;
92 }
93
94 constexpr char kFilename[] = "::mmap_writable";
95 int fd = open(kFilename, O_RDWR | O_CREAT | O_EXCL);
96 ASSERT_GT(fd, 0);
97
98 char tmp1[] = "this is a temporary buffer";
99 char tmp2[] = "and this is a secondary buffer";
100 ASSERT_EQ(write(fd, tmp1, sizeof(tmp1)), sizeof(tmp1));
101
102 // Demonstrate that a simple buffer can be mapped
103 void* addr = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
104 ASSERT_NE(addr, MAP_FAILED);
105 ASSERT_EQ(memcmp(addr, tmp1, sizeof(tmp1)), 0);
106
107 // Extend the file length up to the necessary size
108 ASSERT_EQ(ftruncate(fd, sizeof(tmp1) + sizeof(tmp2)), 0);
109
110 // Write to the file in the mapping
111 void* addr2 = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) + sizeof(tmp1));
112 memcpy(addr2, tmp2, sizeof(tmp2));
113
114 // Verify the write by reading from the file
115 char buf[sizeof(tmp2)];
116 ASSERT_EQ(read(fd, buf, sizeof(buf)), sizeof(buf));
117 ASSERT_EQ(memcmp(buf, tmp2, sizeof(tmp2)), 0);
118 // But the original part of the mapping is unchanged
119 ASSERT_EQ(memcmp(addr, tmp1, sizeof(tmp1)), 0);
120
121 // Extending the file beyond the mapping should still leave the first page
122 // accessible
123 ASSERT_EQ(ftruncate(fd, PAGE_SIZE * 2), 0);
124 ASSERT_EQ(memcmp(addr, tmp1, sizeof(tmp1)), 0);
125 ASSERT_EQ(memcmp(addr2, tmp2, sizeof(tmp2)), 0);
126 for (size_t i = sizeof(tmp1) + sizeof(tmp2); i < PAGE_SIZE; i++) {
127 auto caddr = reinterpret_cast<char*>(addr);
128 ASSERT_EQ(caddr[i], 0);
129 }
130
131 ASSERT_EQ(munmap(addr, PAGE_SIZE), 0, "munmap failed");
132 ASSERT_EQ(close(fd), 0);
133 ASSERT_EQ(unlink(kFilename), 0);
134
135 END_TEST;
136 }
137
138 // Test that the mapping of a file remains usable even after
139 // the file has been closed / unlinked / renamed.
TestMmapUnlinked(void)140 bool TestMmapUnlinked(void) {
141 BEGIN_TEST;
142 if (!test_info->supports_mmap) {
143 return true;
144 }
145
146 constexpr char kFilename[] = "::mmap_unlinked";
147 int fd = open(kFilename, O_RDWR | O_CREAT | O_EXCL);
148 ASSERT_GT(fd, 0);
149
150 char tmp[] = "this is a temporary buffer";
151 ASSERT_EQ(write(fd, tmp, sizeof(tmp)), sizeof(tmp));
152
153 // Demonstrate that a simple buffer can be mapped
154 void* addr = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd, 0);
155 ASSERT_NE(addr, MAP_FAILED);
156 ASSERT_EQ(memcmp(addr, tmp, sizeof(tmp)), 0);
157
158 // If we close the file, we can still access the mapping
159 ASSERT_EQ(close(fd), 0);
160 ASSERT_EQ(memcmp(addr, tmp, sizeof(tmp)), 0);
161
162 // If we rename the file, we can still access the mapping
163 ASSERT_EQ(rename(kFilename, "::otherfile"), 0);
164 ASSERT_EQ(memcmp(addr, tmp, sizeof(tmp)), 0);
165
166 // If we unlink the file, we can still access the mapping
167 ASSERT_EQ(unlink("::otherfile"), 0);
168 ASSERT_EQ(memcmp(addr, tmp, sizeof(tmp)), 0);
169
170 ASSERT_EQ(munmap(addr, PAGE_SIZE), 0, "munmap failed");
171 END_TEST;
172 }
173
174 // Test that MAP_SHARED propagates updates to the file
TestMmapShared(void)175 bool TestMmapShared(void) {
176 BEGIN_TEST;
177 if (!test_info->supports_mmap) {
178 return true;
179 }
180
181 constexpr char kFilename[] = "::mmap_shared";
182 int fd = open(kFilename, O_RDWR | O_CREAT | O_EXCL);
183 ASSERT_GT(fd, 0);
184
185 char tmp[] = "this is a temporary buffer";
186 ASSERT_EQ(write(fd, tmp, sizeof(tmp)), sizeof(tmp));
187
188 // Demonstrate that a simple buffer can be mapped
189 void* addr1 = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
190 ASSERT_NE(addr1, MAP_FAILED);
191 ASSERT_EQ(memcmp(addr1, tmp, sizeof(tmp)), 0);
192
193 int fd2 = open(kFilename, O_RDWR);
194 ASSERT_GT(fd2, 0);
195
196 // Demonstrate that the buffer can be mapped multiple times
197 void* addr2 = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd2, 0);
198 ASSERT_NE(addr2, MAP_FAILED);
199 ASSERT_EQ(memcmp(addr2, tmp, sizeof(tmp)), 0);
200
201 // Demonstrate that updates to the file are shared between mappings
202 char tmp2[] = "buffer which will update through fd";
203 ASSERT_EQ(lseek(fd, 0, SEEK_SET), 0);
204 ASSERT_EQ(write(fd, tmp2, sizeof(tmp2)), sizeof(tmp2));
205 ASSERT_EQ(memcmp(addr1, tmp2, sizeof(tmp2)), 0);
206 ASSERT_EQ(memcmp(addr2, tmp2, sizeof(tmp2)), 0);
207
208 // Demonstrate that updates to the mappings are shared too
209 char tmp3[] = "final buffer, which updates via mapping";
210 memcpy(addr1, tmp3, sizeof(tmp3));
211 ASSERT_EQ(memcmp(addr1, tmp3, sizeof(tmp3)), 0);
212 ASSERT_EQ(memcmp(addr2, tmp3, sizeof(tmp3)), 0);
213 ASSERT_EQ(close(fd), 0);
214 ASSERT_EQ(close(fd2), 0);
215 ASSERT_EQ(munmap(addr2, PAGE_SIZE), 0, "munmap failed");
216
217 // Demonstrate that we can map a read-only file as shared + readable
218 fd = open(kFilename, O_RDONLY);
219 ASSERT_GT(fd, 0);
220 addr2 = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd, 0);
221 ASSERT_NE(addr2, MAP_FAILED);
222 ASSERT_EQ(memcmp(addr1, tmp3, sizeof(tmp3)), 0);
223 ASSERT_EQ(memcmp(addr2, tmp3, sizeof(tmp3)), 0);
224 ASSERT_EQ(close(fd), 0);
225 ASSERT_EQ(munmap(addr2, PAGE_SIZE), 0, "munmap failed");
226
227 ASSERT_EQ(munmap(addr1, PAGE_SIZE), 0, "munmap failed");
228 ASSERT_EQ(unlink(kFilename), 0);
229
230 END_TEST;
231 }
232
233 // Test that MAP_PRIVATE keeps all copies of the buffer
234 // separate
TestMmapPrivate(void)235 bool TestMmapPrivate(void) {
236 BEGIN_TEST;
237 if (!test_info->supports_mmap) {
238 return true;
239 }
240
241 constexpr char kFilename[] = "::mmap_private";
242 int fd = open(kFilename, O_RDWR | O_CREAT | O_EXCL);
243 ASSERT_GT(fd, 0);
244
245 char buf[64];
246 memset(buf, 'a', sizeof(buf));
247 ASSERT_EQ(write(fd, buf, sizeof(buf)), sizeof(buf));
248
249 // Demonstrate that a simple buffer can be mapped
250 void* addr1 = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
251 ASSERT_NE(addr1, MAP_FAILED);
252 ASSERT_EQ(memcmp(addr1, buf, sizeof(buf)), 0);
253 // ... multiple times
254 void* addr2 = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
255 ASSERT_NE(addr2, MAP_FAILED);
256 ASSERT_EQ(memcmp(addr2, buf, sizeof(buf)), 0);
257
258 // File: 'a'
259 // addr1 private copy: 'b'
260 // addr2 private copy: 'c'
261 memset(buf, 'b', sizeof(buf));
262 memcpy(addr1, buf, sizeof(buf));
263 memset(buf, 'c', sizeof(buf));
264 memcpy(addr2, buf, sizeof(buf));
265
266 // Verify the file and two buffers all have independent contents
267 memset(buf, 'a', sizeof(buf));
268 char tmp[sizeof(buf)];
269 ASSERT_EQ(lseek(fd, SEEK_SET, 0), 0);
270 ASSERT_EQ(read(fd, tmp, sizeof(tmp)), sizeof(tmp));
271 ASSERT_EQ(memcmp(tmp, buf, sizeof(tmp)), 0);
272 memset(buf, 'b', sizeof(buf));
273 ASSERT_EQ(memcmp(addr1, buf, sizeof(buf)), 0);
274 memset(buf, 'c', sizeof(buf));
275 ASSERT_EQ(memcmp(addr2, buf, sizeof(buf)), 0);
276
277 ASSERT_EQ(munmap(addr1, PAGE_SIZE), 0, "munmap failed");
278 ASSERT_EQ(munmap(addr2, PAGE_SIZE), 0, "munmap failed");
279 ASSERT_EQ(close(fd), 0);
280 ASSERT_EQ(unlink(kFilename), 0);
281
282 END_TEST;
283 }
284
285 // Test that mmap fails with appropriate error codes when
286 // we expect.
TestMmapEvil(void)287 bool TestMmapEvil(void) {
288 BEGIN_TEST;
289 if (!test_info->supports_mmap) {
290 return true;
291 }
292
293 // Try (and fail) to mmap a directory
294 ASSERT_EQ(mkdir("::mydir", 0666), 0);
295 int fd = open("::mydir", O_RDONLY | O_DIRECTORY);
296 ASSERT_GT(fd, 0);
297 ASSERT_EQ(mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd, 0), MAP_FAILED);
298 ASSERT_EQ(errno, EACCES);
299 errno = 0;
300 ASSERT_EQ(close(fd), 0);
301 ASSERT_EQ(rmdir("::mydir"), 0);
302
303 fd = open("::myfile", O_RDWR | O_CREAT | O_EXCL);
304 ASSERT_GT(fd, 0);
305
306 // Mmap without MAP_PRIVATE or MAP_SHARED
307 ASSERT_EQ(mmap(NULL, PAGE_SIZE, PROT_READ, 0, fd, 0), MAP_FAILED);
308 ASSERT_EQ(errno, EINVAL);
309 errno = 0;
310 // Mmap with both MAP_PRIVATE and MAP_SHARED
311 ASSERT_EQ(mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED | MAP_PRIVATE, fd, 0), MAP_FAILED);
312 ASSERT_EQ(errno, EINVAL);
313 errno = 0;
314 // Mmap with unaligned offset
315 ASSERT_EQ(mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd, 1), MAP_FAILED);
316 ASSERT_EQ(errno, EINVAL);
317 errno = 0;
318 // Mmap with a length of zero
319 ASSERT_EQ(mmap(NULL, 0, PROT_READ, MAP_SHARED, fd, 0), MAP_FAILED);
320 ASSERT_EQ(errno, EINVAL);
321 errno = 0;
322 ASSERT_EQ(close(fd), 0);
323 // Test all cases of MAP_PRIVATE and MAP_SHARED which require
324 // a readable file.
325 fd = open("::myfile", O_WRONLY);
326 ASSERT_GT(fd, 0);
327 ASSERT_EQ(mmap(NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, 0), MAP_FAILED);
328 ASSERT_EQ(errno, EACCES);
329 errno = 0;
330 ASSERT_EQ(mmap(NULL, PAGE_SIZE, PROT_WRITE, MAP_PRIVATE, fd, 0), MAP_FAILED);
331 ASSERT_EQ(errno, EACCES);
332 errno = 0;
333 ASSERT_EQ(mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0), MAP_FAILED);
334 ASSERT_EQ(errno, EACCES);
335 errno = 0;
336 ASSERT_EQ(mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd, 0), MAP_FAILED);
337 ASSERT_EQ(errno, EACCES);
338 errno = 0;
339 ASSERT_EQ(mmap(NULL, PAGE_SIZE, PROT_WRITE, MAP_SHARED, fd, 0), MAP_FAILED);
340 ASSERT_EQ(errno, EACCES);
341 errno = 0;
342 ASSERT_EQ(mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0), MAP_FAILED);
343 ASSERT_EQ(errno, EACCES);
344 errno = 0;
345 ASSERT_EQ(close(fd), 0);
346 // Test all cases of MAP_PRIVATE and MAP_SHARED which require a
347 // writable file (notably, MAP_PRIVATE never requires a writable
348 // file, since it makes a copy).
349 fd = open("::myfile", O_RDONLY);
350 ASSERT_GT(fd, 0);
351 ASSERT_EQ(mmap(NULL, PAGE_SIZE, PROT_WRITE, MAP_SHARED, fd, 0), MAP_FAILED);
352 ASSERT_EQ(errno, EACCES);
353 errno = 0;
354 ASSERT_EQ(mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0), MAP_FAILED);
355 ASSERT_EQ(errno, EACCES);
356 errno = 0;
357 ASSERT_EQ(close(fd), 0);
358 // PROT_WRITE requires that the file is NOT append-only
359 fd = open("::myfile", O_RDONLY | O_APPEND);
360 ASSERT_GT(fd, 0);
361 ASSERT_EQ(mmap(NULL, PAGE_SIZE, PROT_WRITE, MAP_SHARED, fd, 0), MAP_FAILED);
362 ASSERT_EQ(errno, EACCES);
363 errno = 0;
364 ASSERT_EQ(close(fd), 0);
365
366 ASSERT_EQ(unlink("::myfile"), 0);
367 END_TEST;
368 }
369
TestMmapTruncateAccess(void)370 bool TestMmapTruncateAccess(void) {
371 BEGIN_TEST;
372 if (!test_info->supports_mmap) {
373 return true;
374 }
375
376 fbl::unique_fd fd(open("::mmap_truncate", O_CREAT | O_RDWR));
377 ASSERT_TRUE(fd);
378
379 constexpr size_t kPageCount = 5;
380 char buf[PAGE_SIZE * kPageCount];
381 memset(buf, 'a', sizeof(buf));
382 ASSERT_EQ(write(fd.get(), buf, sizeof(buf)), sizeof(buf));
383
384 // Map all pages and validate their contents.
385 void* addr = mmap(NULL, sizeof(buf), PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0);
386 ASSERT_NE(addr, MAP_FAILED);
387 ASSERT_EQ(memcmp(addr, buf, sizeof(buf)), 0);
388
389 constexpr size_t kHalfPage = PAGE_SIZE / 2;
390 for (size_t i = (kPageCount * 2) - 1; i > 0; i--) {
391 // Shrink the underlying file.
392 size_t new_size = kHalfPage * i;
393 ASSERT_EQ(ftruncate(fd.get(), new_size), 0);
394 ASSERT_EQ(memcmp(addr, buf, new_size), 0);
395
396 // Accessing beyond the end of the file, but within the mapping, is
397 // undefined behavior on other platforms. However, on Fuchsia, this
398 // behavior is explicitly memory-safe.
399 char buf_beyond[PAGE_SIZE * kPageCount - new_size];
400 memset(buf_beyond, 'b', sizeof(buf_beyond));
401 void* beyond = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) + new_size);
402 memset(beyond, 'b', sizeof(buf_beyond));
403 ASSERT_EQ(memcmp(buf_beyond, beyond, sizeof(buf_beyond)), 0);
404 }
405
406 ASSERT_EQ(munmap(addr, sizeof(buf)), 0);
407 ASSERT_EQ(unlink("::mmap_truncate"), 0);
408
409 END_TEST;
410 }
411
TestMmapTruncateExtend(void)412 bool TestMmapTruncateExtend(void) {
413 BEGIN_TEST;
414 if (!test_info->supports_mmap) {
415 return true;
416 }
417
418 fbl::unique_fd fd(open("::mmap_truncate_extend", O_CREAT | O_RDWR));
419 ASSERT_TRUE(fd);
420
421 constexpr size_t kPageCount = 5;
422 char buf[PAGE_SIZE * kPageCount];
423 memset(buf, 'a', sizeof(buf));
424 ASSERT_EQ(write(fd.get(), buf, sizeof(buf)), sizeof(buf));
425
426 // Map all pages and validate their contents.
427 void* addr = mmap(NULL, sizeof(buf), PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0);
428 ASSERT_NE(addr, MAP_FAILED);
429 ASSERT_EQ(memcmp(addr, buf, sizeof(buf)), 0);
430
431 constexpr size_t kHalfPage = PAGE_SIZE / 2;
432
433 ASSERT_EQ(ftruncate(fd.get(), 0), 0);
434 memset(buf, 0, sizeof(buf));
435
436 // Even though we trample over the "out-of-bounds" part of the mapping,
437 // ensure it is filled with zeroes as we truncate-extend it.
438 for (size_t i = 1; i < kPageCount * 2; i++) {
439 size_t new_size = kHalfPage * i;
440
441 // Fill "out-of-bounds" with invalid data.
442 char buf_beyond[PAGE_SIZE * kPageCount - new_size];
443 memset(buf_beyond, 'b', sizeof(buf_beyond));
444 void* beyond = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) + new_size);
445 memset(beyond, 'b', sizeof(buf_beyond));
446 ASSERT_EQ(memcmp(buf_beyond, beyond, sizeof(buf_beyond)), 0);
447
448 // Observe that the truncate extension fills the file with zeroes.
449 ASSERT_EQ(ftruncate(fd.get(), new_size), 0);
450 ASSERT_EQ(memcmp(buf, addr, new_size), 0);
451 }
452
453 ASSERT_EQ(munmap(addr, sizeof(buf)), 0);
454 ASSERT_EQ(unlink("::mmap_truncate_extend"), 0);
455
456 END_TEST;
457 }
458
TestMmapTruncateWriteExtend(void)459 bool TestMmapTruncateWriteExtend(void) {
460 BEGIN_TEST;
461 if (!test_info->supports_mmap) {
462 return true;
463 }
464
465 fbl::unique_fd fd(open("::mmap_write_extend", O_CREAT | O_RDWR));
466 ASSERT_TRUE(fd);
467
468 constexpr size_t kPageCount = 5;
469 char buf[PAGE_SIZE * kPageCount];
470 memset(buf, 'a', sizeof(buf));
471 ASSERT_EQ(write(fd.get(), buf, sizeof(buf)), sizeof(buf));
472
473 // Map all pages and validate their contents.
474 void* addr = mmap(NULL, sizeof(buf), PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0);
475 ASSERT_NE(addr, MAP_FAILED);
476 ASSERT_EQ(memcmp(addr, buf, sizeof(buf)), 0);
477
478 constexpr size_t kHalfPage = PAGE_SIZE / 2;
479
480 ASSERT_EQ(ftruncate(fd.get(), 0), 0);
481 memset(buf, 0, sizeof(buf));
482
483 // Even though we trample over the "out-of-bounds" part of the mapping,
484 // ensure it is filled with zeroes as we truncate-extend it.
485 for (size_t i = 1; i < kPageCount * 2; i++) {
486 size_t new_size = kHalfPage * i;
487
488 // Fill "out-of-bounds" with invalid data.
489 char buf_beyond[PAGE_SIZE * kPageCount - new_size];
490 memset(buf_beyond, 'b', sizeof(buf_beyond));
491 void* beyond = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) + new_size);
492 memset(beyond, 'b', sizeof(buf_beyond));
493 ASSERT_EQ(memcmp(buf_beyond, beyond, sizeof(buf_beyond)), 0);
494
495 // Observe that write extension fills the file with zeroes.
496 off_t offset = static_cast<off_t>(new_size - 1);
497 ASSERT_EQ(lseek(fd.get(), offset, SEEK_SET), offset);
498 char zero = 0;
499 ASSERT_EQ(write(fd.get(), &zero, 1), 1);
500 ASSERT_EQ(memcmp(buf, addr, new_size), 0);
501 }
502
503 ASSERT_EQ(munmap(addr, sizeof(buf)), 0);
504 ASSERT_EQ(unlink("::mmap_write_extend"), 0);
505
506 END_TEST;
507 }
508
// Describes which access mmap_crash() should perform on the mapping, and
// whether the mapping is unmapped before the access. Kept as a plain enum
// (not enum class) because callers use the bare enumerator names.
enum RW {
    Read,             // Read from the live mapping
    Write,            // Write to the live mapping
    ReadAfterUnmap,   // Unmap first, then read (use-after-unmap)
    WriteAfterUnmap,  // Unmap first, then write (use-after-unmap)
};
515
mmap_crash(int prot,int flags,RW rw)516 bool mmap_crash(int prot, int flags, RW rw) {
517 BEGIN_HELPER;
518 int fd = open("::inaccessible", O_RDWR);
519 ASSERT_GT(fd, 0);
520 void* addr = mmap(NULL, PAGE_SIZE, prot, flags, fd, 0);
521 ASSERT_NE(addr, MAP_FAILED);
522 ASSERT_EQ(close(fd), 0);
523
524 if (rw == RW::Read || rw == RW::ReadAfterUnmap) {
525 // Read
526 if (rw == RW::ReadAfterUnmap) {
527 ASSERT_EQ(munmap(addr, PAGE_SIZE), 0);
528 }
529
530 ASSERT_DEATH([](void* addr) {
531 __UNUSED volatile int i = *static_cast<int*>(addr);
532 }, addr, "");
533
534 if (rw == RW::Read) {
535 ASSERT_EQ(munmap(addr, PAGE_SIZE), 0);
536 }
537 } else {
538 // Write
539 if (rw == RW::WriteAfterUnmap) {
540 ASSERT_EQ(munmap(addr, PAGE_SIZE), 0);
541 }
542
543 ASSERT_DEATH([](void* addr) {
544 *static_cast<int*>(addr) = 5;
545 }, addr, "");
546
547 if (rw == RW::Write) {
548 ASSERT_EQ(munmap(addr, PAGE_SIZE), 0);
549 }
550 }
551 END_HELPER;
552 }
553
TestMmapDeath(void)554 bool TestMmapDeath(void) {
555 BEGIN_TEST;
556 if (!test_info->supports_mmap) {
557 return true;
558 }
559
560 int fd = open("::inaccessible", O_RDWR | O_CREAT);
561 ASSERT_GT(fd, 0);
562 char tmp[] = "this is a temporary buffer";
563 ASSERT_EQ(write(fd, tmp, sizeof(tmp)), sizeof(tmp));
564 ASSERT_EQ(close(fd), 0);
565
566 // Crashes while mapped
567 ASSERT_TRUE(mmap_crash(PROT_READ, MAP_PRIVATE, Write));
568 ASSERT_TRUE(mmap_crash(PROT_READ, MAP_SHARED, Write));
569 // Write-only is not possible
570 ASSERT_TRUE(mmap_crash(PROT_NONE, MAP_SHARED, Read));
571 ASSERT_TRUE(mmap_crash(PROT_NONE, MAP_SHARED, Write));
572
573 // Crashes after unmapped
574 ASSERT_TRUE(mmap_crash(PROT_READ, MAP_PRIVATE, ReadAfterUnmap));
575 ASSERT_TRUE(mmap_crash(PROT_READ, MAP_SHARED, ReadAfterUnmap));
576 ASSERT_TRUE(mmap_crash(PROT_WRITE | PROT_READ, MAP_PRIVATE, WriteAfterUnmap));
577 ASSERT_TRUE(mmap_crash(PROT_WRITE | PROT_READ, MAP_SHARED, WriteAfterUnmap));
578 ASSERT_TRUE(mmap_crash(PROT_NONE, MAP_SHARED, WriteAfterUnmap));
579
580 ASSERT_EQ(unlink("::inaccessible"), 0);
581 END_TEST;
582 }
583
584 } // namespace
585
// Register the mmap test suite to run against every filesystem under test.
// TestMmapDeath needs the crash handler enabled because it intentionally
// triggers page faults.
RUN_FOR_ALL_FILESYSTEMS(fs_mmap_tests,
                        RUN_TEST_MEDIUM(TestMmapEmpty)
                        RUN_TEST_MEDIUM(TestMmapReadable)
                        RUN_TEST_MEDIUM(TestMmapWritable)
                        RUN_TEST_MEDIUM(TestMmapUnlinked)
                        RUN_TEST_MEDIUM(TestMmapShared)
                        RUN_TEST_MEDIUM(TestMmapPrivate)
                        RUN_TEST_MEDIUM(TestMmapEvil)
                        RUN_TEST_MEDIUM(TestMmapTruncateAccess)
                        RUN_TEST_MEDIUM(TestMmapTruncateExtend)
                        RUN_TEST_MEDIUM(TestMmapTruncateWriteExtend)
                        RUN_TEST_ENABLE_CRASH_HANDLER(TestMmapDeath)
)
599