// Copyright 2017 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <fcntl.h>
#include <threads.h>
#include <unistd.h>

#include <fbl/unique_fd.h>
#include <fuchsia/sysinfo/c/fidl.h>
#include <lib/fdio/util.h>
#include <lib/zx/channel.h>
#include <lib/zx/guest.h>
#include <lib/zx/port.h>
#include <lib/zx/resource.h>
#include <lib/zx/vcpu.h>
#include <lib/zx/vmar.h>
#include <unittest/unittest.h>
#include <zircon/process.h>
#include <zircon/syscalls/hypervisor.h>
#include <zircon/syscalls/port.h>
#include <zircon/types.h>

#include "constants_priv.h"

static constexpr uint32_t kGuestMapFlags = ZX_VM_PERM_READ | ZX_VM_PERM_WRITE |
                                           ZX_VM_PERM_EXECUTE | ZX_VM_SPECIFIC;
static constexpr uint32_t kHostMapFlags = ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
// Inject an interrupt with vector 32, the first user-defined interrupt vector.
static constexpr uint32_t kInterruptVector = 32u;
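// Vector 2 is the non-maskable interrupt (NMI).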
static constexpr uint32_t kNmiVector = 2u;
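// Vector 16 is the x87 floating-point exception (#MF).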
static constexpr uint32_t kExceptionVector = 16u;
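// Key used to identify trap packets delivered to a port.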
static constexpr uint64_t kTrapKey = 0x1234;
static constexpr char kSysInfoPath[] = "/dev/misc/sysinfo";

extern const char vcpu_resume_start[];
extern const char vcpu_resume_end[];
extern const char vcpu_interrupt_start[];
extern const char vcpu_interrupt_end[];
extern const char vcpu_hlt_start[];
extern const char vcpu_hlt_end[];
extern const char vcpu_pause_start[];
extern const char vcpu_pause_end[];
extern const char vcpu_write_cr0_start[];
extern const char vcpu_write_cr0_end[];
extern const char vcpu_wfi_start[];
extern const char vcpu_wfi_end[];
extern const char vcpu_aarch32_wfi_start[];
extern const char vcpu_aarch32_wfi_end[];
extern const char vcpu_fp_start[];
extern const char vcpu_fp_end[];
extern const char vcpu_aarch32_fp_start[];
extern const char vcpu_aarch32_fp_end[];
extern const char vcpu_read_write_state_start[];
extern const char vcpu_read_write_state_end[];
extern const char vcpu_compat_mode_start[];
extern const char vcpu_compat_mode_end[];
extern const char vcpu_syscall_start[];
extern const char vcpu_syscall_end[];
extern const char vcpu_sysenter_start[];
extern const char vcpu_sysenter_end[];
extern const char vcpu_sysenter_compat_start[];
extern const char vcpu_sysenter_compat_end[];
extern const char vcpu_vmcall_start[];
extern const char vcpu_vmcall_end[];
extern const char guest_set_trap_start[];
extern const char guest_set_trap_end[];
extern const char guest_set_trap_with_io_start[];
extern const char guest_set_trap_with_io_end[];

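// x86 page-table entry flags, used below to build the guest's boot page tables.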
enum {
    X86_PTE_P = 0x01,  // P    Valid
    X86_PTE_RW = 0x02, // R/W  Read/Write
    X86_PTE_U = 0x04,  // U    Page is user accessible
    X86_PTE_PS = 0x80, // PS   Page size
};

typedef struct test {
    bool supported = false;
    bool interrupts_enabled = false;

    zx::vmo vmo;
    uintptr_t host_addr;
    zx::guest guest;
    zx::vmar vmar;
    zx::vcpu vcpu;
} test_t;

static bool teardown(test_t* test) {
    BEGIN_HELPER;
    ASSERT_EQ(zx::vmar::root_self()->unmap(test->host_addr, VMO_SIZE), ZX_OK);
    END_HELPER;
}

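// Opens the sysinfo device and invokes the given FIDL method to fetch a
// resource (here, the hypervisor resource).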
template <zx_status_t (*GetResource)(zx_handle_t, zx_status_t*, zx_handle_t*)>
static zx_status_t get_resource(zx::resource* resource) {
    fbl::unique_fd fd(open(kSysInfoPath, O_RDWR));
    if (!fd) {
        return ZX_ERR_IO;
    }

    zx::channel channel;
    zx_status_t status = fdio_get_service_handle(fd.release(), channel.reset_and_get_address());
    if (status != ZX_OK) {
        return status;
    }

    zx_status_t fidl_status =
        GetResource(channel.get(), &status, resource->reset_and_get_address());
    if (fidl_status != ZX_OK) {
        return fidl_status;
    }
    return status;
}

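// Creates a guest, maps a VMO into both the host and the guest address
// spaces, copies the guest test code into the mapping, and creates a VCPU at
// the entry point.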
static bool setup(test_t* test, const char* start, const char* end) {
    ASSERT_EQ(zx::vmo::create(VMO_SIZE, 0, &test->vmo), ZX_OK);
    ASSERT_EQ(zx::vmar::root_self()->map(0, test->vmo, 0, VMO_SIZE, kHostMapFlags,
                                         &test->host_addr),
              ZX_OK);

    zx::resource resource;
    zx_status_t status = get_resource<fuchsia_sysinfo_DeviceGetHypervisorResource>(&resource);
    ASSERT_EQ(status, ZX_OK);
    status = zx::guest::create(resource, 0, &test->guest, &test->vmar);
    test->supported = status != ZX_ERR_NOT_SUPPORTED;
    if (!test->supported) {
        fprintf(stderr, "Guest creation not supported\n");
        return teardown(test);
    }
    ASSERT_EQ(status, ZX_OK);

    zx_gpaddr_t guest_addr;
    ASSERT_EQ(test->vmar.map(0, test->vmo, 0, VMO_SIZE, kGuestMapFlags, &guest_addr),
              ZX_OK);
    ASSERT_EQ(test->guest.set_trap(ZX_GUEST_TRAP_MEM, EXIT_TEST_ADDR, PAGE_SIZE,
                                   zx::port(), 0),
              ZX_OK);

    // Set up the guest.
    uintptr_t entry = 0;
#if __x86_64__
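    // Identity-map the first 1GB of guest physical memory: a single PML4
    // entry at offset 0 points to a PDP at offset PAGE_SIZE, whose first
    // entry maps a 1GB page.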
    // PML4 entry pointing to (addr + 0x1000).
    uint64_t* pte_off = reinterpret_cast<uint64_t*>(test->host_addr);
    *pte_off = PAGE_SIZE | X86_PTE_P | X86_PTE_U | X86_PTE_RW;
    // PDP entry with 1GB page.
    pte_off = reinterpret_cast<uint64_t*>(test->host_addr + PAGE_SIZE);
    *pte_off = X86_PTE_PS | X86_PTE_P | X86_PTE_U | X86_PTE_RW;
    entry = GUEST_ENTRY;
#endif // __x86_64__
    memcpy((void*)(test->host_addr + entry), start, end - start);

    status = zx::vcpu::create(test->guest, 0, entry, &test->vcpu);
    test->supported = status != ZX_ERR_NOT_SUPPORTED;
    if (!test->supported) {
        fprintf(stderr, "VCPU creation not supported\n");
        return teardown(test);
    }
    ASSERT_EQ(status, ZX_OK);

    return true;
}

static bool setup_and_interrupt(test_t* test, const char* start, const char* end) {
    ASSERT_TRUE(setup(test, start, end));
    if (!test->supported) {
        // The hypervisor isn't supported, so don't run the test.
        return true;
    }
    test->interrupts_enabled = true;

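    // Inject the interrupt from a separate thread so it can arrive while the
    // VCPU is running or blocked (e.g. in HLT).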
    thrd_t thread;
    int ret = thrd_create(&thread,
                          [](void* ctx) -> int {
                              test_t* test = static_cast<test_t*>(ctx);
                              return test->vcpu.interrupt(kInterruptVector) == ZX_OK
                                         ? thrd_success : thrd_error;
                          },
                          test);
    ASSERT_EQ(ret, thrd_success);

    return true;
}

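// Returns true if the guest appears to have taken an unexpected exception.
// On x86, the guest exits via a 12-byte `mov imm32, (EXIT_TEST_ADDR)`, and
// the guest's exception handler writes a non-zero immediate before exiting.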
static inline bool exception_thrown(const zx_packet_guest_mem_t& guest_mem,
                                    const zx::vcpu& vcpu) {
#if __x86_64__
    if (guest_mem.inst_len != 12) {
        // Not the expected size for `mov imm, (EXIT_TEST_ADDR)`.
        return true;
    }
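    // A zero immediate indicates a clean exit.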
    if (guest_mem.inst_buf[8] == 0 &&
        guest_mem.inst_buf[9] == 0 &&
        guest_mem.inst_buf[10] == 0 &&
        guest_mem.inst_buf[11] == 0) {
        return false;
    }
    zx_vcpu_state_t vcpu_state;
    if (vcpu.read_state(ZX_VCPU_STATE, &vcpu_state, sizeof(vcpu_state)) != ZX_OK) {
        return true;
    }
    // Print out debug values from the exception handler.
    fprintf(stderr, "Unexpected exception in guest\n");
    fprintf(stderr, "vector = %lu\n", vcpu_state.rax);
    fprintf(stderr, "error code = %lu\n", vcpu_state.rbx);
    fprintf(stderr, "rip = 0x%lx\n", vcpu_state.rcx);
    return true;
#else
    return false;
#endif
}

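// Resumes the VCPU and verifies that the guest exited cleanly via a write to
// EXIT_TEST_ADDR.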
static inline bool resume_and_clean_exit(test_t* test) {
    BEGIN_HELPER;

    zx_port_packet_t packet = {};
    ASSERT_EQ(test->vcpu.resume(&packet), ZX_OK);
    EXPECT_EQ(packet.type, ZX_PKT_TYPE_GUEST_MEM);
    EXPECT_EQ(packet.guest_mem.addr, EXIT_TEST_ADDR);
#if __x86_64__
    EXPECT_EQ(packet.guest_mem.default_operand_size, 4u);
#endif
    if (test->interrupts_enabled) {
        ASSERT_FALSE(exception_thrown(packet.guest_mem, test->vcpu));
    }

    END_HELPER;
}

static bool vcpu_resume() {
    BEGIN_TEST;

    test_t test;
    ASSERT_TRUE(setup(&test, vcpu_resume_start, vcpu_resume_end));
    if (!test.supported) {
        // The hypervisor isn't supported, so don't run the test.
        return true;
    }

    ASSERT_TRUE(resume_and_clean_exit(&test));
    ASSERT_TRUE(teardown(&test));

    END_TEST;
}

static bool vcpu_interrupt() {
    BEGIN_TEST;

    test_t test;
    ASSERT_TRUE(setup(&test, vcpu_interrupt_start, vcpu_interrupt_end));
    if (!test.supported) {
        // The hypervisor isn't supported, so don't run the test.
        return true;
    }
    test.interrupts_enabled = true;

#if __x86_64__
    // Resume once and wait for the guest to set up an IDT.
    ASSERT_TRUE(resume_and_clean_exit(&test));
#endif

    ASSERT_EQ(test.vcpu.interrupt(kInterruptVector), ZX_OK);
    ASSERT_TRUE(resume_and_clean_exit(&test));

#if __x86_64__
    zx_vcpu_state_t vcpu_state;
    ASSERT_EQ(test.vcpu.read_state(ZX_VCPU_STATE, &vcpu_state, sizeof(vcpu_state)), ZX_OK);
    EXPECT_EQ(vcpu_state.rax, kInterruptVector);
#endif

    ASSERT_TRUE(teardown(&test));

    END_TEST;
}

static bool vcpu_interrupt_priority() {
    BEGIN_TEST;

    test_t test;
    ASSERT_TRUE(setup(&test, vcpu_interrupt_start, vcpu_interrupt_end));
    if (!test.supported) {
        // The hypervisor isn't supported, so don't run the test.
        return true;
    }
    test.interrupts_enabled = true;

    // Resume once and wait for the guest to set up an IDT.
    ASSERT_TRUE(resume_and_clean_exit(&test));

    // Check that interrupts have higher priority than exceptions.
    ASSERT_EQ(test.vcpu.interrupt(kExceptionVector), ZX_OK);
    ASSERT_EQ(test.vcpu.interrupt(kInterruptVector), ZX_OK);
    ASSERT_TRUE(resume_and_clean_exit(&test));
#if __x86_64__
    zx_vcpu_state_t vcpu_state;
    ASSERT_EQ(test.vcpu.read_state(ZX_VCPU_STATE, &vcpu_state, sizeof(vcpu_state)), ZX_OK);
    EXPECT_EQ(vcpu_state.rax, kInterruptVector);
#endif

    // TODO(MAC-225): Check that the exception is cleared.

    ASSERT_TRUE(teardown(&test));

    END_TEST;
}

static bool vcpu_nmi() {
    BEGIN_TEST;

    test_t test;
    ASSERT_TRUE(setup(&test, vcpu_interrupt_start, vcpu_interrupt_end));
    if (!test.supported) {
        // The hypervisor isn't supported, so don't run the test.
        return true;
    }
    test.interrupts_enabled = true;

    // Resume once and wait for the guest to set up an IDT.
    ASSERT_TRUE(resume_and_clean_exit(&test));

    // Check that NMIs are handled.
    ASSERT_EQ(test.vcpu.interrupt(kNmiVector), ZX_OK);
    ASSERT_TRUE(resume_and_clean_exit(&test));
#if __x86_64__
    zx_vcpu_state_t vcpu_state;
    ASSERT_EQ(test.vcpu.read_state(ZX_VCPU_STATE, &vcpu_state, sizeof(vcpu_state)), ZX_OK);
    EXPECT_EQ(vcpu_state.rax, kNmiVector);
#endif
    ASSERT_TRUE(teardown(&test));

    END_TEST;
}

static bool vcpu_nmi_priority() {
    BEGIN_TEST;

    test_t test;
    ASSERT_TRUE(setup(&test, vcpu_interrupt_start, vcpu_interrupt_end));
    if (!test.supported) {
        // The hypervisor isn't supported, so don't run the test.
        return true;
    }
    test.interrupts_enabled = true;

    // Resume once and wait for the guest to set up an IDT.
    ASSERT_TRUE(resume_and_clean_exit(&test));

    // Check that NMIs have higher priority than interrupts.
    ASSERT_EQ(test.vcpu.interrupt(kInterruptVector), ZX_OK);
    ASSERT_EQ(test.vcpu.interrupt(kNmiVector), ZX_OK);
    ASSERT_TRUE(resume_and_clean_exit(&test));
#if __x86_64__
    zx_vcpu_state_t vcpu_state;
    ASSERT_EQ(test.vcpu.read_state(ZX_VCPU_STATE, &vcpu_state, sizeof(vcpu_state)), ZX_OK);
    EXPECT_EQ(vcpu_state.rax, kNmiVector);
#endif

    // TODO(MAC-225): Check that the interrupt is queued.

    ASSERT_TRUE(teardown(&test));

    END_TEST;
}

static bool vcpu_exception() {
    BEGIN_TEST;

    test_t test;
    ASSERT_TRUE(setup(&test, vcpu_interrupt_start, vcpu_interrupt_end));
    if (!test.supported) {
        // The hypervisor isn't supported, so don't run the test.
        return true;
    }
    test.interrupts_enabled = true;

    // Resume once and wait for the guest to set up an IDT.
    ASSERT_TRUE(resume_and_clean_exit(&test));

    // Check that exceptions are handled.
    ASSERT_EQ(test.vcpu.interrupt(kExceptionVector), ZX_OK);
    ASSERT_TRUE(resume_and_clean_exit(&test));
#if __x86_64__
    zx_vcpu_state_t vcpu_state;
    ASSERT_EQ(test.vcpu.read_state(ZX_VCPU_STATE, &vcpu_state, sizeof(vcpu_state)), ZX_OK);
    EXPECT_EQ(vcpu_state.rax, kExceptionVector);
#endif
    ASSERT_TRUE(teardown(&test));

    END_TEST;
}

static bool vcpu_hlt() {
    BEGIN_TEST;

    test_t test;
    ASSERT_TRUE(setup_and_interrupt(&test, vcpu_hlt_start, vcpu_hlt_end));
    if (!test.supported) {
        // The hypervisor isn't supported, so don't run the test.
        return true;
    }

    ASSERT_TRUE(resume_and_clean_exit(&test));
    ASSERT_TRUE(teardown(&test));

    END_TEST;
}

static bool vcpu_pause() {
    BEGIN_TEST;

    test_t test;
    ASSERT_TRUE(setup(&test, vcpu_pause_start, vcpu_pause_end));
    if (!test.supported) {
        // The hypervisor isn't supported, so don't run the test.
        return true;
    }

    ASSERT_TRUE(resume_and_clean_exit(&test));
    ASSERT_TRUE(teardown(&test));

    END_TEST;
}

static bool vcpu_write_cr0() {
    BEGIN_TEST;

    test_t test;
    ASSERT_TRUE(setup(&test, vcpu_write_cr0_start, vcpu_write_cr0_end));
    if (!test.supported) {
        // The hypervisor isn't supported, so don't run the test.
        return true;
    }

    ASSERT_TRUE(resume_and_clean_exit(&test));

#if __x86_64__
    zx_vcpu_state_t vcpu_state;
    ASSERT_EQ(test.vcpu.read_state(ZX_VCPU_STATE, &vcpu_state, sizeof(vcpu_state)), ZX_OK);
    // Check that cr0 has the NE bit set when read.
    EXPECT_TRUE(vcpu_state.rax & X86_CR0_NE);
#endif

    ASSERT_TRUE(teardown(&test));

    END_TEST;
}

static bool vcpu_wfi() {
    BEGIN_TEST;

    test_t test;
    ASSERT_TRUE(setup(&test, vcpu_wfi_start, vcpu_wfi_end));
    if (!test.supported) {
        // The hypervisor isn't supported, so don't run the test.
        return true;
    }

    ASSERT_TRUE(resume_and_clean_exit(&test));
    ASSERT_TRUE(teardown(&test));

    END_TEST;
}

static bool vcpu_wfi_aarch32() {
    BEGIN_TEST;

    test_t test;
    ASSERT_TRUE(setup(&test, vcpu_aarch32_wfi_start, vcpu_aarch32_wfi_end));
    if (!test.supported) {
        // The hypervisor isn't supported, so don't run the test.
        return true;
    }

    zx_port_packet_t packet = {};
    ASSERT_EQ(test.vcpu.resume(&packet), ZX_OK);
    EXPECT_EQ(packet.type, ZX_PKT_TYPE_GUEST_MEM);
    EXPECT_EQ(packet.guest_mem.addr, EXIT_TEST_ADDR);
#if __aarch64__
    EXPECT_EQ(packet.guest_mem.read, false);
    EXPECT_EQ(packet.guest_mem.data, 0);
#endif // __aarch64__

    ASSERT_TRUE(teardown(&test));

    END_TEST;
}

static bool vcpu_fp() {
    BEGIN_TEST;

    test_t test;
    ASSERT_TRUE(setup(&test, vcpu_fp_start, vcpu_fp_end));
    if (!test.supported) {
        // The hypervisor isn't supported, so don't run the test.
        return true;
    }

    ASSERT_TRUE(resume_and_clean_exit(&test));
    ASSERT_TRUE(teardown(&test));

    END_TEST;
}

static bool vcpu_fp_aarch32() {
    BEGIN_TEST;

    test_t test;
    ASSERT_TRUE(setup(&test, vcpu_aarch32_fp_start, vcpu_aarch32_fp_end));
    if (!test.supported) {
        // The hypervisor isn't supported, so don't run the test.
        return true;
    }

    zx_port_packet_t packet = {};
    ASSERT_EQ(test.vcpu.resume(&packet), ZX_OK);
    EXPECT_EQ(packet.type, ZX_PKT_TYPE_GUEST_MEM);
    EXPECT_EQ(packet.guest_mem.addr, EXIT_TEST_ADDR);
#if __aarch64__
    EXPECT_EQ(packet.guest_mem.read, false);
    EXPECT_EQ(packet.guest_mem.data, 0);
#endif // __aarch64__

    ASSERT_TRUE(teardown(&test));

    END_TEST;
}

static bool vcpu_read_write_state() {
    BEGIN_TEST;

    test_t test;
    ASSERT_TRUE(setup(&test, vcpu_read_write_state_start, vcpu_read_write_state_end));
    if (!test.supported) {
        // The hypervisor isn't supported, so don't run the test.
        return true;
    }

    zx_vcpu_state_t vcpu_state = {
#if __aarch64__
        // clang-format off
        .x = {
             0u,  1u,  2u,  3u,  4u,  5u,  6u,  7u,  8u,  9u,
            10u, 11u, 12u, 13u, 14u, 15u, 16u, 17u, 18u, 19u,
            20u, 21u, 22u, 23u, 24u, 25u, 26u, 27u, 28u, 29u,
            30u,
        },
        // clang-format on
        .sp = 64u,
        .cpsr = 0,
#elif __x86_64__
        .rax = 1u,
        .rcx = 2u,
        .rdx = 3u,
        .rbx = 4u,
        .rsp = 5u,
        .rbp = 6u,
        .rsi = 7u,
        .rdi = 8u,
        .r8 = 9u,
        .r9 = 10u,
        .r10 = 11u,
        .r11 = 12u,
        .r12 = 13u,
        .r13 = 14u,
        .r14 = 15u,
        .r15 = 16u,
        .rflags = 0,
#endif
    };

    ASSERT_EQ(test.vcpu.write_state(ZX_VCPU_STATE, &vcpu_state, sizeof(vcpu_state)),
              ZX_OK);

    ASSERT_TRUE(resume_and_clean_exit(&test));

    ASSERT_EQ(test.vcpu.read_state(ZX_VCPU_STATE, &vcpu_state, sizeof(vcpu_state)), ZX_OK);

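    // The guest is expected to double each general-purpose register and the
    // stack pointer before exiting, and to set the flags to known values (on
    // arm64, x0 ends up holding the exit address instead).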
#if __aarch64__
    EXPECT_EQ(vcpu_state.x[0], EXIT_TEST_ADDR);
    EXPECT_EQ(vcpu_state.x[1], 2u);
    EXPECT_EQ(vcpu_state.x[2], 4u);
    EXPECT_EQ(vcpu_state.x[3], 6u);
    EXPECT_EQ(vcpu_state.x[4], 8u);
    EXPECT_EQ(vcpu_state.x[5], 10u);
    EXPECT_EQ(vcpu_state.x[6], 12u);
    EXPECT_EQ(vcpu_state.x[7], 14u);
    EXPECT_EQ(vcpu_state.x[8], 16u);
    EXPECT_EQ(vcpu_state.x[9], 18u);
    EXPECT_EQ(vcpu_state.x[10], 20u);
    EXPECT_EQ(vcpu_state.x[11], 22u);
    EXPECT_EQ(vcpu_state.x[12], 24u);
    EXPECT_EQ(vcpu_state.x[13], 26u);
    EXPECT_EQ(vcpu_state.x[14], 28u);
    EXPECT_EQ(vcpu_state.x[15], 30u);
    EXPECT_EQ(vcpu_state.x[16], 32u);
    EXPECT_EQ(vcpu_state.x[17], 34u);
    EXPECT_EQ(vcpu_state.x[18], 36u);
    EXPECT_EQ(vcpu_state.x[19], 38u);
    EXPECT_EQ(vcpu_state.x[20], 40u);
    EXPECT_EQ(vcpu_state.x[21], 42u);
    EXPECT_EQ(vcpu_state.x[22], 44u);
    EXPECT_EQ(vcpu_state.x[23], 46u);
    EXPECT_EQ(vcpu_state.x[24], 48u);
    EXPECT_EQ(vcpu_state.x[25], 50u);
    EXPECT_EQ(vcpu_state.x[26], 52u);
    EXPECT_EQ(vcpu_state.x[27], 54u);
    EXPECT_EQ(vcpu_state.x[28], 56u);
    EXPECT_EQ(vcpu_state.x[29], 58u);
    EXPECT_EQ(vcpu_state.x[30], 60u);
    EXPECT_EQ(vcpu_state.sp, 128u);
    EXPECT_EQ(vcpu_state.cpsr, 0b0110 << 28);
#elif __x86_64__
    EXPECT_EQ(vcpu_state.rax, 2u);
    EXPECT_EQ(vcpu_state.rcx, 4u);
    EXPECT_EQ(vcpu_state.rdx, 6u);
    EXPECT_EQ(vcpu_state.rbx, 8u);
    EXPECT_EQ(vcpu_state.rsp, 10u);
    EXPECT_EQ(vcpu_state.rbp, 12u);
    EXPECT_EQ(vcpu_state.rsi, 14u);
    EXPECT_EQ(vcpu_state.rdi, 16u);
    EXPECT_EQ(vcpu_state.r8, 18u);
    EXPECT_EQ(vcpu_state.r9, 20u);
    EXPECT_EQ(vcpu_state.r10, 22u);
    EXPECT_EQ(vcpu_state.r11, 24u);
    EXPECT_EQ(vcpu_state.r12, 26u);
    EXPECT_EQ(vcpu_state.r13, 28u);
    EXPECT_EQ(vcpu_state.r14, 30u);
    EXPECT_EQ(vcpu_state.r15, 32u);
    EXPECT_EQ(vcpu_state.rflags, (1u << 0) | (1u << 18));
#endif // __x86_64__

    ASSERT_TRUE(teardown(&test));

    END_TEST;
}

static bool vcpu_compat_mode() {
    BEGIN_TEST;

    test_t test;
    ASSERT_TRUE(setup(&test, vcpu_compat_mode_start, vcpu_compat_mode_end));
    if (!test.supported) {
        // The hypervisor isn't supported, so don't run the test.
        return true;
    }

    ASSERT_TRUE(resume_and_clean_exit(&test));

    zx_vcpu_state_t vcpu_state;
    ASSERT_EQ(test.vcpu.read_state(ZX_VCPU_STATE, &vcpu_state, sizeof(vcpu_state)), ZX_OK);
#if __x86_64__
    EXPECT_EQ(vcpu_state.rbx, 1u);
    EXPECT_EQ(vcpu_state.rcx, 2u);
#endif

    ASSERT_TRUE(teardown(&test));

    END_TEST;
}

static bool vcpu_syscall() {
    BEGIN_TEST;

    test_t test;
    ASSERT_TRUE(setup(&test, vcpu_syscall_start, vcpu_syscall_end));
    if (!test.supported) {
        // The hypervisor isn't supported, so don't run the test.
        return true;
    }

    ASSERT_TRUE(resume_and_clean_exit(&test));
    ASSERT_TRUE(teardown(&test));

    END_TEST;
}

static bool vcpu_sysenter() {
    BEGIN_TEST;

    test_t test;
    ASSERT_TRUE(setup(&test, vcpu_sysenter_start, vcpu_sysenter_end));
    if (!test.supported) {
        // The hypervisor isn't supported, so don't run the test.
        return true;
    }

    ASSERT_TRUE(resume_and_clean_exit(&test));
    ASSERT_TRUE(teardown(&test));

    END_TEST;
}

static bool vcpu_sysenter_compat() {
    BEGIN_TEST;

    test_t test;
    ASSERT_TRUE(setup(&test, vcpu_sysenter_compat_start, vcpu_sysenter_compat_end));
    if (!test.supported) {
        // The hypervisor isn't supported, so don't run the test.
        return true;
    }

    ASSERT_TRUE(resume_and_clean_exit(&test));
    ASSERT_TRUE(teardown(&test));

    END_TEST;
}

static bool vcpu_vmcall() {
    BEGIN_TEST;

    test_t test;
    ASSERT_TRUE(setup(&test, vcpu_vmcall_start, vcpu_vmcall_end));
    if (!test.supported) {
        // The hypervisor isn't supported, so don't run the test.
        return true;
    }

    ASSERT_TRUE(resume_and_clean_exit(&test));

    zx_vcpu_state_t vcpu_state;
    ASSERT_EQ(test.vcpu.read_state(ZX_VCPU_STATE, &vcpu_state, sizeof(vcpu_state)), ZX_OK);

#if __x86_64__
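    // An unhandled vmcall is expected to return -1000, presumably mirroring
    // KVM's -KVM_ENOSYS convention.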
    const uint64_t kVmCallNoSys = -1000;
    EXPECT_EQ(vcpu_state.rax, kVmCallNoSys);
#endif

    ASSERT_TRUE(teardown(&test));

    END_TEST;
}

static bool guest_set_trap_with_mem() {
    BEGIN_TEST;

    test_t test;
    ASSERT_TRUE(setup(&test, guest_set_trap_start, guest_set_trap_end));
    if (!test.supported) {
        // The hypervisor isn't supported, so don't run the test.
        return true;
    }

    // Trap on access of TRAP_ADDR.
    ASSERT_EQ(test.guest.set_trap(ZX_GUEST_TRAP_MEM, TRAP_ADDR, PAGE_SIZE, zx::port(), kTrapKey),
              ZX_OK);

    zx_port_packet_t packet = {};
    ASSERT_EQ(test.vcpu.resume(&packet), ZX_OK);
    EXPECT_EQ(packet.key, kTrapKey);
    EXPECT_EQ(packet.type, ZX_PKT_TYPE_GUEST_MEM);

    ASSERT_TRUE(resume_and_clean_exit(&test));
    ASSERT_TRUE(teardown(&test));

    END_TEST;
}

static bool guest_set_trap_with_bell() {
    BEGIN_TEST;

    test_t test;
    ASSERT_TRUE(setup(&test, guest_set_trap_start, guest_set_trap_end));
    if (!test.supported) {
        // The hypervisor isn't supported, so don't run the test.
        return true;
    }

    zx::port port;
    ASSERT_EQ(zx::port::create(0, &port), ZX_OK);

    // Trap on access of TRAP_ADDR.
    ASSERT_EQ(test.guest.set_trap(ZX_GUEST_TRAP_BELL, TRAP_ADDR, PAGE_SIZE, port, kTrapKey),
              ZX_OK);

    zx_port_packet_t packet = {};
    ASSERT_EQ(test.vcpu.resume(&packet), ZX_OK);
    EXPECT_EQ(packet.type, ZX_PKT_TYPE_GUEST_MEM);
    EXPECT_EQ(packet.guest_mem.addr, EXIT_TEST_ADDR);

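    // Bell traps are delivered asynchronously: the VCPU ran through to the
    // exit address above, while the bell packet was queued on the port.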
    ASSERT_EQ(port.wait(zx::time::infinite(), &packet), ZX_OK);
    EXPECT_EQ(packet.key, kTrapKey);
    EXPECT_EQ(packet.type, ZX_PKT_TYPE_GUEST_BELL);
    EXPECT_EQ(packet.guest_bell.addr, TRAP_ADDR);

    ASSERT_TRUE(teardown(&test));

    END_TEST;
}

static bool guest_set_trap_with_io() {
    BEGIN_TEST;

    test_t test;
    ASSERT_TRUE(setup(&test, guest_set_trap_with_io_start, guest_set_trap_with_io_end));
    if (!test.supported) {
        // The hypervisor isn't supported, so don't run the test.
        return true;
    }

    // Trap on writes to TRAP_PORT.
    ASSERT_EQ(test.guest.set_trap(ZX_GUEST_TRAP_IO, TRAP_PORT, 1, zx::port(), kTrapKey),
              ZX_OK);

    zx_port_packet_t packet = {};
    ASSERT_EQ(test.vcpu.resume(&packet), ZX_OK);
    EXPECT_EQ(packet.key, kTrapKey);
    EXPECT_EQ(packet.type, ZX_PKT_TYPE_GUEST_IO);
    EXPECT_EQ(packet.guest_io.port, TRAP_PORT);

    ASSERT_TRUE(resume_and_clean_exit(&test));
    ASSERT_TRUE(teardown(&test));

    END_TEST;
}

BEGIN_TEST_CASE(guest)
RUN_TEST(vcpu_resume)
RUN_TEST(vcpu_read_write_state)
RUN_TEST(vcpu_interrupt)
RUN_TEST(guest_set_trap_with_mem)
RUN_TEST(guest_set_trap_with_bell)
#if __aarch64__
RUN_TEST(vcpu_wfi)
RUN_TEST(vcpu_wfi_aarch32)
RUN_TEST(vcpu_fp)
RUN_TEST(vcpu_fp_aarch32)
#elif __x86_64__
RUN_TEST(guest_set_trap_with_io)
RUN_TEST(vcpu_interrupt_priority)
RUN_TEST(vcpu_nmi)
RUN_TEST(vcpu_nmi_priority)
RUN_TEST(vcpu_exception)
RUN_TEST(vcpu_hlt)
RUN_TEST(vcpu_pause)
RUN_TEST(vcpu_write_cr0)
RUN_TEST(vcpu_compat_mode)
RUN_TEST(vcpu_syscall)
RUN_TEST(vcpu_sysenter)
RUN_TEST(vcpu_sysenter_compat)
RUN_TEST(vcpu_vmcall)
#endif
END_TEST_CASE(guest)