| /tools/testing/selftests/kvm/lib/x86/ |
| A D | vmx.c |
     80  vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);     in vcpu_alloc_vmx()
     81  vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);     in vcpu_alloc_vmx()
     85  vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs);       in vcpu_alloc_vmx()
     86  vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs);       in vcpu_alloc_vmx()
     90  vmx->msr_hva = addr_gva2hva(vm, (uintptr_t)vmx->msr);         in vcpu_alloc_vmx()
     91  vmx->msr_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->msr);         in vcpu_alloc_vmx()
    101  vmx->vmread_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmread);   in vcpu_alloc_vmx()
    102  vmx->vmread_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmread);   in vcpu_alloc_vmx()
    111  return vmx;                                                   in vcpu_alloc_vmx()
    543  vmx->eptp_hva = addr_gva2hva(vm, (uintptr_t)vmx->eptp);       in prepare_eptp()
    [all …]
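The vcpu_alloc_vmx() references above all repeat one pattern: allocate a page in the guest, then record its host-virtual and guest-physical aliases next to the guest-virtual pointer so both host userspace and the guest can reach it. A minimal sketch of that pattern, assuming the selftest library's vm_vaddr_alloc_page(), addr_gva2hva() and addr_gva2gpa() helpers; the alloc_guest_page() wrapper itself is hypothetical:

#include "kvm_util.h"

/* Allocate one guest page and report its HVA and GPA aliases. */
static void *alloc_guest_page(struct kvm_vm *vm, void **hva, uint64_t *gpa)
{
	vm_vaddr_t gva = vm_vaddr_alloc_page(vm);

	*hva = addr_gva2hva(vm, gva);
	*gpa = addr_gva2gpa(vm, gva);
	return (void *)gva;
}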
|
| A D | memstress.c |
     38  GUEST_ASSERT(vmx->vmcs_gpa);                   in memstress_l1_guest_code()
     39  GUEST_ASSERT(prepare_for_vmx_operation(vmx));  in memstress_l1_guest_code()
     40  GUEST_ASSERT(load_vmcs(vmx));                  in memstress_l1_guest_code()
     66  prepare_eptp(vmx, vm, 0);                      in memstress_setup_ept()
     82  struct vmx_pages *vmx, *vmx0 = NULL;           in memstress_setup_nested()  local
     91  vmx = vcpu_alloc_vmx(vm, &vmx_gva);            in memstress_setup_nested()
     94  memstress_setup_ept(vmx, vm);                  in memstress_setup_nested()
     95  vmx0 = vmx;                                    in memstress_setup_nested()
     98  vmx->eptp = vmx0->eptp;                        in memstress_setup_nested()
     99  vmx->eptp_hva = vmx0->eptp_hva;                in memstress_setup_nested()
    [all …]
|
| /tools/testing/selftests/kvm/x86/ |
| A D | vmx_set_nested_state_test.c |
     82  state->hdr.vmx.vmxon_pa = 0x1000;   in set_default_vmx_state()
     84  state->hdr.vmx.smm.flags = 0;       in set_default_vmx_state()
    113  state->hdr.vmx.vmxon_pa = -1ull;    in test_vmx_nested_state()
    149  state->hdr.vmx.smm.flags = 1;       in test_vmx_nested_state()
    154  state->hdr.vmx.flags = ~0;          in test_vmx_nested_state()
    165  state->hdr.vmx.vmxon_pa = 1;        in test_vmx_nested_state()
    206  state->hdr.vmx.vmcs12_pa = -1;      in test_vmx_nested_state()
    221  state->hdr.vmx.vmcs12_pa = -1;      in test_vmx_nested_state()
    222  state->hdr.vmx.flags = ~0;          in test_vmx_nested_state()
    227  state->hdr.vmx.vmxon_pa = 0;        in test_vmx_nested_state()
    [all …]
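The set_default_vmx_state() lines above fill in the uapi struct kvm_nested_state header (declared in the tools/arch/x86 kvm.h entry further down) before each negative test perturbs a single field. A hedged sketch of that baseline, with hypothetical placeholder values apart from the 0x1000 vmxon_pa shown above; the size and flags bookkeeping in the real test is more involved:

#include <string.h>
#include <linux/kvm.h>

/* Baseline nested-VMX state; individual checks then overwrite one field. */
static void set_baseline_vmx_state(struct kvm_nested_state *state, int size)
{
	memset(state, 0, size);
	state->format = KVM_STATE_NESTED_FORMAT_VMX;
	state->size = size;
	state->flags = KVM_STATE_NESTED_RUN_PENDING |
		       KVM_STATE_NESTED_GUEST_MODE;
	state->hdr.vmx.vmxon_pa = 0x1000;
	state->hdr.vmx.vmcs12_pa = 0x2000;	/* placeholder */
	state->hdr.vmx.smm.flags = 0;
}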
|
| A D | vmx_dirty_log_test.c |
     56  void l1_guest_code(struct vmx_pages *vmx)                     in l1_guest_code()  argument
     62  GUEST_ASSERT(vmx->vmcs_gpa);                                  in l1_guest_code()
     63  GUEST_ASSERT(prepare_for_vmx_operation(vmx));                 in l1_guest_code()
     64  GUEST_ASSERT(load_vmcs(vmx));                                 in l1_guest_code()
     66  if (vmx->eptp_gpa)                                            in l1_guest_code()
     83  struct vmx_pages *vmx;                                        in test_vmx_dirty_log()  local
     96  vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);                     in test_vmx_dirty_log()
    123  prepare_eptp(vmx, vm, 0);                                     in test_vmx_dirty_log()
    124  nested_map_memslot(vmx, vm, 0);                               in test_vmx_dirty_log()
    125  nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096);  in test_vmx_dirty_log()
    [all …]
|
| A D | triple_fault_event_test.c |
     27  void l1_guest_code_vmx(struct vmx_pages *vmx)  in l1_guest_code_vmx()  argument
     30  GUEST_ASSERT(vmx->vmcs_gpa);                   in l1_guest_code_vmx()
     31  GUEST_ASSERT(prepare_for_vmx_operation(vmx));  in l1_guest_code_vmx()
     32  GUEST_ASSERT(load_vmcs(vmx));                  in l1_guest_code_vmx()
     34  prepare_vmcs(vmx, l2_guest_code,               in l1_guest_code_vmx()
|
| A D | kvm_buslock_test.c |
     46  static void l1_vmx_code(struct vmx_pages *vmx)                  in l1_vmx_code()  argument
     50  GUEST_ASSERT_EQ(prepare_for_vmx_operation(vmx), true);          in l1_vmx_code()
     51  GUEST_ASSERT_EQ(load_vmcs(vmx), true);                          in l1_vmx_code()
     53  prepare_vmcs(vmx, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);  in l1_vmx_code()
|
| A D | vmx_apic_access_test.c |
     77  struct vmx_pages *vmx;                      in main()  local
     89  vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);   in main()
     90  prepare_virtualize_apic_accesses(vmx, vm);  in main()
|
| A D | aperfmperf_test.c |
     74  static void l1_vmx_code(struct vmx_pages *vmx)                  in l1_vmx_code()  argument
     78  GUEST_ASSERT_EQ(prepare_for_vmx_operation(vmx), true);          in l1_vmx_code()
     79  GUEST_ASSERT_EQ(load_vmcs(vmx), true);                          in l1_vmx_code()
     81  prepare_vmcs(vmx, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);  in l1_vmx_code()
|
| A D | nested_exceptions_test.c |
    129  static void l1_vmx_code(struct vmx_pages *vmx)                  in l1_vmx_code()  argument
    133  GUEST_ASSERT_EQ(prepare_for_vmx_operation(vmx), true);          in l1_vmx_code()
    135  GUEST_ASSERT_EQ(load_vmcs(vmx), true);                          in l1_vmx_code()
    137  prepare_vmcs(vmx, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);  in l1_vmx_code()
|
| /tools/testing/selftests/powerpc/ptrace/ |
| A D | ptrace-vsx.h |
     37  if ((vmx[i][0] != load[64 + 2 * i]) ||  in validate_vmx()
     38  (vmx[i][1] != load[65 + 2 * i])) {      in validate_vmx()
     40  i, vmx[i][0], 64 + 2 * i,               in validate_vmx()
     43  i, vmx[i][1], 65 + 2 * i,               in validate_vmx()
     51  if ((vmx[i][0] != load[65 + 2 * i]) ||  in validate_vmx()
     52  (vmx[i][1] != load[64 + 2 * i])) {      in validate_vmx()
     54  i, vmx[i][0], 65 + 2 * i,               in validate_vmx()
     57  i, vmx[i][1], 64 + 2 * i,               in validate_vmx()
    109  unsigned long vmx[][2])                 in load_vsx_vmx()
    117  vmx[i][0] = load[64 + 2 * i];           in load_vsx_vmx()
    [all …]
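The indexing in those checks reads more easily from the load_vsx_vmx() side: each 128-bit VMX register occupies two unsigned longs, with its halves at offsets 64 + 2*i and 65 + 2*i of the flat load[] buffer (validate_vmx() carries two branches because the halves swap with host endianness). A sketch of the packing loop, assuming VMX_MAX is 32, the number of Altivec vector registers:

#define VMX_MAX 32

/* Copy the VMX halves out of the flat load[] buffer into vmx[reg][half]. */
static void pack_vmx(unsigned long vmx[][2], const unsigned long load[])
{
	int i;

	for (i = 0; i < VMX_MAX; i++) {
		vmx[i][0] = load[64 + 2 * i];
		vmx[i][1] = load[65 + 2 * i];
	}
}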
|
| A D | ptrace-tm-vsx.c |
     87  unsigned long vmx[VMX_MAX + 2][2];         in trace_tm_vsx()  local
     92  FAIL_IF(show_vmx(child, vmx));             in trace_tm_vsx()
     93  FAIL_IF(validate_vmx(vmx, fp_load));       in trace_tm_vsx()
     96  FAIL_IF(show_vmx_ckpt(child, vmx));        in trace_tm_vsx()
     97  FAIL_IF(validate_vmx(vmx, fp_load_ckpt));  in trace_tm_vsx()
     99  memset(vmx, 0, sizeof(vmx));               in trace_tm_vsx()
    101  load_vsx_vmx(fp_load_ckpt_new, vsx, vmx);  in trace_tm_vsx()
    104  FAIL_IF(write_vmx_ckpt(child, vmx));       in trace_tm_vsx()
|
| A D | ptrace-tm-spd-vsx.c |
     99  unsigned long vmx[VMX_MAX + 2][2];         in trace_tm_spd_vsx()  local
    104  FAIL_IF(show_vmx(child, vmx));             in trace_tm_spd_vsx()
    105  FAIL_IF(validate_vmx(vmx, fp_load));       in trace_tm_spd_vsx()
    108  FAIL_IF(show_vmx_ckpt(child, vmx));        in trace_tm_spd_vsx()
    109  FAIL_IF(validate_vmx(vmx, fp_load_ckpt));  in trace_tm_spd_vsx()
    112  memset(vmx, 0, sizeof(vmx));               in trace_tm_spd_vsx()
    114  load_vsx_vmx(fp_load_ckpt_new, vsx, vmx);  in trace_tm_spd_vsx()
    117  FAIL_IF(write_vmx_ckpt(child, vmx));       in trace_tm_spd_vsx()
|
| A D | ptrace-vsx.c |
     40  unsigned long vmx[VMX_MAX + 2][2];    in trace_vsx()  local
     45  FAIL_IF(show_vmx(child, vmx));        in trace_vsx()
     46  FAIL_IF(validate_vmx(vmx, fp_load));  in trace_vsx()
     49  memset(vmx, 0, sizeof(vmx));          in trace_vsx()
     50  load_vsx_vmx(fp_load_new, vsx, vmx);  in trace_vsx()
     53  FAIL_IF(write_vmx(child, vmx));       in trace_vsx()
|
| A D | ptrace.h |
    597  int show_vmx(pid_t child, unsigned long vmx[][2])        in show_vmx()
    601  ret = ptrace(PTRACE_GETVRREGS, child, 0, vmx);           in show_vmx()
    609  int show_vmx_ckpt(pid_t child, unsigned long vmx[][2])   in show_vmx_ckpt()
    622  memcpy(vmx, regs, sizeof(regs));                         in show_vmx_ckpt()
    627  int write_vmx(pid_t child, unsigned long vmx[][2])       in write_vmx()
    631  ret = ptrace(PTRACE_SETVRREGS, child, 0, vmx);           in write_vmx()
    639  int write_vmx_ckpt(pid_t child, unsigned long vmx[][2])  in write_vmx_ckpt()
    645  memcpy(regs, vmx, sizeof(regs));                         in write_vmx_ckpt()
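A minimal sketch of the GETVRREGS/SETVRREGS round trip those helpers wrap, assuming the powerpc-specific requests are visible through <sys/ptrace.h> as in the selftest header, that VMX_MAX is 32, and that the two extra rows cover VSCR and VRSAVE in the regset layout; error handling is reduced to a single return code:

#include <sys/types.h>
#include <sys/ptrace.h>

#define VMX_MAX 32

/* Read the tracee's VMX regset, flip one bit of VR0, and write it back. */
static int roundtrip_vmx(pid_t child)
{
	unsigned long vmx[VMX_MAX + 2][2];	/* 32 VRs + VSCR + VRSAVE */

	if (ptrace(PTRACE_GETVRREGS, child, 0, vmx))
		return -1;

	vmx[0][0] ^= 1UL;

	return ptrace(PTRACE_SETVRREGS, child, 0, vmx) ? -1 : 0;
}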
|
| /tools/testing/selftests/kvm/include/x86/ |
| A D | vmx.h |
    556  bool prepare_for_vmx_operation(struct vmx_pages *vmx);
    557  void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
    558  bool load_vmcs(struct vmx_pages *vmx);
    562  void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
    564  void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
    566  void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
    568  void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
    571  void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
    573  void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm);
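These are the helpers the KVM x86 entries above string together. A minimal sketch of the usual L1 flow, assuming the selftest harness headers and a hypothetical l2_guest_code() payload; real tests add test-specific VM-exit handling after vmlaunch():

#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#define L2_GUEST_STACK_SIZE 64

static void l2_guest_code(void)
{
	vmcall();	/* hypothetical L2 payload: exit straight back to L1 */
}

static void l1_guest_code(struct vmx_pages *vmx)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	GUEST_ASSERT(vmx->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx));
	GUEST_ASSERT(load_vmcs(vmx));

	prepare_vmcs(vmx, l2_guest_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_ASSERT(!vmlaunch());
	GUEST_DONE();
}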
|
| /tools/testing/selftests/powerpc/tm/ |
| A D | Makefile |
      3  tm-signal-context-chk-vmx tm-signal-context-chk-vsx
      6  tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail tm-unavailable tm-trap \
     23  $(OUTPUT)/tm-vmx-unavail: CFLAGS += -pthread -m64
|
| A D | .gitignore |
     13  tm-signal-context-chk-vmx
     18  tm-vmx-unavail
|
| /tools/arch/x86/include/uapi/asm/ |
| A D | kvm.h |
    507  struct kvm_vmx_nested_state_hdr vmx;  member
    520  __DECLARE_FLEX_ARRAY(struct kvm_vmx_nested_state_data, vmx);
|
| /tools/testing/selftests/kvm/ |
| A D | Makefile.kvm | 31 LIBKVM_x86 += lib/x86/vmx.c
|
| /tools/arch/x86/kcpuid/ |
| A D | cpuid.csv | 38 0x1, 0, ecx, 5, vmx , Virtual Machine Extensions
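That row says CPUID leaf 0x1, subleaf 0, ECX bit 5 reports VMX support. A small standalone check using the GCC/Clang <cpuid.h> helper, illustrative rather than part of kcpuid:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0x1: feature flags; ECX bit 5 is the vmx bit from the CSV row. */
	if (!__get_cpuid(0x1, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("vmx: %s\n", (ecx & (1u << 5)) ? "supported" : "not supported");
	return 0;
}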
|