/tools/testing/selftests/kvm/s390/

sync_regs_test.c
     76: struct kvm_run *run = vcpu->run;    [in test_read_invalid(), local]
     85: run->kvm_valid_regs = 0;    [in test_read_invalid()]
     92: run->kvm_valid_regs = 0;    [in test_read_invalid()]
     97: struct kvm_run *run = vcpu->run;    [in test_set_invalid(), local]
    106: run->kvm_dirty_regs = 0;    [in test_set_invalid()]
    113: run->kvm_dirty_regs = 0;    [in test_set_invalid()]
    118: struct kvm_run *run = vcpu->run;    [in test_req_and_verify_all_valid_regs(), local]
    132: run->s390_sieic.icptcode, run->s390_sieic.ipa,    [in test_req_and_verify_all_valid_regs()]
    144: struct kvm_run *run = vcpu->run;    [in test_set_and_verify_various_reg_values(), local]
    183: struct kvm_run *run = vcpu->run;    [in test_clear_kvm_dirty_regs_bits(), local]
    [all …]

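The s390 sync_regs matches above follow one pattern: set run->kvm_valid_regs or run->kvm_dirty_regs before KVM_RUN, then clear it afterwards. A minimal sketch of the invalid-bits probe, assuming the selftest helpers __vcpu_run() and TEST_ASSERT(); the INVALID_SYNC_FIELD name and value are made up for illustration and do not come from the listing:

#include <errno.h>
#include "kvm_util.h"   /* selftest helpers: __vcpu_run(), TEST_ASSERT() */

/* Hypothetical mask with a bit KVM does not recognize as a sync-regs field. */
#define INVALID_SYNC_FIELD 0x80000000ULL

static void test_read_invalid_sketch(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        int rv;

        /* Request an unsupported register set; KVM_RUN must reject it. */
        run->kvm_valid_regs = INVALID_SYNC_FIELD;
        rv = __vcpu_run(vcpu);
        TEST_ASSERT(rv < 0 && errno == EINVAL,
                    "Invalid kvm_valid_regs did not cause the expected KVM_RUN error");
        run->kvm_valid_regs = 0;
}
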
ucontrol_test.c
    108: struct kvm_run *run;    [in FIXTURE(), local]
    308: struct kvm_run *run = self->run;    [in uc_handle_exit_ucontrol(), local]
    350: struct kvm_run *run = self->run;    [in uc_handle_insn_ic(), local]
    359: run->psw_addr = run->psw_addr - ilen;    [in uc_handle_insn_ic()]
    375: struct kvm_run *run = self->run;    [in uc_handle_sieic(), local]
    380: run->s390_sieic.ipa,    [in uc_handle_sieic()]
    381: run->s390_sieic.ipb);    [in uc_handle_sieic()]
    402: struct kvm_run *run = self->run;    [in uc_handle_exit(), local]
    471: struct kvm_run *run = self->run;    [in TEST_F(), local]
    532: struct kvm_run *run = self->run;    [in TEST_F(), local]
    [all …]

/tools/testing/selftests/sgx/

main.c
    289: memset(&self->run, 0, sizeof(self->run));    [in TEST_F()]
    362: memset(&self->run, 0, sizeof(self->run));    [in TEST_F()]
    428: memset(&self->run, 0, sizeof(self->run));
    511: memset(&self->run, 0, sizeof(self->run));    [in TEST_F()]
    547: memset(&self->run, 0, sizeof(self->run));    [in TEST_F()]
    580: memset(&self->run, 0, sizeof(self->run));    [in TEST_F()]
    625: memset(&self->run, 0, sizeof(self->run));    [in TEST_F()]
    727: memset(&self->run, 0, sizeof(self->run));    [in TEST_F()]
    790: memset(&self->run, 0, sizeof(self->run));    [in TEST_F()]
    991: memset(&self->run, 0, sizeof(self->run));    [in TEST_F()]
    [all …]

/tools/testing/selftests/kvm/x86/

sync_regs_test.c
    161: struct kvm_run *run;    [in race_sync_regs(), local]
    165: run = vcpu->run;    [in race_sync_regs()]
    169: run->kvm_valid_regs = 0;    [in race_sync_regs()]
    210: struct kvm_run *run = vcpu->run;    [in KVM_ONE_VCPU_TEST(), local]
    219: run->kvm_valid_regs = 0;    [in KVM_ONE_VCPU_TEST()]
    231: struct kvm_run *run = vcpu->run;    [in KVM_ONE_VCPU_TEST(), local]
    252: struct kvm_run *run = vcpu->run;    [in KVM_ONE_VCPU_TEST(), local]
    275: struct kvm_run *run = vcpu->run;    [in KVM_ONE_VCPU_TEST(), local]
    313: struct kvm_run *run = vcpu->run;    [in KVM_ONE_VCPU_TEST(), local]
    330: struct kvm_run *run = vcpu->run;    [in KVM_ONE_VCPU_TEST(), local]
    [all …]

debug_regs.c
     86: struct kvm_run *run;    [in main(), local]
    104: run = vcpu->run;    [in main()]
    115: run->exit_reason, run->debug.arch.exception,    [in main()]
    134: i, run->exit_reason, run->debug.arch.exception,    [in main()]
    136: run->debug.arch.dr6, target_dr6);    [in main()]
    157: i, run->exit_reason, run->debug.arch.exception,    [in main()]
    159: run->debug.arch.dr6, target_dr6);    [in main()]
    183: i, run->exit_reason, run->debug.arch.exception,    [in main()]
    184: run->debug.arch.pc, target_rip, run->debug.arch.dr6,    [in main()]
    201: run->exit_reason, run->debug.arch.exception,    [in main()]
    [all …]

flds_emulation.h
     22: struct kvm_run *run = vcpu->run;    [in handle_flds_emulation_failure_exit(), local]
     29: TEST_ASSERT(run->emulation_failure.suberror == KVM_INTERNAL_ERROR_EMULATION,    [in handle_flds_emulation_failure_exit()]
     31: run->emulation_failure.suberror);    [in handle_flds_emulation_failure_exit()]
     33: flags = run->emulation_failure.flags;    [in handle_flds_emulation_failure_exit()]
     34: TEST_ASSERT(run->emulation_failure.ndata >= 3 &&    [in handle_flds_emulation_failure_exit()]
     38: TEST_ASSERT(run->emulation_failure.insn_size >= 2,    [in handle_flds_emulation_failure_exit()]
     40: run->emulation_failure.insn_size);    [in handle_flds_emulation_failure_exit()]
     42: insn_bytes = run->emulation_failure.insn_bytes;    [in handle_flds_emulation_failure_exit()]

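handle_flds_emulation_failure_exit() validates the emulation-failure payload KVM attaches to a KVM_EXIT_INTERNAL_ERROR exit. A sketch of those checks, assuming the selftests' TEST_ASSERT()/exit_reason_str() helpers; the instruction-bytes flag name is the uapi KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES, which the listing only implies:

#include "kvm_util.h"   /* selftest helpers: TEST_ASSERT(), exit_reason_str() */

/* Sketch: validate a KVM_EXIT_INTERNAL_ERROR exit caused by an emulation failure. */
static void check_emulation_failure_exit(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint64_t flags;

        TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
                    "Unexpected exit reason: %s", exit_reason_str(run->exit_reason));
        TEST_ASSERT(run->emulation_failure.suberror == KVM_INTERNAL_ERROR_EMULATION,
                    "Unexpected suberror: %u", run->emulation_failure.suberror);

        flags = run->emulation_failure.flags;
        TEST_ASSERT(run->emulation_failure.ndata >= 3 &&
                    (flags & KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES),
                    "Instruction bytes were not reported to userspace");
        TEST_ASSERT(run->emulation_failure.insn_size >= 2,
                    "Expected at least a two-byte opcode, got %u byte(s)",
                    run->emulation_failure.insn_size);
        /* run->emulation_failure.insn_bytes[] now holds the failing opcode. */
}
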
hyperv_extended_hypercalls.c
     41: struct kvm_run *run;    [in main(), local]
     56: run = vcpu->run;    [in main()]
     72: TEST_ASSERT(run->exit_reason == KVM_EXIT_HYPERV,    [in main()]
     74: run->exit_reason, exit_reason_str(run->exit_reason));    [in main()]
     76: outval = addr_gpa2hva(vm, run->hyperv.u.hcall.params[1]);    [in main()]
     78: run->hyperv.u.hcall.result = HV_STATUS_SUCCESS;    [in main()]
     82: TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,    [in main()]
     84: run->exit_reason, exit_reason_str(run->exit_reason));    [in main()]

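The hyperv_extended_hypercalls.c matches show userspace completing an extended hypercall that KVM forwarded as KVM_EXIT_HYPERV: the output GPA arrives in params[1] and the status goes back through run->hyperv.u.hcall.result. A sketch under those assumptions, using the selftests' addr_gpa2hva() helper and the HV_STATUS_SUCCESS constant from its hyperv.h:

#include "kvm_util.h"
#include "hyperv.h"     /* HV_STATUS_SUCCESS (selftests header) */

/* Sketch: complete an extended Hyper-V hypercall deflected to userspace. */
static void complete_hv_hcall(struct kvm_vm *vm, struct kvm_vcpu *vcpu, uint64_t value)
{
        struct kvm_run *run = vcpu->run;
        uint64_t *outval;

        TEST_ASSERT(run->exit_reason == KVM_EXIT_HYPERV,
                    "Unexpected exit reason: %u (%s)",
                    run->exit_reason, exit_reason_str(run->exit_reason));

        /* params[1] carries the output GPA; write the reply and report success. */
        outval = addr_gpa2hva(vm, run->hyperv.u.hcall.params[1]);
        *outval = value;
        run->hyperv.u.hcall.result = HV_STATUS_SUCCESS;
}
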
xen_vmcall_test.c
    106: volatile struct kvm_run *run = vcpu->run;    [in main(), local]
    111: if (run->exit_reason == KVM_EXIT_XEN) {    [in main()]
    112: TEST_ASSERT_EQ(run->xen.type, KVM_EXIT_XEN_HCALL);    [in main()]
    113: TEST_ASSERT_EQ(run->xen.u.hcall.cpl, 0);    [in main()]
    114: TEST_ASSERT_EQ(run->xen.u.hcall.longmode, 1);    [in main()]
    115: TEST_ASSERT_EQ(run->xen.u.hcall.input, INPUTVALUE);    [in main()]
    116: TEST_ASSERT_EQ(run->xen.u.hcall.params[0], ARGVALUE(1));    [in main()]
    117: TEST_ASSERT_EQ(run->xen.u.hcall.params[1], ARGVALUE(2));    [in main()]
    118: TEST_ASSERT_EQ(run->xen.u.hcall.params[2], ARGVALUE(3));    [in main()]
    119: TEST_ASSERT_EQ(run->xen.u.hcall.params[3], ARGVALUE(4));    [in main()]
    [all …]

userspace_msr_exit_test.c
    396: struct kvm_run *run = vcpu->run;    [in process_rdmsr(), local]
    407: run->msr.data = 0;    [in process_rdmsr()]
    410: run->msr.error = 1;    [in process_rdmsr()]
    428: struct kvm_run *run = vcpu->run;    [in process_wrmsr(), local]
    439: if (run->msr.data != 0)    [in process_wrmsr()]
    440: run->msr.error = 1;    [in process_wrmsr()]
    444: run->msr.error = 1;    [in process_wrmsr()]
    597: run->msr.data = run->msr.index;    [in handle_rdmsr()]
    635: struct kvm_run *run = vcpu->run;    [in KVM_ONE_VCPU_TEST(), local]
    655: handle_rdmsr(run);    [in KVM_ONE_VCPU_TEST()]
    [all …]

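userspace_msr_exit_test.c services deflected MSR accesses through run->msr: handle_rdmsr() echoes the index back as the read data, and process_wrmsr() sets run->msr.error to make KVM inject a #GP. A condensed sketch of that pattern (the echo/reject policy is taken from the listed lines, not a general rule):

#include "kvm_util.h"

/* Sketch: service a RDMSR/WRMSR that KVM deflected to userspace. */
static void handle_msr_exit(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;

        if (run->exit_reason == KVM_EXIT_X86_RDMSR) {
                /* Echo the MSR index back as the read value. */
                run->msr.data = run->msr.index;
                run->msr.error = 0;
        } else if (run->exit_reason == KVM_EXIT_X86_WRMSR) {
                /* Reject non-zero writes; error = 1 makes KVM inject a #GP. */
                run->msr.error = (run->msr.data != 0);
        }
}
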
userspace_io_test.c
     57: struct kvm_run *run;    [in main(), local]
     62: run = vcpu->run;    [in main()]
     73: TEST_ASSERT(run->io.port == 0x80,    [in main()]
     74: "Expected I/O at port 0x80, got port 0x%x", run->io.port);    [in main()]
     88: memset((void *)run + run->io.data_offset, 0xaa, 4096);    [in main()]

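userspace_io_test.c illustrates the PIO convention: the payload for a KVM_EXIT_IO exit lives inside the shared kvm_run mapping at run->io.data_offset. A sketch of the check-and-scrub step, assuming the selftests' TEST_ASSERT()/exit_reason_str() helpers:

#include <string.h>
#include "kvm_util.h"

/* Sketch: verify a port-0x80 exit and poison the shared PIO data page. */
static void scrub_pio_data(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        void *data = (void *)run + run->io.data_offset;

        TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
                    "Unexpected exit reason: %s", exit_reason_str(run->exit_reason));
        TEST_ASSERT(run->io.port == 0x80,
                    "Expected I/O at port 0x80, got port 0x%x", run->io.port);

        /* The data sits after the kvm_run header in the same mmap()ed region. */
        memset(data, 0xaa, 4096);
}
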
sev_smoke_test.c
    111: TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_SYSTEM_EVENT,    [in test_sync_vmsa()]
    113: exit_reason_str(vcpu->run->exit_reason));    [in test_sync_vmsa()]
    115: TEST_ASSERT_EQ(vcpu->run->system_event.ndata, 1);    [in test_sync_vmsa()]
    116: TEST_ASSERT_EQ(vcpu->run->system_event.data[0], GHCB_MSR_TERM_REQ);    [in test_sync_vmsa()]
    138: TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_SYSTEM_EVENT,    [in test_sev()]
    140: exit_reason_str(vcpu->run->exit_reason));    [in test_sev()]
    142: TEST_ASSERT_EQ(vcpu->run->system_event.ndata, 1);    [in test_sev()]
    143: TEST_ASSERT_EQ(vcpu->run->system_event.data[0], GHCB_MSR_TERM_REQ);    [in test_sev()]
    156: exit_reason_str(vcpu->run->exit_reason));    [in test_sev()]
    184: TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_SHUTDOWN,    [in test_sev_shutdown()]
    [all …]

private_mem_kvm_exits_test.c
     37: TEST_ASSERT_EQ(vcpu->run->exit_reason, KVM_EXIT_MEMORY_FAULT);    [in run_vcpu_get_exit_reason()]
     39: return vcpu->run->exit_reason;    [in run_vcpu_get_exit_reason()]
     78: TEST_ASSERT_EQ(vcpu->run->memory_fault.flags, KVM_MEMORY_EXIT_FLAG_PRIVATE);    [in test_private_access_memslot_deleted()]
     79: TEST_ASSERT_EQ(vcpu->run->memory_fault.gpa, EXITS_TEST_GPA);    [in test_private_access_memslot_deleted()]
     80: TEST_ASSERT_EQ(vcpu->run->memory_fault.size, EXITS_TEST_SIZE);    [in test_private_access_memslot_deleted()]
    107: TEST_ASSERT_EQ(vcpu->run->memory_fault.flags, KVM_MEMORY_EXIT_FLAG_PRIVATE);    [in test_private_access_memslot_not_private()]
    108: TEST_ASSERT_EQ(vcpu->run->memory_fault.gpa, EXITS_TEST_GPA);    [in test_private_access_memslot_not_private()]
    109: TEST_ASSERT_EQ(vcpu->run->memory_fault.size, EXITS_TEST_SIZE);    [in test_private_access_memslot_not_private()]

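private_mem_kvm_exits_test.c inspects the KVM_EXIT_MEMORY_FAULT payload. A small sketch of that validation, with the expected GPA and size passed in as parameters instead of the test's EXITS_TEST_* constants (whose values are not in the listing):

#include "kvm_util.h"

/* Sketch: check the memory-fault exit raised for a private guest access. */
static void check_private_memory_fault(struct kvm_vcpu *vcpu,
                                       uint64_t expected_gpa, uint64_t expected_size)
{
        struct kvm_run *run = vcpu->run;

        TEST_ASSERT_EQ(run->exit_reason, KVM_EXIT_MEMORY_FAULT);
        TEST_ASSERT_EQ(run->memory_fault.flags, KVM_MEMORY_EXIT_FLAG_PRIVATE);
        TEST_ASSERT_EQ(run->memory_fault.gpa, expected_gpa);
        TEST_ASSERT_EQ(run->memory_fault.size, expected_size);
}
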
/tools/testing/selftests/kvm/lib/s390/

ucall.c
     11: struct kvm_run *run = vcpu->run;    [in ucall_arch_get_ucall(), local]
     13: if (run->exit_reason == KVM_EXIT_S390_SIEIC &&    [in ucall_arch_get_ucall()]
     14: run->s390_sieic.icptcode == 4 &&    [in ucall_arch_get_ucall()]
     15: (run->s390_sieic.ipa >> 8) == 0x83 && /* 0x83 means DIAGNOSE */    [in ucall_arch_get_ucall()]
     16: (run->s390_sieic.ipb >> 16) == 0x501) {    [in ucall_arch_get_ucall()]
     17: int reg = run->s390_sieic.ipa & 0xf;    [in ucall_arch_get_ucall()]
     19: return (void *)run->s.regs.gprs[reg];    [in ucall_arch_get_ucall()]

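The listed lines are essentially the whole s390 ucall decoder: a DIAGNOSE 0x501 instruction intercept carries the address of the ucall structure in the general purpose register named by the low IPA nibble. A sketch reconstructed from those lines (the NULL fall-through is assumed):

#include "kvm_util.h"
#include "ucall_common.h"

void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;

        if (run->exit_reason == KVM_EXIT_S390_SIEIC &&
            run->s390_sieic.icptcode == 4 &&          /* instruction intercept */
            (run->s390_sieic.ipa >> 8) == 0x83 &&     /* 0x83 means DIAGNOSE */
            (run->s390_sieic.ipb >> 16) == 0x501) {
                int reg = run->s390_sieic.ipa & 0xf;

                /* The guest passed the ucall struct's address in that GPR. */
                return (void *)run->s.regs.gprs[reg];
        }
        return NULL;
}
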
diag318_test_handler.c
     30: struct kvm_run *run;    [in diag318_handler(), local]
     36: run = vcpu->run;    [in diag318_handler()]
     39: TEST_ASSERT(run->s390_sieic.icptcode == ICPT_INSTRUCTION,    [in diag318_handler()]
     40: "Unexpected intercept code: 0x%x", run->s390_sieic.icptcode);    [in diag318_handler()]
     41: TEST_ASSERT((run->s390_sieic.ipa & 0xff00) == IPA0_DIAG,    [in diag318_handler()]
     42: "Unexpected IPA0 code: 0x%x", (run->s390_sieic.ipa & 0xff00));    [in diag318_handler()]
     44: reg = (run->s390_sieic.ipa & 0x00f0) >> 4;    [in diag318_handler()]
     45: diag318_info = run->s.regs.gprs[reg];    [in diag318_handler()]

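diag318_handler() pulls the diag 0x318 payload out of the register encoded in the intercepted instruction's IPA. A sketch of that extraction; ICPT_INSTRUCTION and IPA0_DIAG are local defines in the real file, and the values used below are assumptions:

#include "kvm_util.h"

#define ICPT_INSTRUCTION        0x04    /* assumed value: SIE instruction intercept */
#define IPA0_DIAG               0x8300  /* assumed value: IPA0 encoding of DIAGNOSE */

/* Sketch: read the diag 0x318 information from the intercepting vCPU. */
static uint64_t get_diag318_info(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        int reg;

        TEST_ASSERT(run->s390_sieic.icptcode == ICPT_INSTRUCTION,
                    "Unexpected intercept code: 0x%x", run->s390_sieic.icptcode);
        TEST_ASSERT((run->s390_sieic.ipa & 0xff00) == IPA0_DIAG,
                    "Unexpected IPA0 code: 0x%x", run->s390_sieic.ipa & 0xff00);

        /* Bits 4-7 of the IPA name the register holding the diag 318 data. */
        reg = (run->s390_sieic.ipa & 0x00f0) >> 4;
        return run->s.regs.gprs[reg];
}
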
/tools/testing/selftests/kvm/include/s390/

debug_print.h
     43: static inline void print_psw(struct kvm_run *run, struct kvm_s390_sie_block *sie_block)    [in print_psw(), argument]
     46: run->flags,    [in print_psw()]
     47: run->psw_mask, run->psw_addr,    [in print_psw()]
     48: run->exit_reason, exit_reason_str(run->exit_reason));    [in print_psw()]
     53: static inline void print_run(struct kvm_run *run, struct kvm_s390_sie_block *sie_block)    [in print_run(), argument]
     55: print_hex_bytes("run", (u64)run, 0x150);    [in print_run()]
     57: print_psw(run, sie_block);    [in print_run()]
     60: static inline void print_regs(struct kvm_run *run)    [in print_regs(), argument]
     62: struct kvm_sync_regs *sync_regs = &run->s.regs;    [in print_regs()]

/tools/perf/tests/

make
    129: run := make_pure
    140: run += make_debug
    151: run += make_no_ui
    165: run += make_no_sdt
    172: run += make_help
    173: run += make_doc
    174: run += make_perf_o
    196: run += make_tags
    294: run := $(shell shuf -e $(run))
    301: d := $(info run $(run))
    [all …]

/tools/perf/scripts/python/

stat-cpi.py
     23: def store(time, event, cpu, thread, val, ena, run):    [argument]
     29: data[key] = [ val, ena, run]
     35: def stat__cycles_k(cpu, thread, time, val, ena, run):    [argument]
     36: store(time, "cycles", cpu, thread, val, ena, run);
     38: def stat__instructions_k(cpu, thread, time, val, ena, run):    [argument]
     41: def stat__cycles_u(cpu, thread, time, val, ena, run):    [argument]
     42: store(time, "cycles", cpu, thread, val, ena, run);
     44: def stat__instructions_u(cpu, thread, time, val, ena, run):    [argument]
     47: def stat__cycles(cpu, thread, time, val, ena, run):    [argument]
     48: store(time, "cycles", cpu, thread, val, ena, run);
    [all …]

/tools/testing/selftests/kvm/

coalesced_io_test.c
     78: struct kvm_run *run = vcpu->run;    [in vcpu_run_and_verify_io_exit(), local]
     90: if (run->exit_reason == KVM_EXIT_IO)    [in vcpu_run_and_verify_io_exit()]
     91: pio_value = *(uint32_t *)((void *)run + run->io.data_offset);    [in vcpu_run_and_verify_io_exit()]
     95: TEST_ASSERT((!want_pio && (run->exit_reason == KVM_EXIT_MMIO && run->mmio.is_write &&    [in vcpu_run_and_verify_io_exit()]
     96: run->mmio.phys_addr == io->mmio_gpa && run->mmio.len == 8 &&    [in vcpu_run_and_verify_io_exit()]
     98: (want_pio && (run->exit_reason == KVM_EXIT_IO && run->io.port == io->pio_port &&    [in vcpu_run_and_verify_io_exit()]
     99: run->io.direction == KVM_EXIT_IO_OUT && run->io.count == 1 &&    [in vcpu_run_and_verify_io_exit()]
    107: … run->exit_reason == KVM_EXIT_MMIO ? "MMIO" : run->exit_reason == KVM_EXIT_IO ? "PIO" : "other",    [in vcpu_run_and_verify_io_exit()]
    108: run->mmio.phys_addr, run->mmio.is_write, run->mmio.len, *(uint64_t *)run->mmio.data,    [in vcpu_run_and_verify_io_exit()]
    109: run->io.port, run->io.direction, run->io.size, run->io.count, pio_value);    [in vcpu_run_and_verify_io_exit()]
    [all …]

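vcpu_run_and_verify_io_exit() asserts that, once the coalesced ring has been drained, the vCPU stops on either the non-coalesced MMIO write or the non-coalesced PIO write. A simplified sketch of that check, with the expected GPA and port passed as parameters rather than taken from the test's io descriptor:

#include "kvm_util.h"

/* Simplified sketch: verify the exit that follows the coalesced I/O burst. */
static void verify_io_exit(struct kvm_vcpu *vcpu, bool want_pio,
                           uint64_t mmio_gpa, uint16_t pio_port)
{
        struct kvm_run *run = vcpu->run;

        if (want_pio)
                TEST_ASSERT(run->exit_reason == KVM_EXIT_IO &&
                            run->io.port == pio_port &&
                            run->io.direction == KVM_EXIT_IO_OUT &&
                            run->io.count == 1,
                            "Wanted a PIO write to port 0x%x, got exit reason %s",
                            pio_port, exit_reason_str(run->exit_reason));
        else
                TEST_ASSERT(run->exit_reason == KVM_EXIT_MMIO &&
                            run->mmio.is_write &&
                            run->mmio.phys_addr == mmio_gpa &&
                            run->mmio.len == 8,
                            "Wanted an 8-byte MMIO write, got exit reason %s",
                            exit_reason_str(run->exit_reason));
}
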
set_memory_region_test.c
     55: struct kvm_run *run = vcpu->run;    [in vcpu_worker(), local]
     67: if (run->exit_reason == KVM_EXIT_IO) {    [in vcpu_worker()]
     76: if (run->exit_reason != KVM_EXIT_MMIO)    [in vcpu_worker()]
     80: TEST_ASSERT(run->mmio.len == 8,    [in vcpu_worker()]
     85: run->mmio.phys_addr);    [in vcpu_worker()]
     86: memcpy(run->mmio.data, &MMIO_VAL, 8);    [in vcpu_worker()]
    277: struct kvm_run *run;    [in test_delete_memory_region(), local]
    307: run = vcpu->run;    [in test_delete_memory_region()]
    579: struct kvm_run *run;    [in test_mmio_during_vectoring(), local]
    588: run = vcpu->run;    [in test_mmio_during_vectoring()]
    [all …]

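vcpu_worker() in set_memory_region_test.c answers reads that miss the moving/deleted memslot by emulating an 8-byte MMIO load in userspace. A sketch of that reply path; the MMIO_VAL value below is a placeholder, since the real constant is not in the listing:

#include <string.h>
#include "kvm_util.h"

static const uint64_t MMIO_VAL = 0x1122334455667788ull;  /* placeholder value */

/* Sketch: satisfy a guest read that landed outside any memslot. */
static void emulate_mmio_read(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;

        TEST_ASSERT(run->exit_reason == KVM_EXIT_MMIO,
                    "Unexpected exit reason: %s", exit_reason_str(run->exit_reason));
        TEST_ASSERT(!run->mmio.is_write, "Expected an MMIO read, got a write");
        TEST_ASSERT(run->mmio.len == 8,
                    "Unexpected MMIO size: %u bytes", run->mmio.len);

        /* KVM picks this up from run->mmio.data on the next KVM_RUN. */
        memcpy(run->mmio.data, &MMIO_VAL, 8);
}
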
/tools/testing/selftests/kvm/lib/riscv/

ucall.c
     16: struct kvm_run *run = vcpu->run;    [in ucall_arch_get_ucall(), local]
     18: if (run->exit_reason == KVM_EXIT_RISCV_SBI &&    [in ucall_arch_get_ucall()]
     19: run->riscv_sbi.extension_id == KVM_RISCV_SELFTESTS_SBI_EXT) {    [in ucall_arch_get_ucall()]
     20: switch (run->riscv_sbi.function_id) {    [in ucall_arch_get_ucall()]
     22: return (void *)run->riscv_sbi.args[0];    [in ucall_arch_get_ucall()]

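The RISC-V decoder keys off a selftest-private SBI extension: the ucall structure's address travels in the first SBI argument. A sketch based on the listed lines; the KVM_RISCV_SELFTESTS_SBI_UCALL function id and the NULL fall-through are assumptions not shown in the listing:

#include "kvm_util.h"
#include "ucall_common.h"
#include "processor.h"  /* KVM_RISCV_SELFTESTS_SBI_* (selftests header) */

void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;

        if (run->exit_reason == KVM_EXIT_RISCV_SBI &&
            run->riscv_sbi.extension_id == KVM_RISCV_SELFTESTS_SBI_EXT) {
                switch (run->riscv_sbi.function_id) {
                case KVM_RISCV_SELFTESTS_SBI_UCALL:
                        /* First SBI argument holds the ucall struct's address. */
                        return (void *)run->riscv_sbi.args[0];
                default:
                        break;
                }
        }
        return NULL;
}
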
/tools/testing/selftests/kvm/lib/loongarch/

ucall.c
     27: struct kvm_run *run = vcpu->run;    [in ucall_arch_get_ucall(), local]
     29: if (run->exit_reason == KVM_EXIT_MMIO &&    [in ucall_arch_get_ucall()]
     30: run->mmio.phys_addr == vcpu->vm->ucall_mmio_addr) {    [in ucall_arch_get_ucall()]
     31: TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(uint64_t),    [in ucall_arch_get_ucall()]
     34: return (void *)(*((uint64_t *)run->mmio.data));    [in ucall_arch_get_ucall()]

/tools/testing/selftests/kvm/lib/arm64/

ucall.c
     24: struct kvm_run *run = vcpu->run;    [in ucall_arch_get_ucall(), local]
     26: if (run->exit_reason == KVM_EXIT_MMIO &&    [in ucall_arch_get_ucall()]
     27: run->mmio.phys_addr == vcpu->vm->ucall_mmio_addr) {    [in ucall_arch_get_ucall()]
     28: TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(uint64_t),    [in ucall_arch_get_ucall()]
     30: return (void *)(*((uint64_t *)run->mmio.data));    [in ucall_arch_get_ucall()]

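The arm64 and LoongArch decoders above use the same MMIO-based scheme: the guest stores the 64-bit address of its ucall structure to a reserved MMIO page, and userspace reads it back out of run->mmio.data. A sketch reconstructed from those lines (the NULL fall-through is assumed):

#include "kvm_util.h"
#include "ucall_common.h"

void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;

        if (run->exit_reason == KVM_EXIT_MMIO &&
            run->mmio.phys_addr == vcpu->vm->ucall_mmio_addr) {
                TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(uint64_t),
                            "Unexpected ucall exit: is_write %u, len %u",
                            run->mmio.is_write, run->mmio.len);

                /* The stored value is the guest address of the ucall struct. */
                return (void *)(*((uint64_t *)run->mmio.data));
        }
        return NULL;
}
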
/tools/testing/selftests/arm64/mte/

check_tags_inclusion.c
     52: int tag, run, ret, result = KSFT_PASS;    [in check_single_included_tags(), local]
     64: for (run = 0; (run < RUNS) && (result == KSFT_PASS); run++) {    [in check_single_included_tags()]
     84: int tag, run, result = KSFT_PASS;    [in check_multiple_included_tags(), local]
     96: for (run = 0; (run < RUNS) && (result == KSFT_PASS); run++) {    [in check_multiple_included_tags()]
    116: int run, ret, result = KSFT_PASS;    [in check_all_included_tags(), local]
    127: for (run = 0; (run < RUNS) && (result == KSFT_PASS); run++) {    [in check_all_included_tags()]
    142: int run, ret;    [in check_none_included_tags(), local]
    152: for (run = 0; run < RUNS; run++) {    [in check_none_included_tags()]

check_child_memory.c
     87: int run, result;    [in check_child_memory_mapping(), local]
     92: for (run = 0; run < item; run++) {    [in check_child_memory_mapping()]
     95: if (check_allocated_memory_range(ptr, sizes[run], mem_type,    [in check_child_memory_mapping()]
     98: result = check_child_tag_inheritance(ptr, sizes[run], mode);    [in check_child_memory_mapping()]
    109: int run, fd, map_size, result = KSFT_PASS;    [in check_child_file_mapping(), local]
    113: for (run = 0; run < total; run++) {    [in check_child_file_mapping()]
    118: map_size = sizes[run] + OVERFLOW + UNDERFLOW;    [in check_child_file_mapping()]
    125: mte_initialize_current_context(mode, (uintptr_t)ptr, sizes[run]);    [in check_child_file_mapping()]
    127: ptr = mte_insert_tags((void *)ptr, sizes[run]);    [in check_child_file_mapping()]
    134: result = check_child_tag_inheritance(ptr, sizes[run], mode);    [in check_child_file_mapping()]
    [all …]

/tools/testing/selftests/kvm/arm64/

psci_test.c
    181: struct kvm_run *run;    [in host_test_system_suspend(), local]
    188: run = source->run;    [in host_test_system_suspend()]
    193: TEST_ASSERT(run->system_event.type == KVM_SYSTEM_EVENT_SUSPEND,    [in host_test_system_suspend()]
    195: run->system_event.type, KVM_SYSTEM_EVENT_SUSPEND);    [in host_test_system_suspend()]
    242: struct kvm_run *run;    [in host_test_system_off2(), local]
    255: run = source->run;    [in host_test_system_off2()]
    258: while (run->exit_reason == KVM_EXIT_SYSTEM_EVENT) {    [in host_test_system_off2()]
    259: TEST_ASSERT(run->system_event.type == KVM_SYSTEM_EVENT_SHUTDOWN,    [in host_test_system_off2()]
    261: run->system_event.type, KVM_SYSTEM_EVENT_SHUTDOWN);    [in host_test_system_off2()]
    262: TEST_ASSERT(run->system_event.ndata >= 1,    [in host_test_system_off2()]
    [all …]

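host_test_system_suspend() expects the source vCPU to leave KVM_RUN with a suspend system event after the guest's PSCI SYSTEM_SUSPEND call. A sketch of that assertion, assuming the selftests' vcpu_run()/TEST_ASSERT() helpers:

#include "kvm_util.h"

/* Sketch: run the source vCPU and insist on a SYSTEM_SUSPEND event exit. */
static void assert_suspend_event(struct kvm_vcpu *source)
{
        struct kvm_run *run = source->run;

        vcpu_run(source);

        TEST_ASSERT(run->exit_reason == KVM_EXIT_SYSTEM_EVENT,
                    "Unexpected exit reason: %s", exit_reason_str(run->exit_reason));
        TEST_ASSERT(run->system_event.type == KVM_SYSTEM_EVENT_SUSPEND,
                    "Unexpected system event: %u, expected: %u",
                    run->system_event.type, KVM_SYSTEM_EVENT_SUSPEND);
}
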