| /linux/tools/testing/selftests/kvm/s390x/ |
| sync_regs_test.c |
|    80  struct kvm_run *run;  in main() (local)
|   135  run->exit_reason,  in main()
|   141  run->s390_sieic.icptcode, run->s390_sieic.ipa,  in main()
|   142  run->s390_sieic.ipb);  in main()
|   166  run->exit_reason,  in main()
|   170  run->s.regs.gprs[11]);  in main()
|   173  run->s.regs.acrs[0]);  in main()
|   176  run->s.regs.diag318);  in main()
|   188  run->kvm_dirty_regs = 0;  in main()
|   195  run->exit_reason,  in main()
|   [all …]
|
| /linux/tools/testing/selftests/kvm/x86_64/ |
| sync_regs_test.c |
|    88  struct kvm_run *run;  in main() (local)
|   148  run->exit_reason,  in main()
|   170  run->exit_reason,  in main()
|   174  run->s.regs.regs.rbx);  in main()
|   192  run->kvm_dirty_regs = 0;  in main()
|   197  run->exit_reason,  in main()
|   207  run->kvm_valid_regs = 0;  in main()
|   208  run->kvm_dirty_regs = 0;  in main()
|   215  run->exit_reason,  in main()
|   229  run->kvm_valid_regs = 0;  in main()
|   [all …]
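
The two sync_regs_test.c entries above exercise KVM's register-sharing UAPI. A minimal C sketch of that pattern for the x86 case, assuming run points at a vcpu's mmap'ed struct kvm_run, KVM_CAP_SYNC_REGS is available, and the helper name plus the implied KVM_RUN call are illustrative rather than the test's own code:

    /* Sync-regs sketch: registers travel through run->s.regs instead of
     * separate KVM_GET_REGS / KVM_SET_REGS ioctls.
     */
    #include <linux/kvm.h>
    #include <stdio.h>

    static void bump_rbx_via_sync_regs(struct kvm_run *run)
    {
            /* Ask KVM to export GPRs/SREGS/EVENTS into run->s.regs on exit. */
            run->kvm_valid_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS |
                                  KVM_SYNC_X86_EVENTS;

            /* ... KVM_RUN happens here; on return the registers are valid ... */

            printf("exit_reason=%u rbx=0x%llx\n",
                   run->exit_reason, run->s.regs.regs.rbx);

            /* Modify a guest register in the shared area and tell KVM to load
             * it on the next KVM_RUN.
             */
            run->s.regs.regs.rbx += 1;
            run->kvm_dirty_regs = KVM_SYNC_X86_REGS;
    }

Clearing kvm_valid_regs/kvm_dirty_regs, as the test does at lines 207/208, turns the sharing back off for subsequent runs.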
|
| debug_regs.c |
|    84  struct kvm_run *run;  in main() (local)
|   105  run = vcpu_state(vm, VCPU_ID);  in main()
|   116  run->exit_reason, run->debug.arch.exception,  in main()
|   135  i, run->exit_reason, run->debug.arch.exception,  in main()
|   137  run->debug.arch.dr6, target_dr6);  in main()
|   158  i, run->exit_reason, run->debug.arch.exception,  in main()
|   160  run->debug.arch.dr6, target_dr6);  in main()
|   185  i, run->exit_reason, run->debug.arch.exception,  in main()
|   186  run->debug.arch.pc, target_rip, run->debug.arch.dr6,  in main()
|   203  run->exit_reason, run->debug.arch.exception,  in main()
|   [all …]
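
debug_regs.c pairs KVM_SET_GUEST_DEBUG with the KVM_EXIT_DEBUG payload shown above. A hedged sketch of that round trip, assuming an already-created vcpu_fd and its mmap'ed run, with error handling omitted:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>
    #include <stdio.h>

    static void single_step_once(int vcpu_fd, struct kvm_run *run)
    {
            struct kvm_guest_debug debug = {
                    .control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
            };

            /* Arm single-stepping, run the vcpu, then inspect the exit. */
            ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &debug);
            ioctl(vcpu_fd, KVM_RUN, 0);

            if (run->exit_reason == KVM_EXIT_DEBUG)
                    printf("debug exit: vector=%u pc=0x%llx dr6=0x%llx\n",
                           run->debug.arch.exception, run->debug.arch.pc,
                           run->debug.arch.dr6);
    }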
|
| userspace_msr_exit_test.c |
|   426  run->exit_reason,  in process_rdmsr()
|   434  run->msr.data = 0;  in process_rdmsr()
|   437  run->msr.error = 1;  in process_rdmsr()
|   461  run->exit_reason,  in process_wrmsr()
|   470  run->msr.error = 1;  in process_wrmsr()
|   474  run->msr.error = 1;  in process_wrmsr()
|   493  run->exit_reason,  in process_ucall_done()
|   641  run->msr.data = run->msr.index;  in handle_rdmsr()
|   684  struct kvm_run *run;  in test_msr_filter_deny() (local)
|   709  handle_rdmsr(run);  in test_msr_filter_deny()
|   [all …]
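
This test makes KVM bounce filtered MSR accesses to userspace. A sketch of the corresponding exit handling, using only UAPI fields from struct kvm_run; the MSR index check is an arbitrary illustration, not the test's filter:

    #include <linux/kvm.h>

    static void handle_msr_exit(struct kvm_run *run)
    {
            switch (run->exit_reason) {
            case KVM_EXIT_X86_RDMSR:
                    /* Complete the read: supply data or flag a #GP. */
                    if (run->msr.index == 0xdeadbeefu) {   /* example index */
                            run->msr.data = 0;             /* value seen by the guest */
                            run->msr.error = 0;
                    } else {
                            run->msr.error = 1;            /* inject #GP instead */
                    }
                    break;
            case KVM_EXIT_X86_WRMSR:
                    /* run->msr.data holds the value the guest tried to write. */
                    run->msr.error = 0;                    /* accept the write */
                    break;
            }
    }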
|
| emulator_error_test.c |
|    70  run->exit_reason,  in process_exit_on_emulation_error()
|    71  exit_reason_str(run->exit_reason));  in process_exit_on_emulation_error()
|    75  run->emulation_failure.suberror);  in process_exit_on_emulation_error()
|    77  if (run->emulation_failure.ndata >= 1) {  in process_exit_on_emulation_error()
|    78  flags = run->emulation_failure.flags;  in process_exit_on_emulation_error()
|    80  run->emulation_failure.ndata >= 3) {  in process_exit_on_emulation_error()
|   114  if (run->exit_reason == KVM_EXIT_IO &&  in check_for_guest_assert()
|   129  run->exit_reason,  in process_ucall_done()
|   130  exit_reason_str(run->exit_reason));  in process_ucall_done()
|   144  run->exit_reason,  in process_ucall()
|   [all …]
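
These matches read the KVM_EXIT_INTERNAL_ERROR emulation-failure payload. A sketch of decoding it; the instruction-bytes flag constant is the name used in recent kernel headers and is the main assumption here:

    #include <linux/kvm.h>
    #include <stdio.h>

    static void dump_emulation_failure(struct kvm_run *run)
    {
            if (run->exit_reason != KVM_EXIT_INTERNAL_ERROR)
                    return;

            printf("suberror=%u ndata=%u\n",
                   run->emulation_failure.suberror,
                   run->emulation_failure.ndata);

            /* Instruction bytes are only valid when the flag bit says so. */
            if (run->emulation_failure.ndata >= 3 &&
                (run->emulation_failure.flags &
                 KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES))
                    printf("insn: %u bytes, first byte 0x%02x\n",
                           (unsigned)run->emulation_failure.insn_size,
                           (unsigned)run->emulation_failure.insn_bytes[0]);
    }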
|
| xen_vmcall_test.c |
|   114  if (run->exit_reason == KVM_EXIT_XEN) {  in main()
|   115  ASSERT_EQ(run->xen.type, KVM_EXIT_XEN_HCALL);  in main()
|   116  ASSERT_EQ(run->xen.u.hcall.cpl, 0);  in main()
|   117  ASSERT_EQ(run->xen.u.hcall.longmode, 1);  in main()
|   118  ASSERT_EQ(run->xen.u.hcall.input, INPUTVALUE);  in main()
|   119  ASSERT_EQ(run->xen.u.hcall.params[0], ARGVALUE(1));  in main()
|   120  ASSERT_EQ(run->xen.u.hcall.params[1], ARGVALUE(2));  in main()
|   125  run->xen.u.hcall.result = RETVALUE;  in main()
|   129  TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,  in main()
|   131  run->exit_reason,  in main()
|   [all …]
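
xen_vmcall_test.c asserts on a Xen hypercall that KVM forwarded to userspace. A sketch of servicing such an exit, assuming Xen hypercall interception was enabled via KVM_XEN_HVM_CONFIG; the result value written back is illustrative:

    #include <linux/kvm.h>
    #include <stdio.h>

    static void handle_xen_exit(struct kvm_run *run)
    {
            if (run->exit_reason != KVM_EXIT_XEN ||
                run->xen.type != KVM_EXIT_XEN_HCALL)
                    return;

            printf("hypercall %llu from cpl %u, arg0=0x%llx\n",
                   run->xen.u.hcall.input, run->xen.u.hcall.cpl,
                   run->xen.u.hcall.params[0]);

            /* Whatever is stored here is returned to the guest as the
             * hypercall result on the next KVM_RUN.
             */
            run->xen.u.hcall.result = -1ULL;    /* illustrative failure code */
    }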
|
| mmu_role_test.c |
|    28  struct kvm_run *run;  in mmu_role_test() (local)
|    35  run = vcpu_state(vm, VCPU_ID);  in mmu_role_test()
|    44  TEST_ASSERT(run->exit_reason == KVM_EXIT_MMIO,  in mmu_role_test()
|    46  run->exit_reason, exit_reason_str(run->exit_reason));  in mmu_role_test()
|    48  TEST_ASSERT(run->mmio.len == 8, "Unexpected exit mmio size = %u", run->mmio.len);  in mmu_role_test()
|    50  TEST_ASSERT(run->mmio.phys_addr == MMIO_GPA,  in mmu_role_test()
|    51  "Unexpected exit mmio address = 0x%llx", run->mmio.phys_addr);  in mmu_role_test()
|    82  exit_reason_str(run->exit_reason), cmd);  in mmu_role_test()
|
| /linux/fs/ntfs3/ |
| run.c |
|    39  if (!run->count) {  in run_lookup()
|    48  r = run->runs;  in run_lookup()
|   158  run->count -= 1;  in run_consolidate()
|   176  end = run->runs + run->count;  in run_is_mapped_full()
|   177  r = run->runs + i;  in run_is_mapped_full()
|   200  if (!run->runs)  in run_lookup_entry()
|   248  r = run->runs;  in run_truncate_head()
|   305  run_truncate(run, (run->runs + (run->count >> 1))->vcn);  in run_truncate_around()
|   498  e = run->runs + run->count;  in run_collapse_range()
|   961  if (!run)  in run_unpack()
|   [all …]
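
NTFS3's runs tree is a sorted array of extents mapping virtual cluster numbers (VCNs) to logical cluster numbers (LCNs), and run_lookup() above binary-searches it. A self-contained userspace sketch of that lookup idea; the demo_run struct and helper are illustrative stand-ins, not the driver's definitions:

    #include <stddef.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct demo_run {               /* hypothetical stand-in for an ntfs run */
            uint64_t vcn;           /* first virtual cluster of the extent */
            uint64_t lcn;           /* first logical (on-disk) cluster */
            uint64_t len;           /* extent length in clusters */
    };

    /* Binary-search the sorted run array for the extent containing vcn. */
    static bool demo_run_lookup(const struct demo_run *runs, size_t count,
                                uint64_t vcn, uint64_t *lcn)
    {
            size_t lo = 0, hi = count;

            while (lo < hi) {
                    size_t mid = lo + (hi - lo) / 2;
                    const struct demo_run *r = &runs[mid];

                    if (vcn < r->vcn)
                            hi = mid;
                    else if (vcn >= r->vcn + r->len)
                            lo = mid + 1;
                    else {
                            *lcn = r->lcn + (vcn - r->vcn);
                            return true;
                    }
            }
            return false;           /* hole: vcn is not mapped */
    }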
|
| attrib.c |
|   128  run_truncate(run, vcn0);  in run_deallocate_ex()
|   177  size_t cnt = run->count;  in attr_allocate_clusters()
|   221  run_truncate(run, vcn0);  in attr_allocate_clusters()
|   267  run_init(run);  in attr_make_nonresident()
|   348  run_close(run);  in attr_make_nonresident()
|   810  struct runs_tree *run = &ni->file.run;  in attr_data_get_block() (local)
|  1352  struct runs_tree *run;  in attr_is_frame_compressed() (local)
|  1364  run = &ni->file.run;  in attr_is_frame_compressed()
|  1453  struct runs_tree *run = &ni->file.run;  in attr_allocate_frame() (local)
|  1675  struct runs_tree *run = &ni->file.run;  in attr_collapse_range() (local)
|   [all …]
|
| /linux/drivers/staging/media/sunxi/cedrus/ |
| cedrus_dec.c |
|    29  struct cedrus_run run = {};  in cedrus_device_run() (local)
|    32  run.src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);  in cedrus_device_run()
|    33  run.dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);  in cedrus_device_run()
|    36  src_req = run.src->vb2_buf.req_obj.req;  in cedrus_device_run()
|    43  run.mpeg2.sequence = cedrus_find_control_data(ctx,  in cedrus_device_run()
|    45  run.mpeg2.picture = cedrus_find_control_data(ctx,  in cedrus_device_run()
|    54  run.h264.pps = cedrus_find_control_data(ctx,  in cedrus_device_run()
|    60  run.h264.sps = cedrus_find_control_data(ctx,  in cedrus_device_run()
|    67  run.h265.sps = cedrus_find_control_data(ctx,  in cedrus_device_run()
|    69  run.h265.pps = cedrus_find_control_data(ctx,  in cedrus_device_run()
|   [all …]
|
| /linux/tools/testing/selftests/sgx/ |
| main.c |
|   112  struct sgx_enclave_run run;  in FIXTURE() (local)
|   169  memset(&self->run, 0, sizeof(self->run));  in FIXTURE_SETUP()
|   204  (run)); \
|   213  (run)->exception_error_code, (run)->exception_addr); \
|   225  EXPECT_EEXIT(&self->run);  in TEST_F()
|   234  EXPECT_EEXIT(&self->run);  in TEST_F()
|   247  EXPECT_EEXIT(&self->run);  in TEST_F()
|   256  EXPECT_EEXIT(&self->run);  in TEST_F()
|   263  run->user_data = 0;  in test_handler()
|   280  EXPECT_EEXIT(&self->run);  in TEST_F()
|   [all …]
|
| /linux/tools/perf/tests/ |
| make |
|   124  run := make_pure
|   135  run += make_debug
|   142  run += make_no_ui
|   156  run += make_no_sdt
|   162  run += make_help
|   163  run += make_doc
|   164  run += make_perf_o
|   180  run += make_tags
|   290  run := $(shell shuf -e $(run))
|   297  d := $(info run $(run))
|   [all …]
|
| /linux/arch/riscv/kvm/ |
| vcpu_sbi.c |
|    20  struct kvm_run *run)  in kvm_riscv_vcpu_sbi_forward() (argument)
|    29  run->riscv_sbi.args[0] = cp->a0;  in kvm_riscv_vcpu_sbi_forward()
|    30  run->riscv_sbi.args[1] = cp->a1;  in kvm_riscv_vcpu_sbi_forward()
|    31  run->riscv_sbi.args[2] = cp->a2;  in kvm_riscv_vcpu_sbi_forward()
|    32  run->riscv_sbi.args[3] = cp->a3;  in kvm_riscv_vcpu_sbi_forward()
|    33  run->riscv_sbi.args[4] = cp->a4;  in kvm_riscv_vcpu_sbi_forward()
|    35  run->riscv_sbi.ret[0] = cp->a0;  in kvm_riscv_vcpu_sbi_forward()
|    36  run->riscv_sbi.ret[1] = cp->a1;  in kvm_riscv_vcpu_sbi_forward()
|    49  cp->a0 = run->riscv_sbi.ret[0];  in kvm_riscv_vcpu_sbi_return()
|    70  memset(&run->system_event, 0, sizeof(run->system_event));  in kvm_sbi_system_shutdown()
|   [all …]
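
kvm_riscv_vcpu_sbi_forward() copies a guest SBI call into run->riscv_sbi and later reads the results back into a0/a1. A sketch of the userspace half of that exchange, using only the generic kvm_run fields; the handler name and the SBI_SUCCESS return are illustrative:

    #include <linux/kvm.h>
    #include <stdio.h>

    static void handle_sbi_exit(struct kvm_run *run)
    {
            if (run->exit_reason != KVM_EXIT_RISCV_SBI)
                    return;

            /* extension_id/function_id come from the guest's a7/a6,
             * args[] from a0..a5.
             */
            printf("SBI ext 0x%lx func 0x%lx arg0 0x%lx\n",
                   run->riscv_sbi.extension_id, run->riscv_sbi.function_id,
                   run->riscv_sbi.args[0]);

            /* ret[0]/ret[1] are copied back into a0/a1 before re-entry. */
            run->riscv_sbi.ret[0] = 0;      /* SBI_SUCCESS */
            run->riscv_sbi.ret[1] = 0;      /* no return value */
    }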
|
| vcpu_exit.c |
|   127  struct kvm_run *run,  in truly_illegal_insn() (argument)
|   142  struct kvm_run *run,  in system_opcode_insn() (argument)
|   280  run->mmio.is_write = false;  in emulate_load()
|   281  run->mmio.phys_addr = fault_addr;  in emulate_load()
|   282  run->mmio.len = len;  in emulate_load()
|   295  run->exit_reason = KVM_EXIT_MMIO;  in emulate_load()
|   384  *((u8 *)run->mmio.data) = data8;  in emulate_store()
|   400  run->mmio.is_write = true;  in emulate_store()
|   401  run->mmio.phys_addr = fault_addr;  in emulate_store()
|   402  run->mmio.len = len;  in emulate_store()
|   [all …]
|
| /linux/tools/testing/selftests/arm64/mte/ |
| check_mmap_options.c |
|    63  int run, result, map_size;  in check_anonymous_memory_mapping() (local)
|    68  for (run = 0; run < item; run++) {  in check_anonymous_memory_mapping()
|    69  map_size = sizes[run] + OVERFLOW + UNDERFLOW;  in check_anonymous_memory_mapping()
|    84  mte_clear_tags((void *)ptr, sizes[run]);  in check_anonymous_memory_mapping()
|    95  int run, fd, map_size;  in check_file_memory_mapping() (local)
|   100  for (run = 0; run < total; run++) {  in check_file_memory_mapping()
|   105  map_size = sizes[run] + UNDERFLOW + OVERFLOW;  in check_file_memory_mapping()
|   122  mte_clear_tags((void *)ptr, sizes[run]);  in check_file_memory_mapping()
|   134  int run, prot_flag, result, fd, map_size;  in check_clear_prot_mte_flag() (local)
|   139  for (run = 0; run < total; run++) {  in check_clear_prot_mte_flag()
|   [all …]
|
| check_child_memory.c |
|    87  int run, result;  in check_child_memory_mapping() (local)
|    92  for (run = 0; run < item; run++) {  in check_child_memory_mapping()
|    95  if (check_allocated_memory_range(ptr, sizes[run], mem_type,  in check_child_memory_mapping()
|    98  result = check_child_tag_inheritance(ptr, sizes[run], mode);  in check_child_memory_mapping()
|   109  int run, fd, map_size, result = KSFT_PASS;  in check_child_file_mapping() (local)
|   113  for (run = 0; run < total; run++) {  in check_child_file_mapping()
|   118  map_size = sizes[run] + OVERFLOW + UNDERFLOW;  in check_child_file_mapping()
|   125  mte_initialize_current_context(mode, (uintptr_t)ptr, sizes[run]);  in check_child_file_mapping()
|   127  ptr = mte_insert_tags((void *)ptr, sizes[run]);  in check_child_file_mapping()
|   134  result = check_child_tag_inheritance(ptr, sizes[run], mode);  in check_child_file_mapping()
|   [all …]
|
| check_tags_inclusion.c |
|    46  int tag, run, result = KSFT_PASS;  in check_single_included_tags() (local)
|    56  for (run = 0; (run < RUNS) && (result == KSFT_PASS); run++) {  in check_single_included_tags()
|    76  int tag, run, result = KSFT_PASS;  in check_multiple_included_tags() (local)
|    88  for (run = 0; (run < RUNS) && (result == KSFT_PASS); run++) {  in check_multiple_included_tags()
|   108  int run, result = KSFT_PASS;  in check_all_included_tags() (local)
|   117  for (run = 0; (run < RUNS) && (result == KSFT_PASS); run++) {  in check_all_included_tags()
|   132  int run;  in check_none_included_tags() (local)
|   140  for (run = 0; run < RUNS; run++) {  in check_none_included_tags()
|
| /linux/arch/mips/kvm/ |
| emulate.c |
|   977  struct kvm_run *run = vcpu->run;  in kvm_mips_emulate_store() (local)
|  1001  run->mmio.len = 8;  in kvm_mips_emulate_store()
|  1011  run->mmio.len = 4;  in kvm_mips_emulate_store()
|  1020  run->mmio.len = 2;  in kvm_mips_emulate_store()
|  1029  run->mmio.len = 1;  in kvm_mips_emulate_store()
|  1040  run->mmio.len = 4;  in kvm_mips_emulate_store()
|  1070  run->mmio.len = 4;  in kvm_mips_emulate_store()
|  1255  run->mmio.phys_addr, run->mmio.len, data);  in kvm_mips_emulate_store()
|  1273  struct kvm_run *run = vcpu->run;  in kvm_mips_emulate_load() (local)
|  1500  run->mmio.phys_addr, run->mmio.len, run->mmio.data);  in kvm_mips_emulate_load()
|   [all …]
|
| /linux/arch/s390/kvm/ |
| diag.c |
|    76  vcpu->run->s.regs.gprs[rx]);  in __diag_page_ref_service()
|    78  if (vcpu->run->s.regs.gprs[rx] & 7)  in __diag_page_ref_service()
|    97  vcpu->run->s.regs.gprs[ry] = 8;  in __diag_page_ref_service()
|   111  vcpu->run->s.regs.gprs[ry] = 0;  in __diag_page_ref_service()
|   125  vcpu->run->s.regs.gprs[ry] = 0;  in __diag_page_ref_service()
|   131  vcpu->run->s.regs.gprs[ry] = 4;  in __diag_page_ref_service()
|   222  vcpu->run->s390_reset_flags = 0;  in __diag_ipl_functions()
|   239  vcpu->run->s390_reset_flags);  in __diag_ipl_functions()
|   257  vcpu->run->s.regs.gprs[4]);  in __diag_virtio_hypercall()
|   268  vcpu->run->s.regs.gprs[4]);  in __diag_virtio_hypercall()
|   [all …]
|
| /linux/tools/perf/scripts/python/ |
| stat-cpi.py |
|    23  def store(time, event, cpu, thread, val, ena, run):  (argument)
|    29  data[key] = [ val, ena, run]
|    35  def stat__cycles_k(cpu, thread, time, val, ena, run):  (argument)
|    36  store(time, "cycles", cpu, thread, val, ena, run);
|    38  def stat__instructions_k(cpu, thread, time, val, ena, run):  (argument)
|    41  def stat__cycles_u(cpu, thread, time, val, ena, run):  (argument)
|    42  store(time, "cycles", cpu, thread, val, ena, run);
|    44  def stat__instructions_u(cpu, thread, time, val, ena, run):  (argument)
|    47  def stat__cycles(cpu, thread, time, val, ena, run):  (argument)
|    48  store(time, "cycles", cpu, thread, val, ena, run);
|   [all …]
|
| /linux/fs/befs/ |
| endian.h |
|    74  befs_block_run run;  in fsrun_to_cpu() (local)
|    78  run.start = le16_to_cpu((__force __le16)n.start);  in fsrun_to_cpu()
|    79  run.len = le16_to_cpu((__force __le16)n.len);  in fsrun_to_cpu()
|    83  run.len = be16_to_cpu((__force __be16)n.len);  in fsrun_to_cpu()
|    85  return run;  in fsrun_to_cpu()
|    91  befs_disk_block_run run;  in cpu_to_fsrun() (local)
|    95  run.start = cpu_to_le16(n.start);  in cpu_to_fsrun()
|    96  run.len = cpu_to_le16(n.len);  in cpu_to_fsrun()
|    99  run.start = cpu_to_be16(n.start);  in cpu_to_fsrun()
|   100  run.len = cpu_to_be16(n.len);  in cpu_to_fsrun()
|   [all …]
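
The two helpers above swap a BeFS block run between on-disk and CPU byte order, choosing little- or big-endian conversion per volume. A userspace sketch of the same idea; the demo structs mirror befs_block_run's field names but use plain stdint types, so this is illustrative rather than the kernel code:

    #include <endian.h>
    #include <stdint.h>
    #include <stdbool.h>

    struct demo_disk_run { uint32_t allocation_group; uint16_t start, len; };
    struct demo_run      { uint32_t allocation_group; uint16_t start, len; };

    /* Convert an on-disk extent record to host byte order; "le" says
     * whether the volume was written little-endian.
     */
    static struct demo_run demo_run_to_cpu(struct demo_disk_run n, bool le)
    {
            struct demo_run run;

            if (le) {
                    run.allocation_group = le32toh(n.allocation_group);
                    run.start = le16toh(n.start);
                    run.len = le16toh(n.len);
            } else {
                    run.allocation_group = be32toh(n.allocation_group);
                    run.start = be16toh(n.start);
                    run.len = be16toh(n.len);
            }
            return run;
    }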
|
| datastream.c |
|    31  befs_block_run *run);
|    36  befs_block_run *run);
|    53  befs_block_run run;  in befs_read_datastream() (local)
|    67  bh = befs_bread_iaddr(sb, run);  in befs_read_datastream()
|   308  befs_block_run *run)  in befs_find_brun_indirect() (argument)
|   344  run->allocation_group =  in befs_find_brun_indirect()
|   346  run->start =  in befs_find_brun_indirect()
|   348  run->len =  in befs_find_brun_indirect()
|   417  befs_block_run *run)  in befs_find_brun_dblindirect() (argument)
|   517  run->start += offset;  in befs_find_brun_dblindirect()
|   [all …]
|
| /linux/arch/arm64/kvm/ |
| mmio.c |
|    94  struct kvm_run *run = vcpu->run;  in kvm_handle_mmio_return() (local)
|    97  data = kvm_mmio_read_buf(run->mmio.data, len);  in kvm_handle_mmio_return()
|   125  struct kvm_run *run = vcpu->run;  in io_mem_abort() (local)
|   139  run->exit_reason = KVM_EXIT_ARM_NISV;  in io_mem_abort()
|   141  run->arm_nisv.fault_ipa = fault_ipa;  in io_mem_abort()
|   176  run->mmio.is_write = is_write;  in io_mem_abort()
|   177  run->mmio.phys_addr = fault_ipa;  in io_mem_abort()
|   178  run->mmio.len = len;  in io_mem_abort()
|   184  memcpy(run->mmio.data, data_buf, len);  in io_mem_abort()
|   191  memcpy(run->mmio.data, data_buf, len);  in io_mem_abort()
|   [all …]
|
| /linux/tools/testing/selftests/kvm/ |
| set_memory_region_test.c |
|    58  struct kvm_run *run;  in vcpu_worker() (local)
|    67  run = vcpu_state(vm, VCPU_ID);  in vcpu_worker()
|    72  if (run->exit_reason == KVM_EXIT_IO) {  in vcpu_worker()
|    81  if (run->exit_reason != KVM_EXIT_MMIO)  in vcpu_worker()
|    85  TEST_ASSERT(run->mmio.len == 8,  in vcpu_worker()
|    90  run->mmio.phys_addr);  in vcpu_worker()
|    91  memcpy(run->mmio.data, &MMIO_VAL, 8);  in vcpu_worker()
|   262  struct kvm_run *run;  in test_delete_memory_region() (local)
|   289  run = vcpu_state(vm, VCPU_ID);  in test_delete_memory_region()
|   312  struct kvm_run *run;  in test_zero_memory_regions() (local)
|   [all …]
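
vcpu_worker() above completes guest MMIO reads from userspace, the mirror image of the kernel-side run->mmio setup seen in the riscv, mips, and arm64 entries. A sketch of that KVM_EXIT_MMIO handling; MMIO_REPLY is an illustrative value, and a little-endian host is assumed for the partial copy:

    #include <linux/kvm.h>
    #include <string.h>

    static const unsigned long long MMIO_REPLY = 0xbeefcafeULL;  /* example value */

    static void complete_mmio_read(struct kvm_run *run)
    {
            if (run->exit_reason != KVM_EXIT_MMIO || run->mmio.is_write)
                    return;

            /* phys_addr/len describe the guest access; len is at most 8.
             * Fill data[] before the next KVM_RUN to complete the read.
             */
            if (run->mmio.len <= sizeof(MMIO_REPLY))
                    memcpy(run->mmio.data, &MMIO_REPLY, run->mmio.len);
    }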
|
| /linux/tools/testing/selftests/kvm/lib/s390x/ |
| diag318_test_handler.c |
|    31  struct kvm_run *run;  in diag318_handler() (local)
|    37  run = vcpu_state(vm, VCPU_ID);  in diag318_handler()
|    39  TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,  in diag318_handler()
|    41  TEST_ASSERT(run->s390_sieic.icptcode == ICPT_INSTRUCTION,  in diag318_handler()
|    42  "Unexpected intercept code: 0x%x", run->s390_sieic.icptcode);  in diag318_handler()
|    43  TEST_ASSERT((run->s390_sieic.ipa & 0xff00) == IPA0_DIAG,  in diag318_handler()
|    44  "Unexpected IPA0 code: 0x%x", (run->s390_sieic.ipa & 0xff00));  in diag318_handler()
|    46  reg = (run->s390_sieic.ipa & 0x00f0) >> 4;  in diag318_handler()
|    47  diag318_info = run->s.regs.gprs[reg];  in diag318_handler()
|