/linux/tools/testing/selftests/kvm/x86_64/

vmx_msrs_test.c
    15   static void vmx_fixed1_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index,  in vmx_fixed1_msr_test() (argument)
    18   uint64_t val = vcpu_get_msr(vcpu, msr_index);  in vmx_fixed1_msr_test()
    24   vcpu_set_msr(vcpu, msr_index, val & ~BIT_ULL(bit));  in vmx_fixed1_msr_test()
    25   vcpu_set_msr(vcpu, msr_index, val);  in vmx_fixed1_msr_test()
    29   static void vmx_fixed0_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index,  in vmx_fixed0_msr_test() (argument)
    32   uint64_t val = vcpu_get_msr(vcpu, msr_index);  in vmx_fixed0_msr_test()
    38   vcpu_set_msr(vcpu, msr_index, val | BIT_ULL(bit));  in vmx_fixed0_msr_test()
    39   vcpu_set_msr(vcpu, msr_index, val);  in vmx_fixed0_msr_test()
    43   static void vmx_fixed0and1_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index)  in vmx_fixed0and1_msr_test() (argument)
    45   vmx_fixed0_msr_test(vcpu, msr_index, GENMASK_ULL(31, 0));  in vmx_fixed0and1_msr_test()
    [all …]
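The vmx_msrs_test.c hits are a read/flip/restore probe of the VMX fixed-bit MSRs: read the current value, write it back with one bit of the supplied mask inverted, then restore the original. The sketch below shows only that loop shape, assuming the kvm selftest helpers vcpu_get_msr()/vcpu_set_msr() (which assert that the underlying ioctl succeeds); the function name is illustrative and this is not a copy of the test's actual assertions.

    #include "kvm_util.h"   /* kvm selftest framework: vcpu_get_msr(), vcpu_set_msr() */
    #include "processor.h"

    /* For every bit the mask marks as "fixed to 1": clear it, attempt the
     * write, then restore the original value.  Sketch of the loop shape only. */
    static void probe_fixed1_bits(struct kvm_vcpu *vcpu, uint32_t msr_index,
                                  uint64_t fixed1_mask)
    {
        uint64_t val = vcpu_get_msr(vcpu, msr_index);
        int bit;

        for (bit = 0; bit < 64; bit++) {
            if (!(fixed1_mask & BIT_ULL(bit)))
                continue;
            vcpu_set_msr(vcpu, msr_index, val & ~BIT_ULL(bit));
            vcpu_set_msr(vcpu, msr_index, val);   /* put it back */
        }
    }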
|
userspace_msr_exit_test.c
    388  static void process_rdmsr(struct kvm_vcpu *vcpu, uint32_t msr_index)  in process_rdmsr() (argument)
    395  TEST_ASSERT(run->msr.index == msr_index,  in process_rdmsr()
    397  run->msr.index, msr_index);  in process_rdmsr()
    420  static void process_wrmsr(struct kvm_vcpu *vcpu, uint32_t msr_index)  in process_wrmsr() (argument)
    427  TEST_ASSERT(run->msr.index == msr_index,  in process_wrmsr()
    429  run->msr.index, msr_index);  in process_wrmsr()
    486  uint32_t msr_index)  in run_guest_then_process_rdmsr() (argument)
    489  process_rdmsr(vcpu, msr_index);  in run_guest_then_process_rdmsr()
    493  uint32_t msr_index)  in run_guest_then_process_wrmsr() (argument)
    496  process_wrmsr(vcpu, msr_index);  in run_guest_then_process_wrmsr()
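These hits are the user-space side of a filtered MSR access: after KVM_RUN returns with an MSR exit, the test compares kvm_run->msr.index against the MSR it expected the guest to touch. A minimal sketch of that check, assuming the selftest helpers vcpu_run() and TEST_ASSERT() and a VM already configured to exit to user space for this MSR; the error strings and the returned value are illustrative.

    #include "kvm_util.h"   /* vcpu_run(), TEST_ASSERT(), struct kvm_vcpu */

    /* Run the guest until its rdmsr traps, verify the exit is a user-space
     * MSR read for the MSR we expect, then complete it with a made-up value. */
    static void handle_one_rdmsr_exit(struct kvm_vcpu *vcpu, uint32_t msr_index)
    {
        struct kvm_run *run = vcpu->run;

        vcpu_run(vcpu);

        TEST_ASSERT(run->exit_reason == KVM_EXIT_X86_RDMSR,
                    "Unexpected exit reason: %u", run->exit_reason);
        TEST_ASSERT(run->msr.index == msr_index,
                    "Unexpected MSR exit: 0x%x, expected 0x%x",
                    run->msr.index, msr_index);

        run->msr.data = 0;   /* value handed back to the guest's rdmsr */
    }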
|
/linux/arch/x86/xen/

pmu.c
    146  static bool is_intel_pmu_msr(u32 msr_index, int *type, int *index)  in is_intel_pmu_msr() (argument)
    155  switch (msr_index) {  in is_intel_pmu_msr()
    170  if ((msr_index >= MSR_CORE_PERF_FIXED_CTR0) &&  in is_intel_pmu_msr()
    171  (msr_index < MSR_CORE_PERF_FIXED_CTR0 +  in is_intel_pmu_msr()
    173  *index = msr_index - MSR_CORE_PERF_FIXED_CTR0;  in is_intel_pmu_msr()
    178  if ((msr_index >= MSR_P6_EVNTSEL0) &&  in is_intel_pmu_msr()
    179  (msr_index < MSR_P6_EVNTSEL0 + intel_num_arch_counters)) {  in is_intel_pmu_msr()
    180  *index = msr_index - MSR_P6_EVNTSEL0;  in is_intel_pmu_msr()
    185  msr_index_pmc = msr_index & MSR_PMC_ALIAS_MASK;  in is_intel_pmu_msr()
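What is_intel_pmu_msr() is doing in these hits is range classification: an incoming MSR number is mapped to a counter type plus a zero-based index by testing which architectural MSR block it falls into. A standalone sketch of that idea with illustrative constants and a simplified type enum; the real function also handles the global-control MSRs and the PMC alias range.

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative values only: the real numbers come from msr-index.h and
     * the counter counts are discovered from CPUID at boot. */
    #define FIXED_CTR0_BASE   0x309
    #define EVNTSEL0_BASE     0x186
    #define NUM_FIXED_CTRS    3
    #define NUM_ARCH_CTRS     4

    enum pmu_reg_type { PMU_FIXED, PMU_EVNTSEL };

    /* Map an MSR number to (type, index) by checking which contiguous MSR
     * range it lands in, the same shape as is_intel_pmu_msr(). */
    static bool classify_pmu_msr(uint32_t msr, enum pmu_reg_type *type, int *index)
    {
        if (msr >= FIXED_CTR0_BASE && msr < FIXED_CTR0_BASE + NUM_FIXED_CTRS) {
            *type = PMU_FIXED;
            *index = msr - FIXED_CTR0_BASE;
            return true;
        }
        if (msr >= EVNTSEL0_BASE && msr < EVNTSEL0_BASE + NUM_ARCH_CTRS) {
            *type = PMU_EVNTSEL;
            *index = msr - EVNTSEL0_BASE;
            return true;
        }
        return false;
    }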
|
/linux/drivers/thermal/intel/

intel_powerclamp.c
    316  int msr_index;  (member)
    321  .msr_index = MSR_PKG_C##id##_RESIDENCY, \
    342  while (info->msr_index) {  in has_pkg_state_counter()
    343  if (!rdmsrl_safe(info->msr_index, &val))  in has_pkg_state_counter()
    357  while (info->msr_index) {  in pkg_state_counter()
    359  if (!rdmsrl_safe(info->msr_index, &val))  in pkg_state_counter()
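The powerclamp hits show a common kernel idiom: a zero-terminated table of package C-state residency MSRs is walked, and each entry is read with rdmsrl_safe() so a model that lacks the MSR simply skips it instead of faulting. A sketch of that walk; the pkg_cstate_info layout and the summing helper are illustrative stand-ins for the driver's table, which is built with the MSR_PKG_C##id##_RESIDENCY macro shown above.

    #include <linux/types.h>
    #include <asm/msr.h>

    /* Stand-in for the driver's table: ends with a zero msr_index sentinel. */
    struct pkg_cstate_info {
        int msr_index;
    };

    /* Sum the residency counters of whatever package C-state MSRs this CPU
     * implements; rdmsrl_safe() returns non-zero if the read faults. */
    static u64 sum_pkg_residency(const struct pkg_cstate_info *info)
    {
        u64 total = 0, val;

        for (; info->msr_index; info++) {
            if (!rdmsrl_safe(info->msr_index, &val))
                total += val;
        }
        return total;
    }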
|
/linux/arch/x86/kvm/vmx/

hyperv.c
    111  void nested_evmcs_filter_control_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)  in nested_evmcs_filter_control_msr() (argument)
    121  switch (msr_index) {  in nested_evmcs_filter_control_msr()
|
nested.c
    998   u32 msr_index,  in nested_vmx_get_vmexit_msr_value() (argument)
    1022  msr_index);  in nested_vmx_get_vmexit_msr_value()
    1090  if (e.index == msr_index)  in nested_msr_store_list_has_msr()
    1097  u32 msr_index)  in prepare_vmx_msr_autostore_list() (argument)
    1121  msr_index);  in prepare_vmx_msr_autostore_list()
    1299  switch (msr_index) {  in vmx_get_control_msr()
    1413  switch (msr_index) {  in vmx_get_fixed0_msr()
    1454  switch (msr_index) {  in vmx_set_vmx_msr()
    1510  switch (msr_index) {  in vmx_get_vmx_msr()
    6181  msr_index -= 0xc0000000;  in nested_vmx_exit_handled_msr()
    [all …]
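The 0xc0000000 rebase at line 6181 reflects the VMX MSR-bitmap layout used when checking whether L1 intercepts an MSR: the 4 KiB bitmap covers the low range 0x00000000–0x00001fff in its first half and the high range 0xc0000000–0xc0001fff in its second half, so high MSRs are rebased before indexing. A simplified sketch of a read-bitmap lookup under that layout; the helper name is made up, and KVM itself operates on the mapped bitmap page with test_bit().

    #include <stdbool.h>
    #include <stdint.h>

    /* Check the VMX MSR read bitmap: low MSRs use bytes 0..1023, high MSRs
     * (0xc0000000..0xc0001fff) are rebased and use bytes 1024..2047. */
    static bool msr_read_intercepted(const uint8_t bitmap[4096], uint32_t msr)
    {
        uint32_t base = 0;

        if (msr >= 0xc0000000) {
            msr -= 0xc0000000;
            base = 1024;            /* second half of the read bitmap */
        }
        if (msr > 0x1fff)
            return true;            /* outside both ranges: always intercept */

        return bitmap[base + msr / 8] & (1u << (msr % 8));
    }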
|
hyperv.h
    59   void nested_evmcs_filter_control_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
|
nested.h
    32   int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
    33   int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
|
vmx.c
    2202  u32 msr_index = msr_info->index;  in vmx_set_msr() (local)
    2206  switch (msr_index) {  in vmx_set_msr()
    2265  kvm_pr_unimpl_wrmsr(vcpu, msr_index, data);  in vmx_set_msr()
    2393  [msr_index - MSR_IA32_SGXLEPUBKEYHASH0] = data;  in vmx_set_msr()
    2400  return vmx_set_vmx_msr(vcpu, msr_index, data);  in vmx_set_msr()
    2486  msr = vmx_find_uret_msr(vmx, msr_index);  in vmx_set_msr()
    2494  if (msr_index == MSR_IA32_ARCH_CAPABILITIES)  in vmx_set_msr()
|
/linux/tools/testing/selftests/kvm/lib/x86_64/

processor.c
    763   uint64_t kvm_get_feature_msr(uint64_t msr_index)  in kvm_get_feature_msr() (argument)
    772   buffer.entry.index = msr_index;  in kvm_get_feature_msr()
    883   uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index)  in vcpu_get_msr() (argument)
    891   buffer.entry.index = msr_index;  in vcpu_get_msr()
    898   int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value)  in _vcpu_set_msr() (argument)
    907   buffer.entry.index = msr_index;  in _vcpu_set_msr()
    1016  bool kvm_msr_is_in_save_restore_list(uint32_t msr_index)  in kvm_msr_is_in_save_restore_list() (argument)
    1022  if (list->indices[i] == msr_index)  in kvm_msr_is_in_save_restore_list()
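The buffer.entry.index assignments are the usual KVM_GET_MSRS/KVM_SET_MSRS plumbing: the ioctl takes a struct kvm_msrs header followed by an array of kvm_msr_entry, so the selftest library builds a one-entry buffer on the stack. A sketch of the read side against a raw vCPU fd; the library's vcpu_get_msr() wraps this with its own error checking, and the function name here is illustrative.

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Read one MSR from a vCPU fd via KVM_GET_MSRS using a single-entry
     * kvm_msrs buffer; the ioctl returns the number of entries processed. */
    static int read_one_msr(int vcpu_fd, uint32_t msr_index, uint64_t *value)
    {
        struct {
            struct kvm_msrs header;
            struct kvm_msr_entry entry;
        } buffer = {};
        int r;

        buffer.header.nmsrs = 1;
        buffer.entry.index = msr_index;

        r = ioctl(vcpu_fd, KVM_GET_MSRS, &buffer.header);
        if (r == 1)
            *value = buffer.entry.data;
        return r;
    }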
|
/linux/arch/x86/kvm/

kvm_emulate.h
    209  int (*set_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
    210  int (*get_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
    211  int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
|
x86.c
    484   if (msrs_to_save[i] == msr_index)  in kvm_is_advertised_msr()
    489   if (emulated_msrs[i] == msr_index)  in kvm_is_advertised_msr()
    7398  switch (msr_index) {  in kvm_probe_msr_to_save()
    7431  (msr_index - MSR_IA32_RTIT_ADDR0_A >=  in kvm_probe_msr_to_save()
    8452  u32 msr_index, u64 *pdata)  in emulator_get_msr_with_filter() (argument)
    8466  trace_kvm_msr_read_ex(msr_index);  in emulator_get_msr_with_filter()
    8470  trace_kvm_msr_read(msr_index, *pdata);  in emulator_get_msr_with_filter()
    8475  u32 msr_index, u64 data)  in emulator_set_msr_with_filter() (argument)
    8489  trace_kvm_msr_write_ex(msr_index, data);  in emulator_set_msr_with_filter()
    8493  trace_kvm_msr_write(msr_index, data);  in emulator_set_msr_with_filter()
    [all …]
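emulator_get_msr_with_filter() and its write twin follow one pattern: perform the filtered MSR access, emit the failure tracepoint if it was refused or faulted, otherwise emit the success tracepoint with the data. A simplified sketch of that control flow in the context of arch/x86/kvm/x86.c; kvm_get_msr_with_filter() and the tracepoints are the real in-kernel symbols, but the error propagation back to the emulator (including the user-space exit path) is abbreviated here.

    /* Sketch: read an MSR on behalf of the instruction emulator, tracing
     * both the success and the rejected/faulting path.  X86EMUL_* handling
     * and the deferral to user space are omitted. */
    static int emulator_read_msr_sketch(struct kvm_vcpu *vcpu, u32 msr_index,
                                        u64 *pdata)
    {
        int r = kvm_get_msr_with_filter(vcpu, msr_index, pdata);

        if (r) {
            trace_kvm_msr_read_ex(msr_index);   /* refused or faulted */
            return r;
        }

        trace_kvm_msr_read(msr_index, *pdata);
        return 0;
    }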
|
emulate.c
    3316  u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);  in em_wrmsr() (local)
    3322  r = ctxt->ops->set_msr_with_filter(ctxt, msr_index, msr_data);  in em_wrmsr()
    3332  u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);  in em_rdmsr() (local)
    3336  r = ctxt->ops->get_msr_with_filter(ctxt, msr_index, &msr_data);  in em_rdmsr()
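em_wrmsr() is where the WRMSR register convention gets unpacked: ECX carries the MSR index and EDX:EAX carries the 64-bit value. A sketch of that step, assuming the emulator's reg_read() accessor and the ->set_msr_with_filter() callback declared in kvm_emulate.h above; the return-code and fault handling of the real function is omitted.

    /* Sketch: emulate WRMSR.  The guest puts the MSR number in ECX and the
     * value in EDX:EAX; stitch the two halves together and hand the write
     * to the ->set_msr_with_filter() hook. */
    static int em_wrmsr_sketch(struct x86_emulate_ctxt *ctxt)
    {
        u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
        u64 msr_data  = (u32)reg_read(ctxt, VCPU_REGS_RAX) |
                        ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);

        return ctxt->ops->set_msr_with_filter(ctxt, msr_index, msr_data);
    }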
|
/linux/tools/testing/selftests/kvm/include/x86_64/

processor.h
    857   bool kvm_msr_is_in_save_restore_list(uint32_t msr_index);
    858   uint64_t kvm_get_feature_msr(uint64_t msr_index);
    1085  uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index);
    1086  int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value);
|