| /linux/arch/x86/kvm/vmx/ |
| A D | nested.h |
|   107  return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);  in nested_cpu_vmx_misc_cr3_count()
|   117  return to_vmx(vcpu)->nested.msrs.misc_low &  in nested_cpu_has_vmwrite_any_field()
|   128  return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &  in nested_cpu_supports_monitor_trap_flag()
|   134  return to_vmx(vcpu)->nested.msrs.secondary_ctls_high &  in nested_cpu_has_vmx_shadow_vmcs()
|   265  u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;  in nested_guest_cr0_valid()
|   266  u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;  in nested_guest_cr0_valid()
|   269  if (to_vmx(vcpu)->nested.msrs.secondary_ctls_high &  in nested_guest_cr0_valid()
|   279  u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;  in nested_host_cr0_valid()
|   280  u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;  in nested_host_cr0_valid()
|   287  u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0;  in nested_cr4_valid()
|   [all …]
|
| A D | nested.c |
|  1512  *pdata = msrs->basic;  in vmx_get_vmx_msr()
|  1533  msrs->exit_ctls_low,  in vmx_get_vmx_msr()
|  1534  msrs->exit_ctls_high);  in vmx_get_vmx_msr()
|  1541  msrs->entry_ctls_low,  in vmx_get_vmx_msr()
|  1548  msrs->misc_low,  in vmx_get_vmx_msr()
|  1549  msrs->misc_high);  in vmx_get_vmx_msr()
|  6970  msrs->exit_ctls_low =  in nested_vmx_setup_exit_ctls()
|  6993  msrs->entry_ctls_low =  in nested_vmx_setup_entry_ctls()
|  7079  msrs->ept_caps =  in nested_vmx_setup_secondary_ctls()
|  7133  msrs->misc_low |=  in nested_vmx_setup_misc_data()
|  [all …]
|
| A D | vmx.c |
|  4573  vmx->nested.msrs.secondary_ctls_high |= control;  in vmx_adjust_secondary_exec_control()
|  4855  memcpy(&vmx->nested.msrs, &vmcs_config.nested, sizeof(vmx->nested.msrs));  in __vmx_vcpu_reset()
|  7179  struct perf_guest_switch_msr *msrs;  in atomic_switch_perf_msrs() local
|  7188  if (!msrs)  in atomic_switch_perf_msrs()
|  7192  if (msrs[i].host == msrs[i].guest)  in atomic_switch_perf_msrs()
|  7193  clear_atomic_switch_msr(vmx, msrs[i].msr);  in atomic_switch_perf_msrs()
|  7195  add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,  in atomic_switch_perf_msrs()
|  7196  msrs[i].host, false);  in atomic_switch_perf_msrs()
|  7708  vmx->nested.msrs.cr0_fixed1 = 0xffffffff;  in nested_vmx_cr_fixed1_bits_update()
|  7709  vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE;  in nested_vmx_cr_fixed1_bits_update()
|  [all …]
|
| A D | vmx.h |
|   233  struct nested_vmx_msrs msrs;  member
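
The nested.h hits at lines 265-287 above all load the cr0/cr4 fixed0/fixed1 values advertised in the nested VMX MSRs and feed them into a validity check. As a hedged illustration only (the helper name below is made up, not the one nested.h defines), the rule those values encode is: every bit set in FIXED0 must be set in the candidate value, and every bit clear in FIXED1 must be clear.

    /*
     * Illustrative sketch of the VMX CR0/CR4 fixed-bit rule implied by the
     * cr0_fixed0/cr0_fixed1/cr4_fixed0 reads listed above; not copied from
     * nested.h.
     */
    #include <linux/types.h>

    static inline bool cr_fixed_bits_ok(u64 val, u64 fixed0, u64 fixed1)
    {
            /* Keep only bits allowed to be 1, force bits required to be 1. */
            return ((val & fixed1) | fixed0) == val;
    }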
|
| /linux/arch/x86/lib/ |
| A D | msr.c |
|    11  struct msr __percpu *msrs = NULL;  in msrs_alloc() local
|    13  msrs = alloc_percpu(struct msr);  in msrs_alloc()
|    14  if (!msrs) {  in msrs_alloc()
|    19  return msrs;  in msrs_alloc()
|    23  void msrs_free(struct msr __percpu *msrs)  in msrs_free() argument
|    25  free_percpu(msrs);  in msrs_free()
|
| A D | msr-smp.c |
|    13  if (rv->msrs)  in __rdmsr_on_cpu()
|    14  reg = this_cpu_ptr(rv->msrs);  in __rdmsr_on_cpu()
|    26  if (rv->msrs)  in __wrmsr_on_cpu()
|    27  reg = this_cpu_ptr(rv->msrs);  in __wrmsr_on_cpu()
|    98  struct msr __percpu *msrs,  in __rwmsr_on_cpus() argument
|   106  rv.msrs = msrs;  in __rwmsr_on_cpus()
|   125  void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs)  in rdmsr_on_cpus() argument
|   127  __rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);  in rdmsr_on_cpus()
|   139  void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs)  in wrmsr_on_cpus() argument
|   141  __rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);  in wrmsr_on_cpus()
|
| /linux/drivers/platform/x86/intel/ifs/ |
| A D | load.c |
|   121  const struct ifs_test_msrs *msrs;  in copy_hashes_authenticate_chunks() local
|   128  msrs = ifs_get_test_msrs(dev);  in copy_hashes_authenticate_chunks()
|   130  wrmsrl(msrs->copy_hashes, ifs_hash_ptr);  in copy_hashes_authenticate_chunks()
|   131  rdmsrl(msrs->copy_hashes_status, hashes_status.data);  in copy_hashes_authenticate_chunks()
|   152  wrmsrl(msrs->copy_chunks, linear_addr);  in copy_hashes_authenticate_chunks()
|   153  rdmsrl(msrs->copy_chunks_status, chunk_status.data);  in copy_hashes_authenticate_chunks()
|   185  const struct ifs_test_msrs *msrs;  in copy_hashes_authenticate_chunks_gen2() local
|   195  msrs = ifs_get_test_msrs(dev);  in copy_hashes_authenticate_chunks_gen2()
|   198  wrmsrl(msrs->copy_hashes, ifs_hash_ptr);  in copy_hashes_authenticate_chunks_gen2()
|   219  wrmsrl(msrs->test_ctrl, INVALIDATE_STRIDE);  in copy_hashes_authenticate_chunks_gen2()
|   [all …]
|
| /linux/Documentation/trace/postprocess/ |
| A D | decode_msr.py |
|     7  msrs = dict()  variable
|    13  msrs[int(m.group(2), 16)] = m.group(1)
|    26  if num in msrs:
|    27  r = msrs[num]
|
| /linux/arch/x86/include/asm/ |
| A D | msr.h |
|    20  struct msr __percpu *msrs;  member
|   326  void msrs_free(struct msr __percpu *msrs);
|   335  void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
|   336  void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
|   365  struct msr __percpu *msrs)  in rdmsr_on_cpus() argument
|   367  rdmsr_on_cpu(0, msr_no, raw_cpu_ptr(&msrs->l), raw_cpu_ptr(&msrs->h));  in rdmsr_on_cpus()
|   370  struct msr __percpu *msrs)  in wrmsr_on_cpus() argument
|   372  wrmsr_on_cpu(0, msr_no, raw_cpu_read(msrs->l), raw_cpu_read(msrs->h));  in wrmsr_on_cpus()
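
Taken together, the msr.c/msr-smp.c hits above and these msr.h prototypes describe a small per-CPU MSR helper API: msrs_alloc() hands back a per-CPU struct msr buffer, rdmsr_on_cpus()/wrmsr_on_cpus() read or write one MSR across a cpumask into that buffer, and msrs_free() releases it. Below is a hedged sketch of that usage pattern; the function name, the printout, and the choice of MSR_IA32_MCG_CTL are illustrative (echoing the amd64_edac.c hits further down), not code from the tree.

    /*
     * Hedged sketch of the per-CPU MSR helpers listed above; not a real driver.
     */
    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/percpu.h>
    #include <linux/printk.h>
    #include <asm/msr.h>

    static int dump_mcg_ctl(const struct cpumask *mask)
    {
            struct msr __percpu *msrs;
            int cpu;

            msrs = msrs_alloc();            /* one struct msr per possible CPU */
            if (!msrs)
                    return -ENOMEM;

            /* Read MSR_IA32_MCG_CTL on every CPU in @mask into the per-CPU buffer. */
            rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

            for_each_cpu(cpu, mask) {
                    struct msr *reg = per_cpu_ptr(msrs, cpu);

                    pr_info("cpu%d: MCG_CTL = %#llx\n", cpu, reg->q);
            }

            msrs_free(msrs);
            return 0;
    }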
|
| /linux/Documentation/devicetree/bindings/arm/ |
| A D | qcom,coresight-tpdm.yaml |
|    62  qcom,dsb-msrs-num:
|    71  qcom,cmb-msrs-num:
|   114  qcom,dsb-msrs-num = <16>;
|   134  qcom,cmb-msrs-num = <32>;
|
| /linux/tools/testing/selftests/kvm/include/x86_64/ |
| A D | processor.h |
|   418  struct kvm_msrs msrs;  member
|   861  struct kvm_msrs *msrs)  in vcpu_msrs_get() argument
|   863  int r = __vcpu_ioctl(vcpu, KVM_GET_MSRS, msrs);  in vcpu_msrs_get()
|   865  TEST_ASSERT(r == msrs->nmsrs,  in vcpu_msrs_get()
|   867  r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index);  in vcpu_msrs_get()
|   869  static inline void vcpu_msrs_set(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs)  in vcpu_msrs_set() argument
|   871  int r = __vcpu_ioctl(vcpu, KVM_SET_MSRS, msrs);  in vcpu_msrs_set()
|   873  TEST_ASSERT(r == msrs->nmsrs,  in vcpu_msrs_set()
|   875  r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index);  in vcpu_msrs_set()
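
The vcpu_msrs_get()/vcpu_msrs_set() wrappers shown above take a caller-filled struct kvm_msrs and assert that KVM processed every entry. The following is a hedged sketch of how a selftest could read one MSR through them; the helper name and single-entry buffer are illustrative, and the fill pattern mirrors the vcpu_save_state() hits in processor.c further down.

    /*
     * Hedged sketch, not from the tree: read a single MSR via the
     * vcpu_msrs_get() wrapper listed above. struct kvm_msrs ends in a
     * flexible entries[] array, so the buffer is sized for one entry.
     */
    #include <stdint.h>
    #include <stdlib.h>

    #include "kvm_util.h"
    #include "processor.h"

    static uint64_t read_one_msr(struct kvm_vcpu *vcpu, uint32_t index)
    {
            struct kvm_msrs *msrs;
            uint64_t val;

            msrs = calloc(1, sizeof(*msrs) + sizeof(msrs->entries[0]));
            TEST_ASSERT(msrs, "calloc() failed");

            msrs->nmsrs = 1;
            msrs->entries[0].index = index;
            vcpu_msrs_get(vcpu, msrs);      /* asserts all nmsrs entries were returned */

            val = msrs->entries[0].data;
            free(msrs);
            return val;
    }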
|
| /linux/Documentation/virt/kvm/x86/ |
| A D | cpuid.rst |
|    44  KVM_FEATURE_CLOCKSOURCE    0  kvmclock available at msrs
|    52  KVM_FEATURE_CLOCKSOURCE2   3  kvmclock available at msrs
|
| /linux/arch/x86/kernel/cpu/mce/ |
| A D | amd.c |
|   637  u32 msrs[NR_BLOCKS];  in disable_err_thresholding() local
|   640  msrs[0] = 0x00000413;  /* MC4_MISC0 */  in disable_err_thresholding()
|   641  msrs[1] = 0xc0000408;  /* MC4_MISC1 */  in disable_err_thresholding()
|   649  msrs[0] = MSR_AMD64_SMCA_MCx_MISC(bank);  in disable_err_thresholding()
|   664  msr_clear_bit(msrs[i], 62);  in disable_err_thresholding()
|
| /linux/tools/testing/selftests/kvm/lib/x86_64/ |
| A D | processor.c |
|  1066  state = malloc(sizeof(*state) + msr_list->nmsrs * sizeof(state->msrs.entries[0]));  in vcpu_save_state()
|  1090  state->msrs.nmsrs = msr_list->nmsrs;  in vcpu_save_state()
|  1092  state->msrs.entries[i].index = msr_list->indices[i];  in vcpu_save_state()
|  1093  vcpu_msrs_get(vcpu, &state->msrs);  in vcpu_save_state()
|  1103  vcpu_msrs_set(vcpu, &state->msrs);  in vcpu_load_state()
|
| /linux/drivers/edac/ |
| A D | amd64_edac.c |
|    15  static struct msr __percpu *msrs;  variable
|  3203  rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);  in nb_mce_bank_enabled_on_node()
|  3206  struct msr *reg = per_cpu_ptr(msrs, cpu);  in nb_mce_bank_enabled_on_node()
|  3235  rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);  in toggle_ecc_err_reporting()
|  3239  struct msr *reg = per_cpu_ptr(msrs, cpu);  in toggle_ecc_err_reporting()
|  4163  msrs = msrs_alloc();  in amd64_edac_init()
|  4164  if (!msrs)  in amd64_edac_init()
|  4200  msrs_free(msrs);  in amd64_edac_init()
|  4201  msrs = NULL;  in amd64_edac_init()
|  4231  msrs_free(msrs);  in amd64_edac_exit()
|  [all …]
|
| /linux/arch/x86/kvm/ |
| A D | x86.c |
|   574  if (msrs->registered) {  in kvm_on_user_return()
|   575  msrs->registered = false;  in kvm_on_user_return()
|   580  values = &msrs->values[slot];  in kvm_on_user_return()
|   635  msrs->values[i].host = value;  in kvm_user_return_msr_cpu_online()
|   636  msrs->values[i].curr = value;  in kvm_user_return_msr_cpu_online()
|   652  msrs->values[slot].curr = value;  in kvm_set_user_return_msr()
|   653  if (!msrs->registered) {  in kvm_set_user_return_msr()
|   656  msrs->registered = true;  in kvm_set_user_return_msr()
|   666  if (msrs->registered)  in drop_user_return_notifiers()
|  4539  struct kvm_msrs msrs;  in msr_io() local
|  [all …]
|
| /linux/arch/arm64/boot/dts/qcom/ |
| A D | sa8775p.dtsi |
|  1942  qcom,cmb-msrs-num = <32>;
|  2000  qcom,cmb-msrs-num = <32>;
|  2282  qcom,cmb-msrs-num = <32>;
|  2302  qcom,cmb-msrs-num = <32>;
|  2322  qcom,cmb-msrs-num = <32>;
|  2342  qcom,cmb-msrs-num = <32>;
|  2362  qcom,dsb-msrs-num = <32>;
|  2674  qcom,cmb-msrs-num = <32>;
|  2694  qcom,dsb-msrs-num = <32>;
|  2776  qcom,cmb-msrs-num = <32>;
|  [all …]
|
| /linux/Documentation/virt/kvm/ |
| A D | api.rst |
|   227  __u32 nmsrs; /* number of msrs in entries */
|   232  kvm adjusts nmsrs to reflect the actual number of msrs and fills in the
|   235  KVM_GET_MSR_INDEX_LIST returns the guest msrs that are supported. The list
|   637  :Returns: number of msrs successfully returned;
|   653  __u32 nmsrs; /* number of msrs in entries */
|   677  :Returns: number of msrs successfully set (see below), -1 on error
|  4166  __u32 nmsrs; /* number of msrs in bitmap */
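
The api.rst excerpts above describe the shared struct kvm_msrs/kvm_msr_list header (an nmsrs count followed by an array) and note that KVM adjusts nmsrs to the actual number of supported MSRs. Below is a hedged userspace sketch of that size-probe handshake for KVM_GET_MSR_INDEX_LIST; kvm_fd is assumed to be an open /dev/kvm descriptor and error handling is deliberately minimal.

    /*
     * Hedged sketch of the KVM_GET_MSR_INDEX_LIST handshake described above:
     * probe with nmsrs = 0 so KVM reports the required count (E2BIG), then
     * retry with a correctly sized buffer.
     */
    #include <errno.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static struct kvm_msr_list *get_msr_index_list(int kvm_fd)
    {
            struct kvm_msr_list probe = { .nmsrs = 0 };
            struct kvm_msr_list *list;

            /* KVM adjusts nmsrs to the actual number of supported MSRs. */
            if (ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &probe) < 0 && errno != E2BIG)
                    return NULL;

            list = calloc(1, sizeof(*list) + probe.nmsrs * sizeof(list->indices[0]));
            if (!list)
                    return NULL;

            list->nmsrs = probe.nmsrs;
            if (ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list) < 0) {
                    free(list);
                    return NULL;
            }
            return list;
    }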
|