/xen-4.10.0-shim-comet/xen/arch/x86/cpu/mcheck/

mce.h
    81  static inline uint64_t mca_rdmsr(unsigned int msr)  in mca_rdmsr() argument
    84      if (intpose_lookup(smp_processor_id(), msr, &val) == NULL)  in mca_rdmsr()
    85          rdmsrl(msr, val);  in mca_rdmsr()
    90  #define mca_wrmsr(msr, val) do { \  argument
    91      if ( !intpose_inval(smp_processor_id(), msr) ) \
    92          wrmsrl(msr, val); \
   166      if (msr >= MSR_IA32_MC0_CTL2 &&  in mce_vendor_bank_msr()
   172      switch (msr) {  in mce_vendor_bank_msr()
   183  static inline int mce_bank_msr(const struct vcpu *v, uint32_t msr)  in mce_bank_msr() argument
   185      if ( (msr >= MSR_IA32_MC0_CTL &&  in mce_bank_msr()
  [all …]

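The mca_rdmsr()/mca_wrmsr() hits above show the interpose pattern used for error injection: a read first consults a per-CPU (cpu, msr) table and only falls back to the hardware rdmsrl() when nothing was interposed, and a write is dropped when it would clobber an interposed value. The mce_bank_msr() hits encode the architectural bank layout, where bank i owns the four consecutive MSRs CTL/STATUS/ADDR/MISC starting at MSR_IA32_MC0_CTL (0x400). A minimal standalone sketch of that range check (the helper name is made up for illustration):

#include <stdbool.h>
#include <stdint.h>

#define MSR_IA32_MC0_CTL 0x400u   /* architectural base of MCA bank 0 */

/* True iff msr addresses one of the nr_banks * 4 contiguous bank
 * registers (CTL, STATUS, ADDR, MISC repeat every 4 indices). */
static bool is_mca_bank_msr(uint32_t msr, unsigned int nr_banks)
{
    return msr >= MSR_IA32_MC0_CTL &&
           msr <  MSR_IA32_MC0_CTL + 4 * nr_banks;
}
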
vmce.c
   110      switch ( msr & (-MSR_IA32_MC0_CTL | 3) )  in bank_mce_rdmsr()
   153          ret = vmce_intel_rdmsr(v, msr, val);  in bank_mce_rdmsr()
   157          ret = vmce_amd_rdmsr(v, msr, val);  in bank_mce_rdmsr()
   175  int vmce_rdmsr(uint32_t msr, uint64_t *val)  in vmce_rdmsr() argument
   184      switch ( msr )  in vmce_rdmsr()
   225      ret = mce_bank_msr(cur, msr) ? bank_mce_rdmsr(cur, msr, val) : 0;  in vmce_rdmsr()
   243      switch ( msr & (-MSR_IA32_MC0_CTL | 3) )  in bank_mce_wrmsr()
   287          ret = vmce_amd_wrmsr(v, msr, val);  in bank_mce_wrmsr()
   305  int vmce_wrmsr(uint32_t msr, uint64_t val)  in vmce_wrmsr() argument
   312      switch ( msr )  in vmce_wrmsr()
  [all …]

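The `msr & (-MSR_IA32_MC0_CTL | 3)` expression at lines 110 and 243 is a bank-collapsing trick: because MSR_IA32_MC0_CTL is 0x400 and each bank occupies four consecutive indices, the mask -0x400 | 3 == 0xfffffc03 clears the bank-index bits (2..9) while preserving the two low bits that select CTL/STATUS/ADDR/MISC, so a single switch written in terms of bank-0 names handles every bank. A standalone demonstration (the helper and main() are illustrative only):

#include <assert.h>
#include <stdint.h>

#define MSR_IA32_MC0_CTL    0x400u
#define MSR_IA32_MC0_STATUS 0x401u

static uint32_t collapse_to_bank0(uint32_t msr)
{
    /* -0x400u = 0xfffffc00: clears bits 2..9 (the bank index) while
     * keeping the low two bits that pick CTL/STATUS/ADDR/MISC. */
    return msr & (-MSR_IA32_MC0_CTL | 3);
}

int main(void)
{
    /* MC5_STATUS (0x400 + 5*4 + 1 = 0x415) maps back onto MC0_STATUS. */
    assert(collapse_to_bank0(0x415) == MSR_IA32_MC0_STATUS);
    return 0;
}
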
vmce.h
    14  int vmce_intel_rdmsr(const struct vcpu *, uint32_t msr, uint64_t *val);
    15  int vmce_intel_wrmsr(struct vcpu *, uint32_t msr, uint64_t val);
    16  int vmce_amd_rdmsr(const struct vcpu *, uint32_t msr, uint64_t *val);
    17  int vmce_amd_wrmsr(struct vcpu *, uint32_t msr, uint64_t val);

mce_intel.c
   185          && msr < MSR_IA32_MCG_EAX + nr_intel_ext_msrs )  in intel_get_extended_msr()
   187      ext->mc_msr[ext->mc_msrs].reg = msr;  in intel_get_extended_msr()
   188      rdmsrl(msr, ext->mc_msr[ext->mc_msrs].value);  in intel_get_extended_msr()
   492      unsigned msr = MSR_IA32_MCx_CTL2(i);  in do_cmci_discover() local
   498      rdmsrl(msr, val);  in do_cmci_discover()
   509      rdmsrl(msr, val);  in do_cmci_discover()
   516      wrmsrl(msr, val & ~CMCI_THRESHOLD_MASK);  in do_cmci_discover()
   626      unsigned msr = MSR_IA32_MCx_CTL2(i);  in clear_cmci() local
   630      rdmsrl(msr, val);  in clear_cmci()
   947      unsigned int bank = msr - MSR_IA32_MC0_CTL2;  in vmce_intel_wrmsr()
  [all …]

mce.c
  1033      uint64_t msr;  member
  1057      if ( intpose_arr[i].cpu_nr == cpu_nr && intpose_arr[i].msr == msr )  in intpose_lookup()
  1084      ent->msr = msr;  in intpose_add()
  1213      struct mcinfo_msr *msr;  in x86_mc_msrinject() local
  1223      for ( i = 0, msr = &mci->mcinj_msr[0]; i < mci->mcinj_count; i++, msr++ )  in x86_mc_msrinject()
  1228      (unsigned long long)msr->reg,  in x86_mc_msrinject()
  1232      intpose_add(mci->mcinj_cpunr, msr->reg, msr->value);  in x86_mc_msrinject()
  1234      wrmsrl(msr->reg, msr->value);  in x86_mc_msrinject()
  1461      struct mcinfo_msr *msr;  in do_mca() local
  1481      i++, msr++ )  in do_mca()
  [all …]

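The intpose_* hits are the injection table behind the mca_rdmsr()/mca_wrmsr() accessors in mce.h above: a small array of (cpu_nr, msr, value) entries searched linearly. A simplified sketch of the lookup (the real Xen function also hands the interposed value back through an out parameter, and the array size here is arbitrary):

#include <stddef.h>
#include <stdint.h>

struct intpose_ent {
    unsigned int cpu_nr;
    uint64_t msr;
    uint64_t val;
};

#define INTPOSE_NENT 8                     /* illustrative size */
static struct intpose_ent intpose_arr[INTPOSE_NENT];

/* Return the injected entry for (cpu, msr), or NULL if none exists,
 * in which case callers fall back to the real hardware MSR. */
static struct intpose_ent *intpose_lookup(unsigned int cpu, uint64_t msr)
{
    unsigned int i;

    for ( i = 0; i < INTPOSE_NENT; i++ )
        if ( intpose_arr[i].cpu_nr == cpu && intpose_arr[i].msr == msr )
            return &intpose_arr[i];
    return NULL;
}
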
/xen-4.10.0-shim-comet/xen/include/asm-x86/

msr.h
    14  #define rdmsr(msr,val1,val2) \  argument
    17      : "c" (msr))
    19  #define rdmsrl(msr,val) do { unsigned long a__,b__; \  argument
    22      : "c" (msr)); \
    26  #define wrmsr(msr,val1,val2) \  argument
    29      : "c" (msr), "a" (val1), "d" (val2))
    31  static inline void wrmsrl(unsigned int msr, __u64 val)  in wrmsrl() argument
    36      wrmsr(msr, lo, hi);  in wrmsrl()
    40  #define rdmsr_safe(msr,val) ({\  argument
    51      : "c" (msr), "2" (0), "i" (-EFAULT)); \
  [all …]

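These macros wrap the rdmsr/wrmsr instructions, which take the MSR index in ECX and split the 64-bit value across EDX:EAX. A hedged sketch of the same pattern as self-contained GCC inline assembly (the my_ prefix marks these as illustrations, not the verbatim Xen definitions):

#include <stdint.h>

static inline uint64_t my_rdmsrl(unsigned int msr)
{
    uint32_t lo, hi;

    /* ECX selects the MSR; the result comes back in EDX:EAX. */
    asm volatile ( "rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr) );
    return ((uint64_t)hi << 32) | lo;
}

static inline void my_wrmsrl(unsigned int msr, uint64_t val)
{
    /* EDX:EAX carries the 64-bit value to be written. */
    asm volatile ( "wrmsr" :: "c" (msr),
                   "a" ((uint32_t)val), "d" ((uint32_t)(val >> 32)) );
}
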
vpmu.h
    42      int (*do_wrmsr)(unsigned int msr, uint64_t msr_content,
    44      int (*do_rdmsr)(unsigned int msr, uint64_t *msr_content);
   107  int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
   116  static inline int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,  in vpmu_do_wrmsr() argument
   119      return vpmu_do_msr(msr, &msr_content, supported, 1);  in vpmu_do_wrmsr()
   121  static inline int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)  in vpmu_do_rdmsr() argument
   123      return vpmu_do_msr(msr, msr_content, 0, 0);  in vpmu_do_rdmsr()

xenoprof.h
    66  int passive_domain_do_rdmsr(unsigned int msr, uint64_t *msr_content);
    67  int passive_domain_do_wrmsr(unsigned int msr, uint64_t msr_content);
    72  static inline int passive_domain_do_rdmsr(unsigned int msr,  in passive_domain_do_rdmsr() argument
    78  static inline int passive_domain_do_wrmsr(unsigned int msr,  in passive_domain_do_wrmsr() argument

mce.h
    38  extern int vmce_wrmsr(uint32_t msr, uint64_t val);
    39  extern int vmce_rdmsr(uint32_t msr, uint64_t *val);

/xen-4.10.0-shim-comet/xen/arch/x86/

monitor.c
    46      ASSERT(d->arch.monitor.msr_bitmap && msr);  in monitor_bitmap_for_msr()
    48      switch ( *msr )  in monitor_bitmap_for_msr()
    57          *msr &= 0x1fff;  in monitor_bitmap_for_msr()
    62          *msr &= 0x1fff;  in monitor_bitmap_for_msr()
    73      u32 index = msr;  in monitor_enable_msr()
    85      hvm_enable_msr_interception(d, msr);  in monitor_enable_msr()
    97      bitmap = monitor_bitmap_for_msr(d, &msr);  in monitor_disable_msr()
   102      __clear_bit(msr, bitmap);  in monitor_disable_msr()
   119      return test_bit(msr, bitmap);  in monitored_msr()
   188      u32 msr = mop->u.mov_to_msr.msr;  in arch_monitor_domctl_event() local
  [all …]

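monitor_bitmap_for_msr() selects a bitmap according to which architectural MSR range the index falls in and then reduces the index with *msr &= 0x1fff. The switch cases are truncated out of the hits above, so the sketch below assumes the conventional low (0x0-0x1fff) and high (0xc0000000-0xc0001fff) ranges used by the VMX and SVM code later in these results; the helper is hypothetical:

#include <stdint.h>

/* Return the bit index within the selected 0x2000-bit bitmap, setting
 * *range to 0 (low MSRs) or 1 (high MSRs), or -1 if unrepresentable. */
static int monitor_msr_bit(uint32_t msr, unsigned int *range)
{
    if ( msr <= 0x1fff )
        *range = 0;                        /* 0x00000000 - 0x00001fff */
    else if ( msr >= 0xc0000000 && msr <= 0xc0001fff )
        *range = 1;                        /* 0xc0000000 - 0xc0001fff */
    else
        return -1;

    return msr & 0x1fff;                   /* the "*msr &= 0x1fff" step */
}
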
msr.c
    94      d->arch.msr = dp;  in init_domain_msr_policy()
   116      v->arch.msr = vp;  in init_vcpu_msr_policy()
   121  int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)  in guest_rdmsr() argument
   123      const struct msr_domain_policy *dp = v->domain->arch.msr;  in guest_rdmsr()
   124      const struct msr_vcpu_policy *vp = v->arch.msr;  in guest_rdmsr()
   126      switch ( msr )  in guest_rdmsr()
   152  int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)  in guest_wrmsr() argument
   156      struct msr_domain_policy *dp = d->arch.msr;  in guest_wrmsr()
   157      struct msr_vcpu_policy *vp = v->arch.msr;  in guest_wrmsr()
   159      switch ( msr )  in guest_wrmsr()

/xen-4.10.0-shim-comet/xen/arch/arm/arm64/

entry.S
    63          msr     SPSR_el1, x23
    69          msr     SP_el0, x22
    73          msr     SP_el1, x22
    74          msr     ELR_el1, x23
    80          msr     SPSR_fiq, x22
    81          msr     SPSR_irq, x23
    85          msr     SPSR_und, x22
   182          msr     daifclr, #2
   190          msr     daifclr, #6
   197          msr     daifclr, #4
  [all …]

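In these arm64 hits, `msr` is the AArch64 "move to system register" instruction, matched only because the search is textual; it is unrelated to the x86 MSR accessors elsewhere in the results. From C such writes are usually wrapped in a macro; Xen has WRITE_SYSREG-style helpers along these lines, though the exact spelling below is illustrative rather than the verbatim Xen macro:

#include <stdint.h>

/* Stringify the register name into the instruction; usable only with
 * a compile-time system register name, e.g.
 *     WRITE_SYSREG64(ttbr, TTBR0_EL2);
 * emits "msr TTBR0_EL2, xN". */
#define WRITE_SYSREG64(v, name) ({                          \
    uint64_t _v = (v);                                      \
    asm volatile ( "msr " #name ", %0" :: "r" (_v) );       \
})
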
head.S
   238          msr     DAIFSet, 0xf    /* Disable all interrupts */
   259          msr     DAIFSet, 0xf    /* Disable all interrupts */
   342          msr     mair_el2, x0
   355          msr     tcr_el2, x0
   366          msr     SCTLR_EL2, x0
   371          msr     spsel, #1
   391          msr     TTBR0_EL2, x4
   517          msr     SCTLR_EL2, x0   /* now paging is enabled */
   576          msr     TTBR0_EL2, x4
   661          msr     TTBR0_EL2, x0
  [all …]

/xen-4.10.0-shim-comet/xen/arch/x86/cpu/

vpmu_amd.c
    37  #define is_guest_mode(msr) ((msr) & (1ULL << MSR_F10H_EVNTSEL_GO_SHIFT))  argument
    38  #define is_pmu_enabled(msr) ((msr) & (1ULL << MSR_F10H_EVNTSEL_EN_SHIFT))  argument
    39  #define set_guest_mode(msr) (msr |= (1ULL << MSR_F10H_EVNTSEL_GO_SHIFT))  argument
    40  #define is_overflowed(msr) (!((msr) & (1ULL << (MSR_F10H_COUNTER_LENGTH-1))))  argument
   335      ((msr >= MSR_K7_EVNTSEL0) && (msr <= MSR_K7_PERFCTR3)) )  in context_update()
   337          msr = get_fam15h_addr(msr);  in context_update()
   342      if ( msr == ctrls[i] )  in context_update()
   347      else if (msr == counters[i] )  in context_update()
   407      context_update(msr, msr_content);  in amd_vpmu_do_wrmsr()
   410      wrmsrl(msr, msr_content);  in amd_vpmu_do_wrmsr()
  [all …]

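is_overflowed() at line 40 tests the top implemented counter bit: the constant's name suggests Fam10h performance counters are 48 bits wide, and software arms them with a "negative" value so that the MSB clearing means the count has wrapped. A standalone illustration (the width 48 is an assumption matching MSR_F10H_COUNTER_LENGTH):

#include <stdbool.h>
#include <stdint.h>

#define COUNTER_LENGTH 48   /* assumed counter width */

/* A counter primed with a negative value has its top bit set; once it
 * counts up past zero the bit clears, signalling overflow. */
static bool is_overflowed(uint64_t ctr)
{
    return !(ctr & (1ULL << (COUNTER_LENGTH - 1)));
}
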
mwait-idle.c
  1008      unsigned long long msr;  in bxt_idle_state_table_update() local
  1011      rdmsrl(MSR_PKGC6_IRTL, msr);  in bxt_idle_state_table_update()
  1012      usec = irtl_2_usec(msr);  in bxt_idle_state_table_update()
  1018      rdmsrl(MSR_PKGC7_IRTL, msr);  in bxt_idle_state_table_update()
  1019      usec = irtl_2_usec(msr);  in bxt_idle_state_table_update()
  1025      rdmsrl(MSR_PKGC8_IRTL, msr);  in bxt_idle_state_table_update()
  1026      usec = irtl_2_usec(msr);  in bxt_idle_state_table_update()
  1033      usec = irtl_2_usec(msr);  in bxt_idle_state_table_update()
  1040      usec = irtl_2_usec(msr);  in bxt_idle_state_table_update()
  1055      u64 msr;  in sklh_idle_state_table_update() local
  [all …]

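irtl_2_usec() decodes the PKGCn_IRTL MSRs, which hold a 10-bit time value in bits 9:0 and a 3-bit unit selector in bits 12:10. The sketch below follows the layout and unit table of the corresponding Linux intel_idle code (from which Xen's mwait-idle driver derives); treat both as assumptions rather than a quotation of this source:

#include <stdint.h>

/* Nanoseconds per unit, indexed by IRTL bits 12:10 (assumed table). */
static const uint64_t irtl_ns_units[] = {
    1, 32, 1024, 32768, 1048576, 33554432, 0, 0,
};

static uint64_t irtl_2_usec(uint64_t irtl)
{
    uint64_t ns;

    if ( !irtl )
        return 0;

    ns = irtl_ns_units[(irtl >> 10) & 0x7] * (irtl & 0x3ff);
    return ns / 1000;           /* convert to microseconds */
}
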
intel.c
    25  static uint64_t __init _probe_mask_msr(unsigned int *msr, uint64_t caps)  in _probe_mask_msr() argument
    31      if (rdmsr_safe(*msr, val) || wrmsr_safe(*msr, val))  in _probe_mask_msr()
    32          *msr = 0;  in _probe_mask_msr()
   146  #define LAZY(msr, field) \  in intel_ctxt_switch_masking() argument
   149          (msr)) \  in intel_ctxt_switch_masking()
   151          wrmsrl((msr), masks->field); \  in intel_ctxt_switch_masking()

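_probe_mask_msr() probes an optional CPUID-masking MSR with fault-tolerant accesses: read it, write the same value straight back, and if either access faults, zero the caller's MSR index so later context-switch code skips that mask. A sketch of the shape (not the verbatim function), assuming the rdmsr_safe()/wrmsr_safe() helpers from asm-x86/msr.h above, which return 0 on success:

/* Probe "*msr"; on any fault, zero the index so the mask is treated
 * as absent on this CPU. Returns the current mask value, or 0. */
static uint64_t probe_mask_msr(unsigned int *msr)
{
    uint64_t val = 0;

    if ( rdmsr_safe(*msr, val) || wrmsr_safe(*msr, val) )
    {
        *msr = 0;
        val = 0;
    }
    return val;
}
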
amd.c
    47  static inline int rdmsr_amd_safe(unsigned int msr, unsigned int *lo,  in rdmsr_amd_safe() argument
    59      : "c" (msr), "D" (0x9c5a203a), "2" (0), "i" (-EFAULT));  in rdmsr_amd_safe()
    64  static inline int wrmsr_amd_safe(unsigned int msr, unsigned int lo,  in wrmsr_amd_safe() argument
    76      : "c" (msr), "a" (lo), "d" (hi), "D" (0x9c5a203a),  in wrmsr_amd_safe()
    82  static void wrmsr_amd(unsigned int msr, uint64_t val)  in wrmsr_amd() argument
    85      "c" (msr), "a" ((uint32_t)val),  in wrmsr_amd()
   139  static uint64_t __init _probe_mask_msr(unsigned int msr, uint64_t caps)  in _probe_mask_msr() argument
   145      if ((rdmsr_amd_safe(msr, &lo, &hi) == 0) &&  in _probe_mask_msr()
   146          (wrmsr_amd_safe(msr, lo, hi) == 0))  in _probe_mask_msr()
   232  #define LAZY(cap, msr, field) \  in amd_ctxt_switch_masking() argument
  [all …]

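The "D" (0x9c5a203a) operand in rdmsr_amd_safe()/wrmsr_amd_safe() loads an AMD pass-code into %edi; certain AMD-specific MSRs only accept accesses when this key is present (treat the exact semantics as an assumption here). A simplified, non-faulting sketch of the read side (the real helpers additionally recover from #GP through the exception table, hence the -EFAULT operand):

#include <stdint.h>

static inline uint64_t rdmsr_amd(unsigned int msr)
{
    uint32_t lo, hi;

    /* Same rdmsr as usual, but with the pass-code held in %edi. */
    asm volatile ( "rdmsr"
                   : "=a" (lo), "=d" (hi)
                   : "c" (msr), "D" (0x9c5a203a) );
    return ((uint64_t)hi << 32) | lo;
}
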
/xen-4.10.0-shim-comet/xen/arch/x86/hvm/vmx/

vmcs.c
   836      if ( msr <= 0x1fff )  in vmx_clear_msr_intercept()
   843      else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )  in vmx_clear_msr_intercept()
   845          msr &= 0x1fff;  in vmx_clear_msr_intercept()
   864      if ( msr <= 0x1fff )  in vmx_set_msr_intercept()
   871      else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )  in vmx_set_msr_intercept()
   873          msr &= 0x1fff;  in vmx_set_msr_intercept()
   886      if ( msr <= 0x1fff )  in vmx_msr_is_intercepted()
   889      else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )  in vmx_msr_is_intercepted()
  1284      const u32 *msr = key;  in vmx_msr_entry_key_cmp() local
  1287      if ( *msr > entry->index )  in vmx_msr_entry_key_cmp()
  [all …]

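The interception helpers above index the VMX MSR bitmap, a 4 KiB structure the Intel SDM defines as four 1 KiB regions: read-low, read-high, write-low, write-high, where "low" covers MSRs 0x00000000-0x00001fff and "high" covers 0xc0000000-0xc0001fff; hence the `msr &= 0x1fff` reduction seen in the hits. A hypothetical helper computing the bit position:

#include <stdbool.h>
#include <stdint.h>

/* Bit offset into the 4 KiB bitmap, or -1 if the MSR has no slot
 * (such MSRs are unconditionally intercepted). */
static int vmx_msr_bitmap_bit(uint32_t msr, bool is_write)
{
    unsigned int base = is_write ? 2048 * 8 : 0;   /* in bits */

    if ( msr <= 0x1fff )
        return base + msr;                          /* "low" region */
    if ( msr >= 0xc0000000 && msr <= 0xc0001fff )
        return base + 1024 * 8 + (msr & 0x1fff);    /* "high" region */
    return -1;
}
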
vmx.c
   493      switch ( msr )  in long_mode_do_msr_read()
   539      switch ( msr )  in long_mode_do_msr_write()
  2890      switch ( msr )  in vmx_msr_read_intercept()
  3025      unsigned int msr;  in vmx_vlapic_msr_changed() local
  3049            msr <= MSR_IA32_APICBASE_MSR + 0xff; msr++ )  in vmx_vlapic_msr_changed()
  3070            msr <= MSR_IA32_APICBASE_MSR + 0xff; msr++ )  in vmx_vlapic_msr_changed()
  3083      switch ( msr )  in vmx_msr_write_intercept()
  3330                 msr->index, msr->data, msr->mbz);  in vmx_failed_vmentry()
  4176      for ( ; msr < msr_area + msr_count && msr->index < lbr_from_end; msr++ )  in lbr_tsx_fixup()
  4177          msr->data |= ((LBR_FROM_SIGNEXT_2MSB & msr->data) << 2);  in lbr_tsx_fixup()
  [all …]

/xen-4.10.0-shim-comet/xen/include/asm-x86/hvm/vmx/

vmcs.h
   540  void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
   542  void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr,
   544  int vmx_read_guest_msr(u32 msr, u64 *val);
   545  int vmx_write_guest_msr(u32 msr, u64 val);
   546  struct vmx_msr_entry *vmx_find_msr(u32 msr, int type);
   547  int vmx_add_msr(u32 msr, int type);
   552                                unsigned int msr, bool is_write) __nonnull(1);
   562  static inline int vmx_add_guest_msr(u32 msr)  in vmx_add_guest_msr() argument
   564      return vmx_add_msr(msr, VMX_GUEST_MSR);  in vmx_add_guest_msr()
   566  static inline int vmx_add_host_load_msr(u32 msr)  in vmx_add_host_load_msr() argument
  [all …]

/xen-4.10.0-shim-comet/xen/arch/x86/hvm/svm/

svm.c
   136      if ( msr <= 0x1fff )  in svm_msrbit()
   138      else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )  in svm_msrbit()
   140      else if ( (msr >= 0xc0010000) && (msr <= 0xc0011fff) )  in svm_msrbit()
   152      msr &= 0x1fff;  in svm_intercept_msr()
   155      __set_bit(msr * 2, msr_bit);  in svm_intercept_msr()
   157      __clear_bit(msr * 2, msr_bit);  in svm_intercept_msr()
   457      ctxt->msr[i]._rsvd = 1;  in svm_load_msr()
  1834      switch ( msr )  in svm_msr_read_intercept()
  1962      msr, *msr_content);  in svm_msr_read_intercept()
  1976      switch ( msr )  in svm_msr_write_intercept()
  [all …]

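svm_msrbit() and svm_intercept_msr() implement AMD's MSR permission map: two bits per MSR (the even bit intercepts reads, the odd bit writes), with one 2 KiB block per architectural MSR range, which is why the index is reduced with `msr &= 0x1fff` and then doubled for __set_bit()/__clear_bit(). A hypothetical standalone version of the block selection (offsets per the AMD APM layout):

#include <stdint.h>

/* Byte offset of the 2 KiB permission-map block covering this MSR,
 * or -1 if the MSR falls outside the mapped ranges.  Each range holds
 * 8192 MSRs * 2 bits = 2048 bytes. */
static long svm_msrpm_block(uint32_t msr)
{
    if ( msr <= 0x1fff )
        return 0;                                   /* 0x00000000+ */
    if ( msr >= 0xc0000000 && msr <= 0xc0001fff )
        return 2048;                                /* 0xc0000000+ */
    if ( msr >= 0xc0010000 && msr <= 0xc0011fff )
        return 4096;                                /* 0xc0010000+ */
    return -1;
}

/* Within its block, MSR n (n = msr & 0x1fff) uses bit 2n for the read
 * intercept and bit 2n+1 for the write intercept. */
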
/xen-4.10.0-shim-comet/xen/arch/x86/oprofile/

nmi_int.c
    40  static int passive_domain_msr_op_checks(unsigned int msr, int *typep, int *indexp)  in passive_domain_msr_op_checks() argument
    47      if ( !model->is_arch_pmu_msr(msr, typep, indexp) )  in passive_domain_msr_op_checks()
    56  int passive_domain_do_rdmsr(unsigned int msr, uint64_t *msr_content)  in passive_domain_do_rdmsr() argument
    60      if ( !passive_domain_msr_op_checks(msr, &type, &index))  in passive_domain_do_rdmsr()
    67  int passive_domain_do_wrmsr(unsigned int msr, uint64_t msr_content)  in passive_domain_do_wrmsr() argument
    71      if ( !passive_domain_msr_op_checks(msr, &type, &index))  in passive_domain_do_wrmsr()

/xen-4.10.0-shim-comet/tools/debugger/kdd/

kdd.c
   712      uint32_t msr = s->rxp.cmd.msr.msr;  in kdd_handle_read_msr() local
   715      KDD_LOG(s, "Read MSR 0x%"PRIx32"\n", msr);  in kdd_handle_read_msr()
   717      ok = (kdd_rdmsr(s->guest, s->cpuid, msr, &val) == 0);  in kdd_handle_read_msr()
   718      s->txp.cmd.msr.msr = msr;  in kdd_handle_read_msr()
   719      s->txp.cmd.msr.val = val;  in kdd_handle_read_msr()
   720      s->txp.cmd.msr.status = (ok ? KDD_STATUS_SUCCESS : KDD_STATUS_FAILURE);  in kdd_handle_read_msr()
   726      uint32_t msr = s->rxp.cmd.msr.msr;  in kdd_handle_write_msr() local
   727      uint64_t val = s->rxp.cmd.msr.val;  in kdd_handle_write_msr()
   729      KDD_LOG(s, "Write MSR 0x%"PRIx32" = 0x%"PRIx64"\n", msr, val);  in kdd_handle_write_msr()
   731      ok = (kdd_wrmsr(s->guest, s->cpuid, msr, val) == 0);  in kdd_handle_write_msr()
  [all …]

/xen-4.10.0-shim-comet/unmodified_drivers/linux-2.6/platform-pci/

platform-pci.c
   143      uint32_t eax, ebx, ecx, edx, pages, msr, i, base;  in init_hypercall_stubs() local
   160      cpuid(base + 2, &pages, &msr, &ecx, &edx);  in init_hypercall_stubs()
   183          wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i);  in init_hypercall_stubs()
   196      uint32_t base, ecx, edx, pages, msr, i;  in resume_hypercall_stubs() local
   201      cpuid(base + 2, &pages, &msr, &ecx, &edx);  in resume_hypercall_stubs()
   209          wrmsrl(msr, ((u64)pfn << PAGE_SHIFT) + i);  in resume_hypercall_stubs()

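Both paths follow the Xen platform convention for hypercall stubs: CPUID leaf base+2 returns the number of hypercall pages in EAX and the MSR index to program in EBX, and writing a guest-physical destination address with the stub page index in the low bits asks Xen to install that stub page there. A sketch with illustrative helper signatures (cpuid(), wrmsrl() and stub_page_gpa() are assumptions standing in for the driver's environment, not its exact API):

#include <stdint.h>

extern void cpuid(uint32_t leaf, uint32_t *eax, uint32_t *ebx,
                  uint32_t *ecx, uint32_t *edx);
extern void wrmsrl(uint32_t msr, uint64_t val);
extern uint64_t stub_page_gpa(uint32_t i);  /* guest-phys addr of page i */

static void map_hypercall_pages(uint32_t cpuid_base)
{
    uint32_t pages, msr, ecx, edx, i;

    /* Leaf base+2: EAX = number of hypercall pages, EBX = MSR index. */
    cpuid(cpuid_base + 2, &pages, &msr, &ecx, &edx);

    /* Low bits of the written value carry the stub page index. */
    for ( i = 0; i < pages; i++ )
        wrmsrl(msr, stub_page_gpa(i) + i);
}
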
/xen-4.10.0-shim-comet/xen/arch/x86/hvm/

monitor.c
    77  void hvm_monitor_msr(unsigned int msr, uint64_t value)  in hvm_monitor_msr() argument
    81      if ( monitored_msr(curr->domain, msr) )  in hvm_monitor_msr()
    85          .u.mov_to_msr.msr = msr,  in hvm_monitor_msr()