Lines matching refs: val
321 unsigned long *val, struct x86_emulate_ctxt *ctxt) in read_io() argument
345 *val = guest_io_read(port, bytes, currd); in read_io()
420 unsigned long val, struct x86_emulate_ctxt *ctxt) in write_io() argument
442 pv_post_outb_hook(port, val); in write_io()
446 guest_io_write(port, bytes, val, currd); in write_io()
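The hits above and below come from what looks like Xen's x86 PV privileged-operation emulator: read_io()/write_io() handle port I/O, read_cr()/write_cr() the control registers, read_dr()/write_dr() the debug registers, and read_msr()/write_msr() the MSR space. read_io() fills *val from guest_io_read(), while write_io() runs pv_post_outb_hook() and forwards val to guest_io_write(). A minimal sketch of how such callbacks would be wired into an emulator ops table follows; the struct layout, the initializer, and the port/bytes parameter names are assumptions inferred from the signatures in the hits, not the file's actual contents:

    /* Hypothetical ops-table wiring; callback signatures follow the hits. */
    struct x86_emulate_ops {
        int (*read_io)(unsigned int port, unsigned int bytes,
                       unsigned long *val, struct x86_emulate_ctxt *ctxt);
        int (*write_io)(unsigned int port, unsigned int bytes,
                        unsigned long val, struct x86_emulate_ctxt *ctxt);
        /* ... other hooks elided ... */
    };

    static const struct x86_emulate_ops priv_op_ops = {
        .read_io  = read_io,   /* *val = guest_io_read(port, bytes, currd) */
        .write_io = write_io,  /* guest_io_write(port, bytes, val, currd) */
    };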
700 static int read_cr(unsigned int reg, unsigned long *val, in read_cr() argument
708 *val = (read_cr0() & ~X86_CR0_TS) | curr->arch.pv_vcpu.ctrlreg[0]; in read_cr()
713 *val = curr->arch.pv_vcpu.ctrlreg[reg]; in read_cr()
724 *val = xen_pfn_to_cr3(mfn_to_gmfn(currd, mfn_x(mfn))); in read_cr()
733 *val = compat_pfn_to_cr3(mfn_to_gmfn(currd, mfn_x(mfn))); in read_cr()
744 static int write_cr(unsigned int reg, unsigned long val, in write_cr() argument
752 if ( (val ^ read_cr0()) & ~X86_CR0_TS ) in write_cr()
758 do_fpu_taskswitch(!!(val & X86_CR0_TS)); in write_cr()
762 curr->arch.pv_vcpu.ctrlreg[2] = val; in write_cr()
763 arch_set_cr2(curr, val); in write_cr()
774 ? xen_cr3_to_pfn(val) : compat_cr3_to_pfn(val); in write_cr()
792 curr->arch.pv_vcpu.ctrlreg[4] = pv_guest_cr4_fixup(curr, val); in write_cr()
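The control-register hits show a per-register dispatch: CR0 reads merge the hardware TS bit with the guest's shadow ctrlreg[0], CR2 and CR4 reads return cached per-vCPU values, and CR3 reads translate the machine frame back to a guest frame (xen_pfn_to_cr3() for 64-bit guests, compat_pfn_to_cr3() for 32-bit ones). On the write side, CR0 may only toggle TS, and CR4 is laundered through pv_guest_cr4_fixup(). A hedged reconstruction of the first two write arms; the switch framing and the return codes are assumed:

    /* Sketch of write_cr()'s CR0/CR2 arms; framing is assumed. */
    switch ( reg )
    {
    case 0: /* CR0: a PV guest may change nothing but TS */
        if ( (val ^ read_cr0()) & ~X86_CR0_TS )
            break;                  /* any other change falls out as a failure */
        do_fpu_taskswitch(!!(val & X86_CR0_TS));
        return X86EMUL_OKAY;

    case 2: /* CR2: cache the value so the guest can read it back */
        curr->arch.pv_vcpu.ctrlreg[2] = val;
        arch_set_cr2(curr, val);
        return X86EMUL_OKAY;
    }
    return X86EMUL_UNHANDLEABLE;    /* assumed failure path */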
801 static int read_dr(unsigned int reg, unsigned long *val, in read_dr() argument
809 *val = res; in read_dr()
814 static int write_dr(unsigned int reg, unsigned long val, in write_dr() argument
817 return do_set_debugreg(reg, val) == 0 in write_dr()
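read_dr() and write_dr() are thin wrappers over the debug-register accessors: the write side is visibly just do_set_debugreg() mapped onto emulator return codes. A sketch of the pair, assuming the read side's res comes from do_get_debugreg(), assuming the error check, and completing write_dr()'s truncated ternary with the obvious success/failure codes:

    static int read_dr(unsigned int reg, unsigned long *val,
                       struct x86_emulate_ctxt *ctxt)
    {
        unsigned long res = do_get_debugreg(reg);  /* assumed source of res */

        if ( IS_ERR_VALUE(res) )                   /* assumed error check */
            return X86EMUL_UNHANDLEABLE;

        *val = res;
        return X86EMUL_OKAY;
    }

    static int write_dr(unsigned int reg, unsigned long val,
                        struct x86_emulate_ctxt *ctxt)
    {
        return do_set_debugreg(reg, val) == 0
               ? X86EMUL_OKAY : X86EMUL_UNHANDLEABLE;
    }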
821 static inline uint64_t guest_misc_enable(uint64_t val) in guest_misc_enable() argument
823 val &= ~(MSR_IA32_MISC_ENABLE_PERF_AVAIL | in guest_misc_enable()
825 val |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL | in guest_misc_enable()
828 return val; in guest_misc_enable()
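guest_misc_enable() sanitizes MSR_IA32_MISC_ENABLE before a PV guest sees it: "available" bits are masked off and "unavailable" bits forced on, so the guest does not try to drive performance-monitoring facilities (perf counters, BTS, PEBS) that Xen does not virtualize for it. The hits truncate both masks; in this reconstruction only PERF_AVAIL and BTS_UNAVAIL are visible above, and the remaining bit names are assumptions:

    static inline uint64_t guest_misc_enable(uint64_t val)
    {
        /* MONITOR_ENABLE and PEBS_UNAVAIL below are assumed; only
         * PERF_AVAIL and BTS_UNAVAIL appear in the hits. */
        val &= ~(MSR_IA32_MISC_ENABLE_PERF_AVAIL |
                 MSR_IA32_MISC_ENABLE_MONITOR_ENABLE);
        val |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL |
               MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
        return val;
    }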
837 static int read_msr(unsigned int reg, uint64_t *val, in read_msr() argument
846 if ( (ret = guest_rdmsr(curr, reg, val)) != X86EMUL_UNHANDLEABLE ) in read_msr()
861 *val = cpu_has_fsgsbase ? __rdfsbase() : curr->arch.pv_vcpu.fs_base; in read_msr()
867 *val = cpu_has_fsgsbase ? __rdgsbase() in read_msr()
874 *val = curr->arch.pv_vcpu.gs_base_user; in read_msr()
890 *val = 0; in read_msr()
894 *val = read_efer(); in read_msr()
896 *val &= ~(EFER_LME | EFER_LMA | EFER_LMSLE); in read_msr()
916 *val = 0; in read_msr()
931 if ( rdmsr_safe(reg, *val) ) in read_msr()
933 *val = guest_misc_enable(*val); in read_msr()
939 *val = curr->arch.pv_vcpu.dr_mask[0]; in read_msr()
945 *val = curr->arch.pv_vcpu.dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1]; in read_msr()
950 *val = 0; in read_msr()
965 if ( vpmu_do_rdmsr(reg, val) ) in read_msr()
972 if ( rdmsr_hypervisor_regs(reg, val) ) in read_msr()
975 rc = vmce_rdmsr(reg, val); in read_msr()
984 if ( rdmsr_safe(reg, *val) ) in read_msr()
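read_msr() is layered: the MSR is first offered to the common guest_rdmsr() path and only falls into the PV-specific switch when that returns X86EMUL_UNHANDLEABLE. Inside the switch, FS/GS base reads prefer the FSGSBASE instructions (__rdfsbase()/__rdgsbase()) when the CPU has them and fall back to the cached per-vCPU values, with the user GS base always served from gs_base_user; EFER comes from read_efer() with the long-mode bits (EFER_LME/LMA/LMSLE) masked out on at least one path; several registers read as hard-coded zero; and anything else goes through rdmsr_safe(). A sketch of the FS-base arm, with the case label and the 32-bit guard (mirrored from the write side) as assumptions:

    case MSR_FS_BASE:
        if ( is_pv_32bit_domain(currd) )
            break;                  /* assumed: no such MSR for 32-bit PV */
        *val = cpu_has_fsgsbase ? __rdfsbase()
                                : curr->arch.pv_vcpu.fs_base;
        return X86EMUL_OKAY;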
992 static int write_msr(unsigned int reg, uint64_t val, in write_msr() argument
1000 if ( (ret = guest_wrmsr(curr, reg, val)) != X86EMUL_UNHANDLEABLE ) in write_msr()
1014 if ( is_pv_32bit_domain(currd) || !is_canonical_address(val) ) in write_msr()
1016 wrfsbase(val); in write_msr()
1017 curr->arch.pv_vcpu.fs_base = val; in write_msr()
1021 if ( is_pv_32bit_domain(currd) || !is_canonical_address(val) ) in write_msr()
1023 wrgsbase(val); in write_msr()
1024 curr->arch.pv_vcpu.gs_base_kernel = val; in write_msr()
1028 if ( is_pv_32bit_domain(currd) || !is_canonical_address(val) ) in write_msr()
1030 wrmsrl(MSR_SHADOW_GS_BASE, val); in write_msr()
1031 curr->arch.pv_vcpu.gs_base_user = val; in write_msr()
1051 wrmsr_safe(reg, val) == 0 ) in write_msr()
1062 ((val ^ temp) & ~(1ULL << AMD64_NB_CFG_CF8_EXT_ENABLE_BIT)) ) in write_msr()
1064 if ( wrmsr_safe(MSR_AMD64_NB_CFG, val) == 0 ) in write_msr()
1077 temp != val : in write_msr()
1078 ((temp ^ val) & in write_msr()
1085 if ( wrmsr_safe(MSR_FAM10H_MMIO_CONF_BASE, val) == 0 ) in write_msr()
1096 if ( val ) in write_msr()
1103 if ( val != guest_misc_enable(temp) ) in write_msr()
1113 wrmsr_safe(reg, val) == 0 ) in write_msr()
1121 wrmsr_safe(reg, val) == 0 ) in write_msr()
1130 wrmsr_safe(reg, val) == 0 ) in write_msr()
1135 if ( !boot_cpu_has(X86_FEATURE_DBEXT) || (val >> 32) ) in write_msr()
1137 curr->arch.pv_vcpu.dr_mask[0] = val; in write_msr()
1139 wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, val); in write_msr()
1143 if ( !boot_cpu_has(X86_FEATURE_DBEXT) || (val >> 32) ) in write_msr()
1145 curr->arch.pv_vcpu.dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1] = val; in write_msr()
1147 wrmsrl(reg, val); in write_msr()
1165 if ( vpmu_do_wrmsr(reg, val, 0) ) in write_msr()
1172 if ( wrmsr_hypervisor_regs(reg, val) == 1 ) in write_msr()
1175 rc = vmce_wrmsr(reg, val); in write_msr()
1181 if ( (rdmsr_safe(reg, temp) != 0) || (val != temp) ) in write_msr()
1185 reg, temp, val); in write_msr()
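The write side mirrors this layering: guest_wrmsr() gets first refusal, the segment-base arms reject non-canonical values before touching hardware and the cached copy, the AMD NB_CFG and MMIO_CONF_BASE registers are only writable when no bits that matter would change, and vPMU/hypervisor/MCE registers are forwarded to their handlers. The final hits are the default arm: an unhandled write is not faulted but dropped, with a warning when the MSR cannot be read back or the written value differs from what is already there. A sketch of that tail; the framing, the format string, and the X86EMUL_OKAY outcome are assumptions, while the combined condition and the argument order match the hits:

    /* Assumed framing of write_msr()'s fall-through. */
    if ( (rdmsr_safe(reg, temp) != 0) || (val != temp) )
        gdprintk(XENLOG_WARNING,
                 "Domain attempted WRMSR %08x from 0x%016"PRIx64
                 " to 0x%016"PRIx64"\n", reg, temp, val);
    return X86EMUL_OKAY;    /* the write itself is discarded either way */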