Lines Matching refs:hvm_vmx

110 struct pi_desc *pi_desc = &v->arch.hvm_vmx.pi_desc; in vmx_vcpu_block()
113 old_lock = cmpxchg(&v->arch.hvm_vmx.pi_blocking.lock, NULL, in vmx_vcpu_block()
123 list_add_tail(&v->arch.hvm_vmx.pi_blocking.list, in vmx_vcpu_block()
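The three vmx_vcpu_block() hits above are the blocking fast path: the vCPU first publishes which per-pCPU lock now protects it (the cmpxchg against NULL at source line 113) and only then links itself onto that pCPU's blocking list. A minimal user-space sketch of that claim-then-enqueue pattern, using illustrative names rather than Xen's real types:

    #include <assert.h>
    #include <stddef.h>

    struct list { struct list *prev, *next; };
    struct lock { int opaque; };
    struct blocking { struct lock *lock; struct list entry; };

    static void list_add_tail(struct list *n, struct list *head)
    {
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
    }

    /* Publish which lock guards us, then enqueue.  The cmpxchg against
     * NULL mirrors the hit at source line 113; a non-NULL old value
     * would mean we are already on some pCPU's list. */
    static void block_vcpu(struct blocking *b, struct lock *percpu_lock,
                           struct list *percpu_list)
    {
        struct lock *old = __sync_val_compare_and_swap(&b->lock, NULL,
                                                       percpu_lock);
        assert(old == NULL);
        list_add_tail(&b->entry, percpu_list);
    }

    int main(void)
    {
        struct lock l = { 0 };
        struct list head = { &head, &head };
        struct blocking b = { 0 };
        block_vcpu(&b, &l, &head);
        return head.next == &b.entry ? 0 : 1;
    }

The CAS doubles as the "am I already blocked?" check: publishing the lock pointer and claiming the slot are a single atomic step.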
139 struct pi_desc *pi_desc = &v->arch.hvm_vmx.pi_desc; in vmx_pi_switch_from()
149 struct pi_desc *pi_desc = &v->arch.hvm_vmx.pi_desc; in vmx_pi_switch_to()
162 struct pi_desc *pi_desc = &v->arch.hvm_vmx.pi_desc; in vmx_pi_unblock_vcpu()
171 pi_blocking_list_lock = v->arch.hvm_vmx.pi_blocking.lock; in vmx_pi_unblock_vcpu()
186 if ( v->arch.hvm_vmx.pi_blocking.lock != NULL ) in vmx_pi_unblock_vcpu()
188 ASSERT(v->arch.hvm_vmx.pi_blocking.lock == pi_blocking_list_lock); in vmx_pi_unblock_vcpu()
189 list_del(&v->arch.hvm_vmx.pi_blocking.list); in vmx_pi_unblock_vcpu()
190 v->arch.hvm_vmx.pi_blocking.lock = NULL; in vmx_pi_unblock_vcpu()
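The vmx_pi_unblock_vcpu() hits are the mirror image: read the published lock pointer (line 171), take that lock, then re-check the pointer under it (line 186) before unlinking, since a concurrent wakeup may already have dequeued the vCPU. A sketch of that check-under-lock shape, with a pthread mutex standing in for Xen's spinlock:

    #include <pthread.h>
    #include <stddef.h>

    struct list { struct list *prev, *next; };
    struct blocking { pthread_mutex_t *lock; struct list entry; };

    static void list_del(struct list *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    /* The pointer read outside the lock only tells us which lock to
     * take; the decision to unlink is re-made under that lock, where
     * the pointer can no longer change (cf. the ASSERT at line 188). */
    static void unblock(struct blocking *b)
    {
        pthread_mutex_t *lock = b->lock;     /* snapshot (line 171) */

        if ( lock == NULL )
            return;                          /* never blocked */

        pthread_mutex_lock(lock);
        if ( b->lock != NULL )               /* still queued? (line 186) */
        {
            list_del(&b->entry);
            b->lock = NULL;
        }
        pthread_mutex_unlock(lock);
    }

    int main(void)
    {
        static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
        struct list head = { &head, &head };
        struct blocking b = { .lock = &m,
                              .entry = { .prev = &head, .next = &head } };
        head.next = head.prev = &b.entry;
        unblock(&b);
        return head.next == &head ? 0 : 1;
    }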
239 vcpu_unblock(container_of(vmx, struct vcpu, arch.hvm_vmx)); in vmx_pi_desc_fixup()
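This hit, like pi_wakeup_interrupt() at line 2396 further down, recovers the owning struct vcpu from a pointer to its embedded arch.hvm_vmx member. That is the standard container_of idiom; a self-contained demo with stand-in types (the field names here are illustrative, not Xen's):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct vmx_state { int pi_desc; };
    struct vcpu_demo {
        int id;
        struct { struct vmx_state vmx; } arch;   /* plays arch.hvm_vmx */
    };

    int main(void)
    {
        struct vcpu_demo v = { .id = 7 };
        struct vmx_state *vmx = &v.arch.vmx;
        /* Walk back from the embedded member to the enclosing vcpu,
         * exactly what the hits at lines 239 and 2396 do. */
        struct vcpu_demo *back = container_of(vmx, struct vcpu_demo,
                                              arch.vmx);
        printf("%d\n", back->id);            /* prints 7 */
        return back == &v ? 0 : 1;
    }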
341 struct pi_desc *pi_desc = &v->arch.hvm_vmx.pi_desc; in vmx_pi_hooks_assign()
430 spin_lock_init(&v->arch.hvm_vmx.vmcs_lock); in vmx_vcpu_initialise()
432 INIT_LIST_HEAD(&v->arch.hvm_vmx.pi_blocking.list); in vmx_vcpu_initialise()
508 *msr_content = v->arch.hvm_vmx.star; in long_mode_do_msr_read()
512 *msr_content = v->arch.hvm_vmx.lstar; in long_mode_do_msr_read()
516 *msr_content = v->arch.hvm_vmx.cstar; in long_mode_do_msr_read()
520 *msr_content = v->arch.hvm_vmx.sfmask; in long_mode_do_msr_read()
557 v->arch.hvm_vmx.star = msr_content; in long_mode_do_msr_write()
564 v->arch.hvm_vmx.lstar = msr_content; in long_mode_do_msr_write()
571 v->arch.hvm_vmx.cstar = msr_content; in long_mode_do_msr_write()
575 v->arch.hvm_vmx.sfmask = msr_content; in long_mode_do_msr_write()
606 rdmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.shadow_gs); in vmx_save_guest_msrs()
611 wrmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.shadow_gs); in vmx_restore_guest_msrs()
612 wrmsrl(MSR_STAR, v->arch.hvm_vmx.star); in vmx_restore_guest_msrs()
613 wrmsrl(MSR_LSTAR, v->arch.hvm_vmx.lstar); in vmx_restore_guest_msrs()
614 wrmsrl(MSR_SYSCALL_MASK, v->arch.hvm_vmx.sfmask); in vmx_restore_guest_msrs()
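The hits from line 508 through 614 all touch the same five fields: the guest's SYSCALL MSRs (STAR/LSTAR/CSTAR/SFMASK) plus the shadow GS base are kept in software copies and written to hardware only on context switch in. Note the asymmetry the listing itself shows: on switch-out only SHADOW_GS is read back (line 606), and on switch-in CSTAR is not restored (lines 611-614), which fits Intel hardware never consuming MSR_CSTAR. A compilable model of that shape, with the privileged rdmsr/wrmsr accessors stubbed out:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for hardware MSRs; real code uses rdmsrl/wrmsrl. */
    static uint64_t hw_msr[5];
    enum { STAR, LSTAR, CSTAR, SFMASK, SHADOW_GS };

    struct guest_msrs { uint64_t star, lstar, cstar, sfmask, shadow_gs; };

    /* Switch-out: only the shadow GS base can change behind our back
     * (swapgs/wrgsbase), so it is the only value read back (line 606). */
    static void save_guest_msrs(struct guest_msrs *g)
    {
        g->shadow_gs = hw_msr[SHADOW_GS];
    }

    /* Switch-in: reload what the guest expects (lines 611-614);
     * CSTAR stays a software-only copy, as in the listing. */
    static void restore_guest_msrs(const struct guest_msrs *g)
    {
        hw_msr[SHADOW_GS] = g->shadow_gs;
        hw_msr[STAR]      = g->star;
        hw_msr[LSTAR]     = g->lstar;
        hw_msr[SFMASK]    = g->sfmask;
    }

    int main(void)
    {
        struct guest_msrs g = { .lstar = 0xffffffff81000000ull };
        restore_guest_msrs(&g);
        hw_msr[SHADOW_GS] = 0x1234;     /* guest ran swapgs/wrgsbase */
        save_guest_msrs(&g);
        printf("%#llx\n", (unsigned long long)g.shadow_gs);
        return 0;
    }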
632 nvmx_update_exec_control(v, v->arch.hvm_vmx.exec_control); in vmx_update_cpu_exec_control()
634 __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control); in vmx_update_cpu_exec_control()
641 v->arch.hvm_vmx.secondary_exec_control); in vmx_update_secondary_exec_control()
644 v->arch.hvm_vmx.secondary_exec_control); in vmx_update_secondary_exec_control()
649 u32 bitmap = unlikely(v->arch.hvm_vmx.vmx_realmode) in vmx_update_exception_bitmap()
650 ? 0xffffffffu : v->arch.hvm_vmx.exception_bitmap; in vmx_update_exception_bitmap()
662 v->arch.hvm_vmx.exception_bitmap |= (1U << TRAP_invalid_op); in vmx_update_guest_vendor()
664 v->arch.hvm_vmx.exception_bitmap &= ~(1U << TRAP_invalid_op); in vmx_update_guest_vendor()
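Lines 632 through 664 share one idiom: VMCS execution controls and the exception bitmap are mirrored in software fields, edited with plain bit operations, and only then pushed to hardware with __vmwrite. A small model of that read-modify-write shape, including the realmode override from lines 649-650 (the VMWRITE is stubbed):

    #include <stdint.h>
    #include <stdio.h>

    #define TRAP_invalid_op  6   /* #UD vector */

    struct vmx_ctrl {
        uint32_t exception_bitmap;
        int realmode;
    };

    /* Stub for the privileged VMWRITE instruction. */
    static void vmwrite(const char *field, uint32_t val)
    {
        printf("%s <- %#x\n", field, val);
    }

    /* Mirrors vmx_update_exception_bitmap (lines 649-650): in emulated
     * realmode every exception is intercepted; otherwise the cached
     * bitmap wins. */
    static void update_exception_bitmap(const struct vmx_ctrl *c)
    {
        uint32_t bitmap = c->realmode ? 0xffffffffu : c->exception_bitmap;
        vmwrite("EXCEPTION_BITMAP", bitmap);
    }

    int main(void)
    {
        struct vmx_ctrl c = { .exception_bitmap = 1u << TRAP_invalid_op };
        update_exception_bitmap(&c);                      /* just #UD */
        c.exception_bitmap &= ~(1u << TRAP_invalid_op);   /* line 664 */
        update_exception_bitmap(&c);                      /* nothing */
        return 0;
    }

Keeping the software copy authoritative is what lets callers like vmx_update_guest_vendor() (lines 662-664) toggle a single bit without a VMREAD first.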
693 v->arch.hvm_vmx.exec_control |= CPU_BASED_MOV_DR_EXITING; in vmx_save_dr()
862 data->shadow_gs = v->arch.hvm_vmx.shadow_gs; in vmx_save_cpu_state()
864 data->msr_lstar = v->arch.hvm_vmx.lstar; in vmx_save_cpu_state()
865 data->msr_star = v->arch.hvm_vmx.star; in vmx_save_cpu_state()
866 data->msr_cstar = v->arch.hvm_vmx.cstar; in vmx_save_cpu_state()
867 data->msr_syscall_mask = v->arch.hvm_vmx.sfmask; in vmx_save_cpu_state()
872 v->arch.hvm_vmx.shadow_gs = data->shadow_gs; in vmx_load_cpu_state()
873 v->arch.hvm_vmx.star = data->msr_star; in vmx_load_cpu_state()
874 v->arch.hvm_vmx.lstar = data->msr_lstar; in vmx_load_cpu_state()
875 v->arch.hvm_vmx.cstar = data->msr_cstar; in vmx_load_cpu_state()
876 v->arch.hvm_vmx.sfmask = data->msr_syscall_mask; in vmx_load_cpu_state()
968 v->arch.hvm_vmx.exception_bitmap &= ~(1u << TRAP_no_device); in vmx_fpu_enter()
970 v->arch.hvm_vmx.host_cr0 &= ~X86_CR0_TS; in vmx_fpu_enter()
971 __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0); in vmx_fpu_enter()
979 if ( !(v->arch.hvm_vmx.host_cr0 & X86_CR0_TS) ) in vmx_fpu_leave()
981 v->arch.hvm_vmx.host_cr0 |= X86_CR0_TS; in vmx_fpu_leave()
982 __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0); in vmx_fpu_leave()
995 v->arch.hvm_vmx.exception_bitmap |= (1u << TRAP_no_device); in vmx_fpu_leave()
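vmx_fpu_enter()/vmx_fpu_leave() (lines 968-995) implement lazy FPU switching: while the guest is not using the FPU, CR0.TS is set and #NM (TRAP_no_device) is intercepted; on first FPU use both are cleared so FPU instructions run natively. A compilable model of that two-flag dance:

    #include <stdint.h>
    #include <stdio.h>

    #define X86_CR0_TS      (1u << 3)
    #define TRAP_no_device  7            /* #NM vector */

    struct fpu_state {
        uint32_t host_cr0;
        uint32_t exception_bitmap;
    };

    /* Guest touched the FPU: stop trapping #NM and clear CR0.TS so
     * FPU instructions run natively (mirrors lines 968-971). */
    static void fpu_enter(struct fpu_state *s)
    {
        s->exception_bitmap &= ~(1u << TRAP_no_device);
        s->host_cr0 &= ~X86_CR0_TS;
    }

    /* Switched away: re-arm the trap so the next FPU use faults and
     * state can be restored lazily (mirrors lines 979-995). */
    static void fpu_leave(struct fpu_state *s)
    {
        if ( !(s->host_cr0 & X86_CR0_TS) )
            s->host_cr0 |= X86_CR0_TS;
        s->exception_bitmap |= 1u << TRAP_no_device;
    }

    int main(void)
    {
        struct fpu_state s = { .host_cr0 = X86_CR0_TS };
        fpu_enter(&s);
        printf("TS after enter: %u\n", !!(s.host_cr0 & X86_CR0_TS));
        fpu_leave(&s);
        printf("TS after leave: %u\n", !!(s.host_cr0 & X86_CR0_TS));
        return 0;
    }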
1155 if ( v->arch.hvm_vmx.vmx_realmode && seg <= x86_seg_tr in vmx_get_segment_register()
1156 && !(v->arch.hvm_vmx.vm86_segment_mask & (1u << seg)) ) in vmx_get_segment_register()
1158 struct segment_register *sreg = &v->arch.hvm_vmx.vm86_saved_seg[seg]; in vmx_get_segment_register()
1195 if ( v->arch.hvm_vmx.vmx_realmode && seg <= x86_seg_tr ) in vmx_set_segment_register()
1198 v->arch.hvm_vmx.vm86_saved_seg[seg] = *reg; in vmx_set_segment_register()
1217 v->arch.hvm_vmx.vm86_segment_mask &= ~(1u << seg); in vmx_set_segment_register()
1220 v->arch.hvm_vmx.vm86_segment_mask |= (1u << seg); in vmx_set_segment_register()
1233 v->arch.hvm_vmx.vm86_segment_mask &= ~(1u << seg); in vmx_set_segment_register()
1236 v->arch.hvm_vmx.vm86_segment_mask |= (1u << seg); in vmx_set_segment_register()
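The segment-register hits (lines 1155-1236) are the vm86 shadowing scheme used by emulated realmode: the guest's real segment contents live in vm86_saved_seg[], and vm86_segment_mask tracks, per segment, whether the hardware copy is a faked-up vm86 one (bit set) or usable as-is (bit clear). A sketch of that bookkeeping, with simplified types:

    #include <stdint.h>
    #include <stdio.h>

    enum seg { SEG_ES, SEG_CS, SEG_SS, SEG_DS, SEG_FS, SEG_GS, SEG_TR,
               SEG_N };

    struct segreg { uint32_t base, limit; uint16_t sel; };

    struct vm86_shadow {
        struct segreg saved[SEG_N];     /* guest's real values */
        uint16_t mask;                  /* bit set => hw copy is faked */
    };

    /* Mirrors the set path (lines 1195-1236): always remember the real
     * value; flag the segment if it can't be expressed under vm86. */
    static void set_segment(struct vm86_shadow *s, enum seg i,
                            const struct segreg *r, int representable)
    {
        s->saved[i] = *r;
        if ( representable )
            s->mask &= ~(1u << i);
        else
            s->mask |= 1u << i;
    }

    /* Mirrors the get path (lines 1155-1158): a flagged segment is
     * answered from the shadow, not from the hardware register. */
    static const struct segreg *get_segment(const struct vm86_shadow *s,
                                            enum seg i,
                                            const struct segreg *hw)
    {
        return (s->mask & (1u << i)) ? &s->saved[i] : hw;
    }

    int main(void)
    {
        struct vm86_shadow s = { 0 };
        struct segreg hw = { 0 }, r = { .base = 0x12345, .sel = 0x18 };
        set_segment(&s, SEG_DS, &r, 0);   /* not expressible under vm86 */
        printf("base %#x\n", (unsigned)get_segment(&s, SEG_DS, &hw)->base);
        return 0;
    }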
1285 return v->arch.hvm_vmx.shadow_gs; in vmx_get_shadow_gs_base()
1408 v->arch.hvm_vmx.exec_control &= ~CPU_BASED_RDTSC_EXITING; in vmx_set_rdtsc_exiting()
1410 v->arch.hvm_vmx.exec_control |= CPU_BASED_RDTSC_EXITING; in vmx_set_rdtsc_exiting()
1418 v->arch.hvm_vmx.secondary_exec_control |= in vmx_set_descriptor_access_exiting()
1421 v->arch.hvm_vmx.secondary_exec_control &= in vmx_set_descriptor_access_exiting()
1530 v->arch.hvm_vmx.exception_bitmap |= 1U << TRAP_int3; in vmx_update_debug_state()
1532 v->arch.hvm_vmx.exception_bitmap &= ~(1U << TRAP_int3); in vmx_update_debug_state()
1559 uint32_t old_ctls = v->arch.hvm_vmx.exec_control; in vmx_update_guest_cr()
1563 v->arch.hvm_vmx.exec_control &= ~cr3_ctls; in vmx_update_guest_cr()
1565 v->arch.hvm_vmx.exec_control |= cr3_ctls; in vmx_update_guest_cr()
1570 v->arch.hvm_vmx.exec_control |= CPU_BASED_CR3_LOAD_EXITING; in vmx_update_guest_cr()
1572 if ( old_ctls != v->arch.hvm_vmx.exec_control ) in vmx_update_guest_cr()
1592 (realmode != v->arch.hvm_vmx.vmx_realmode) ) in vmx_update_guest_cr()
1604 v->arch.hvm_vmx.vmx_realmode = realmode; in vmx_update_guest_cr()
1614 if ( !(v->arch.hvm_vmx.vm86_segment_mask & (1<<s)) ) in vmx_update_guest_cr()
1616 v, s, &v->arch.hvm_vmx.vm86_saved_seg[s]); in vmx_update_guest_cr()
1638 if ( v->arch.hvm_vmx.vmx_realmode ) in vmx_update_guest_cr()
1774 if ( curr->arch.hvm_vmx.vmx_realmode ) in __vmx_inject_exception()
1775 curr->arch.hvm_vmx.vmx_emulate = 1; in __vmx_inject_exception()
1994 set_bit(i, v->arch.hvm_vmx.eoi_exit_bitmap); in vmx_process_isr()
1996 for ( i = 0; i < ARRAY_SIZE(v->arch.hvm_vmx.eoi_exit_bitmap); ++i ) in vmx_process_isr()
1997 __vmwrite(EOI_EXIT_BITMAP(i), v->arch.hvm_vmx.eoi_exit_bitmap[i]); in vmx_process_isr()
2059 if ( pi_test_and_set_pir(vector, &v->arch.hvm_vmx.pi_desc) ) in vmx_deliver_posted_intr()
2062 if ( unlikely(v->arch.hvm_vmx.eoi_exitmap_changed) ) in vmx_deliver_posted_intr()
2069 pi_set_on(&v->arch.hvm_vmx.pi_desc); in vmx_deliver_posted_intr()
2075 prev.control = v->arch.hvm_vmx.pi_desc.control; in vmx_deliver_posted_intr()
2091 old.control = v->arch.hvm_vmx.pi_desc.control & in vmx_deliver_posted_intr()
2093 new.control = v->arch.hvm_vmx.pi_desc.control | in vmx_deliver_posted_intr()
2096 prev.control = cmpxchg(&v->arch.hvm_vmx.pi_desc.control, in vmx_deliver_posted_intr()
2113 if ( !pi_test_and_clear_on(&v->arch.hvm_vmx.pi_desc) ) in vmx_sync_pir_to_irr()
2117 pending_intr[group] = pi_get_pir(&v->arch.hvm_vmx.pi_desc, group); in vmx_sync_pir_to_irr()
2125 return pi_test_pir(vec, &v->arch.hvm_vmx.pi_desc); in vmx_test_pir()
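Lines 2059-2125 are the posted-interrupt descriptor fast path: delivery sets the vector's bit in the PIR and raises ON; sync clears ON atomically and harvests the PIR in 32-bit groups. A user-space model of those atomics using GCC builtins (the real code's notification-IPI logic is omitted):

    #include <stdint.h>
    #include <stdio.h>

    /* 256 vectors => 8 x 32-bit PIR groups; ON lives in control bit 0. */
    struct pi_desc {
        uint32_t pir[8];
        uint32_t control;
    };

    static int pi_test_and_set_pir(unsigned vec, struct pi_desc *d)
    {
        uint32_t old = __atomic_fetch_or(&d->pir[vec / 32],
                                         1u << (vec % 32),
                                         __ATOMIC_SEQ_CST);
        return !!(old & (1u << (vec % 32)));
    }

    static int pi_test_and_clear_on(struct pi_desc *d)
    {
        uint32_t old = __atomic_fetch_and(&d->control, ~1u,
                                          __ATOMIC_SEQ_CST);
        return old & 1u;
    }

    int main(void)
    {
        struct pi_desc d = { { 0 }, 0 };
        /* Delivery side (line 2059): the first setter wins; a
         * duplicate delivery is a no-op. */
        pi_test_and_set_pir(0x31, &d);
        __atomic_fetch_or(&d.control, 1u, __ATOMIC_SEQ_CST); /* set ON */
        /* Sync side (lines 2113-2117): nothing to do unless ON was
         * set; each group is read-and-cleared in one atomic step. */
        if ( pi_test_and_clear_on(&d) )
            for ( unsigned g = 0; g < 8; g++ )
            {
                uint32_t pending = __atomic_exchange_n(&d.pir[g], 0,
                                                       __ATOMIC_SEQ_CST);
                if ( pending )
                    printf("group %u: %#x\n", g, pending);
            }
        return 0;
    }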
2169 if ( v->arch.hvm_vmx.secondary_exec_control & in vmx_vcpu_update_eptp()
2191 v->arch.hvm_vmx.secondary_exec_control |= mask; in vmx_vcpu_update_vmfunc_ve()
2205 v->arch.hvm_vmx.secondary_exec_control &= in vmx_vcpu_update_vmfunc_ve()
2210 v->arch.hvm_vmx.secondary_exec_control &= ~mask; in vmx_vcpu_update_vmfunc_ve()
2396 vcpu_unblock(container_of(vmx, struct vcpu, arch.hvm_vmx)); in pi_wakeup_interrupt()
2603 v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MOV_DR_EXITING; in vmx_dr_access()
3036 v->arch.hvm_vmx.secondary_exec_control &= in vmx_vlapic_msr_changed()
3044 v->arch.hvm_vmx.secondary_exec_control |= in vmx_vlapic_msr_changed()
3064 v->arch.hvm_vmx.secondary_exec_control |= in vmx_vlapic_msr_changed()
3067 if ( !(v->arch.hvm_vmx.secondary_exec_control & in vmx_vlapic_msr_changed()
3122 v->arch.hvm_vmx.lbr_fixup_enabled |= FIXUP_LBR_TSX; in vmx_msr_write_intercept()
3124 v->arch.hvm_vmx.lbr_fixup_enabled |= in vmx_msr_write_intercept()
3327 msr = &curr->arch.hvm_vmx.msr_area[idx]; in vmx_failed_vmentry()
3360 v->arch.hvm_vmx.vm86_saved_eflags = regs->eflags; in vmx_enter_realmode()
3570 (v->arch.hvm_vmx.secondary_exec_control & in vmx_vmexit_handler()
3575 if ( v->arch.hvm_vmx.secondary_exec_control & in vmx_vmexit_handler()
3615 if ( v->arch.hvm_vmx.vmx_realmode ) in vmx_vmexit_handler()
3619 regs->eflags |= (v->arch.hvm_vmx.vm86_saved_eflags & X86_EFLAGS_IOPL); in vmx_vmexit_handler()
3632 v->arch.hvm_vmx.vmx_emulate = 1; in vmx_vmexit_handler()
3812 v->arch.hvm_vmx.exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; in vmx_vmexit_handler()
3817 v->arch.hvm_vmx.exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; in vmx_vmexit_handler()
4066 v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG; in vmx_vmexit_handler()
4166 unsigned int msr_count = curr->arch.hvm_vmx.msr_count; in lbr_tsx_fixup()
4167 struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area; in lbr_tsx_fixup()
4216 if ( curr->arch.hvm_vmx.lbr_fixup_enabled & FIXUP_LBR_TSX ) in lbr_fixup()
4218 if ( curr->arch.hvm_vmx.lbr_fixup_enabled & FIXUP_BDW_ERRATUM_BDF14 ) in lbr_fixup()
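The lbr_fixup_enabled hits (lines 3122-3124 and 4216-4218) describe a two-flag scheme: when the guest enables LBRs via an MSR write, the erratum workarounds this core needs are latched into lbr_fixup_enabled, and on each VM entry the latched flags select which fixup passes run over the LBR entries in the auto-load MSR area (lines 4166-4167). A sketch of the flag dispatch, with the actual fixup bodies stubbed:

    #include <stdint.h>
    #include <stdio.h>

    #define FIXUP_LBR_TSX            (1u << 0)
    #define FIXUP_BDW_ERRATUM_BDF14  (1u << 1)

    static void lbr_tsx_fixup(void) { puts("adjust TSX-mangled LBRs"); }
    static void bdf14_fixup(void)   { puts("apply BDW BDF14 workaround"); }

    /* Mirrors lbr_fixup (lines 4216-4218): each latched flag runs its
     * own pass; a core may need both. */
    static void lbr_fixup(uint32_t lbr_fixup_enabled)
    {
        if ( lbr_fixup_enabled & FIXUP_LBR_TSX )
            lbr_tsx_fixup();
        if ( lbr_fixup_enabled & FIXUP_BDW_ERRATUM_BDF14 )
            bdf14_fixup();
    }

    int main(void)
    {
        uint32_t flags = 0;
        flags |= FIXUP_LBR_TSX;          /* latched as at line 3122 */
        lbr_fixup(flags);
        return 0;
    }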
4254 curr->arch.hvm_vmx.secondary_exec_control |= in vmx_vmenter_helper()
4261 curr->arch.hvm_vmx.secondary_exec_control &= in vmx_vmenter_helper()
4286 if ( unlikely(curr->arch.hvm_vmx.lbr_fixup_enabled) ) in vmx_vmenter_helper()
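Taken together, the hits above outline the per-vCPU VMX state that arch.hvm_vmx names. The following is a reconstruction from nothing but the references in this listing; field types, array sizes, and ordering are guesses, not Xen's real declaration, and the helper types are minimal stand-ins so the sketch compiles:

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-ins; the real definitions live in Xen's headers. */
    typedef struct { int raw; } spinlock_t;
    struct list_head { struct list_head *prev, *next; };
    struct pi_desc { uint32_t pir[8]; uint32_t control; };
    struct segment_register { uint64_t base; uint32_t limit; uint16_t sel; };
    struct vmx_msr_entry { uint32_t index, mbz; uint64_t data; };
    enum { x86_seg_tr = 6 };

    struct hvm_vmx_like {
        /* Posted interrupts (lines 110-341, 1994-2125, 2396). */
        struct pi_desc pi_desc;
        struct { spinlock_t *lock; struct list_head list; } pi_blocking;
        uint64_t eoi_exit_bitmap[4];
        bool eoi_exitmap_changed;

        /* VMCS and cached controls (430, 632-664, 1408-1421). */
        spinlock_t vmcs_lock;
        uint32_t exec_control;
        uint32_t secondary_exec_control;
        uint32_t exception_bitmap;
        unsigned long host_cr0;

        /* SYSCALL MSRs and shadow GS base (508-614, 862-876, 1285). */
        uint64_t star, lstar, cstar, sfmask, shadow_gs;

        /* Emulated realmode (1155-1236, 1592-1638, 3360-3632). */
        bool vmx_realmode, vmx_emulate;
        uint16_t vm86_segment_mask;
        struct segment_register vm86_saved_seg[x86_seg_tr + 1];
        uint32_t vm86_saved_eflags;

        /* Auto-load MSR area and LBR fixups (3122-3327, 4166-4286). */
        struct vmx_msr_entry *msr_area;
        unsigned int msr_count;
        unsigned int lbr_fixup_enabled;
    };

    int main(void)
    {
        struct hvm_vmx_like v = { .exec_control = 0 };
        (void)v;
        return 0;
    }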