Lines matching refs:vmcb
92 if ( vmcb_get_cpl(v->arch.hvm_svm.vmcb) ) in svm_crash_or_fault()
117 curr->arch.hvm_svm.vmcb->interrupt_shadow = 0; in __update_guest_eip()
167 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_save_dr() local
175 vmcb_set_dr_intercepts(vmcb, ~0u); in svm_save_dr()
194 v->arch.debugreg[6] = vmcb_get_dr6(vmcb); in svm_save_dr()
195 v->arch.debugreg[7] = vmcb_get_dr7(vmcb); in svm_save_dr()
198 static void __restore_debug_registers(struct vmcb_struct *vmcb, struct vcpu *v) in __restore_debug_registers() argument
204 vmcb_set_dr_intercepts(vmcb, 0); in __restore_debug_registers()
225 vmcb_set_dr6(vmcb, v->arch.debugreg[6]); in __restore_debug_registers()
226 vmcb_set_dr7(vmcb, v->arch.debugreg[7]); in __restore_debug_registers()
237 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_restore_dr() local
239 __restore_debug_registers(vmcb, v); in svm_restore_dr()
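The debug-register lines above follow a lazy-switch pattern: while the guest's DR state is swapped out, every DR access is intercepted (vmcb_set_dr_intercepts(vmcb, ~0u)); on the first guest access, __restore_debug_registers() loads the guest values and drops the intercepts (0). Below is a minimal standalone sketch of that pattern; the vmcb/vcpu structs and the dr_active flag are simplified stand-ins, not Xen's real types.

    #include <stdint.h>
    #include <stdbool.h>

    struct vmcb { uint32_t dr_intercepts; uint64_t dr6, dr7; };
    struct vcpu { struct vmcb *vmcb; uint64_t debugreg[8]; bool dr_active; };

    /* Guest touched a DR: load its values and stop intercepting. */
    void restore_debug_registers(struct vcpu *v)
    {
        if ( v->dr_active )
            return;
        v->dr_active = true;
        v->vmcb->dr_intercepts = 0;      /* accesses now go straight through */
        v->vmcb->dr6 = v->debugreg[6];
        v->vmcb->dr7 = v->debugreg[7];
    }

    /* Descheduling: stash the guest values and re-arm the intercepts. */
    void save_debug_registers(struct vcpu *v)
    {
        if ( !v->dr_active )
            return;
        v->dr_active = false;
        v->vmcb->dr_intercepts = ~0u;    /* trap the next guest DR access */
        v->debugreg[6] = v->vmcb->dr6;
        v->debugreg[7] = v->vmcb->dr7;
    }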
244 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_vmcb_save() local
257 if ( vmcb->eventinj.fields.v && in svm_vmcb_save()
258 hvm_event_needs_reinjection(vmcb->eventinj.fields.type, in svm_vmcb_save()
259 vmcb->eventinj.fields.vector) ) in svm_vmcb_save()
261 c->pending_event = (uint32_t)vmcb->eventinj.bytes; in svm_vmcb_save()
262 c->error_code = vmcb->eventinj.fields.errorcode; in svm_vmcb_save()
271 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_vmcb_restore() local
323 vmcb->sysenter_cs = v->arch.hvm_svm.guest_sysenter_cs = c->sysenter_cs; in svm_vmcb_restore()
324 vmcb->sysenter_esp = v->arch.hvm_svm.guest_sysenter_esp = c->sysenter_esp; in svm_vmcb_restore()
325 vmcb->sysenter_eip = v->arch.hvm_svm.guest_sysenter_eip = c->sysenter_eip; in svm_vmcb_restore()
329 vmcb_set_np_enable(vmcb, 1); in svm_vmcb_restore()
330 vmcb_set_g_pat(vmcb, MSR_IA32_CR_PAT_RESET /* guest PAT */); in svm_vmcb_restore()
331 vmcb_set_h_cr3(vmcb, pagetable_get_paddr(p2m_get_pagetable(p2m))); in svm_vmcb_restore()
339 vmcb->eventinj.bytes = c->pending_event; in svm_vmcb_restore()
340 vmcb->eventinj.fields.errorcode = c->error_code; in svm_vmcb_restore()
343 vmcb->eventinj.bytes = 0; in svm_vmcb_restore()
345 vmcb->cleanbits.bytes = 0; in svm_vmcb_restore()
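Taken together, svm_vmcb_save() and svm_vmcb_restore() above preserve an in-flight event injection across a domain save/restore: the save side copies eventinj out only when hvm_event_needs_reinjection() says the event would otherwise be lost, and the restore side writes it back and then zeroes cleanbits.bytes so hardware re-reads the whole VMCB. A standalone sketch of that round trip; the bitfield layout follows the AMD EVENTINJ format, and the ctxt struct is a simplified stand-in:

    #include <stdint.h>
    #include <stdbool.h>

    typedef union {
        uint64_t bytes;
        struct {
            uint64_t vector:8;      /* exception/interrupt vector */
            uint64_t type:3;        /* intr / NMI / exception / swint */
            uint64_t ev:1;          /* error code valid */
            uint64_t resvd:19;
            uint64_t v:1;           /* injection pending */
            uint64_t errorcode:32;
        } fields;
    } eventinj_t;

    struct ctxt { uint32_t pending_event, error_code; };

    void save_pending_event(const eventinj_t *inj, struct ctxt *c,
                            bool needs_reinjection)
    {
        if ( inj->fields.v && needs_reinjection )
        {
            c->pending_event = (uint32_t)inj->bytes;  /* low half: vector/type/V */
            c->error_code    = inj->fields.errorcode;
        }
    }

    void restore_pending_event(eventinj_t *inj, const struct ctxt *c,
                               uint32_t *cleanbits)
    {
        inj->bytes = c->pending_event;
        inj->fields.errorcode = c->error_code;
        *cleanbits = 0;             /* hardware must re-read everything */
    }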
354 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_save_cpu_state() local
356 data->shadow_gs = vmcb->kerngsbase; in svm_save_cpu_state()
357 data->msr_lstar = vmcb->lstar; in svm_save_cpu_state()
358 data->msr_star = vmcb->star; in svm_save_cpu_state()
359 data->msr_cstar = vmcb->cstar; in svm_save_cpu_state()
360 data->msr_syscall_mask = vmcb->sfmask; in svm_save_cpu_state()
368 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_load_cpu_state() local
370 vmcb->kerngsbase = data->shadow_gs; in svm_load_cpu_state()
371 vmcb->lstar = data->msr_lstar; in svm_load_cpu_state()
372 vmcb->star = data->msr_star; in svm_load_cpu_state()
373 vmcb->cstar = data->msr_cstar; in svm_load_cpu_state()
374 vmcb->sfmask = data->msr_syscall_mask; in svm_load_cpu_state()
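svm_save_cpu_state() and svm_load_cpu_state() show that the syscall MSRs (KERNEL_GS_BASE, LSTAR, STAR, CSTAR, SFMASK) live as plain VMCB fields with no get/set wrappers: they are managed by VMLOAD/VMSAVE rather than by clean bits, so direct assignment is safe. A minimal sketch of such a mirror-copy pair, using simplified hypothetical types:

    #include <stdint.h>

    struct vmcb_msrs { uint64_t kerngsbase, lstar, star, cstar, sfmask; };
    struct hvm_hw_cpu { uint64_t shadow_gs, msr_lstar, msr_star,
                        msr_cstar, msr_syscall_mask; };

    void save_cpu_state(const struct vmcb_msrs *vmcb, struct hvm_hw_cpu *data)
    {
        data->shadow_gs        = vmcb->kerngsbase;
        data->msr_lstar        = vmcb->lstar;
        data->msr_star         = vmcb->star;
        data->msr_cstar        = vmcb->cstar;
        data->msr_syscall_mask = vmcb->sfmask;
    }

    void load_cpu_state(struct vmcb_msrs *vmcb, const struct hvm_hw_cpu *data)
    {
        vmcb->kerngsbase = data->shadow_gs;
        vmcb->lstar      = data->msr_lstar;
        vmcb->star       = data->msr_star;
        vmcb->cstar      = data->msr_cstar;
        vmcb->sfmask     = data->msr_syscall_mask;
    }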
497 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_get_interrupt_shadow() local
500 if ( vmcb->interrupt_shadow ) in svm_get_interrupt_shadow()
503 if ( vmcb_get_general1_intercepts(vmcb) & GENERAL1_INTERCEPT_IRET ) in svm_get_interrupt_shadow()
511 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_set_interrupt_shadow() local
512 u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb); in svm_set_interrupt_shadow()
514 vmcb->interrupt_shadow = in svm_set_interrupt_shadow()
520 vmcb_set_general1_intercepts(vmcb, general1_intercepts); in svm_set_interrupt_shadow()
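svm_set_interrupt_shadow() is the canonical read-modify-write shape for the intercept words: fetch with vmcb_get_general1_intercepts(), toggle a bit, write back with vmcb_set_general1_intercepts(), the setter also marking the state dirty for hardware. A standalone model of that pattern; the one-word clean-bit tracking is simplified, and while the IRET bit position follows the AMD APM it should be treated as illustrative:

    #include <stdint.h>

    #define GENERAL1_INTERCEPT_IRET (1u << 20)   /* illustrative bit position */

    struct vmcb { uint32_t general1_intercepts; uint32_t cleanbits; };

    uint32_t vmcb_get_general1_intercepts(const struct vmcb *vmcb)
    {
        return vmcb->general1_intercepts;
    }

    void vmcb_set_general1_intercepts(struct vmcb *vmcb, uint32_t val)
    {
        vmcb->general1_intercepts = val;
        vmcb->cleanbits = 0;     /* force hardware to re-read the intercepts */
    }

    /* Model of the shadow logic: intercept IRET while an NMI is in flight
     * so we notice when the shadow window ends. */
    void set_interrupt_shadow(struct vmcb *vmcb, int nmi_shadow)
    {
        uint32_t g1 = vmcb_get_general1_intercepts(vmcb);

        if ( nmi_shadow )
            g1 |= GENERAL1_INTERCEPT_IRET;
        else
            g1 &= ~GENERAL1_INTERCEPT_IRET;

        vmcb_set_general1_intercepts(vmcb, g1);
    }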
525 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_guest_x86_mode() local
531 if ( hvm_long_mode_active(v) && likely(vmcb->cs.l) ) in svm_guest_x86_mode()
533 return likely(vmcb->cs.db) ? 4 : 2; in svm_guest_x86_mode()
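svm_guest_x86_mode() derives the guest's operand width from two CS attributes: in long mode with cs.l set the guest executes 64-bit code; otherwise cs.db selects 32-bit vs 16-bit. (The real function also handles real mode and vm86, which the matched lines elide.) A sketch of just that decode:

    /* Returns the default operand width in bytes: 8, 4 or 2.
     * Real-mode and vm86 special cases from the original are omitted. */
    int guest_x86_mode(int long_mode_active, int cs_l, int cs_db)
    {
        if ( long_mode_active && cs_l )
            return 8;                 /* 64-bit code segment */
        return cs_db ? 4 : 2;         /* D/B bit: 32-bit vs 16-bit */
    }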
538 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_update_guest_cr() local
550 else if ( vmcb_get_cr0(vmcb) & X86_CR0_TS ) in svm_update_guest_cr()
557 vmcb_set_cr0(vmcb, value); in svm_update_guest_cr()
561 vmcb_set_cr2(vmcb, v->arch.hvm_vcpu.guest_cr[2]); in svm_update_guest_cr()
564 vmcb_set_cr3(vmcb, v->arch.hvm_vcpu.hw_cr[3]); in svm_update_guest_cr()
597 vmcb_set_cr4(vmcb, value); in svm_update_guest_cr()
606 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_update_guest_efer() local
613 vmcb_set_efer(vmcb, new_efer); in svm_update_guest_efer()
619 struct vmcb_struct *vmcb = arch_svm->vmcb; in svm_update_guest_vendor() local
620 u32 bitmap = vmcb_get_exception_intercepts(vmcb); in svm_update_guest_vendor()
628 vmcb_set_exception_intercepts(vmcb, bitmap); in svm_update_guest_vendor()
640 svm_vmsave(arch_svm->vmcb); in svm_sync_vmcb()
645 return vmcb_get_cpl(v->arch.hvm_svm.vmcb); in svm_get_cpl()
651 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_get_segment_register() local
662 *reg = vmcb->sreg[seg]; in svm_get_segment_register()
665 reg->dpl = vmcb_get_cpl(vmcb); in svm_get_segment_register()
670 *reg = vmcb->tr; in svm_get_segment_register()
674 *reg = vmcb->gdtr; in svm_get_segment_register()
678 *reg = vmcb->idtr; in svm_get_segment_register()
683 *reg = vmcb->ldtr; in svm_get_segment_register()
696 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_set_segment_register() local
707 vmcb->cleanbits.fields.seg = 0; in svm_set_segment_register()
712 vmcb->cleanbits.fields.dt = 0; in svm_set_segment_register()
734 vmcb_set_cpl(vmcb, reg->dpl); in svm_set_segment_register()
739 vmcb->sreg[seg] = *reg; in svm_set_segment_register()
743 vmcb->tr = *reg; in svm_set_segment_register()
747 vmcb->gdtr.base = reg->base; in svm_set_segment_register()
748 vmcb->gdtr.limit = reg->limit; in svm_set_segment_register()
752 vmcb->idtr.base = reg->base; in svm_set_segment_register()
753 vmcb->idtr.limit = reg->limit; in svm_set_segment_register()
757 vmcb->ldtr = *reg; in svm_set_segment_register()
766 svm_vmload(vmcb); in svm_set_segment_register()
771 return v->arch.hvm_svm.vmcb->kerngsbase; in svm_get_shadow_gs_base()
776 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_set_guest_pat() local
781 vmcb_set_g_pat(vmcb, gpat); in svm_set_guest_pat()
787 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_get_guest_pat() local
792 *gpat = vmcb_get_g_pat(vmcb); in svm_get_guest_pat()
835 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_set_tsc_offset() local
841 vmcb_set_tsc_offset(vmcb, offset); in svm_set_tsc_offset()
863 vmcb_set_tsc_offset(vmcb, offset + n2_tsc_offset); in svm_set_tsc_offset()
868 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_set_rdtsc_exiting() local
869 u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb); in svm_set_rdtsc_exiting()
870 u32 general2_intercepts = vmcb_get_general2_intercepts(vmcb); in svm_set_rdtsc_exiting()
881 vmcb_set_general1_intercepts(vmcb, general1_intercepts); in svm_set_rdtsc_exiting()
882 vmcb_set_general2_intercepts(vmcb, general2_intercepts); in svm_set_rdtsc_exiting()
887 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_set_descriptor_access_exiting() local
888 u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb); in svm_set_descriptor_access_exiting()
899 vmcb_set_general1_intercepts(vmcb, general1_intercepts); in svm_set_descriptor_access_exiting()
904 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_get_insn_bytes() local
910 memcpy(buf, vmcb->guest_ins, MAX_INST_LEN); in svm_get_insn_bytes()
1048 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_ctxt_switch_to() local
1069 svm_vmload(vmcb); in svm_ctxt_switch_to()
1070 vmcb->cleanbits.bytes = 0; in svm_ctxt_switch_to()
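The cleanbits accesses scattered through the listing (fields.seg and fields.dt in svm_set_segment_register(), bytes = 0 after restore and after the svm_vmload() here) drive the VMCB state-caching protocol: a set bit tells the CPU the corresponding VMCB area is unchanged since the last VMRUN, so software clears the matching bit after touching that area and clears the whole word whenever the cached copy can no longer be trusted (e.g. after a context switch to a possibly different physical CPU). The union below sketches the shape those accesses imply; the bit layout follows the AMD APM clean-bits definition but is illustrative, not Xen's declaration:

    #include <stdint.h>

    typedef union {
        uint32_t bytes;               /* bytes = 0: every area is dirty */
        struct {
            uint32_t i:1, iopm:1, asid:1, tpr:1, np:1, cr:1, dr:1;
            uint32_t dt:1;            /* GDTR/IDTR, cleared by dt writes  */
            uint32_t seg:1;           /* CS/DS/ES/SS + CPL, cleared above */
            uint32_t cr2:1, lbr:1, avic:1;
        } fields;
    } vmcb_cleanbits_t;

    /* After changing a guest segment register: */
    void dirty_seg(vmcb_cleanbits_t *cb) { cb->fields.seg = 0; }

    /* After VMLOAD, restore, or a physical-CPU switch: */
    void dirty_all(vmcb_cleanbits_t *cb) { cb->bytes = 0; }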
1080 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_do_resume() local
1091 uint32_t intercepts = vmcb_get_exception_intercepts(vmcb); in svm_do_resume()
1095 vmcb, debug_state ? (intercepts | (1U << TRAP_int3)) in svm_do_resume()
1113 intr = vmcb_get_vintr(vmcb); in svm_do_resume()
1116 vmcb_set_vintr(vmcb, intr); in svm_do_resume()
1240 const struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb; in svm_emul_swint_injection() local
1249 bool lm = vmcb_get_efer(vmcb) & EFER_LMA; in svm_emul_swint_injection()
1252 if ( !(vmcb_get_cr0(vmcb) & X86_CR0_PE) ) in svm_emul_swint_injection()
1319 vmcb_get_cpl(vmcb) > ((idte.b >> 13) & 3) ) in svm_emul_swint_injection()
1347 struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb; in svm_inject_event() local
1348 eventinj_t eventinj = vmcb->eventinj; in svm_inject_event()
1375 __restore_debug_registers(vmcb, curr); in svm_inject_event()
1376 vmcb_set_dr6(vmcb, vmcb_get_dr6(vmcb) | 0x4000); in svm_inject_event()
1420 vmcb->nextrip = regs->rip + _event.insn_len; in svm_inject_event()
1434 vmcb->nextrip = regs->rip; in svm_inject_event()
1444 vmcb->nextrip = regs->rip + _event.insn_len; in svm_inject_event()
1461 if ( !((vmcb_get_efer(vmcb) & EFER_LMA) && vmcb->cs.l) ) in svm_inject_event()
1464 vmcb->nextrip = (uint32_t)vmcb->nextrip; in svm_inject_event()
1469 vmcb->eventinj = eventinj; in svm_inject_event()
1474 vmcb_set_cr2(vmcb, _event.cr2); in svm_inject_event()
1485 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_event_pending() local
1486 return vmcb->eventinj.fields.v; in svm_event_pending()
1759 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_fpu_dirty_intercept() local
1764 if ( vmcb != n1vmcb ) in svm_fpu_dirty_intercept()
1775 vmcb_set_cr0(vmcb, vmcb_get_cr0(vmcb) & ~X86_CR0_TS); in svm_fpu_dirty_intercept()
1805 struct vmcb_struct *vmcb, struct cpu_user_regs *regs) in svm_vmexit_do_cr_access() argument
1809 cr = vmcb->exitcode - VMEXIT_CR0_READ; in svm_vmexit_do_cr_access()
1812 gp = vmcb->exitinfo1 & 0xf; in svm_vmexit_do_cr_access()
1817 __update_guest_eip(regs, vmcb->nextrip - vmcb->rip); in svm_vmexit_do_cr_access()
1822 struct vmcb_struct *vmcb = vcpu_nestedhvm(v).nv_n1vmcx; in svm_dr_access() local
1825 __restore_debug_registers(vmcb, v); in svm_dr_access()
1832 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_msr_read_intercept() local
1865 *msr_content = vmcb_get_debugctlmsr(vmcb); in svm_msr_read_intercept()
1869 *msr_content = vmcb_get_lastbranchfromip(vmcb); in svm_msr_read_intercept()
1873 *msr_content = vmcb_get_lastbranchtoip(vmcb); in svm_msr_read_intercept()
1877 *msr_content = vmcb_get_lastintfromip(vmcb); in svm_msr_read_intercept()
1881 *msr_content = vmcb_get_lastinttoip(vmcb); in svm_msr_read_intercept()
1973 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_msr_write_intercept() local
1993 vmcb->sysenter_cs = v->arch.hvm_svm.guest_sysenter_cs = msr_content; in svm_msr_write_intercept()
1996 vmcb->sysenter_esp = v->arch.hvm_svm.guest_sysenter_esp = msr_content; in svm_msr_write_intercept()
1999 vmcb->sysenter_eip = v->arch.hvm_svm.guest_sysenter_eip = msr_content; in svm_msr_write_intercept()
2003 vmcb_set_debugctlmsr(vmcb, msr_content); in svm_msr_write_intercept()
2006 vmcb->lbr_control.fields.enable = 1; in svm_msr_write_intercept()
2015 vmcb_set_lastbranchfromip(vmcb, msr_content); in svm_msr_write_intercept()
2019 vmcb_set_lastbranchtoip(vmcb, msr_content); in svm_msr_write_intercept()
2023 vmcb_set_lastintfromip(vmcb, msr_content); in svm_msr_write_intercept()
2027 vmcb_set_lastinttoip(vmcb, msr_content); in svm_msr_write_intercept()
2113 svm_vmload(vmcb); in svm_msr_write_intercept()
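In svm_msr_write_intercept(), a write to MSR_IA32_DEBUGCTLMSR both stores the value (vmcb_set_debugctlmsr()) and switches on lbr_control.fields.enable, after which the four last-branch/last-interrupt MSRs become plain VMCB accessor writes; the trailing svm_vmload() reloads the VMLOAD-managed state. A condensed sketch of the DEBUGCTL path, with simplified hypothetical types:

    #include <stdint.h>

    struct vmcb {
        uint64_t debugctlmsr;
        union { uint64_t bytes; struct { uint64_t enable:1; } fields; } lbr_control;
    };

    /* Model of the MSR_IA32_DEBUGCTLMSR write path: store the value and
     * turn on hardware LBR virtualisation once the guest asks for LBRs. */
    void write_debugctl(struct vmcb *vmcb, uint64_t msr_content)
    {
        vmcb->debugctlmsr = msr_content;
        if ( msr_content )            /* any LBR/BTF bits requested */
            vmcb->lbr_control.fields.enable = 1;
    }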
2124 bool rdmsr = curr->arch.hvm_svm.vmcb->exitinfo1 == 0; in svm_do_msr_access()
2148 static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb, in svm_vmexit_do_hlt() argument
2236 svm_vmexit_do_vmload(struct vmcb_struct *vmcb, in svm_vmexit_do_vmload() argument
2272 svm_vmexit_do_vmsave(struct vmcb_struct *vmcb, in svm_vmexit_do_vmsave() argument
2395 const struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_get_pending_event() local
2397 if ( vmcb->eventinj.fields.v ) in svm_get_pending_event()
2400 info->vector = vmcb->eventinj.fields.vector; in svm_get_pending_event()
2401 info->type = vmcb->eventinj.fields.type; in svm_get_pending_event()
2402 info->error_code = vmcb->eventinj.fields.errorcode; in svm_get_pending_event()
2468 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; in svm_vmexit_handler() local
2479 vmcb_get_cr3(vmcb); in svm_vmexit_handler()
2493 intr = vmcb_get_vintr(vmcb); in svm_vmexit_handler()
2499 exit_reason = vmcb->exitcode; in svm_vmexit_handler()
2523 ns_vmcb->exitinfo1 = vmcb->exitinfo1; in svm_vmexit_handler()
2536 exitinfo1 = vmcb->exitinfo1; in svm_vmexit_handler()
2537 exitinfo2 = vmcb->exitinfo2; in svm_vmexit_handler()
2576 svm_vmcb_dump(__func__, vmcb); in svm_vmexit_handler()
2585 vmcb->cleanbits.bytes = cpu_has_svm_cleanbits ? ~0u : 0u; in svm_vmexit_handler()
2588 eventinj = vmcb->exitintinfo; in svm_vmexit_handler()
2592 vmcb->eventinj = eventinj; in svm_vmexit_handler()
2635 va = vmcb->exitinfo2; in svm_vmexit_handler()
2636 regs->error_code = vmcb->exitinfo1; in svm_vmexit_handler()
2643 v->arch.hvm_svm.cached_insn_len = vmcb->guest_ins_len & 0xf; in svm_vmexit_handler()
2664 hvm_inject_hw_exception(TRAP_alignment_check, vmcb->exitinfo1); in svm_vmexit_handler()
2678 u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb); in svm_vmexit_handler()
2679 intr = vmcb_get_vintr(vmcb); in svm_vmexit_handler()
2684 vmcb_set_vintr(vmcb, intr); in svm_vmexit_handler()
2685 vmcb_set_general1_intercepts(vmcb, general1_intercepts); in svm_vmexit_handler()
2697 if ( (vmcb->exitinfo2 >> 36) & 1 ) in svm_vmexit_handler()
2699 else if ( (vmcb->exitinfo2 >> 38) & 1 ) in svm_vmexit_handler()
2703 if ( (vmcb->exitinfo2 >> 44) & 1 ) in svm_vmexit_handler()
2704 errcode = (uint32_t)vmcb->exitinfo2; in svm_vmexit_handler()
2712 vmcb->eventinj.bytes = 0; in svm_vmexit_handler()
2714 hvm_task_switch((uint16_t)vmcb->exitinfo1, reason, errcode); in svm_vmexit_handler()
2723 svm_vmexit_do_hlt(vmcb, regs); in svm_vmexit_handler()
2727 if ( (vmcb->exitinfo1 & (1u<<2)) == 0 ) in svm_vmexit_handler()
2729 uint16_t port = (vmcb->exitinfo1 >> 16) & 0xFFFF; in svm_vmexit_handler()
2730 int bytes = ((vmcb->exitinfo1 >> 4) & 0x07); in svm_vmexit_handler()
2731 int dir = (vmcb->exitinfo1 & 1) ? IOREQ_READ : IOREQ_WRITE; in svm_vmexit_handler()
2733 __update_guest_eip(regs, vmcb->exitinfo2 - vmcb->rip); in svm_vmexit_handler()
2741 if ( cpu_has_svm_decode && (vmcb->exitinfo1 & (1ULL << 63)) ) in svm_vmexit_handler()
2742 svm_vmexit_do_cr_access(vmcb, regs); in svm_vmexit_handler()
2750 svm_invlpg_intercept(vmcb->exitinfo1); in svm_vmexit_handler()
2751 __update_guest_eip(regs, vmcb->nextrip - vmcb->rip); in svm_vmexit_handler()
2803 svm_vmexit_do_vmload(vmcb, regs, v, regs->rax); in svm_vmexit_handler()
2806 svm_vmexit_do_vmsave(vmcb, regs, v, regs->rax); in svm_vmexit_handler()
2819 if ( vmcb_get_cpl(vmcb) ) in svm_vmexit_handler()
2829 v->arch.hvm_svm.cached_insn_len = vmcb->guest_ins_len & 0xf; in svm_vmexit_handler()
2830 rc = vmcb->exitinfo1 & PFEC_page_present in svm_vmexit_handler()
2831 ? p2m_pt_handle_deferred_changes(vmcb->exitinfo2) : 0; in svm_vmexit_handler()
2833 svm_do_nested_pgfault(v, regs, vmcb->exitinfo1, vmcb->exitinfo2); in svm_vmexit_handler()
2838 v, rc, vmcb->exitinfo2, vmcb->exitinfo1); in svm_vmexit_handler()
2845 u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb); in svm_vmexit_handler()
2856 vmcb->interrupt_shadow = 1; in svm_vmexit_handler()
2858 vmcb_set_general1_intercepts(vmcb, general1_intercepts); in svm_vmexit_handler()
2868 hvm_descriptor_access_intercept(vmcb->exitintinfo.bytes, 0, in svm_vmexit_handler()
2874 hvm_descriptor_access_intercept(vmcb->exitintinfo.bytes, 0, in svm_vmexit_handler()
2880 hvm_descriptor_access_intercept(vmcb->exitintinfo.bytes, 0, in svm_vmexit_handler()
2886 hvm_descriptor_access_intercept(vmcb->exitintinfo.bytes, 0, in svm_vmexit_handler()
2894 exit_reason, vmcb->exitinfo1, vmcb->exitinfo2); in svm_vmexit_handler()
2904 intr = vmcb_get_vintr(vmcb); in svm_vmexit_handler()
2907 vmcb_set_vintr(vmcb, intr); in svm_vmexit_handler()
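The IOIO lines in svm_vmexit_handler() above decode EXITINFO1 in place: bit 0 gives the direction (IN vs OUT), bit 2 distinguishes string from single-port I/O, the one-hot size field at bits 4-6 encodes the access width in bytes, and bits 16-31 carry the port number; EXITINFO2 holds the rIP of the next instruction, so the delta against vmcb->rip is the instruction length. A small standalone decoder built from exactly those shifts and masks:

    #include <stdint.h>
    #include <stdio.h>

    /* Decode an SVM IOIO intercept's EXITINFO1, mirroring the matched lines. */
    void decode_ioio(uint64_t exitinfo1, uint64_t exitinfo2, uint64_t rip)
    {
        int      is_in  = exitinfo1 & 1;               /* bit 0: 1 = IN     */
        int      is_str = (exitinfo1 >> 2) & 1;        /* bit 2: string op  */
        int      bytes  = (exitinfo1 >> 4) & 0x07;     /* one-hot: 1/2/4    */
        uint16_t port   = (exitinfo1 >> 16) & 0xFFFF;  /* bits 16-31        */

        printf("%s%s port %#x, %d byte(s), insn len %llu\n",
               is_in ? "IN" : "OUT", is_str ? "S" : "",
               port, bytes, (unsigned long long)(exitinfo2 - rip));
    }

    int main(void)
    {
        /* Example: single-byte OUT to port 0x80 by a 1-byte instruction. */
        decode_ioio((0x80ull << 16) | (1 << 4), 0x1001, 0x1000);
        return 0;
    }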