| /hypervisor/arch/x86/guest/ |
| vcpu.c |
     43   &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;   in vcpu_get_gpreg()
     51   &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;   in vcpu_set_gpreg()
     59   &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;   in vcpu_get_rip()
     70   vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.rip = val;   in vcpu_set_rip()
     77   &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;   in vcpu_get_rsp()
     85   &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;   in vcpu_set_rsp()
     94   &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;   in vcpu_get_efer()
    101   vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.ia32_efer   in vcpu_set_efer()
    119   &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;   in vcpu_get_rflags()
    661   &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;   in write_cached_registers()
    [all …]
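
The vcpu.c hits all funnel register access through the run context of the currently active world, selected by vcpu->arch.cur_context. Below is a minimal, self-contained sketch of that accessor pattern; the structures are heavily trimmed stand-ins for ACRN's real acrn_vcpu/run_context types and exist only to illustrate the indirection.

    #include <stdint.h>

    #define NR_WORLD   2U
    #define NR_GPRS    16U

    /* Simplified stand-ins for the real ACRN structures. */
    struct run_context {
        uint64_t gprs[NR_GPRS];
        uint64_t rip;
        uint64_t rsp;
        uint64_t ia32_efer;
    };

    struct guest_cpu_context {
        struct run_context run_ctx;
    };

    struct acrn_vcpu_arch_sketch {
        struct guest_cpu_context contexts[NR_WORLD];
        uint16_t cur_context;          /* NORMAL_WORLD or SECURE_WORLD */
    };

    struct acrn_vcpu_sketch {
        struct acrn_vcpu_arch_sketch arch;
    };

    /* Every accessor first picks the run context of the currently active world. */
    static uint64_t vcpu_get_gpreg_sketch(const struct acrn_vcpu_sketch *vcpu, uint32_t reg)
    {
        const struct run_context *ctx =
            &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;

        return ctx->gprs[reg];
    }

    static void vcpu_set_rip_sketch(struct acrn_vcpu_sketch *vcpu, uint64_t val)
    {
        vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.rip = val;
    }

Because every accessor re-reads cur_context, the Trusty world switch (trusty.c below) can retarget register access simply by changing that index.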
|
| virq.c |
    132   bitmap_set_lock(eventid, &vcpu->arch.pending_req);   in vcpu_make_request()
    188   struct acrn_vcpu_arch *arch = &vcpu->arch;   in vcpu_queue_exception() local
    220   arch->exception_info.exception = vector;   in vcpu_queue_exception()
    223   arch->exception_info.error = err_code;   in vcpu_queue_exception()
    225   arch->exception_info.error = 0U;   in vcpu_queue_exception()
    244   vcpu->arch.exception_info.error);   in vcpu_inject_exception()
    320   vcpu->arch.irq_window_enabled = false;   in interrupt_window_vmexit_handler()
    364   struct acrn_vcpu_arch *arch = &vcpu->arch;   in acrn_handle_pending_request() local
    393   flush_vpid_single(arch->vpid);   in acrn_handle_pending_request()
    438   arch->idt_vectoring_info = 0U;   in acrn_handle_pending_request()
    [all …]
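
virq.c posts events to a vCPU by setting bits in vcpu->arch.pending_req and parks a queued exception in arch->exception_info until it can be injected. A rough sketch of that idea, using a C11 atomic OR where ACRN uses its own bitmap_set_lock(); the request IDs and the has_error_code flag here are illustrative.

    #include <stdatomic.h>
    #include <stdint.h>

    #define REQ_EXCP    0U    /* illustrative event IDs */
    #define REQ_EVENT   1U

    struct vcpu_arch_irq_sketch {
        _Atomic uint64_t pending_req;   /* one bit per pending request kind     */
        struct {
            uint32_t exception;         /* queued vector                        */
            uint32_t error;             /* error code, if the vector has one    */
        } exception_info;
    };

    /* Post an event; the vCPU thread consumes it before the next VM entry. */
    static void vcpu_make_request_sketch(struct vcpu_arch_irq_sketch *arch, uint32_t eventid)
    {
        atomic_fetch_or(&arch->pending_req, 1UL << eventid);
    }

    static void vcpu_queue_exception_sketch(struct vcpu_arch_irq_sketch *arch,
                                            uint32_t vector, uint32_t err_code,
                                            int has_error_code)
    {
        arch->exception_info.exception = vector;
        arch->exception_info.error = has_error_code ? err_code : 0U;
        vcpu_make_request_sketch(arch, REQ_EXCP);
    }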
|
| trusty.c |
    159   vcpu->arch.contexts[vcpu->arch.cur_context].world_msrs[i] = vcpu->arch.guest_msrs[i];   in save_world_ctx()
    212   vcpu->arch.guest_msrs[i] = vcpu->arch.contexts[!vcpu->arch.cur_context].world_msrs[i];   in load_world_ctx()
    227   struct acrn_vcpu_arch *arch = &vcpu->arch;   in switch_world() local
    233   load_world_ctx(vcpu, &arch->contexts[next_world].ext_ctx);   in switch_world()
    236   copy_smc_param(&arch->contexts[!next_world].run_ctx,   in switch_world()
    237       &arch->contexts[next_world].run_ctx);   in switch_world()
    255   arch->cur_context = next_world;   in switch_world()
    327   vcpu->arch.inst_len = 0U;   in init_secure_world_env()
    336   vcpu->arch.contexts[NORMAL_WORLD].world_msrs[i] = vcpu->arch.guest_msrs[i];   in init_secure_world_env()
    337   vcpu->arch.contexts[SECURE_WORLD].world_msrs[i] = vcpu->arch.guest_msrs[i];   in init_secure_world_env()
    [all …]
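
switch_world() saves the outgoing world's context, loads the incoming one, and only then flips arch->cur_context, which is what retargets every contexts[cur_context] access. A condensed sketch of that flow; the save/load helpers are empty placeholders for the real save_world_ctx()/load_world_ctx(), and the context struct is reduced to a dummy.

    #define NORMAL_WORLD    0
    #define SECURE_WORLD    1

    /* Assumed minimal shape; the real contexts also hold VMCS-backed state. */
    struct world_ctx_sketch { int dummy; };

    struct vcpu_world_sketch {
        struct world_ctx_sketch contexts[2];
        int cur_context;
    };

    static void save_world_ctx_sketch(struct world_ctx_sketch *ctx) { (void)ctx; }
    static void load_world_ctx_sketch(struct world_ctx_sketch *ctx) { (void)ctx; }

    static void switch_world_sketch(struct vcpu_world_sketch *vcpu, int next_world)
    {
        /* 1. stash the state of the world being left */
        save_world_ctx_sketch(&vcpu->contexts[vcpu->cur_context]);

        /* 2. restore the state of the world being entered */
        load_world_ctx_sketch(&vcpu->contexts[next_world]);

        /* 3. from now on every contexts[cur_context] access sees the new world */
        vcpu->cur_context = next_world;
    }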
|
| vmexit.c |
    239   vcpu->arch.idt_vectoring_info = 0U;   in vmexit_handler()
    242   vcpu->arch.idt_vectoring_info = 0U;   in vmexit_handler()
    324   if (vcpu->arch.emulating_lock) {   in mtf_vmexit_handler()
    325   vcpu->arch.emulating_lock = false;   in mtf_vmexit_handler()
    399   idx = vcpu->arch.cur_context;   in xsetbv_vmexit_handler()
    482   vcpu->arch.IWKey.encryption_key[0] = xmm[2];   in loadiwkey_vmexit_handler()
    483   vcpu->arch.IWKey.encryption_key[1] = xmm[3];   in loadiwkey_vmexit_handler()
    484   vcpu->arch.IWKey.encryption_key[2] = xmm[4];   in loadiwkey_vmexit_handler()
    485   vcpu->arch.IWKey.encryption_key[3] = xmm[5];   in loadiwkey_vmexit_handler()
    486   vcpu->arch.IWKey.integrity_key[0] = xmm[0];   in loadiwkey_vmexit_handler()
    [all …]
|
| vmcs.c |
     26   struct guest_cpu_context *ctx = &vcpu->arch.contexts[vcpu->arch.cur_context];   in init_guest_vmx()
     77   struct guest_cpu_context *ctx = &vcpu->arch.contexts[vcpu->arch.cur_context];   in init_guest_state()
    322   vcpu->arch.proc_vm_exec_ctrls = value32;   in init_exec_ctrl()
    364   vcpu->arch.xsave_enabled = true;   in init_exec_ctrl()
    568   (void)memcpy_s(vcpu->arch.vmcs, 4U, (void *)&vmx_rev_id, 4U);   in init_vmcs()
    571   clear_va_vmcs(vcpu->arch.vmcs);   in init_vmcs()
    574   load_va_vmcs(vcpu->arch.vmcs);   in init_vmcs()
    575   *vmcs_ptr = (void *)vcpu->arch.vmcs;   in init_vmcs()
    594   load_va_vmcs(vcpu->arch.vmcs);   in load_vmcs()
    595   *vmcs_ptr = (void *)vcpu->arch.vmcs;   in load_vmcs()
    [all …]
|
| vmsr.c |
    381   vcpu->arch.msr_area.guest[vcpu->arch.msr_area.count].msr_index = MSR_IA32_PERF_GLOBAL_CTRL;   in prepare_auto_msr_area()
    382   vcpu->arch.msr_area.guest[vcpu->arch.msr_area.count].value = 0;   in prepare_auto_msr_area()
    383   vcpu->arch.msr_area.host[vcpu->arch.msr_area.count].msr_index = MSR_IA32_PERF_GLOBAL_CTRL;   in prepare_auto_msr_area()
    384   vcpu->arch.msr_area.host[vcpu->arch.msr_area.count].value = 0;   in prepare_auto_msr_area()
    385   vcpu->arch.msr_area.count++;   in prepare_auto_msr_area()
    400   vcpu->arch.msr_area.guest[vcpu->arch.msr_area.count].msr_index = MSR_IA32_PQR_ASSOC;   in prepare_auto_msr_area()
    401   vcpu->arch.msr_area.guest[vcpu->arch.msr_area.count].value = clos2pqr_msr(vcpu_clos);   in prepare_auto_msr_area()
    402   vcpu->arch.msr_area.host[vcpu->arch.msr_area.count].msr_index = MSR_IA32_PQR_ASSOC;   in prepare_auto_msr_area()
    403   vcpu->arch.msr_area.host[vcpu->arch.msr_area.count].value = clos2pqr_msr(hv_clos);   in prepare_auto_msr_area()
    404   vcpu->arch.msr_area.index_of_pqr_assoc = vcpu->arch.msr_area.count;   in prepare_auto_msr_area()
    [all …]
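
prepare_auto_msr_area() fills the VM-entry/VM-exit MSR auto-load area in vcpu->arch.msr_area: each slot gets a guest entry and a matching host entry at the same index, then count is advanced. A simplified sketch of appending one such pair; the entry layout (index + value) just mirrors what the hits show, and MSR_AREA_COUNT is an assumed capacity.

    #include <stdint.h>

    #define MSR_AREA_COUNT              4U       /* assumed capacity            */
    #define MSR_IA32_PERF_GLOBAL_CTRL   0x38FU   /* architectural MSR number    */

    struct msr_store_entry_sketch {
        uint32_t msr_index;
        uint64_t value;
    };

    struct msr_store_area_sketch {
        struct msr_store_entry_sketch guest[MSR_AREA_COUNT]; /* loaded on VM entry */
        struct msr_store_entry_sketch host[MSR_AREA_COUNT];  /* loaded on VM exit  */
        uint32_t count;
    };

    /* Append one guest/host pair at the same slot; the caller must keep
     * count below MSR_AREA_COUNT. */
    static void append_auto_msr_sketch(struct msr_store_area_sketch *area,
                                       uint32_t msr, uint64_t guest_val,
                                       uint64_t host_val)
    {
        area->guest[area->count].msr_index = msr;
        area->guest[area->count].value = guest_val;
        area->host[area->count].msr_index = msr;
        area->host[area->count].value = host_val;
        area->count++;
    }

Recording the slot index at insertion time (index_of_pqr_assoc, hit 404) is what lets vcat.c further down rewrite guest[index_of_pqr_assoc].value in place when write_vclosid() updates the CLOS.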
|
| lock_instr_emul.c |
    115   vcpu->arch.inst_len = 1U;   in emulate_lock_instr()
    118   vcpu->arch.proc_vm_exec_ctrls |= VMX_PROCBASED_CTLS_MON_TRAP;   in emulate_lock_instr()
    119   exec_vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, vcpu->arch.proc_vm_exec_ctrls);   in emulate_lock_instr()
    120   vcpu->arch.emulating_lock = true;   in emulate_lock_instr()
    143   vcpu->arch.emulating_lock = true;   in emulate_lock_instr()
    145   vcpu->arch.emulating_lock = false;   in emulate_lock_instr()
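
emulate_lock_instr() single-steps a guest LOCK-prefixed access by switching on the monitor trap flag in the primary processor-based execution controls and marking emulating_lock; mtf_vmexit_handler() in vmexit.c above clears it again. A hedged sketch of the toggle, with vmwrite32() standing in for ACRN's exec_vmwrite32(); the control-field encoding and MTF bit position follow the Intel SDM.

    #include <stdint.h>

    #define VMX_PROC_VM_EXEC_CONTROLS     0x4002U    /* VMCS field encoding (SDM) */
    #define VMX_PROCBASED_CTLS_MON_TRAP   (1U << 27) /* monitor trap flag (SDM)   */

    struct vcpu_arch_mtf_sketch {
        uint32_t proc_vm_exec_ctrls;   /* cached copy of the VMCS control field */
        int emulating_lock;
    };

    /* Placeholder for the real VMWRITE wrapper. */
    static void vmwrite32(uint32_t field, uint32_t value) { (void)field; (void)value; }

    static void start_lock_emulation_sketch(struct vcpu_arch_mtf_sketch *arch)
    {
        arch->proc_vm_exec_ctrls |= VMX_PROCBASED_CTLS_MON_TRAP;
        vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, arch->proc_vm_exec_ctrls);
        arch->emulating_lock = 1;    /* tells the MTF VM-exit handler why it fired */
    }

    static void stop_lock_emulation_sketch(struct vcpu_arch_mtf_sketch *arch)
    {
        arch->proc_vm_exec_ctrls &= ~VMX_PROCBASED_CTLS_MON_TRAP;
        vmwrite32(VMX_PROC_VM_EXEC_CONTROLS, arch->proc_vm_exec_ctrls);
        arch->emulating_lock = 0;
    }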
|
| nested.c |
    575   offset = vcpu->arch.exit_qualification;   in get_vmx_memory_operand()
    715   vcpu->arch.nested.current_vvmcs = NULL;   in reset_vvmcs()
    718   vvmcs = &vcpu->arch.nested.vvmcs[idx];   in reset_vvmcs()
    761   vcpu->arch.nested.vmxon = true;   in vmxon_vmexit_handler()
    784   if ((vcpu->arch.nested.vmxon == false)   in check_vmx_permission()
    807   vcpu->arch.nested.vmxon = false;   in vmxoff_vmexit_handler()
   1210   load_va_vmcs(vcpu->arch.vmcs);   in vmptrld_vmexit_handler()
   1255   load_va_vmcs(vcpu->arch.vmcs);   in vmclear_vmexit_handler()
   1294   return vcpu->arch.nested.in_l2_guest;   in is_vcpu_in_l2_guest()
   1444   load_va_vmcs(vcpu->arch.vmcs);   in nested_vmexit_handler()
    [all …]
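
nested.c latches whether L1 has executed VMXON in vcpu->arch.nested.vmxon and gates the other VMX-instruction exit handlers on it. A small sketch of that permission gate; the #UD injection is only a placeholder, and the real check also considers more than this flag.

    #include <stdbool.h>

    struct nested_state_sketch {
        bool vmxon;            /* set by the VMXON exit handler, cleared by VMXOFF */
        bool in_l2_guest;
    };

    static void inject_ud_sketch(void) { /* would queue #UD for the guest */ }

    /* Any other VMX instruction from L1 is only legal after VMXON. */
    static bool check_vmx_permission_sketch(const struct nested_state_sketch *nested)
    {
        if (!nested->vmxon) {
            inject_ud_sketch();
            return false;
        }
        return true;
    }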
|
| virtual_cr.c |
    401   vcpu->arch.cr4_kl_enabled = true;   in vmx_write_cr4()
    404   vcpu->arch.cr4_kl_enabled = false;   in vmx_write_cr4()
    522   struct run_context *ctx = &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;   in vcpu_get_cr0()
    539   return vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.cr2;   in vcpu_get_cr2()
    544   vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx.cr2 = val;   in vcpu_set_cr2()
    550   struct run_context *ctx = &vcpu->arch.contexts[vcpu->arch.cur_context].run_ctx;   in vcpu_get_cr4()
    572   exit_qual = vcpu->arch.exit_qualification;   in cr_access_vmexit_handler()
|
| vmx_io.c |
     71   int32_t cur_context_idx = vcpu->arch.cur_context;   in pio_instr_vmexit_handler()
     75   exit_qual = vcpu->arch.exit_qualification;   in pio_instr_vmexit_handler()
    108   exit_qual = vcpu->arch.exit_qualification;   in ept_violation_vmexit_handler()
    117   if (vcpu->arch.cur_context == NORMAL_WORLD) {   in ept_violation_vmexit_handler()
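
pio_instr_vmexit_handler() reads vcpu->arch.exit_qualification and decodes it into port, access size and direction. A sketch of that decode following the I/O-instruction exit-qualification layout in the Intel SDM (bits 2:0 = size - 1, bit 3 = direction, bits 31:16 = port); the struct name and fields here are illustrative.

    #include <stdbool.h>
    #include <stdint.h>

    struct pio_request_sketch {
        uint16_t port;     /* bits 31:16 of the exit qualification */
        uint32_t size;     /* bits 2:0 hold (access size - 1)      */
        bool is_in;        /* bit 3: 0 = OUT, 1 = IN               */
    };

    static struct pio_request_sketch decode_pio_exit_qual_sketch(uint64_t exit_qual)
    {
        struct pio_request_sketch req;

        req.size  = (uint32_t)(exit_qual & 0x7UL) + 1U;
        req.is_in = ((exit_qual & (1UL << 3)) != 0UL);
        req.port  = (uint16_t)(exit_qual >> 16);

        return req;
    }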
|
| vmtrr.c |
     48   return container_of(container_of(vmtrr, struct acrn_vcpu_arch, vmtrr), struct acrn_vcpu, arch);   in vmtrr2vcpu()
     96   struct acrn_vmtrr *vmtrr = &vcpu->arch.vmtrr;   in init_vmtrr()
    207   struct acrn_vmtrr *vmtrr = &vcpu->arch.vmtrr;   in write_vmtrr()
    252   const struct acrn_vmtrr *vmtrr = &vcpu->arch.vmtrr;   in read_vmtrr()
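
vmtrr2vcpu() walks back from the embedded acrn_vmtrr to its owning vCPU with two nested container_of() steps (vmtrr -> acrn_vcpu_arch -> acrn_vcpu). A standalone sketch of the idiom with reduced structures; container_of here is the usual offsetof-based macro rather than ACRN's exact definition.

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct acrn_vmtrr_sketch { int cap; };

    struct vcpu_arch_mtrr_sketch {
        int cur_context;
        struct acrn_vmtrr_sketch vmtrr;
    };

    struct vcpu_mtrr_sketch {
        int vcpu_id;
        struct vcpu_arch_mtrr_sketch arch;
    };

    /* vmtrr is embedded in arch, which is embedded in the vcpu: walk out twice. */
    static struct vcpu_mtrr_sketch *vmtrr2vcpu_sketch(struct acrn_vmtrr_sketch *vmtrr)
    {
        struct vcpu_arch_mtrr_sketch *arch =
            container_of(vmtrr, struct vcpu_arch_mtrr_sketch, vmtrr);

        return container_of(arch, struct vcpu_mtrr_sketch, arch);
    }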
|
| vlapic.c |
    515   bitmap_set_lock(ACRN_REQUEST_EVENT, &vcpu->arch.pending_req);   in apicv_advanced_accept_intr()
   1099   target_vcpu->arch.nr_sipi = 1U;   in vlapic_process_init_sipi()
   1104   (target_vcpu->arch.nr_sipi != 0U)) {   in vlapic_process_init_sipi()
   1111   target_vcpu->arch.nr_sipi--;   in vlapic_process_init_sipi()
   1112   if (target_vcpu->arch.nr_sipi <= 0U) {   in vlapic_process_init_sipi()
   1699   vcpu->arch.lapic_pt_enabled = true;   in vlapic_set_apicbase()
   2330   return vcpu->arch.pending_req != 0UL;   in apicv_basic_has_pending_delivery_intr()
   2366   struct lapic_reg *irrptr = &(vcpu->arch.vlapic.apic_page.irr[0]);   in vlapic_clear_pending_intr()
   2405   qual = vcpu->arch.exit_qualification;   in apic_access_vmexit_handler()
   2465   vector = (uint32_t)(vcpu->arch.exit_qualification & 0xFFUL);   in veoi_vmexit_handler()
    [all …]
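
vlapic_process_init_sipi() implements the INIT-SIPI start-up handshake by arming target_vcpu->arch.nr_sipi on INIT and decrementing it per SIPI, launching the AP once it reaches zero. A compressed sketch of that state machine; the launch call and the waiting_for_sipi flag are placeholders for the real vCPU state handling.

    #include <stdbool.h>
    #include <stdint.h>

    struct sipi_state_sketch {
        bool waiting_for_sipi;
        uint32_t nr_sipi;
    };

    static void launch_ap_sketch(uint32_t vector) { (void)vector; }

    static void handle_init_sketch(struct sipi_state_sketch *s)
    {
        s->waiting_for_sipi = true;
        s->nr_sipi = 1U;               /* one SIPI is enough to start the AP */
    }

    static void handle_sipi_sketch(struct sipi_state_sketch *s, uint32_t vector)
    {
        if (s->waiting_for_sipi && (s->nr_sipi != 0U)) {
            s->nr_sipi--;
            if (s->nr_sipi == 0U) {
                launch_ap_sketch(vector);
                s->waiting_for_sipi = false;
            }
        }
    }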
|
| vcat.c |
    421   vcpu->arch.msr_area.guest[vcpu->arch.msr_area.index_of_pqr_assoc].value = clos2pqr_msr(pclosid);   in write_vclosid()
|
| optee.c |
     97   (uint32_t)(vcpu->arch.pid.control.bits.nv));   in tee_switch_to_ree()
|
| ept.c |
    417   if ((vcpu != NULL) && (vcpu->arch.cur_context == SECURE_WORLD)) {   in get_eptp()
|
| vept.c |
    396   uint64_t guest_eptp = vcpu->arch.nested.current_vvmcs->vmcs12.ept_pointer;   in handle_l2_ept_violation()
|
| instr_emul.c |
   2394   vcpu->arch.inst_len = emul_ctxt->vie.num_processed;   in decode_instruction()
   2471   if (vcpu->arch.emulating_lock) {   in emulate_instruction()
|
| vcpuid.c |
    865   *ebx &= ~(vcpu->arch.cr4_kl_enabled ? 0U : CPUID_EBX_KL_AES_EN);   in guest_cpuid_19h()
|
| /hypervisor/ |
| Makefile |
    203   HW_S_SRCS += arch/x86/idt.S
    206   HW_C_SRCS += arch/x86/cpu.c
    210   HW_C_SRCS += arch/x86/mmu.c
    215   HW_C_SRCS += arch/x86/vtd.c
    216   HW_C_SRCS += arch/x86/gdt.c
    217   HW_C_SRCS += arch/x86/nmi.c
    219   HW_C_SRCS += arch/x86/irq.c
    220   HW_C_SRCS += arch/x86/tsc.c
    223   HW_C_SRCS += arch/x86/vmx.c
    225   HW_C_SRCS += arch/x86/pm.c
    [all …]
|
| /hypervisor/include/arch/x86/asm/guest/ |
| vcpu.h |
    310   struct acrn_vcpu_arch arch;   member
    347   return vcpu->arch.cpu_mode;   in get_vcpu_mode()
    353   (vcpu)->arch.inst_len = 0U;   in vcpu_retain_rip()
    358   return &(vcpu->arch.vlapic);   in vcpu_vlapic()
    372   return &(vcpu->arch.pid);   in get_pi_desc()
    726   return vcpu->arch.lapic_pt_enabled;   in is_lapic_pt_enabled()
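
The vcpu.h hits are small static-inline helpers that keep vcpu->arch field access behind accessors. A sketch of two of them over trimmed stand-in types; the field names match the hits, everything else is omitted.

    #include <stdint.h>

    struct vlapic_sketch { int dummy; };

    struct vcpu_arch_hdr_sketch {
        uint32_t inst_len;                  /* length of the instruction to skip */
        struct vlapic_sketch vlapic;
        int lapic_pt_enabled;
    };

    struct vcpu_hdr_sketch {
        struct vcpu_arch_hdr_sketch arch;   /* the "arch" member the hits refer to */
    };

    /* Report a zero instruction length so the post-exit RIP advance leaves RIP
     * unchanged and the instruction is re-executed on the next VM entry. */
    static inline void vcpu_retain_rip_sketch(struct vcpu_hdr_sketch *vcpu)
    {
        vcpu->arch.inst_len = 0U;
    }

    static inline struct vlapic_sketch *vcpu_vlapic_sketch(struct vcpu_hdr_sketch *vcpu)
    {
        return &vcpu->arch.vlapic;
    }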
|
| /hypervisor/common/ |
| hv_main.c |
     57   TRACE_2L(TRACE_VM_EXIT, vcpu->arch.exit_reason, vcpu_get_rip(vcpu));   in vcpu_thread()
     68   " %d, ret = %d!", vcpu->arch.exit_reason, ret);   in vcpu_thread()
|
| trusty_hypercall.c |
     30   int32_t next_world_id = !(vcpu->arch.cur_context);   in hcall_world_switch()
     61   && (vcpu->arch.cur_context == NORMAL_WORLD)) {   in hcall_initialize_trusty()
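
hcall_world_switch() picks the target world by logically negating cur_context, which works because there are exactly two worlds (NORMAL_WORLD == 0, SECURE_WORLD == 1). A trivial sketch of that selection:

    #define NORMAL_WORLD  0
    #define SECURE_WORLD  1

    /* With exactly two worlds, "the other one" is the logical complement. */
    static int next_world_id_sketch(int cur_context)
    {
        return !cur_context;   /* NORMAL_WORLD (0) -> 1, SECURE_WORLD (1) -> 0 */
    }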
|
| /hypervisor/debug/ |
| dump.c |
     80   vcpu->arch.cur_context);   in dump_guest_reg()
|
| profiling.c |
    131   (void)memcpy_s(ss->vmexit_msr_list, size, vcpu->arch.msr_area.host, size);   in profiling_enable_pmu()
    190   exec_vmwrite64(VMX_EXIT_MSR_LOAD_ADDR_FULL, hva2hpa(vcpu->arch.msr_area.host));   in profiling_disable_pmu()
   1314   exit_reason = vcpu->arch.exit_reason & 0xFFFFUL;   in profiling_pre_vmexit_handler()
|
| /hypervisor/boot/guest/ |
| bzimage_loader.c |
    385   if (vcpu->arch.cpu_mode == CPU_MODE_64BIT) {   in load_bzimage()
|