/qemu/target/i386/hvf/
x86hvf.c
    101  macvm_set_cr4(cs->accel->fd, env->cr[4]);  in hvf_put_segments()
    102  macvm_set_cr0(cs->accel->fd, env->cr[0]);  in hvf_put_segments()
    261  wreg(cs->accel->fd, HV_X86_RIP, env->eip);  in hvf_put_registers()
    263  wreg(cs->accel->fd, HV_X86_XCR0, env->xcr0);  in hvf_put_registers()
    271  wreg(cs->accel->fd, HV_X86_DR0, env->dr[0]);  in hvf_put_registers()
    272  wreg(cs->accel->fd, HV_X86_DR1, env->dr[1]);  in hvf_put_registers()
    273  wreg(cs->accel->fd, HV_X86_DR2, env->dr[2]);  in hvf_put_registers()
    274  wreg(cs->accel->fd, HV_X86_DR3, env->dr[3]);  in hvf_put_registers()
    275  wreg(cs->accel->fd, HV_X86_DR4, env->dr[4]);  in hvf_put_registers()
    306  env->eip = rreg(cs->accel->fd, HV_X86_RIP);  in hvf_get_registers()
    [all …]
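Every one of these accesses funnels guest state through the Hypervisor.framework vCPU handle kept in cs->accel->fd. A minimal sketch of the put-side pattern, assuming it sits inside x86hvf.c with that file's existing includes and QEMU's wreg() wrapper from target/i386/hvf/vmx.h (the helper name put_debug_regs is hypothetical):

    /* Push the guest debug registers into the HVF vCPU, mirroring the
     * hvf_put_registers() hits above. */
    static void put_debug_regs(CPUState *cs)
    {
        CPUX86State *env = &X86_CPU(cs)->env;

        wreg(cs->accel->fd, HV_X86_DR0, env->dr[0]);
        wreg(cs->accel->fd, HV_X86_DR1, env->dr[1]);
        wreg(cs->accel->fd, HV_X86_DR2, env->dr[2]);
        wreg(cs->accel->fd, HV_X86_DR3, env->dr[3]);
    }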
hvf.c
    88   wreg(cpu->accel->fd, HV_X86_TPR, tpr);  in vmx_update_tpr()
    100  int tpr = rreg(cpu->accel->fd, HV_X86_TPR) >> 4;  in update_apic_tpr()
    218  hv_vcpu_interrupt(&cpu->accel->fd, 1);  in hvf_kick_vcpu_thread()
    286  wvmcs(cpu->accel->fd, VMCS_PIN_BASED_CTLS,  in hvf_arch_init_vcpu()
    309  wvmcs(cpu->accel->fd, VMCS_ENTRY_CTLS,  in hvf_arch_init_vcpu()
    313  wvmcs(cpu->accel->fd, VMCS_TPR_THRESHOLD, 0);  in hvf_arch_init_vcpu()
    449  if (cpu->accel->dirty) {  in hvf_vcpu_exec()
    451  cpu->accel->dirty = false;  in hvf_vcpu_exec()
    477  rip = rreg(cpu->accel->fd, HV_X86_RIP);  in hvf_vcpu_exec()
    689  wreg(cpu->accel->fd, HV_X86_RAX, 0);  in hvf_vcpu_exec()
    [all …]
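The TPR hits show both directions of the task-priority sync between the vCPU and the emulated APIC. A sketch of the read-back half, assuming rreg() from vmx.h and cpu_set_apic_tpr() from QEMU's APIC code (the helper name sync_tpr_to_apic is hypothetical):

    /* Fold the hardware task-priority register back into the emulated
     * APIC, as update_apic_tpr() does in the hits above. The low four
     * bits of HV_X86_TPR are sub-priority, hence the shift. */
    static void sync_tpr_to_apic(CPUState *cpu)
    {
        X86CPU *x86_cpu = X86_CPU(cpu);
        int tpr = rreg(cpu->accel->fd, HV_X86_TPR) >> 4;

        cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
    }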
x86.c
    64   base = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_BASE);  in x86_read_segment_descriptor()
    65   limit = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_LIMIT);  in x86_read_segment_descriptor()
    67   base = rvmcs(cpu->accel->fd, VMCS_GUEST_LDTR_BASE);  in x86_read_segment_descriptor()
    68   limit = rvmcs(cpu->accel->fd, VMCS_GUEST_LDTR_LIMIT);  in x86_read_segment_descriptor()
    87   base = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_BASE);  in x86_write_segment_descriptor()
    88   limit = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_LIMIT);  in x86_write_segment_descriptor()
    90   base = rvmcs(cpu->accel->fd, VMCS_GUEST_LDTR_BASE);  in x86_write_segment_descriptor()
    91   limit = rvmcs(cpu->accel->fd, VMCS_GUEST_LDTR_LIMIT);  in x86_write_segment_descriptor()
    120  uint64_t cr0 = rvmcs(cpu->accel->fd, VMCS_GUEST_CR0);  in x86_is_protected()
    151  uint64_t cr0 = rvmcs(cpu->accel->fd, VMCS_GUEST_CR0);  in x86_is_paging_mode()
    [all …]
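x86_read_segment_descriptor() and x86_write_segment_descriptor() both pick the descriptor table out of the VMCS according to the selector's table-indicator bit. A sketch of that selection, recombining only the rvmcs() calls and field constants visible in the hits (the helper name get_dt_base_limit is hypothetical):

    /* Bit 2 of an x86 selector (the TI bit) chooses the LDT over the
     * GDT; the RPL lives in bits 0-1. */
    static void get_dt_base_limit(CPUState *cpu, uint16_t sel,
                                  uint64_t *base, uint32_t *limit)
    {
        if (sel & 0x4) {
            *base = rvmcs(cpu->accel->fd, VMCS_GUEST_LDTR_BASE);
            *limit = rvmcs(cpu->accel->fd, VMCS_GUEST_LDTR_LIMIT);
        } else {
            *base = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_BASE);
            *limit = rvmcs(cpu->accel->fd, VMCS_GUEST_GDTR_LIMIT);
        }
    }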
x86_descr.c
    50  return (uint32_t)rvmcs(cpu->accel->fd, vmx_segment_fields[seg].limit);  in vmx_read_segment_limit()
    60  return rvmcs(cpu->accel->fd, vmx_segment_fields[seg].base);  in vmx_read_segment_base()
    66  sel.sel = rvmcs(cpu->accel->fd, vmx_segment_fields[seg].selector);  in vmx_read_segment_selector()
    77  desc->sel = rvmcs(cpu->accel->fd, vmx_segment_fields[seg].selector);  in vmx_read_segment_descriptor()
    78  desc->base = rvmcs(cpu->accel->fd, vmx_segment_fields[seg].base);  in vmx_read_segment_descriptor()
    79  desc->limit = rvmcs(cpu->accel->fd, vmx_segment_fields[seg].limit);  in vmx_read_segment_descriptor()
    80  desc->ar = rvmcs(cpu->accel->fd, vmx_segment_fields[seg].ar_bytes);  in vmx_read_segment_descriptor()
    87  wvmcs(cpu->accel->fd, sf->base, desc->base);  in vmx_write_segment_descriptor()
    88  wvmcs(cpu->accel->fd, sf->limit, desc->limit);  in vmx_write_segment_descriptor()
    89  wvmcs(cpu->accel->fd, sf->selector, desc->sel);  in vmx_write_segment_descriptor()
    [all …]
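The vmx_segment_fields[] table indexed throughout these hits maps each segment register to its four VMCS field encodings. A sketch of its shape, assuming the CS/DS field constants from the HVF vmcs.h header (entries abridged to two; the exact table in x86_descr.c covers all segment registers):

    /* One set of VMCS field encodings per segment register, so the
     * read/write helpers above can be generic over the segment. */
    static const struct vmx_segment_field {
        int selector;
        int base;
        int limit;
        int ar_bytes;
    } vmx_segment_fields[] = {
        [R_CS] = { VMCS_GUEST_CS_SELECTOR, VMCS_GUEST_CS_BASE,
                   VMCS_GUEST_CS_LIMIT, VMCS_GUEST_CS_ACCESS_RIGHTS },
        [R_DS] = { VMCS_GUEST_DS_SELECTOR, VMCS_GUEST_DS_BASE,
                   VMCS_GUEST_DS_LIMIT, VMCS_GUEST_DS_ACCESS_RIGHTS },
    };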
vmx.h
    182  wreg(cpu->accel->fd, HV_X86_RIP, rip);  in macvm_set_rip()
    186  val = rvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY);  in macvm_set_rip()
    190  wvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY,  in macvm_set_rip()
    202  uint32_t gi = (uint32_t) rvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY);  in vmx_clear_nmi_blocking()
    204  wvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY, gi);  in vmx_clear_nmi_blocking()
    213  uint32_t gi = (uint32_t)rvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY);  in vmx_set_nmi_blocking()
    215  wvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY, gi);  in vmx_set_nmi_blocking()
    221  val = rvmcs(cpu->accel->fd, VMCS_PRI_PROC_BASED_CTLS);  in vmx_set_nmi_window_exiting()
    222  wvmcs(cpu->accel->fd, VMCS_PRI_PROC_BASED_CTLS, val |  in vmx_set_nmi_window_exiting()
    231  val = rvmcs(cpu->accel->fd, VMCS_PRI_PROC_BASED_CTLS);  in vmx_clear_nmi_window_exiting()
    [all …]
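vmx_set_nmi_blocking() and vmx_clear_nmi_blocking() are read-modify-write cycles on the guest interruptibility field. A sketch of the clear side, assuming a VMCS_INTERRUPTIBILITY_NMI_BLOCKING bit definition from the HVF vmcs.h header (the function name carries a _sketch suffix to mark it as illustrative):

    /* Clear the NMI-blocking bit in the guest interruptibility state
     * with a read-modify-write, as in the hits at lines 202-204. */
    static void clear_nmi_blocking_sketch(CPUState *cpu)
    {
        uint32_t gi = (uint32_t)rvmcs(cpu->accel->fd,
                                      VMCS_GUEST_INTERRUPTIBILITY);

        gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
        wvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
    }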
x86_emu.c
    840   hv_vcpu_invalidate_tlb(cs->accel->fd);  in simulate_wrmsr()
    1478  env->eip = rreg(cs->accel->fd, HV_X86_RIP);  in load_regs()
    1487  wreg(cs->accel->fd, HV_X86_RAX, RAX(env));  in store_regs()
    1488  wreg(cs->accel->fd, HV_X86_RBX, RBX(env));  in store_regs()
    1489  wreg(cs->accel->fd, HV_X86_RCX, RCX(env));  in store_regs()
    1490  wreg(cs->accel->fd, HV_X86_RDX, RDX(env));  in store_regs()
    1491  wreg(cs->accel->fd, HV_X86_RSI, RSI(env));  in store_regs()
    1492  wreg(cs->accel->fd, HV_X86_RDI, RDI(env));  in store_regs()
    1493  wreg(cs->accel->fd, HV_X86_RBP, RBP(env));  in store_regs()
    1494  wreg(cs->accel->fd, HV_X86_RSP, RSP(env));  in store_regs()
    [all …]
x86_task.c
    64   wvmcs(cpu->accel->fd, VMCS_GUEST_CR3, tss->cr3);  in load_state_from_tss32()
    113  uint64_t rip = rreg(cpu->accel->fd, HV_X86_RIP);  in vmx_handle_task_switch()
    117  int ins_len = rvmcs(cpu->accel->fd, VMCS_EXIT_INSTRUCTION_LENGTH);  in vmx_handle_task_switch()
    176  macvm_set_cr0(cpu->accel->fd, rvmcs(cpu->accel->fd, VMCS_GUEST_CR0) |  in vmx_handle_task_switch()
    183  hv_vcpu_invalidate_tlb(cpu->accel->fd);  in vmx_handle_task_switch()
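The line-176 hit is truncated, but its shape is a read-modify-write that forces a CR0 bit on during the task switch; on x86 a task switch sets CR0.TS. A sketch under that assumption, using CR0_TS_MASK from target/i386/cpu.h (the mask actually OR'd in at line 176 is not visible here, and the helper name is hypothetical):

    /* Force the Task Switched bit on in guest CR0, going through
     * macvm_set_cr0() so the usual CR0 fixups still apply. */
    static void set_cr0_ts_sketch(CPUState *cpu)
    {
        uint64_t cr0 = rvmcs(cpu->accel->fd, VMCS_GUEST_CR0);

        macvm_set_cr0(cpu->accel->fd, cr0 | CR0_TS_MASK);
    }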
/qemu/accel/
accel-system.c
    33  int accel_init_machine(AccelState *accel, MachineState *ms)  in accel_init_machine() argument
    35  AccelClass *acc = ACCEL_GET_CLASS(accel);  in accel_init_machine()
    37  ms->accelerator = accel;  in accel_init_machine()
    43  object_unref(OBJECT(accel));  in accel_init_machine()
    57  AccelState *accel = ms->accelerator;  in accel_setup_post() local
    58  AccelClass *acc = ACCEL_GET_CLASS(accel);  in accel_setup_post()
    60  acc->setup_post(ms, accel);  in accel_setup_post()
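These hits outline the machine-init handshake: the accelerator object is attached to the MachineState, the class-level init hook runs, and the object is unreferenced on failure. A sketch of that flow, assuming AccelClass's init_machine hook (error handling abridged; the real function also maintains the class's allowed flag):

    /* Sketch: bind an accelerator to the machine and run its
     * per-class init hook, dropping the reference if init fails. */
    static int init_machine_sketch(AccelState *accel, MachineState *ms)
    {
        AccelClass *acc = ACCEL_GET_CLASS(accel);
        int ret;

        ms->accelerator = accel;
        ret = acc->init_machine(ms);
        if (ret < 0) {
            ms->accelerator = NULL;
            object_unref(OBJECT(accel));
        }
        return ret;
    }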
accel-user.c
    15  static AccelState *accel;  in current_accel() local
    17  if (!accel) {  in current_accel()
    21  accel = ACCEL(object_new_with_class(OBJECT_CLASS(ac)));  in current_accel()
    23  return accel;  in current_accel()
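In user-mode emulation the accelerator is a lazily created singleton, and only TCG is available. A sketch of current_accel() along the lines of the hits, assuming QEMU's accel_find() lookup helper resolves the class (the "tcg" lookup is an assumption consistent with user mode):

    AccelState *current_accel(void)
    {
        static AccelState *accel;

        if (!accel) {
            /* User-mode emulation only ever runs on TCG. */
            AccelClass *ac = accel_find("tcg");

            g_assert(ac != NULL);
            accel = ACCEL(object_new_with_class(OBJECT_CLASS(ac)));
        }
        return accel;
    }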
accel-target.c
    125  AccelState *accel = current_accel();  in accel_cpu_common_realize() local
    126  AccelClass *acc = ACCEL_GET_CLASS(accel);  in accel_cpu_common_realize()
    144  AccelState *accel = current_accel();  in accel_cpu_common_unrealize() local
    145  AccelClass *acc = ACCEL_GET_CLASS(accel);  in accel_cpu_common_unrealize()
    155  AccelState *accel = current_accel();  in accel_supported_gdbstub_sstep_flags() local
    156  AccelClass *acc = ACCEL_GET_CLASS(accel);  in accel_supported_gdbstub_sstep_flags()
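All three functions follow the same dispatch idiom: fetch the current AccelState, resolve its class, and call an optional per-accelerator hook. A sketch of that idiom only; the hook name cpu_common_realize is hypothetical, standing in for whichever AccelClass member each caller actually uses:

    /* Generic accel dispatch: resolve the class once, then call the
     * hook if this accelerator provides one. */
    static void dispatch_sketch(CPUState *cpu, Error **errp)
    {
        AccelState *accel = current_accel();
        AccelClass *acc = ACCEL_GET_CLASS(accel);

        if (acc->cpu_common_realize) {
            acc->cpu_common_realize(cpu, errp);
        }
    }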
meson.build
    1  specific_ss.add(files('accel-target.c'))
    2  system_ss.add(files('accel-system.c', 'accel-blocker.c'))
    3  user_ss.add(files('accel-user.c'))
/qemu/target/arm/hvf/
hvf.c
    586   if (cpu->accel->guest_debug_enabled) {  in hvf_get_registers()
    723   if (cpu->accel->guest_debug_enabled) {  in hvf_put_registers()
    812   if (cpu->accel->dirty) {  in flush_cpu_state()
    814   cpu->accel->dirty = false;  in flush_cpu_state()
    1067  hv_vcpus_exit(&cpu->accel->fd, 1);  in hvf_kick_vcpu_thread()
    1851  if (!cpu->accel->vtimer_masked) {  in hvf_sync_vtimer()
    1866  cpu->accel->vtimer_masked = false;  in hvf_sync_vtimer()
    1875  hv_vcpu_exit_t *hvf_exit = cpu->accel->exit;  in hvf_vcpu_exec()
    1891  assert_hvf_ok(hv_vcpu_run(cpu->accel->fd));  in hvf_vcpu_exec()
    1906  cpu->accel->vtimer_masked = true;  in hvf_vcpu_exec()
    [all …]
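The line-1067 hit shows how a vCPU blocked in hv_vcpu_run() is forced back out on ARM: hv_vcpus_exit() takes an array of handles plus a count, so the single fd in cpu->accel->fd is passed by address (compare hv_vcpu_interrupt() in the x86 hvf.c hits above). A sketch, wrapped in QEMU's assert_hvf_ok() checker as at line 1891:

    /* Kick one vCPU out of hv_vcpu_run(); hv_vcpus_exit() operates on
     * an array of vCPU handles, here of length one. */
    static void kick_vcpu_sketch(CPUState *cpu)
    {
        assert_hvf_ok(hv_vcpus_exit(&cpu->accel->fd, 1));
    }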
/qemu/accel/hvf/
hvf-accel-ops.c
    204  if (!cpu->accel->dirty) {  in do_hvf_cpu_synchronize_state()
    206  cpu->accel->dirty = true;  in do_hvf_cpu_synchronize_state()
    212  if (!cpu->accel->dirty) {  in hvf_cpu_synchronize_state()
    221  cpu->accel->dirty = true;  in do_hvf_cpu_synchronize_set_dirty()
    381  hv_return_t ret = hv_vcpu_destroy(cpu->accel->fd);  in hvf_vcpu_destroy()
    385  g_free(cpu->accel);  in hvf_vcpu_destroy()
    386  cpu->accel = NULL;  in hvf_vcpu_destroy()
    393  cpu->accel = g_new0(AccelCPUState, 1);  in hvf_init_vcpu()
    406  r = hv_vcpu_create(&cpu->accel->fd,  in hvf_init_vcpu()
    411  cpu->accel->dirty = true;  in hvf_init_vcpu()
    [all …]
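The dirty flag implements lazy state synchronization: registers are read back from the hypervisor only when QEMU actually needs them, and the flag is set so they are pushed back before the next guest entry. A sketch of the pull side, assuming hvf_get_registers() as the per-arch read-back hook seen in the x86hvf.c and arm hvf.c hits:

    /* Pull guest state out of HVF once, then mark it dirty so the
     * run loop writes it back before re-entering the guest. */
    static void synchronize_state_sketch(CPUState *cpu)
    {
        if (!cpu->accel->dirty) {
            hvf_get_registers(cpu);
            cpu->accel->dirty = true;
        }
    }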
meson.build
    4  'hvf-accel-ops.c',
/qemu/docs/devel/migration/
qpl-compression.rst
    22   | MultiFD Thread | |accel-config tool |
    90   For ``accel-config`` installation, please refer to `accel-config installation
    97   #accel-config config-engine iax1/engine1.0 -g 0
    98   #accel-config config-engine iax1/engine1.1 -g 0
    99   #accel-config config-engine iax1/engine1.2 -g 0
    100  #accel-config config-engine iax1/engine1.3 -g 0
    101  #accel-config config-engine iax1/engine1.4 -g 0
    102  #accel-config config-engine iax1/engine1.5 -g 0
    103  #accel-config config-engine iax1/engine1.6 -g 0
    106  #accel-config enable-device iax1
    [all …]
/qemu/accel/tcg/
meson.build
    32  'tcg-accel-ops.c',
    33  'tcg-accel-ops-mttcg.c',
    34  'tcg-accel-ops-icount.c',
    35  'tcg-accel-ops-rr.c',
/qemu/target/i386/nvmm/
nvmm-all.c
    84   AccelCPUState *qcpu = cpu->accel;  in nvmm_set_registers()
    221  AccelCPUState *qcpu = cpu->accel;  in nvmm_get_registers()
    344  AccelCPUState *qcpu = cpu->accel;  in nvmm_can_take_int()
    730  if (cpu->accel->dirty) {  in nvmm_vcpu_loop()
    830  cpu->accel->dirty = true;  in do_nvmm_cpu_synchronize_state()
    837  cpu->accel->dirty = false;  in do_nvmm_cpu_synchronize_post_reset()
    844  cpu->accel->dirty = false;  in do_nvmm_cpu_synchronize_post_init()
    850  cpu->accel->dirty = true;  in do_nvmm_cpu_synchronize_pre_loadvm()
    855  if (!cpu->accel->dirty) {  in nvmm_cpu_synchronize_state()
    986  cpu->accel = qcpu;  in nvmm_init_vcpu()
    [all …]
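NVMM uses the same dirty-flag protocol as HVF and WHPX; the line-730 hit is the push half, flushing QEMU's copy of the registers into the hypervisor before re-entering the guest. A sketch of that loop entry, assuming nvmm_set_registers() takes the CPUState as in the line-84 hit (the wrapper function name is hypothetical):

    /* Before running the vCPU, write back any state QEMU modified
     * while the guest was stopped. */
    static void flush_before_run_sketch(CPUState *cpu)
    {
        if (cpu->accel->dirty) {
            nvmm_set_registers(cpu);
            cpu->accel->dirty = false;
        }
    }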
meson.build
    4  'nvmm-accel-ops.c',
/qemu/system/
runstate-hmp-cmds.c
    46  AccelState *accel = current_accel();  in hmp_one_insn_per_tb() local
    49  if (!object_property_find(OBJECT(accel), "one-insn-per-tb")) {  in hmp_one_insn_per_tb()
    64  object_property_set_bool(OBJECT(accel), "one-insn-per-tb",  in hmp_one_insn_per_tb()
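The HMP command probes for the QOM property before setting it, since not every accelerator exposes one-insn-per-tb. A sketch of that guard, using the object_property_find()/object_property_set_bool() calls visible in the hits plus error_setg() from qapi/error.h (the wrapper name is hypothetical):

    /* Toggle one-insn-per-tb on the current accelerator, failing
     * cleanly if the accelerator has no such property. */
    static void set_one_insn_per_tb_sketch(bool enable, Error **errp)
    {
        AccelState *accel = current_accel();

        if (!object_property_find(OBJECT(accel), "one-insn-per-tb")) {
            error_setg(errp, "current accelerator lacks one-insn-per-tb");
            return;
        }
        object_property_set_bool(OBJECT(accel), "one-insn-per-tb",
                                 enable, errp);
    }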
/qemu/include/qemu/
accel.h
    42  void (*setup_post)(MachineState *ms, AccelState *accel);
    82  int accel_init_machine(AccelState *accel, MachineState *ms);
/qemu/target/i386/whpx/
whpx-all.c
    843   cpu->accel->dirty = false;  in whpx_emu_setreg_callback()
    1398  if (cpu->accel->dirty) {  in whpx_vcpu_get_pc()
    1717  if (cpu->accel->dirty) {  in whpx_vcpu_run()
    2067  if (!cpu->accel->dirty) {  in do_whpx_cpu_synchronize_state()
    2069  cpu->accel->dirty = true;  in do_whpx_cpu_synchronize_state()
    2077  cpu->accel->dirty = false;  in do_whpx_cpu_synchronize_post_reset()
    2084  cpu->accel->dirty = false;  in do_whpx_cpu_synchronize_post_init()
    2090  cpu->accel->dirty = true;  in do_whpx_cpu_synchronize_pre_loadvm()
    2099  if (!cpu->accel->dirty) {  in whpx_cpu_synchronize_state()
    2240  cpu->accel = vcpu;  in whpx_init_vcpu()
    [all …]
meson.build
    4  'whpx-accel-ops.c',
/qemu/bsd-user/
main.c
    456  AccelState *accel = current_accel();  in main() local
    457  AccelClass *ac = ACCEL_GET_CLASS(accel);  in main()
    460  object_property_set_bool(OBJECT(accel), "one-insn-per-tb",  in main()
    462  object_property_set_int(OBJECT(accel), "tb-size",  in main()
/qemu/docs/system/ppc/
powernv.rst
    62   powernv. kvm-pr in theory could be used as a valid accel option but
    66   to use accel=kvm, the powernv machine will throw an error informing that
    68   any other KVM alternative) is usable as KVM accel for this machine.
    78   -accel tcg,thread=single \
    114  $ qemu-system-ppc64 -m 2G -machine powernv9 -smp 2,cores=2,threads=1 -accel tcg,thread=single \
/qemu/accel/kvm/
meson.build
    4  'kvm-accel-ops.c',