
Searched refs:kvm_state (Results 1 – 25 of 45) sorted by relevance


/qemu/target/s390x/kvm/
kvm.c 165 return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr); in kvm_s390_query_mem_limit()
189 return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); in kvm_s390_set_mem_limit()
221 rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); in kvm_s390_cmma_reset()
238 rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); in kvm_s390_enable_cmma()
715 r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr); in kvm_s390_get_clock()
735 r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr); in kvm_s390_get_clock_ext()
751 r = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); in kvm_s390_set_clock()
2002 return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick); in kvm_s390_assign_subch_ioeventfd()
2021 if (CPU(cpu)->kvm_state == NULL) { in kvm_s390_set_cpu_state()
2210 if (!kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL, in configure_cpu_subfunc()
[all …]
pv.c 42 rc = kvm_vm_ioctl(kvm_state, KVM_S390_PV_COMMAND, &pv_cmd); in __s390_pv_cmd()
137 !kvm_check_extension(kvm_state, KVM_CAP_S390_PROTECTED_ASYNC_DISABLE)) { in s390_pv_vm_try_disable_async()
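The s390x hits above share one shape: fill a struct kvm_device_attr and hand it to the VM through the global kvm_state. Below is a minimal sketch of what the kvm.c 165 hit (kvm_s390_query_mem_limit) boils down to, written against a raw VM fd since kvm_vm_ioctl(kvm_state, ...) is ultimately an ioctl on kvm_state->vmfd; the KVM_S390_VM_MEM_* constants are the standard KVM UAPI names and are an assumption here, not visible in the snippet.

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Sketch only: query the s390 guest memory limit the way kvm_s390_query_mem_limit()
     * does, but on a raw VM fd instead of through kvm_vm_ioctl(kvm_state, ...). */
    static int query_mem_limit(int vm_fd, uint64_t *memory_limit)
    {
        struct kvm_device_attr attr = {
            .group = KVM_S390_VM_MEM_CTRL,       /* assumed UAPI group */
            .attr  = KVM_S390_VM_MEM_LIMIT_SIZE, /* assumed UAPI attribute */
            .addr  = (uint64_t)(uintptr_t)memory_limit,
        };

        return ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
    }
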
/qemu/hw/i386/kvm/
i8254.c 100 ret = kvm_vm_ioctl(kvm_state, KVM_GET_PIT2, &kpit); in kvm_pit_get()
161 ret = kvm_vm_ioctl(kvm_state, KVM_SET_PIT2, &kpit); in kvm_pit_put()
249 if (!kvm_check_extension(kvm_state, KVM_CAP_PIT_STATE2) || in kvm_pit_realizefn()
250 !kvm_check_extension(kvm_state, KVM_CAP_PIT2)) { in kvm_pit_realizefn()
254 ret = kvm_vm_ioctl(kvm_state, KVM_CREATE_PIT2, &config); in kvm_pit_realizefn()
264 if (kvm_check_extension(kvm_state, KVM_CAP_REINJECT_CONTROL)) { in kvm_pit_realizefn()
267 ret = kvm_vm_ioctl(kvm_state, KVM_REINJECT_CONTROL, &control); in kvm_pit_realizefn()
ioapic.c 24 KVMState *s = kvm_state; in kvm_pc_setup_irq_routing()
63 ret = kvm_vm_ioctl(kvm_state, KVM_GET_IRQCHIP, &chip); in kvm_ioapic_get()
96 ret = kvm_vm_ioctl(kvm_state, KVM_SET_IRQCHIP, &chip); in kvm_ioapic_put()
118 delivered = kvm_set_irq(kvm_state, s->kvm_gsi_base + irq, level); in kvm_ioapic_set_irq()
xen_overlay.c 81 return kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &xa); in xen_overlay_set_be_shinfo()
241 ret = kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_GET_ATTR, &xa); in xen_sync_long_mode()
261 ret = kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &xa); in xen_set_long_mode()
i8259.c 44 ret = kvm_vm_ioctl(kvm_state, KVM_GET_IRQCHIP, &chip); in kvm_pic_get()
97 ret = kvm_vm_ioctl(kvm_state, KVM_SET_IRQCHIP, &chip); in kvm_pic_put()
119 delivered = kvm_set_irq(kvm_state, irq, level); in kvm_pic_set_irq()
clock.c 106 ret = kvm_vm_ioctl(kvm_state, KVM_GET_CLOCK, &data); in kvm_update_clock()
169 int cap_clock_ctrl = kvm_check_extension(kvm_state, KVM_CAP_KVMCLOCK_CTRL); in kvmclock_vm_state_change()
190 ret = kvm_vm_ioctl(kvm_state, KVM_SET_CLOCK, &data); in kvmclock_vm_state_change()
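The hw/i386/kvm hits pair a capability probe with the actual VM ioctl: i8254.c checks KVM_CAP_PIT_STATE2/KVM_CAP_PIT2 before KVM_CREATE_PIT2, clock.c checks KVM_CAP_KVMCLOCK_CTRL around the clock updates. Here is a hedged sketch of that probe-then-ioctl shape for the clock case, on raw fds; gating KVM_GET_CLOCK on KVM_CAP_ADJUST_CLOCK is an assumption, not something the hits show.

    #include <errno.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Sketch: kvm_check_extension(kvm_state, ...) is KVM_CHECK_EXTENSION on the
     * /dev/kvm fd; kvm_vm_ioctl(kvm_state, KVM_GET_CLOCK, ...) targets the VM fd. */
    static int get_kvmclock(int kvm_fd, int vm_fd, struct kvm_clock_data *data)
    {
        if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ADJUST_CLOCK) <= 0) {
            return -ENOTSUP;   /* host kernel cannot report the guest clock */
        }
        return ioctl(vm_fd, KVM_GET_CLOCK, data);
    }
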
/qemu/hw/s390x/
s390-stattrib-kvm.c 25 kvm_check_extension(kvm_state, KVM_CAP_S390_CMMA_MIGRATION)) { in kvm_s390_stattrib_create()
53 r = kvm_vm_ioctl(kvm_state, KVM_S390_GET_CMMA_BITS, &clog); in kvm_s390_stattrib_read_helper()
121 r = kvm_vm_ioctl(kvm_state, KVM_S390_SET_CMMA_BITS, &clog); in kvm_s390_stattrib_synchronize()
131 r = kvm_vm_ioctl(kvm_state, KVM_S390_SET_CMMA_BITS, &clog); in kvm_s390_stattrib_synchronize()
151 r = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); in kvm_s390_stattrib_set_migrationmode()
s390-pci-kvm.c 47 rc = kvm_vm_ioctl(kvm_state, KVM_S390_ZPCI_OP, &args); in s390_pci_kvm_aif_enable()
76 rc = kvm_vm_ioctl(kvm_state, KVM_S390_ZPCI_OP, &args); in s390_pci_kvm_aif_disable()
s390-skeys-kvm.c 40 return kvm_vm_ioctl(kvm_state, KVM_S390_GET_SKEYS, &args); in kvm_s390_skeys_get()
52 return kvm_vm_ioctl(kvm_state, KVM_S390_SET_SKEYS, &args); in kvm_s390_skeys_set()
/qemu/accel/kvm/
kvm-all.c 83 KVMState *kvm_state; variable
234 KVMState *s = kvm_state; in kvm_get_free_memslots()
352 KVMState *s = kvm_state; in kvm_set_user_memory_region()
443 KVMState *s = kvm_state; in kvm_create_vcpu()
458 cpu->kvm_state = s; in kvm_create_vcpu()
482 KVMState *s = kvm_state; in do_kvm_destroy_vcpu()
527 KVMState *s = kvm_state; in kvm_init_vcpu()
973 KVMState *s = kvm_state; in kvm_physical_sync_dirty_bitmap()
1002 KVMState *s = kvm_state; in kvm_log_clear_one_slot()
2431 return kvm_state && kvm_state->kvm_dirty_ring_size; in kvm_dirty_ring_enabled()
[all …]
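accel/kvm/kvm-all.c is where the single global KVMState *kvm_state is defined (line 83) and where each vCPU gets a back-pointer to it (cpu->kvm_state = s, line 458), which is why target code uses kvm_state and cs->kvm_state interchangeably. A rough sketch of that relationship and of what the kvm_vm_ioctl() wrapper amounts to follows; only the two fd fields that also show up in the target/arm hits below are kept, and the real helper is variadic with tracing.

    #include <errno.h>
    #include <sys/ioctl.h>

    /* Sketch of the state the hits above revolve around; the real struct
     * KVMState carries many more members. */
    typedef struct KVMState {
        int fd;     /* /dev/kvm */
        int vmfd;   /* fd returned by KVM_CREATE_VM */
        /* ... */
    } KVMState;

    KVMState *kvm_state;   /* one global instance per QEMU process */

    /* Roughly what kvm_vm_ioctl(kvm_state, type, arg) boils down to. */
    static int vm_ioctl(KVMState *s, unsigned long type, void *arg)
    {
        int ret = ioctl(s->vmfd, type, arg);
        return ret < 0 ? -errno : ret;
    }
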
/qemu/target/i386/kvm/
kvm-cpu.c 25 eax = kvm_arch_get_supported_cpuid(cs->kvm_state, 0x80000008, 0, R_EAX); in kvm_set_guest_phys_bits()
79 kvm_arch_get_supported_msr_feature(kvm_state, in kvm_cpu_realizefn()
100 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) { in lmce_supported()
109 KVMState *s = kvm_state; in kvm_cpu_max_instance_init()
kvm.c 1151 assert(do_sys_ioctl || cs->kvm_state); in get_supported_hv_cpuid()
1259 if (kvm_check_extension(cs->kvm_state, in get_supported_hv_cpuid_legacy()
1265 if (kvm_check_extension(cs->kvm_state, in get_supported_hv_cpuid_legacy()
1270 if (kvm_check_extension(cs->kvm_state, in get_supported_hv_cpuid_legacy()
1296 assert(cs->kvm_state); in hv_cpuid_get_host()
1420 if (!cs->kvm_state && in kvm_hyperv_expand_features()
2132 if (cs->kvm_state->xen_version) { in kvm_arch_init_vcpu()
2147 c->eax = cs->kvm_state->xen_version; in kvm_arch_init_vcpu()
2620 *val = cs->kvm_state->msr_energy.msr_unit; in kvm_rdmsr_rapl_power_unit()
6298 kvm_irqchip_commit_routes(kvm_state); in kvm_update_msi_routes_all()
[all …]
/qemu/target/ppc/
kvm.c 263 assert(kvm_state != NULL); in kvm_get_smmu_info()
502 if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) { in kvm_arch_init_vcpu()
2045 kvmppc_enable_hcall(kvm_state, H_SET_MODE); in kvmppc_enable_set_mode_hcall()
2050 kvmppc_enable_hcall(kvm_state, H_CLEAR_REF); in kvmppc_enable_clear_ref_mod_hcalls()
2051 kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD); in kvmppc_enable_clear_ref_mod_hcalls()
2056 kvmppc_enable_hcall(kvm_state, H_PAGE_INIT); in kvmppc_enable_h_page_init()
2061 kvmppc_enable_hcall(kvm_state, H_RPT_INVALIDATE); in kvmppc_enable_h_rpt_invalidate()
2324 if (kvmppc_is_pr(kvm_state)) { in kvmppc_reset_htab()
2892 return kvmppc_enable_hcall(kvm_state, H_RANDOM); in kvmppc_enable_hwrng()
2906 if (kvmppc_is_pr(kvm_state)) { in kvmppc_check_papr_resize_hpt()
[all …]
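The ppc hits funnel through kvmppc_enable_hcall(kvm_state, H_xxx), turning individual sPAPR hypercalls on in the kernel. A sketch of that enabling step on a raw VM fd follows; using KVM_ENABLE_CAP with KVM_CAP_PPC_ENABLE_HCALL is an assumption about what the helper does, not visible in the hits.

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Sketch: per-hypercall enablement, roughly what
     * kvmppc_enable_hcall(kvm_state, hcall) amounts to. */
    static int enable_hcall(int vm_fd, unsigned long hcall)
    {
        struct kvm_enable_cap cap = {
            .cap  = KVM_CAP_PPC_ENABLE_HCALL,
            .args = { hcall, 1 },   /* hypercall number, enable = 1 */
        };
        return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }
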
/qemu/hw/intc/
openpic_kvm.c 56 kvm_set_irq(kvm_state, n_IRQ, level); in kvm_openpic_set_irq()
199 KVMState *s = kvm_state; in kvm_openpic_realize()
246 kvm_init_irq_routing(kvm_state); in kvm_openpic_realize()
248 kvm_irqchip_add_irq_route(kvm_state, i, 0, i); in kvm_openpic_realize()
xics_kvm.c 342 rc = kvm_vm_ioctl(kvm_state, KVM_IRQ_LINE, &args); in ics_kvm_set_irq()
364 if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_IRQ_XICS)) { in xics_kvm_connect()
399 rc = kvm_create_device(kvm_state, KVM_DEV_TYPE_XICS, false); in xics_kvm_connect()
489 rc = kvm_create_device(kvm_state, KVM_DEV_TYPE_XICS, false); in xics_kvm_has_broken_disconnect()
501 rc = kvm_create_device(kvm_state, KVM_DEV_TYPE_XICS, false); in xics_kvm_has_broken_disconnect()
ioapic.c 125 kvm_set_irq(kvm_state, i, 1); in ioapic_service()
126 kvm_set_irq(kvm_state, i, 0); in ioapic_service()
128 kvm_set_irq(kvm_state, i, 1); in ioapic_service()
205 kvm_irqchip_update_msi_route(kvm_state, i, msg, NULL); in ioapic_update_kvm_routes()
208 kvm_irqchip_commit_routes(kvm_state); in ioapic_update_kvm_routes()
s390_flic_kvm.c 366 ret = kvm_irqchip_add_adapter_route(kvm_state, &routes->adapter); in kvm_s390_add_adapter_routes()
373 kvm_irqchip_commit_routes(kvm_state); in kvm_s390_add_adapter_routes()
380 kvm_irqchip_release_virq(kvm_state, routes->gsi[i]); in kvm_s390_add_adapter_routes()
398 kvm_irqchip_release_virq(kvm_state, routes->gsi[i]); in kvm_s390_release_adapter_routes()
625 ret = kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd); in DECLARE_CLASS_CHECKERS()
arm_gic_kvm.c 528 kvm_irqchip_set_qemuirq_gsi(kvm_state, irq, i); in kvm_arm_gic_realize()
533 ret = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_V2, false); in kvm_arm_gic_realize()
578 kvm_irqchip_add_irq_route(kvm_state, i, 0, i); in kvm_arm_gic_realize()
583 kvm_irqchip_commit_routes(kvm_state); in kvm_arm_gic_realize()
arm_gicv3_its_kvm.c 66 return kvm_vm_ioctl(kvm_state, KVM_SIGNAL_MSI, &msi); in kvm_its_send_msi()
96 s->dev_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_ITS, false); in kvm_arm_its_realize()
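Most hw/intc hits either raise a guest interrupt line through the global kvm_state (kvm_set_irq) or edit the in-kernel routing table and commit it (kvm_irqchip_add_irq_route, kvm_irqchip_commit_routes). The line-injection case reduces to one VM ioctl; the sketch below uses a raw fd, matching the xics_kvm.c hit that issues KVM_IRQ_LINE directly.

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Sketch of what kvm_set_irq(kvm_state, irq, level) reduces to. */
    static int set_irq_line(int vm_fd, unsigned int gsi, int level)
    {
        struct kvm_irq_level args = {
            .irq   = gsi,     /* GSI number, e.g. s->kvm_gsi_base + irq above */
            .level = level,   /* 1 = assert, 0 = deassert */
        };
        return ioctl(vm_fd, KVM_IRQ_LINE, &args);
    }
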
/qemu/hw/ppc/
pef.c 48 if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_SECURE_GUEST)) { in kvmppc_svm_init()
53 int ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_SECURE_GUEST, 0, 1); in kvmppc_svm_init()
/qemu/target/arm/
kvm.c 132 KVMState kvm_state; in kvm_arm_create_scratch_host_vcpu() local
134 kvm_state.fd = kvmfd; in kvm_arm_create_scratch_host_vcpu()
135 kvm_state.vmfd = vmfd; in kvm_arm_create_scratch_host_vcpu()
136 kvm_vm_enable_cap(&kvm_state, KVM_CAP_ARM_MTE, 0); in kvm_arm_create_scratch_host_vcpu()
543 return kvm_check_extension(kvm_state, KVM_CAP_ARM_PMU_V3); in kvm_arm_pmu_supported()
1533 if (kvm_create_device(kvm_state, in kvm_arm_vgic_probe()
1537 if (kvm_create_device(kvm_state, in kvm_arm_vgic_probe()
1553 return kvm_set_irq(kvm_state, kvm_irq, !!level); in kvm_arm_set_irq()
1802 return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE); in kvm_arm_sve_supported()
1807 return kvm_check_extension(kvm_state, KVM_CAP_ARM_MTE); in kvm_arm_mte_supported()
[all …]
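The target/arm hits are the one place kvm_state is not the global: kvm_arm_create_scratch_host_vcpu() builds a throwaway KVMState on the stack (line 132, flagged local) and fills in only fd and vmfd so the stock helpers can be pointed at a scratch VM before the accelerator exists. A sketch of that trick follows; the vCPU creation and register probing that the real function also does are omitted, and the function name here is hypothetical.

    /* Sketch of the on-stack KVMState trick from kvm.c 132-136 above; KVMState
     * and kvm_vm_enable_cap() are the QEMU definitions, see the kvm-all.c notes. */
    static int enable_mte_on_scratch_vm(int kvmfd, int vmfd)
    {
        KVMState kvm_state = { 0 };   /* local object shadowing the global */

        kvm_state.fd = kvmfd;         /* /dev/kvm fd */
        kvm_state.vmfd = vmfd;        /* fd from KVM_CREATE_VM */

        /* Reuse the stock helper against the scratch VM. */
        return kvm_vm_enable_cap(&kvm_state, KVM_CAP_ARM_MTE, 0);
    }
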
/qemu/hw/hyperv/
hyperv.c 458 gsi = kvm_irqchip_add_hv_sint_route(kvm_state, vp_index, sint); in hyperv_sint_route_new()
463 r = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, in hyperv_sint_route_new()
477 kvm_irqchip_release_virq(kvm_state, gsi); in hyperv_sint_route_new()
521 kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, in hyperv_sint_route_unref()
524 kvm_irqchip_release_virq(kvm_state, sint_route->gsi); in hyperv_sint_route_unref()
681 !kvm_check_extension(kvm_state, KVM_CAP_HYPERV_EVENTFD)) { in hyperv_set_event_flag_handler()
695 return kvm_vm_ioctl(kvm_state, KVM_HYPERV_EVENTFD, &hvevfd); in hyperv_set_event_flag_handler()
/qemu/hw/misc/
ivshmem.c 290 ret = kvm_irqchip_update_msi_route(kvm_state, v->virq, msg, dev); in ivshmem_vector_unmask()
294 kvm_irqchip_commit_routes(kvm_state); in ivshmem_vector_unmask()
296 ret = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, v->virq); in ivshmem_vector_unmask()
319 ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, v->virq); in ivshmem_vector_mask()
433 c = kvm_irqchip_begin_route_changes(kvm_state); in ivshmem_add_kvm_msi_virq()
467 kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, in setup_interrupt()
755 kvm_irqchip_release_virq(kvm_state, s->msi_vectors[vector].virq); in ivshmem_remove_kvm_msi_virq()
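The ivshmem hits show the MSI fast path: update the MSI route for a vector, commit the routing table, then attach an eventfd to the resulting virq so the kernel can inject the interrupt without exiting to QEMU. That last step is the KVM_IRQFD VM ioctl; the sketch below, on a raw fd, is roughly what kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, virq) reduces to.

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Sketch: wire an eventfd to a GSI so the kernel injects the interrupt
     * directly when the fd is signalled. */
    static int assign_irqfd(int vm_fd, int eventfd, unsigned int virq)
    {
        struct kvm_irqfd irqfd = {
            .fd  = eventfd,   /* eventfd backing the MSI vector's notifier */
            .gsi = virq,      /* route allocated via the MSI routing table */
        };
        return ioctl(vm_fd, KVM_IRQFD, &irqfd);
    }
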
/qemu/accel/stubs/
kvm-stub.c 17 KVMState *kvm_state; variable

Completed in 74 milliseconds
