Lines matching refs: kvm_vm
62 struct kvm_vm *vm;
87 struct kvm_vm { struct
151 memslot2region(struct kvm_vm *vm, uint32_t memslot);
153 static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm, in vm_get_mem_region()
307 static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { } in static_assert_is_vm()
370 static inline int vm_check_cap(struct kvm_vm *vm, long cap) in vm_check_cap()
378 static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0) in __vm_enable_cap()
384 static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0) in vm_enable_cap()
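
vm_check_cap() wraps KVM_CHECK_EXTENSION at VM scope, and the enable-cap pair wraps KVM_ENABLE_CAP: __vm_enable_cap() returns the raw ioctl result while vm_enable_cap() asserts on failure. A minimal sketch of the probe-then-enable pattern, using the x86 KVM_CAP_X86_DISABLE_EXITS capability purely as an illustration:

    /* Probe first: vm_check_cap() returns the KVM_CHECK_EXTENSION value,
     * so 0 means the capability is unsupported. */
    TEST_REQUIRE(vm_check_cap(vm, KVM_CAP_X86_DISABLE_EXITS));

    /* Asserting variant; note this particular cap must be enabled
     * before vCPUs are created. */
    vm_enable_cap(vm, KVM_CAP_X86_DISABLE_EXITS, KVM_X86_DISABLE_EXITS_HLT);

    /* Non-asserting variant for caps that may legitimately fail. */
    int r = __vm_enable_cap(vm, KVM_CAP_X86_DISABLE_EXITS,
                            KVM_X86_DISABLE_EXITS_PAUSE);
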
391 static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa, in vm_set_memory_attributes()
412 static inline void vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa, in vm_mem_set_private()
418 static inline void vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa, in vm_mem_set_shared()
424 void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
427 static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa, in vm_guest_mem_punch_hole()
433 static inline void vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa, in vm_guest_mem_allocate()
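
vm_mem_set_private()/vm_mem_set_shared() are thin wrappers around vm_set_memory_attributes(), and the punch-hole/allocate pair drives fallocate() on the slot's backing guest_memfd. A sketch of a conversion cycle, assuming vm uses a guest_memfd-capable VM type and that gpa/size describe a page-aligned range inside an existing slot:

    /* Flip the range to private; guest faults are now served from
     * guest_memfd rather than the shared userspace mapping. */
    vm_mem_set_private(vm, gpa, size);

    /* Discard the private backing (FALLOC_FL_PUNCH_HOLE)... */
    vm_guest_mem_punch_hole(vm, gpa, size);

    /* ...optionally preallocate it again... */
    vm_guest_mem_allocate(vm, gpa, size);

    /* ...and convert the range back to shared. */
    vm_mem_set_shared(vm, gpa, size);
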
439 void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
442 void kvm_vm_free(struct kvm_vm *vmp);
443 void kvm_vm_restart(struct kvm_vm *vmp);
444 void kvm_vm_release(struct kvm_vm *vmp);
445 void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
448 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
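
kvm_vm_release() closes the VM's file descriptors while keeping the host-side bookkeeping, kvm_vm_restart() then rebuilds a fresh VM from that bookkeeping (the pattern state save/restore tests use), and kvm_vm_free() is the full teardown. kvm_vm_elf_load() loads the test binary into guest memory and is normally invoked for you by the vm_create*() helpers. A small sketch:

    /* Dump memslots and page tables for debugging; last arg is indent. */
    vm_dump(stderr, vm, 0);

    /* End-of-test teardown. */
    kvm_vm_free(vm);
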
450 static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log) in kvm_vm_get_dirty_log()
457 static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log, in kvm_vm_clear_dirty_log()
470 static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm) in kvm_vm_reset_dirty_ring()
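
kvm_vm_get_dirty_log() and kvm_vm_clear_dirty_log() wrap KVM_GET_DIRTY_LOG and KVM_CLEAR_DIRTY_LOG for a single memslot; log is a caller-allocated bitmap with one bit per page of the slot. A sketch, assuming slot has KVM_MEM_LOG_DIRTY_PAGES set and spans npages pages (bitmap_zalloc() comes from the tools bitmap headers):

    unsigned long *bmap = bitmap_zalloc(npages);

    /* Snapshot the dirty state of the whole slot. */
    kvm_vm_get_dirty_log(vm, slot, bmap);

    /* ... process set bits ... */

    /* With KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, harvested pages must be
     * cleared explicitly; here, the entire slot starting at page 0. */
    kvm_vm_clear_dirty_log(vm, slot, bmap, 0, npages);

    free(bmap);
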
475 static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm, in kvm_vm_register_coalesced_io()
488 static inline void kvm_vm_unregister_coalesced_io(struct kvm_vm *vm, in kvm_vm_unregister_coalesced_io()
501 static inline int vm_get_stats_fd(struct kvm_vm *vm) in vm_get_stats_fd()
509 static inline int __kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd, in __kvm_irqfd()
522 static inline void kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd, in kvm_irqfd()
530 static inline void kvm_assign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd) in kvm_assign_irqfd()
535 static inline void kvm_deassign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd) in kvm_deassign_irqfd()
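
__kvm_irqfd() issues KVM_IRQFD and returns the result; kvm_irqfd() asserts on failure, and the assign/deassign pair fills in the flags. Binding an eventfd lets a plain host-side write() inject an interrupt on a GSI. A sketch, assuming an in-kernel irqchip (vm_create_irqchip(), line 636) and an illustrative GSI number:

    int efd = eventfd(0, 0);        /* needs <sys/eventfd.h> */
    uint64_t val = 1;

    TEST_ASSERT(efd >= 0, "eventfd() failed");
    kvm_assign_irqfd(vm, 10 /* illustrative GSI */, efd);

    /* Each write now injects an interrupt on GSI 10. */
    TEST_ASSERT(write(efd, &val, sizeof(val)) == sizeof(val), "write()");

    kvm_deassign_irqfd(vm, 10, efd);
    close(efd);
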
636 void vm_create_irqchip(struct kvm_vm *vm);
638 static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size, in __vm_create_guest_memfd()
649 static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size, in vm_create_guest_memfd()
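
vm_create_guest_memfd() issues KVM_CREATE_GUEST_MEMFD and returns the new guest_memfd descriptor, asserting on failure (the __ variant returns the error instead). The fd is usually handed straight to vm_mem_add() (line 673) as a slot's private backing. A sketch with illustrative sizing:

    /* Size must be page-aligned. */
    uint64_t size = 16 * getpagesize();
    int gmem_fd = vm_create_guest_memfd(vm, size, 0 /* flags */);

    /* ... pass gmem_fd (plus an offset) to vm_mem_add() ... */
    close(gmem_fd);
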
658 void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
660 int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
662 void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
665 int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
669 void vm_userspace_mem_region_add(struct kvm_vm *vm,
673 void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
678 static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm) in vm_arch_has_protected_memory()
684 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
685 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
686 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
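
Together these cover the memslot lifecycle: vm_userspace_mem_region_add() creates a region backed by the chosen source type, and the three helpers above adjust the flags, base GPA, or existence of an already-added slot. A sketch with illustrative slot geometry:

    uint32_t slot = 10;             /* illustrative */
    uint64_t gpa = 0x10000000;      /* illustrative, page-aligned */
    uint64_t npages = 64;

    vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, gpa, slot,
                                npages, 0);

    /* Enable dirty logging on the slot... */
    vm_mem_region_set_flags(vm, slot, KVM_MEM_LOG_DIRTY_PAGES);

    /* ...relocate it to a new base GPA... */
    vm_mem_region_move(vm, slot, gpa + npages * vm->page_size);

    /* ...and finally delete it. */
    vm_mem_region_delete(vm, slot);
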
687 struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
688 void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
689 vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
690 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
691 vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
693 vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
696 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
697 vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
699 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
701 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
703 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
704 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
705 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
706 void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
712 static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa) in vm_untag_gpa()
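
The allocators and translators above cooperate: vm_vaddr_alloc*() returns guest-virtual addresses whose physical pages and page-table mappings are already in place, and the addr_*2*() family converts between GVA, GPA, and host virtual addresses so the host can seed or inspect guest memory directly. A sketch:

    /* One mapped guest page. */
    vm_vaddr_t gva = vm_vaddr_alloc_page(vm);

    /* Host pointer aliasing the same physical page. */
    uint64_t *hva = addr_gva2hva(vm, gva);
    *hva = 0xdeadbeef;      /* the guest sees this at 'gva' */

    /* Round trip: the HVA and the GVA must resolve to the same GPA. */
    TEST_ASSERT(addr_hva2gpa(vm, hva) == addr_gva2gpa(vm, gva),
                "HVA and GVA disagree on the backing GPA");
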
918 int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
919 int __kvm_create_device(struct kvm_vm *vm, uint64_t type);
921 static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type) in kvm_create_device()
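
__kvm_test_create_device() issues KVM_CREATE_DEVICE with the test flag set, so it probes support without instantiating anything (0 means supported); kvm_create_device() actually creates the device and asserts on failure. A sketch, with KVM_DEV_TYPE_VFIO as a purely illustrative type:

    int dev_fd;

    /* Probe before creating; bail (or skip the test) if unsupported. */
    if (__kvm_test_create_device(vm, KVM_DEV_TYPE_VFIO))
            return;

    dev_fd = kvm_create_device(vm, KVM_DEV_TYPE_VFIO);
    /* ... drive the device via KVM_SET_DEVICE_ATTR etc. ... */
    close(dev_fd);
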
950 void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
951 int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
958 int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
959 void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
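
kvm_irq_line() wraps KVM_IRQ_LINE (the _ variant returns the ioctl result instead of asserting), and the gsi_routing helpers push a kvm_irq_routing table via KVM_SET_GSI_ROUTING. A sketch of pulsing a line, assuming an in-kernel irqchip and an illustrative IRQ number:

    vm_create_irqchip(vm);

    /* Pulse IRQ 4: raise the line, then lower it. */
    kvm_irq_line(vm, 4, 1);
    kvm_irq_line(vm, 4, 0);
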
963 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
965 vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
968 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
970 static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, in vm_phy_pages_alloc()
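
The physical-page allocators hand out GPAs from a given memslot, at or above paddr_min, and record them in the VM's tracking; vm_alloc_page_table() is the variant page-table code uses so those pages land in the region reserved for page tables. A sketch with illustrative constraints:

    /* One page from memslot 0, anywhere at or above 1 MiB. */
    vm_paddr_t gpa = vm_phy_page_alloc(vm, 0x100000, 0);

    /* A backing page for a new page table. */
    vm_paddr_t pt_gpa = vm_alloc_page_table(vm);
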
988 struct kvm_vm *____vm_create(struct vm_shape shape);
989 struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
992 static inline struct kvm_vm *vm_create_barebones(void) in vm_create_barebones()
997 static inline struct kvm_vm *vm_create_barebones_type(unsigned long type) in vm_create_barebones_type()
1007 static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus) in vm_create()
1012 struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
1016 static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus, in vm_create_with_vcpus()
1025 struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
1034 static inline struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu, in __vm_create_with_one_vcpu()
1042 static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu, in vm_create_with_one_vcpu()
1048 static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape, in vm_create_shape_with_one_vcpu()
1055 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
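
The creation helpers stack: ____vm_create() makes a bare VM of the requested shape, __vm_create() sizes memory for the expected vCPU count, and the *_with_vcpus()/*_with_one_vcpu() variants additionally load the test ELF and add runnable vCPUs. The most common test skeleton, sketched below; vcpu_run() and the GUEST_*() ucall macros come from elsewhere in kvm_util.h and ucall_common.h:

    static void guest_code(void)
    {
            /* Runs inside the guest. */
            GUEST_DONE();
    }

    int main(void)
    {
            struct kvm_vcpu *vcpu;
            struct kvm_vm *vm;

            vm = vm_create_with_one_vcpu(&vcpu, guest_code);
            vcpu_run(vcpu);
            /* ... assert on the resulting ucall/exit reason ... */
            kvm_vm_free(vm);
            return 0;
    }
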
1091 unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
1148 struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
1151 static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, in vm_vcpu_add()
1162 struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);
1164 static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm, in vm_vcpu_recreate()
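
vm_vcpu_add() wraps the arch hook vm_arch_vcpu_add() and points the new vCPU at guest_code; vm_vcpu_recreate() re-adds a vCPU to a VM brought back with kvm_vm_restart() (line 443). A sketch adding a second vCPU to an existing VM:

    /* vCPU id 1, entering at guest_code. */
    struct kvm_vcpu *vcpu1 = vm_vcpu_add(vm, 1, guest_code);

    vcpu_run(vcpu1);
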
1172 void virt_arch_pgd_alloc(struct kvm_vm *vm);
1174 static inline void virt_pgd_alloc(struct kvm_vm *vm) in virt_pgd_alloc()
1195 void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);
1197 static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) in virt_pg_map()
1218 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
1220 static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) in addr_gva2gpa()
1240 void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
1242 static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) in virt_dump()
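
The virt_*() wrappers forward to the per-arch virt_arch_*() implementations after the static_assert_is_vm() type check (line 307). Building and verifying a mapping by hand, as a sketch with illustrative addresses:

    /* Allocate the root page table (skipped if already created). */
    virt_pgd_alloc(vm);

    /* Identity-map one freshly allocated page... */
    vm_paddr_t gpa = vm_phy_page_alloc(vm, 0x200000, 0);
    virt_pg_map(vm, gpa, gpa);

    /* ...and confirm the software page walk agrees. */
    TEST_ASSERT(addr_gva2gpa(vm, gpa) == gpa, "GVA->GPA walk mismatch");

    virt_dump(stderr, vm, 0);   /* dump guest page tables for debugging */
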
1248 static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm) in __vm_disable_nx_huge_pages()
1260 void kvm_arch_vm_post_create(struct kvm_vm *vm);
1262 bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);