Lines matching refs:vm (cross-reference hits for the vm identifier in the KVM selftests utility header; each entry gives the original source line number, the matching line, and how vm is used at that point: as a struct member, a function or macro argument, or inside the named function).
62 struct kvm_vm *vm; member
144 #define kvm_for_each_vcpu(vm, i, vcpu) \ argument
145 for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++) \
146 if (!((vcpu) = vm->vcpus[i])) \
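
The kvm_for_each_vcpu() macro above walks every vCPU slot up to last_vcpu_id and skips unpopulated (NULL) entries. A minimal sketch of how a test might use it, assuming the usual selftests kvm_util.h header and a vm whose vCPUs were already added through the harness:

#include "kvm_util.h"

/* Count the vCPUs that have actually been instantiated on this VM. */
static int count_vcpus(struct kvm_vm *vm)
{
	struct kvm_vcpu *vcpu;
	int i, n = 0;

	kvm_for_each_vcpu(vm, i, vcpu)
		n++;

	return n;
}
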
151 memslot2region(struct kvm_vm *vm, uint32_t memslot);
153 static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm, in vm_get_mem_region() argument
157 return memslot2region(vm, vm->memslots[type]); in vm_get_mem_region()
307 static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { } in static_assert_is_vm() argument
309 #define __vm_ioctl(vm, cmd, arg) \ argument
311 static_assert_is_vm(vm); \
312 kvm_do_ioctl((vm)->fd, cmd, arg); \
323 #define __TEST_ASSERT_VM_VCPU_IOCTL(cond, name, ret, vm) \ argument
327 static_assert_is_vm(vm); \
333 __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)KVM_CAP_USER_MEMORY) < 0) { \
341 #define TEST_ASSERT_VM_VCPU_IOCTL(cond, cmd, ret, vm) \ argument
342 __TEST_ASSERT_VM_VCPU_IOCTL(cond, #cmd, ret, vm)
344 #define vm_ioctl(vm, cmd, arg) \ argument
346 int ret = __vm_ioctl(vm, cmd, arg); \
348 __TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, vm); \
363 __TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, (vcpu)->vm); \
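The two ioctl flavors differ only in error handling: __vm_ioctl() hands back the raw ioctl result so a test can probe for expected failures, while vm_ioctl() asserts success, and __TEST_ASSERT_VM_VCPU_IOCTL() additionally re-probes KVM_CHECK_EXTENSION so a failure can be blamed on an unusable VM fd rather than the ioctl under test. A hedged sketch of the negative-testing pattern; the errno expectation for KVM_RESET_DIRTY_RINGS is an assumption about a VM without a dirty ring, not something stated in this header:

#include <errno.h>
#include "kvm_util.h"

/* Probe an optional ioctl without killing the test on failure. */
static void try_reset_dirty_rings(struct kvm_vm *vm)
{
	int ret = __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);

	/* Tolerate EINVAL (dirty ring not enabled); anything else is a bug. */
	if (ret < 0)
		TEST_ASSERT(errno == EINVAL,
			    "KVM_RESET_DIRTY_RINGS: unexpected errno %d", errno);
}
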
370 static inline int vm_check_cap(struct kvm_vm *vm, long cap) in vm_check_cap() argument
372 int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap); in vm_check_cap()
374 TEST_ASSERT_VM_VCPU_IOCTL(ret >= 0, KVM_CHECK_EXTENSION, ret, vm); in vm_check_cap()
378 static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0) in __vm_enable_cap() argument
382 return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap); in __vm_enable_cap()
384 static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0) in vm_enable_cap() argument
388 vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap); in vm_enable_cap()
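vm_check_cap() wraps KVM_CHECK_EXTENSION on the VM fd and asserts that the ioctl itself did not fail, while __vm_enable_cap()/vm_enable_cap() wrap KVM_ENABLE_CAP with and without the success assertion. A typical check-then-enable sketch; KVM_CAP_HALT_POLL is only an illustrative per-VM capability, not one named in this listing:

#include "kvm_util.h"

/* Enable a per-VM capability only when the host advertises it. */
static void maybe_disable_halt_polling(struct kvm_vm *vm)
{
	if (!vm_check_cap(vm, KVM_CAP_HALT_POLL))
		return;

	/* arg0 is the capability-specific argument; 0 ns disables polling. */
	vm_enable_cap(vm, KVM_CAP_HALT_POLL, 0);
}
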
391 static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa, in vm_set_memory_attributes() argument
408 vm_ioctl(vm, KVM_SET_MEMORY_ATTRIBUTES, &attr); in vm_set_memory_attributes()
412 static inline void vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa, in vm_mem_set_private() argument
415 vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE); in vm_mem_set_private()
418 static inline void vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa, in vm_mem_set_shared() argument
421 vm_set_memory_attributes(vm, gpa, size, 0); in vm_mem_set_shared()
424 void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
427 static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa, in vm_guest_mem_punch_hole() argument
430 vm_guest_mem_fallocate(vm, gpa, size, true); in vm_guest_mem_punch_hole()
433 static inline void vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa, in vm_guest_mem_allocate() argument
436 vm_guest_mem_fallocate(vm, gpa, size, false); in vm_guest_mem_allocate()
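vm_mem_set_private()/vm_mem_set_shared() flip the KVM_MEMORY_ATTRIBUTE_PRIVATE attribute on a GPA range, and vm_guest_mem_punch_hole()/vm_guest_mem_allocate() fallocate the backing guest_memfd to match. A sketch of a private/shared round trip for a page-aligned range, assuming the memslot was created with a guest_memfd by the surrounding test:

#include "kvm_util.h"

/*
 * Convert a gpa range to private, then back to shared, punching a hole in
 * the guest_memfd so the private backing pages are actually released.
 */
static void toggle_private(struct kvm_vm *vm, uint64_t gpa, uint64_t size)
{
	vm_mem_set_private(vm, gpa, size);

	/* ... run the guest while the range is private ... */

	vm_mem_set_shared(vm, gpa, size);
	vm_guest_mem_punch_hole(vm, gpa, size);
}
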
439 void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
445 void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
448 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
450 static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log) in kvm_vm_get_dirty_log() argument
454 vm_ioctl(vm, KVM_GET_DIRTY_LOG, &args); in kvm_vm_get_dirty_log()
457 static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log, in kvm_vm_clear_dirty_log() argument
467 vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args); in kvm_vm_clear_dirty_log()
470 static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm) in kvm_vm_reset_dirty_ring() argument
472 return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL); in kvm_vm_reset_dirty_ring()
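kvm_vm_get_dirty_log() and kvm_vm_clear_dirty_log() fill and clear a user-supplied dirty bitmap for one memslot, and kvm_vm_reset_dirty_ring() passes through the raw KVM_RESET_DIRTY_RINGS result. A minimal harvest sketch; sizing the bitmap at one bit per page is an assumption by this example, not something the header does for the caller:

#include <stdlib.h>
#include "kvm_util.h"

/* Fetch the dirty bitmap for @slot, which covers @npages guest pages. */
static void *harvest_dirty_log(struct kvm_vm *vm, int slot, uint64_t npages)
{
	/* One bit per page, rounded up to whole bytes. */
	void *bitmap = calloc(1, (npages + 7) / 8);

	TEST_ASSERT(bitmap, "Failed to allocate dirty bitmap");
	kvm_vm_get_dirty_log(vm, slot, bitmap);
	return bitmap;
}
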
475 static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm, in kvm_vm_register_coalesced_io() argument
485 vm_ioctl(vm, KVM_REGISTER_COALESCED_MMIO, &zone); in kvm_vm_register_coalesced_io()
488 static inline void kvm_vm_unregister_coalesced_io(struct kvm_vm *vm, in kvm_vm_unregister_coalesced_io() argument
498 vm_ioctl(vm, KVM_UNREGISTER_COALESCED_MMIO, &zone); in kvm_vm_unregister_coalesced_io()
501 static inline int vm_get_stats_fd(struct kvm_vm *vm) in vm_get_stats_fd() argument
503 int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL); in vm_get_stats_fd()
505 TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vm); in vm_get_stats_fd()
509 static inline int __kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd, in __kvm_irqfd() argument
519 return __vm_ioctl(vm, KVM_IRQFD, &irqfd); in __kvm_irqfd()
522 static inline void kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd, in kvm_irqfd() argument
525 int ret = __kvm_irqfd(vm, gsi, eventfd, flags); in kvm_irqfd()
527 TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_IRQFD, ret, vm); in kvm_irqfd()
530 static inline void kvm_assign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd) in kvm_assign_irqfd() argument
532 kvm_irqfd(vm, gsi, eventfd, 0); in kvm_assign_irqfd()
535 static inline void kvm_deassign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd) in kvm_deassign_irqfd() argument
537 kvm_irqfd(vm, gsi, eventfd, KVM_IRQFD_FLAG_DEASSIGN); in kvm_deassign_irqfd()
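The irqfd helpers bind an eventfd to a GSI: kvm_assign_irqfd() attaches it, kvm_deassign_irqfd() detaches it, and __kvm_irqfd() returns the raw result for tests that expect failure. A sketch using a plain eventfd, assuming the VM already has an in-kernel irqchip (cf. vm_create_irqchip() below); GSI 0 is arbitrary:

#include <sys/eventfd.h>
#include <unistd.h>
#include "kvm_util.h"

/* Route eventfd signals to GSI 0 for the duration of a test phase. */
static void run_with_irqfd(struct kvm_vm *vm)
{
	int efd = eventfd(0, 0);

	TEST_ASSERT(efd >= 0, "eventfd() failed");

	kvm_assign_irqfd(vm, 0, efd);
	/* ... signal the eventfd / run the guest ... */
	kvm_deassign_irqfd(vm, 0, efd);

	close(efd);
}
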
598 #define vm_get_stat(vm, stat) __get_stat(&(vm)->stats, stat) argument
636 void vm_create_irqchip(struct kvm_vm *vm);
638 static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size, in __vm_create_guest_memfd() argument
646 return __vm_ioctl(vm, KVM_CREATE_GUEST_MEMFD, &guest_memfd); in __vm_create_guest_memfd()
649 static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size, in vm_create_guest_memfd() argument
652 int fd = __vm_create_guest_memfd(vm, size, flags); in vm_create_guest_memfd()
658 void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
660 int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
662 void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
665 int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
669 void vm_userspace_mem_region_add(struct kvm_vm *vm,
673 void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
678 static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm) in vm_arch_has_protected_memory() argument
684 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
685 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
686 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
687 struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
688 void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
689 vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
690 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
691 vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
693 vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
696 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
697 vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
699 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
701 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
703 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
704 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
705 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
706 void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
712 static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa) in vm_untag_gpa() argument
714 return gpa & ~vm->gpa_tag_mask; in vm_untag_gpa()
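Together, vm_vaddr_alloc() and addr_gva2hva() let host code allocate guest virtual memory and then read or write it through the host mapping, with addr_gva2gpa()/addr_hva2gpa() providing the corresponding translations. A small sketch; KVM_UTIL_MIN_VADDR is the conventional allocation floor used by these tests and is assumed here rather than shown in the listing:

#include <string.h>
#include "kvm_util.h"

/* Stage a buffer in guest memory and fill it from the host side. */
static vm_vaddr_t stage_guest_buffer(struct kvm_vm *vm, const void *data,
				     size_t len)
{
	vm_vaddr_t gva = vm_vaddr_alloc(vm, len, KVM_UTIL_MIN_VADDR);

	/* Write through the host alias of the guest pages backing @gva. */
	memcpy(addr_gva2hva(vm, gva), data, len);
	return gva;
}
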
849 TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_CHECK_EXTENSION, fd, vcpu->vm); in vcpu_get_stats_fd()
918 int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
919 int __kvm_create_device(struct kvm_vm *vm, uint64_t type);
921 static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type) in kvm_create_device() argument
923 int fd = __kvm_create_device(vm, type); in kvm_create_device()
950 void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
951 int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
958 int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
959 void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
963 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
965 vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
968 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
970 static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, in vm_phy_pages_alloc() argument
978 return __vm_phy_pages_alloc(vm, num, paddr_min, memslot, in vm_phy_pages_alloc()
979 vm_arch_has_protected_memory(vm)); in vm_phy_pages_alloc()
1055 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
1091 unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
1107 #define sync_global_to_guest(vm, g) ({ \ argument
1108 typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
1112 #define sync_global_from_guest(vm, g) ({ \ argument
1113 typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
1123 #define write_guest_global(vm, g, val) ({ \ argument
1124 typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
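sync_global_to_guest() and sync_global_from_guest() copy a global variable between its host and guest instances by resolving the global's GVA to an HVA, and write_guest_global() stores a new value into the guest copy only. A sketch; guest_test_phase is a hypothetical global shared with guest code in the same test binary:

#include "kvm_util.h"

static int guest_test_phase;	/* visible to both host and guest code */

static void advance_phase(struct kvm_vm *vm)
{
	guest_test_phase++;				/* update the host copy */
	sync_global_to_guest(vm, guest_test_phase);	/* push it into the guest */
}
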
1148 struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
1151 static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, in vm_vcpu_add() argument
1154 struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id); in vm_vcpu_add()
1162 struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);
1164 static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm, in vm_vcpu_recreate() argument
1167 return vm_arch_vcpu_recreate(vm, vcpu_id); in vm_vcpu_recreate()
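vm_vcpu_add() creates a vCPU through the architecture hook vm_arch_vcpu_add() and points it at a guest entry function, while vm_vcpu_recreate() re-instantiates a vCPU on a re-created VM (cf. vm_recreate_with_one_vcpu() above). A sketch of the common setup path; the guest_code() stub and the exact type of the trailing argument are assumptions about how the harness is normally used:

#include "kvm_util.h"

static void guest_code(void)
{
	/* ... guest side of the test ... */
}

static struct kvm_vcpu *add_test_vcpu(struct kvm_vm *vm, uint32_t id)
{
	/* Architecture setup plus pointing the new vCPU at guest_code(). */
	return vm_vcpu_add(vm, id, guest_code);
}
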
1172 void virt_arch_pgd_alloc(struct kvm_vm *vm);
1174 static inline void virt_pgd_alloc(struct kvm_vm *vm) in virt_pgd_alloc() argument
1176 virt_arch_pgd_alloc(vm); in virt_pgd_alloc()
1195 void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);
1197 static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) in virt_pg_map() argument
1199 virt_arch_pg_map(vm, vaddr, paddr); in virt_pg_map()
1218 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
1220 static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) in addr_gva2gpa() argument
1222 return addr_arch_gva2gpa(vm, gva); in addr_gva2gpa()
1240 void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
1242 static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) in virt_dump() argument
1244 virt_arch_dump(stream, vm, indent); in virt_dump()
1248 static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm) in __vm_disable_nx_huge_pages() argument
1250 return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0); in __vm_disable_nx_huge_pages()
1260 void kvm_arch_vm_post_create(struct kvm_vm *vm);
1262 bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);