Lines matching refs: vm
(each match is shown with its source line number, grouped by containing function)

In virt_arch_pgd_alloc():
  125  void virt_arch_pgd_alloc(struct kvm_vm *vm)
  127          TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
  128                      "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
  131          if (!vm->pgd_created) {
  132                  vm->pgd = vm_alloc_page_table(vm);
  133                  vm->pgd_created = true;
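
The allocation is lazy and idempotent: the root table is created on first
use and then reused. A minimal sketch of that contract, assuming a vm built
by the selftests harness and the generic virt_pgd_alloc() wrapper:

    virt_pgd_alloc(vm);    /* first call allocates vm->pgd           */
    virt_pgd_alloc(vm);    /* no-op, vm->pgd_created is already true */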

In virt_get_pte():
  137  static void *virt_get_pte(struct kvm_vm *vm, uint64_t *parent_pte,
  141          uint64_t *page_table = addr_gpa2hva(vm, pt_gpa);
  144          TEST_ASSERT((*parent_pte & PTE_PRESENT_MASK) || parent_pte == &vm->pgd,

In virt_create_upper_pte():
  151  static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
  158          uint64_t *pte = virt_get_pte(vm, parent_pte, vaddr, current_level);
  160          paddr = vm_untag_gpa(vm, paddr);
  167                  *pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;

In __virt_pg_map():
  184  void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
  190          TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K,
  191                      "Unknown or unsupported guest mode, mode: 0x%x", vm->mode);
  196          TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (vaddr >> vm->page_shift)),
  201          TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
  204                      paddr, vm->max_gfn, vm->page_size);
  205          TEST_ASSERT(vm_untag_gpa(vm, paddr) == paddr,
  212          pml4e = virt_create_upper_pte(vm, &vm->pgd, vaddr, paddr, PG_LEVEL_512G, level);
  216          pdpe = virt_create_upper_pte(vm, pml4e, vaddr, paddr, PG_LEVEL_1G, level);
  220          pde = virt_create_upper_pte(vm, pdpe, vaddr, paddr, PG_LEVEL_2M, level);
  225          pte = virt_get_pte(vm, pde, vaddr, PG_LEVEL_4K);
  234          if (vm_is_gpa_protected(vm, paddr))
  235                  *pte |= vm->arch.c_bit;
  237                  *pte |= vm->arch.s_bit;

In virt_arch_pg_map():
  240  void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
  242          __virt_pg_map(vm, vaddr, paddr, PG_LEVEL_4K);

In virt_map_level():
  245  void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
  257                  __virt_pg_map(vm, vaddr, paddr, level);
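
Together these form the mapping API: virt_arch_pg_map() backs the generic
virt_pg_map() for single 4 KiB pages, virt_map_level() loops over a range,
and __virt_pg_map() takes an explicit level for hugepages. A usage sketch,
assuming gva/gpa pairs that fall inside an existing, suitably aligned
memslot:

    virt_pg_map(vm, gva, gpa);                       /* one 4 KiB page     */
    __virt_pg_map(vm, gva_2m, gpa_2m, PG_LEVEL_2M);  /* one 2 MiB hugepage */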

In __vm_get_page_table_entry():
  276  uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
  281          TEST_ASSERT(!vm->arch.is_pt_protected,
  287          TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
  288                      "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
  289          TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
  290                      (vaddr >> vm->page_shift)),
  300          pml4e = virt_get_pte(vm, &vm->pgd, vaddr, PG_LEVEL_512G);
  304          pdpe = virt_get_pte(vm, pml4e, vaddr, PG_LEVEL_1G);
  308          pde = virt_get_pte(vm, pdpe, vaddr, PG_LEVEL_2M);
  312          return virt_get_pte(vm, pde, vaddr, PG_LEVEL_4K);

In vm_get_page_table_entry():
  315  uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr)
  319          return __vm_get_page_table_entry(vm, vaddr, &level);
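
vm_get_page_table_entry() is the entry point for tests that inspect or
corrupt a final PTE; the is_pt_protected assert at line 281 keeps it away
from guests whose page tables the host must not touch. A sketch, assuming
gva is already mapped 4 KiB and using PTE_WRITABLE_MASK from processor.h
(the guest still has to flush its TLB before the change is observed):

    uint64_t *pte = vm_get_page_table_entry(vm, gva);

    *pte &= ~PTE_WRITABLE_MASK;    /* e.g. force a write-protection fault */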

In virt_arch_dump():
  322  void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
  329          if (!vm->pgd_created)
  337          pml4e_start = (uint64_t *) addr_gpa2hva(vm, vm->pgd);
  346                  addr_hva2gpa(vm, pml4e), PTE_GET_PFN(*pml4e),
  349                  pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK);
  358                          addr_hva2gpa(vm, pdpe),
  362                          pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK);
  370                                  addr_hva2gpa(vm, pde),
  374                                  pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK);
  384                                          addr_hva2gpa(vm, pte),
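
The dump walks all four paging levels, translating each table's gpa back to
a host pointer with addr_gpa2hva() before descending. Tests reach it via
the generic wrapper; a sketch:

    virt_dump(stdout, vm, 4);    /* print guest page tables, indented by 4 */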

In kvm_seg_fill_gdt_64bit():
  417  static void kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp)
  419          void *gdt = addr_gva2hva(vm, vm->arch.gdt);

In addr_arch_gva2gpa():
  466  vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
  469          uint64_t *pte = __vm_get_page_table_entry(vm, gva, &level);
  478          return vm_untag_gpa(vm, PTE_GET_PA(*pte)) | (gva & ~HUGEPAGE_MASK(level));
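
addr_arch_gva2gpa() backs the generic addr_gva2gpa() and handles hugepages:
PTE_GET_PA() supplies the upper bits and the gva's offset below the
mapping's level supplies the rest. A sketch of a full guest-virtual to
host-virtual translation:

    vm_paddr_t gpa = addr_gva2gpa(vm, gva);    /* walk guest page tables */
    void *hva = addr_gpa2hva(vm, gpa);         /* then memslot lookup    */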

In vcpu_init_sregs():
  491  static void vcpu_init_sregs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
  495          TEST_ASSERT_EQ(vm->mode, VM_MODE_PXXV48_4K);
  500          sregs.idt.base = vm->arch.idt;
  502          sregs.gdt.base = vm->arch.gdt;
  516          kvm_seg_set_tss_64bit(vm->arch.tss, &sregs.tr);
  518          sregs.cr3 = vm->pgd;
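
vcpu_init_sregs() gives each new vCPU 64-bit defaults: IDT, GDT and TSS
bases from vm->arch, and CR3 pointing at vm->pgd. Tests needing different
control-register state adjust it afterwards; a sketch using the
vcpu_sregs_get()/vcpu_sregs_set() wrappers (the CR4 bit is illustrative):

    struct kvm_sregs sregs;

    vcpu_sregs_get(vcpu, &sregs);
    sregs.cr4 |= X86_CR4_DE;        /* illustrative tweak only */
    vcpu_sregs_set(vcpu, &sregs);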

In vcpu_init_xcrs():
  522  static void vcpu_init_xcrs(struct kvm_vm *vm, struct kvm_vcpu *vcpu)

In set_idt_entry():
  536  static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,
  540                  (struct idt_entry *)addr_gva2hva(vm, vm->arch.idt);

In vm_init_descriptor_tables():
  585  static void vm_init_descriptor_tables(struct kvm_vm *vm)
  591          vm->arch.gdt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
  592          vm->arch.idt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
  593          vm->handlers = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
  594          vm->arch.tss = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
  598                  set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0, KERNEL_CS);
  600          *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
  603          kvm_seg_fill_gdt_64bit(vm, &seg);
  606          kvm_seg_fill_gdt_64bit(vm, &seg);
  608          kvm_seg_set_tss_64bit(vm->arch.tss, &seg);
  609          kvm_seg_fill_gdt_64bit(vm, &seg);

In vm_install_exception_handler():
  612  void vm_install_exception_handler(struct kvm_vm *vm, int vector,
  615          vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers);
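
vm_install_exception_handler() stores the handler's guest address in the
handlers table allocated at line 593 and published to the guest-side
exception_handlers pointer at line 600. A sketch of a #UD handler, assuming
the guest executes a 2-byte ud2:

    static void ud_handler(struct ex_regs *regs)
    {
            regs->rip += 2;    /* skip the faulting ud2 instruction */
    }

    vm_install_exception_handler(vm, UD_VECTOR, ud_handler);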

In kvm_arch_vm_post_create():
  628  void kvm_arch_vm_post_create(struct kvm_vm *vm)
  635          vm_create_irqchip(vm);
  636          vm_init_descriptor_tables(vm);
  638          sync_global_to_guest(vm, host_cpu_is_intel);
  639          sync_global_to_guest(vm, host_cpu_is_amd);
  640          sync_global_to_guest(vm, is_forced_emulation_enabled);
  642          if (is_sev_vm(vm)) {
  645                  vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
  648          r = __vm_ioctl(vm, KVM_GET_TSC_KHZ, NULL);
  651          sync_global_to_guest(vm, guest_tsc_khz);
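
kvm_arch_vm_post_create() runs from the common VM-creation path, so every
x86 VM gets an irqchip, descriptor tables and these synced globals without
test involvement. sync_global_to_guest() copies a host global into guest
memory under the same symbol; a sketch with a hypothetical knob:

    static bool do_slow_path;    /* hypothetical test knob */

    do_slow_path = true;
    sync_global_to_guest(vm, do_slow_path);    /* guest now reads true */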

In vm_arch_vcpu_add():
  663  struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
  670          stack_vaddr = __vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
  689          vcpu = __vm_vcpu_add(vm, vcpu_id);
  691          vcpu_init_sregs(vm, vcpu);
  692          vcpu_init_xcrs(vm, vcpu);

In vm_arch_vcpu_recreate():
  714  struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id)
  716          struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);
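
vm_arch_vcpu_add() allocates a guest stack and layers the sregs/XCR
defaults on top of __vm_vcpu_add(); vm_arch_vcpu_recreate() skips the
re-initialization because that state already lives in KVM. Tests normally
go through the generic wrapper; a sketch, assuming the ucall machinery for
GUEST_DONE():

    static void guest_code(void)
    {
            GUEST_DONE();
    }

    struct kvm_vcpu *vcpu = vm_vcpu_add(vm, 0, guest_code);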

In vcpu_save_xsave_state():
 1054          int size = vm_check_cap(vcpu->vm, KVM_CAP_XSAVE2);

In kvm_init_vm_address_properties():
 1157  void kvm_init_vm_address_properties(struct kvm_vm *vm)
 1159          if (is_sev_vm(vm)) {
 1160                  vm->arch.sev_fd = open_sev_dev_path_or_exit();
 1161                  vm->arch.c_bit = BIT_ULL(this_cpu_property(X86_PROPERTY_SEV_C_BIT));
 1162                  vm->gpa_tag_mask = vm->arch.c_bit;
 1164                  vm->arch.sev_fd = -1;
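
For SEV guests the C-bit doubles as vm->gpa_tag_mask, so encrypted gpas
handed out by the library carry it and vm_untag_gpa() strips it before
comparisons (see lines 160 and 205 above). A sketch of that invariant:

    /* vm_untag_gpa() masks off vm->gpa_tag_mask, the C-bit on SEV VMs */
    TEST_ASSERT(vm_untag_gpa(vm, gpa | vm->arch.c_bit) == gpa,
                "C-bit should have been stripped");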

In vm_compute_max_gfn():
 1216  unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
 1218          const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
 1229          guest_maxphyaddr = guest_maxphyaddr ?: vm->pa_bits;
 1230          TEST_ASSERT(guest_maxphyaddr <= vm->pa_bits,
 1233          max_gfn = (1ULL << (guest_maxphyaddr - vm->page_shift)) - 1;
 1240          if (vm->pa_bits < 40)
 1257          max_pfn = (1ULL << (maxphyaddr - vm->page_shift)) - 1;
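
vm_compute_max_gfn() caps the usable gfn range at the guest's effective
MAXPHYADDR and, on AMD parts with 40 or more physical address bits, also
steers clear of the reserved HyperTransport region (parts below 40 bits
hide it entirely, hence the early return at line 1240). The 12 GiB
constant works out as follows for 4 KiB pages (page_shift = 12):

    num_ht_pages = 12 << (30 - 12) = 12 * 2^18 = 3,145,728 pages
                 = 3,145,728 * 4 KiB = 12 GiB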