/tools/testing/selftests/kvm/lib/x86/

sev.c
     28  if (!is_sev_snp_vm(vm))                                     in encrypt_region()
     38  if (is_sev_snp_vm(vm))                                      in encrypt_region()
     48  void sev_vm_init(struct kvm_vm *vm)                         in sev_vm_init() (argument)
    103  vm->arch.is_pt_protected = true;                            in sev_vm_launch()
    168  struct kvm_vm *vm;                                          in vm_sev_create_with_one_vcpu() (local)
    174  return vm;                                                  in vm_sev_create_with_one_vcpu()
    179  if (is_sev_snp_vm(vm)) {                                    in vm_sev_launch()
    184  snp_vm_launch_update(vm);                                   in vm_sev_launch()
    186  snp_vm_launch_finish(vm);                                   in vm_sev_launch()
    191  sev_vm_launch(vm, policy);                                  in vm_sev_launch()
    [all …]
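
Read together, the vm_sev_launch() hits above give the launch flow its shape: SNP VMs take the snp_vm_launch_update()/snp_vm_launch_finish() pair, while plain SEV and SEV-ES fall through to sev_vm_launch(). A minimal sketch of that branch, reconstructed only from the snippets shown here; anything the truncated listing hides (checks, an SNP launch-start step, any SEV measure/finish calls) is omitted:

/*
 * Sketch assembled from the vm_sev_launch() references above; assumes
 * the selftest harness (kvm_util.h, sev.h) and elides whatever the
 * truncated listing does not show.
 */
static void vm_sev_launch_sketch(struct kvm_vm *vm, uint32_t policy)
{
	if (is_sev_snp_vm(vm)) {
		/* SNP: encrypt/measure guest memory, then finalize. */
		snp_vm_launch_update(vm);
		snp_vm_launch_finish(vm);
		return;
	}

	/* SEV and SEV-ES share the single launch call. */
	sev_vm_launch(vm, policy);
}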
|
processor.c
    132  vm->pgd = vm_alloc_page_table(vm);                          in virt_arch_pgd_alloc()
    201  TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,       in __virt_pg_map()
    204  paddr, vm->max_gfn, vm->page_size);                         in __virt_pg_map()
    300  pml4e = virt_get_pte(vm, &vm->pgd, vaddr, PG_LEVEL_512G);   in __vm_get_page_table_entry()
    337  pml4e_start = (uint64_t *) addr_gpa2hva(vm, vm->pgd);       in virt_arch_dump()
    419  void *gdt = addr_gva2hva(vm, vm->arch.gdt);                 in kvm_seg_fill_gdt_64bit()
    540  (struct idt_entry *)addr_gva2hva(vm, vm->arch.idt);         in set_idt_entry()
    591  vm->arch.gdt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);  in vm_init_descriptor_tables()
    592  vm->arch.idt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);  in vm_init_descriptor_tables()
    593  vm->handlers = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);  in vm_init_descriptor_tables()
    [all …]
|
vmx.c
    380  pte->address = vm_alloc_page_table(vm) >> vm->page_shift;     in nested_create_pte()
    414  TEST_ASSERT((nested_paddr >> vm->page_shift) <= vm->max_gfn,  in __nested_pg_map()
    417  paddr, vm->max_gfn, vm->page_size);                           in __nested_pg_map()
    422  TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,         in __nested_pg_map()
    425  paddr, vm->max_gfn, vm->page_size);                           in __nested_pg_map()
    436  pt = addr_gpa2hva(vm, pte->address * vm->page_size);          in __nested_pg_map()
    502  memslot2region(vm, memslot);                                  in nested_map_memslot()
    511  nested_map(vmx, vm,                                           in nested_map_memslot()
    512  (uint64_t)i << vm->page_shift,                                in nested_map_memslot()
    513  (uint64_t)i << vm->page_shift,                                in nested_map_memslot()
    [all …]
|
/tools/testing/selftests/kvm/lib/arm64/

processor.c
    117  size_t nr_pages = page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size;   in virt_arch_pgd_alloc()
    147  paddr, vm->max_gfn, vm->page_size);                                       in _virt_pg_map()
    149  ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;              in _virt_pg_map()
    156  ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;  in _virt_pg_map()
    162  ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;  in _virt_pg_map()
    195  ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;                in virt_get_pte_hva()
    201  ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;    in virt_get_pte_hva()
    206  ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;    in virt_get_pte_hva()
    211  ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;    in virt_get_pte_hva()
    260  for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) {     in virt_arch_dump()
    [all …]
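
The virt_get_pte_hva() hits at lines 195-211 spell out the software page-table walk the library performs: at every level the table's guest-physical address is turned into a host pointer with addr_gpa2hva() and indexed by that level's VA field, eight bytes per descriptor. A condensed sketch of just the descent; the helper names are the library's own, but the per-level validity asserts of the real function are stripped:

/*
 * Four-level walk condensed from the references above; assumes the
 * selftest harness and elides the per-descriptor validity checks.
 */
static uint64_t *walk_sketch(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t *ptep;

	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
	ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
	ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
	ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;

	return ptep;
}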
|
ucall.c
     11  void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)                       in ucall_arch_init() (argument)
     13  vm_vaddr_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR);  in ucall_arch_init()
     15  virt_map(vm, mmio_gva, mmio_gpa, 1);                                               in ucall_arch_init()
     17  vm->ucall_mmio_addr = mmio_gpa;                                                    in ucall_arch_init()
     19  write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gva);              in ucall_arch_init()
     27  run->mmio.phys_addr == vcpu->vm->ucall_mmio_addr) {                                in ucall_arch_get_ucall()
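
These six hits cover the whole MMIO-based ucall handshake (the LoongArch ucall.c further down is line for line the same, only offset): reserve an unused page of guest VA, map it to the ucall MMIO GPA, remember that GPA on the host side, and point the guest-visible global at the mapping. A reconstruction of the init half from the snippets; assumes the selftest harness:

/*
 * Reconstructed from the references above. ucall_exit_mmio_addr is
 * the guest-side global the guest writes through in order to trap
 * out to the host.
 */
void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
{
	vm_vaddr_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size,
						  KVM_UTIL_MIN_VADDR);

	virt_map(vm, mmio_gva, mmio_gpa, 1);	/* map one page, GVA -> GPA */

	vm->ucall_mmio_addr = mmio_gpa;		/* host-side match key */

	write_guest_global(vm, ucall_exit_mmio_addr,
			   (vm_vaddr_t *)mmio_gva);	/* guest-side pointer */
}

On the host side, ucall_arch_get_ucall() (line 27) treats an MMIO exit as a ucall only when run->mmio.phys_addr matches the recorded vcpu->vm->ucall_mmio_addr.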
|
/tools/testing/selftests/kvm/s390/

cmma_test.c
    153  struct kvm_vm *vm;      in create_vm_two_memslots() (local)
    157  create_memslots(vm);    in create_vm_two_memslots()
    161  return vm;              in create_vm_two_memslots()
    238  enable_cmma(vm);        in test_get_cmma_basic()
    261  kvm_vm_free(vm);        in test_get_cmma_basic()
    289  enable_cmma(vm);        in test_migration_mode()
    367  kvm_vm_free(vm);        in test_migration_mode()
    452  enable_cmma(vm);        in test_get_initial_dirty()
    470  kvm_vm_free(vm);        in test_get_initial_dirty()
    511  enable_cmma(vm);        in test_get_skip_holes()
    [all …]
|
/tools/testing/selftests/kvm/lib/loongarch/

processor.c
     20  shift = level * (vm->page_shift - 3) + vm->page_shift;                in virt_pte_index()
     63  vm->pgd = table;                                                      in virt_arch_pgd_alloc()
     81  child = vm->pgd;                                                      in virt_populate_pte()
     98  ptep = addr_gpa2hva(vm, child) + virt_pte_index(vm, gva, level) * 8;  in virt_populate_pte()
    113  return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));             in addr_arch_gva2gpa()
    130  TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,                 in virt_arch_pg_map()
    133  paddr, vm->max_gfn, vm->page_size);                                   in virt_arch_pg_map()
    154  pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level--);       in pte_dump()
    166  pte_dump(stream, vm, indent, vm->pgd, level);                         in virt_arch_dump()
    234  struct kvm_vm *vm = vcpu->vm;                                         in loongarch_vcpu_setup() (local)
    [all …]
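
The virt_pte_index() hit at line 20 encodes why each translation level resolves (page_shift - 3) bits of VA: one page holds page_size / 8 eight-byte descriptors. A small worked sketch; the helper name and the 4 KiB example are mine, not from the listing:

/*
 * Per-level VA shift from line 20: each level decodes
 * (page_shift - 3) bits, since a page holds 2^(page_shift - 3)
 * 8-byte descriptors.
 */
static inline int level_shift(struct kvm_vm *vm, int level)
{
	return level * (vm->page_shift - 3) + vm->page_shift;
}

/*
 * With 4 KiB pages (page_shift = 12): level 0 -> shift 12,
 * level 1 -> 21, level 2 -> 30; i.e. 512 entries per table and
 * 2 MiB of VA covered by each level-1 entry.
 */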
|
ucall.c
     14  void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)                       in ucall_arch_init() (argument)
     16  vm_vaddr_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR);  in ucall_arch_init()
     18  virt_map(vm, mmio_gva, mmio_gpa, 1);                                               in ucall_arch_init()
     20  vm->ucall_mmio_addr = mmio_gpa;                                                    in ucall_arch_init()
     22  write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gva);              in ucall_arch_init()
     30  run->mmio.phys_addr == vcpu->vm->ucall_mmio_addr) {                                in ucall_arch_get_ucall()
|
/tools/testing/selftests/kvm/include/x86/

sev.h
     36  return vm->type == KVM_X86_SNP_VM;                           in is_sev_snp_vm()
     41  return is_sev_snp_vm(vm) || vm->type == KVM_X86_SEV_ES_VM;   in is_sev_es_vm()
     46  return is_sev_es_vm(vm) || vm->type == KVM_X86_SEV_VM;       in is_sev_vm()
     51  void sev_vm_launch_finish(struct kvm_vm *vm);
     53  void snp_vm_launch_update(struct kvm_vm *vm);
     79  #define __vm_sev_ioctl(vm, cmd, arg) \                       (argument)
     89  .sev_fd = (vm)->arch.sev_fd, \
     96  #define vm_sev_ioctl(vm, cmd, arg) \                         (argument)
    103  void sev_vm_init(struct kvm_vm *vm);
    104  void sev_es_vm_init(struct kvm_vm *vm);
    [all …]
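
Lines 36, 41 and 46 show that the VM-type predicates are deliberately chained, so each more-featureful type also passes the weaker checks (SNP implies SEV-ES implies SEV). Reassembled from those three snippets; the static inline qualifiers are assumed from the header context:

/* Chained type predicates from lines 36/41/46: SNP => SEV-ES => SEV. */
static inline bool is_sev_snp_vm(struct kvm_vm *vm)
{
	return vm->type == KVM_X86_SNP_VM;
}

static inline bool is_sev_es_vm(struct kvm_vm *vm)
{
	return is_sev_snp_vm(vm) || vm->type == KVM_X86_SEV_ES_VM;
}

static inline bool is_sev_vm(struct kvm_vm *vm)
{
	return is_sev_es_vm(vm) || vm->type == KVM_X86_SEV_VM;
}

This lets call sites gate SEV-common setup on a single is_sev_vm() check while still special-casing SNP, which is exactly what the sev.c references above do.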
|
/tools/testing/selftests/kvm/lib/s390/

processor.c
     18  vm->page_size);                                                           in virt_arch_pgd_alloc()
     20  if (vm->pgd_created)                                                      in virt_arch_pgd_alloc()
     26  memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);  in virt_arch_pgd_alloc()
     28  vm->pgd = paddr;                                                          in virt_arch_pgd_alloc()
     29  vm->pgd_created = true;                                                   in virt_arch_pgd_alloc()
     43  memset(addr_gpa2hva(vm, taddr), 0xff, PAGES_PER_REGION * vm->page_size);  in virt_alloc_region()
     67  TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,                       in virt_arch_pg_map()
     70  gva, vm->max_gfn, vm->page_size);                                         in virt_arch_pg_map()
     73  entry = addr_gpa2hva(vm, vm->pgd);                                        in virt_arch_pg_map()
     97  entry = addr_gpa2hva(vm, vm->pgd);                                        in addr_arch_gva2gpa()
    [all …]
|
/tools/testing/selftests/kvm/arm64/

smccc_filter.c
     64  struct kvm_vm *vm;    in setup_vm() (local)
     66  vm = vm_create(1);    in setup_vm()
     76  return vm;            in setup_vm()
    117  kvm_vm_free(vm);      in test_filter_reserved_range()
    130  kvm_vm_free(vm);      in test_invalid_nr_functions()
    143  kvm_vm_free(vm);      in test_overflow_nr_functions()
    156  kvm_vm_free(vm);      in test_reserved_action()
    173  kvm_vm_free(vm);      in test_filter_overlap()
    203  kvm_vm_free(vm);      in test_filter_denied()
    241  kvm_vm_free(vm);      in test_filter_fwd_to_user()
    [all …]
|
vcpu_width_config.c
     25  struct kvm_vm *vm;             in add_init_2vcpus() (local)
     28  vm = vm_create_barebones();    in add_init_2vcpus()
     30  vcpu0 = __vm_vcpu_add(vm, 0);  in add_init_2vcpus()
     35  vcpu1 = __vm_vcpu_add(vm, 1);  in add_init_2vcpus()
     39  kvm_vm_free(vm);               in add_init_2vcpus()
     51  struct kvm_vm *vm;             in add_2vcpus_init_2vcpus() (local)
     54  vm = vm_create_barebones();    in add_2vcpus_init_2vcpus()
     66  kvm_vm_free(vm);               in add_2vcpus_init_2vcpus()
     82  struct kvm_vm *vm;             in main() (local)
     88  vm = vm_create_barebones();    in main()
    [all …]
|
external_aborts.c
     36  vm_init_descriptor_tables(vm);                   in vm_create_with_dabt_handler()
     40  virt_map(vm, MMIO_ADDR, MMIO_ADDR, 1);           in vm_create_with_dabt_handler()
     42  return vm;                                       in vm_create_with_dabt_handler()
    137  kvm_vm_free(vm);                                 in test_mmio_abort()
    166  kvm_vm_free(vm);                                 in test_mmio_nisv()
    180  vm_enable_cap(vm, KVM_CAP_ARM_NISV_TO_USER, 1);  in test_mmio_nisv_abort()
    188  kvm_vm_free(vm);                                 in test_mmio_nisv_abort()
    215  kvm_vm_free(vm);                                 in test_serror_masked()
    250  kvm_vm_free(vm);                                 in test_serror()
    275  kvm_vm_free(vm);                                 in test_serror_emulated()
    [all …]
|
/tools/testing/selftests/kvm/x86/

nx_huge_pages_test.c
    109  struct kvm_vm *vm;           in run_test() (local)
    114  vm = vm_create(1);           in run_test()
    152  check_2m_page_count(vm, 0);  in run_test()
    153  check_split_count(vm, 0);    in run_test()
    160  check_2m_page_count(vm, 1);  in run_test()
    161  check_split_count(vm, 0);    in run_test()
    168  check_2m_page_count(vm, 2);  in run_test()
    169  check_split_count(vm, 0);    in run_test()
    206  check_split_count(vm, 0);    in run_test()
    214  check_split_count(vm, 0);    in run_test()
    [all …]
|
max_vcpuid_cap_test.c
     16  struct kvm_vm *vm;                                                 in main() (local)
     19  vm = vm_create_barebones();                                        in main()
     22  ret = vm_check_cap(vm, KVM_CAP_MAX_VCPU_ID);                       in main()
     25  ret = __vm_enable_cap(vm, KVM_CAP_MAX_VCPU_ID, ret + 1);           in main()
     31  vm_ioctl(vm, KVM_SET_BOOT_CPU_ID, (void *)MAX_VCPU_ID);            in main()
     40  vm_enable_cap(vm, KVM_CAP_MAX_VCPU_ID, MAX_VCPU_ID);               in main()
     43  ret = __vm_enable_cap(vm, KVM_CAP_MAX_VCPU_ID, MAX_VCPU_ID + 1);   in main()
     48  ret = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)MAX_VCPU_ID);        in main()
     52  ret = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(1L << 32));         in main()
     56  ret = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)0);                  in main()
    [all …]
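
The sequence of hits reads as the test's whole plot: query KVM_CAP_MAX_VCPU_ID, try to push it past the reported limit, pin it, then probe vCPU creation at the boundaries. A condensed sketch; the expected pass/fail outcomes are my reading of which calls use the asserting wrappers versus the __-prefixed variants that return errors, and MAX_VCPU_ID is the test's own constant:

/*
 * Condensed from the references above; assumes the selftest harness
 * ("kvm_util.h") and omits the KVM_SET_BOOT_CPU_ID and out-of-range-id
 * checks. Assertion messages are mine.
 */
#include "kvm_util.h"

int main(void)
{
	struct kvm_vm *vm = vm_create_barebones();
	int ret;

	ret = vm_check_cap(vm, KVM_CAP_MAX_VCPU_ID);

	/* Raising the limit past what KVM reported should be rejected. */
	ret = __vm_enable_cap(vm, KVM_CAP_MAX_VCPU_ID, ret + 1);
	TEST_ASSERT(ret < 0, "raising the limit past the max should fail");

	/* Pin the limit, then verify it cannot be moved afterwards. */
	vm_enable_cap(vm, KVM_CAP_MAX_VCPU_ID, MAX_VCPU_ID);
	ret = __vm_enable_cap(vm, KVM_CAP_MAX_VCPU_ID, MAX_VCPU_ID + 1);
	TEST_ASSERT(ret < 0, "changing the limit after it is set should fail");

	/* vCPU IDs must be strictly below the limit. */
	ret = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)MAX_VCPU_ID);
	TEST_ASSERT(ret < 0, "id == limit should be rejected");

	kvm_vm_free(vm);
	return 0;
}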
|
set_boot_cpu_id.c
     36  static void test_set_invalid_bsp(struct kvm_vm *vm)  in test_set_invalid_bsp() (argument)
     52  int r = __vm_ioctl(vcpu->vm, KVM_SET_BOOT_CPU_ID,    in test_set_bsp_busy()
     92  struct kvm_vm *vm;                                   in create_vm() (local)
     95  vm = vm_create(nr_vcpus);                            in create_vm()
     97  test_set_invalid_bsp(vm);                            in create_vm()
    104  return vm;                                           in create_vm()
    110  struct kvm_vm *vm;                                   in run_vm_bsp() (local)
    117  kvm_vm_free(vm);                                     in run_vm_bsp()
    123  struct kvm_vm *vm;                                   in check_set_bsp_busy() (local)
    125  vm = create_vm(ARRAY_SIZE(vcpus), 0, vcpus);         in check_set_bsp_busy()
    [all …]
|
dirty_log_page_splitting_test.c
     44  stats->pages_4k = vm_get_stat(vm, pages_4k);    in get_page_stats()
     45  stats->pages_2m = vm_get_stat(vm, pages_2m);    in get_page_stats()
     46  stats->pages_1g = vm_get_stat(vm, pages_1g);    in get_page_stats()
     90  struct kvm_vm *vm;                              in run_test() (local)
    129  run_vcpu_iteration(vm);                         in run_test()
    133  memstress_enable_dirty_logging(vm, SLOTS);      in run_test()
    138  run_vcpu_iteration(vm);                         in run_test()
    142  memstress_get_dirty_log(vm, bitmaps, SLOTS);    in run_test()
    152  memstress_disable_dirty_logging(vm, SLOTS);     in run_test()
    157  run_vcpu_iteration(vm);                         in run_test()
    [all …]
|
hyperv_extended_hypercalls.c
     42  struct kvm_vm *vm;                                             in main() (local)
     55  vm = vm_create_with_one_vcpu(&vcpu, guest_code);               in main()
     60  hcall_in_page = vm_vaddr_alloc_pages(vm, 1);                   in main()
     61  memset(addr_gva2hva(vm, hcall_in_page), 0x0, vm->page_size);   in main()
     64  hcall_out_page = vm_vaddr_alloc_pages(vm, 1);                  in main()
     65  memset(addr_gva2hva(vm, hcall_out_page), 0x0, vm->page_size);  in main()
     67  vcpu_args_set(vcpu, 3, addr_gva2gpa(vm, hcall_in_page),        in main()
     68  addr_gva2gpa(vm, hcall_out_page), hcall_out_page);             in main()
     76  outval = addr_gpa2hva(vm, run->hyperv.u.hcall.params[1]);      in main()
     96  kvm_vm_free(vm);                                               in main()
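
Lines 60-68 are a compact example of the harness's three address spaces in play: pages are allocated by guest VA, zeroed through the host alias (addr_gva2hva), and handed to the guest by guest PA (addr_gva2gpa). Reassembled from the snippets, with the surrounding declarations assumed:

/*
 * Reassembled from lines 60-68 above; declarations are assumed.
 * GVA = guest virtual, GPA = guest physical, HVA = the host-virtual
 * alias of the same memory.
 */
vm_vaddr_t hcall_in_page, hcall_out_page;

hcall_in_page = vm_vaddr_alloc_pages(vm, 1);			/* GVA */
memset(addr_gva2hva(vm, hcall_in_page), 0x0, vm->page_size);	/* zero via HVA */

hcall_out_page = vm_vaddr_alloc_pages(vm, 1);
memset(addr_gva2hva(vm, hcall_out_page), 0x0, vm->page_size);

/* The hypercall ABI wants GPAs; the guest also keeps the out-page GVA. */
vcpu_args_set(vcpu, 3, addr_gva2gpa(vm, hcall_in_page),
	      addr_gva2gpa(vm, hcall_out_page), hcall_out_page);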
|
private_mem_kvm_exits_test.c
     49  struct kvm_vm *vm;                                             in test_private_access_memslot_deleted() (local)
     55  vm = vm_create_shape_with_one_vcpu(protected_vm_shape, &vcpu,  in test_private_access_memslot_deleted()
     58  vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,          in test_private_access_memslot_deleted()
     66  vm_mem_set_private(vm, EXITS_TEST_GPA, EXITS_TEST_SIZE);       in test_private_access_memslot_deleted()
     72  vm_mem_region_delete(vm, EXITS_TEST_SLOT);                     in test_private_access_memslot_deleted()
     82  kvm_vm_free(vm);                                               in test_private_access_memslot_deleted()
     87  struct kvm_vm *vm;                                             in test_private_access_memslot_not_private() (local)
     91  vm = vm_create_shape_with_one_vcpu(protected_vm_shape, &vcpu,  in test_private_access_memslot_not_private()
     95  vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,          in test_private_access_memslot_not_private()
    102  vm_mem_set_private(vm, EXITS_TEST_GPA, EXITS_TEST_SIZE);       in test_private_access_memslot_not_private()
    [all …]
|
/tools/testing/selftests/kvm/lib/

kvm_util.c
     213  vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type);  in vm_open()
     217  vm->stats.fd = vm_get_stats_fd(vm);                                 in vm_open()
     285  0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);                  in vm_vaddr_populate_bitmap()
     288  (1ULL << (vm->va_bits - 1)) >> vm->page_shift);                     in vm_vaddr_populate_bitmap()
     295  vm = calloc(1, sizeof(*vm));                                        in ____vm_create()
     352  TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57,                 in ____vm_create()
     376  vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);                   in ____vm_create()
     386  vm->max_gfn = vm_compute_max_gfn(vm);                               in ____vm_create()
     391  return vm;                                                          in ____vm_create()
    1366  vcpu->vm = vm;                                                      in __vm_vcpu_add()
     [all …]
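
Line 213 is the one place the harness actually creates the VM: a KVM_CREATE_VM ioctl on the /dev/kvm fd, with the machine type as the argument. Stripped of the harness, that boils down to the standard KVM API sequence; this standalone sketch is an illustration, not the library's code:

/*
 * Standalone illustration of what line 213 performs through the
 * harness: open /dev/kvm and issue KVM_CREATE_VM with a type
 * argument (0 selects the default machine type).
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int create_vm_fd(unsigned long vm_type)
{
	int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);

	if (kvm_fd < 0)
		return -1;

	/* Returns a new VM fd on success; the /dev/kvm fd stays open. */
	return ioctl(kvm_fd, KVM_CREATE_VM, vm_type);
}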
|
memstress.c
    130  struct kvm_vm *vm;                            in memstress_create_vm() (local)
    174  args->vm = vm;                                in memstress_create_vm()
    177  region_end_gfn = vm->max_gfn + 1;             in memstress_create_vm()
    229  sync_global_to_guest(vm, memstress_args);     in memstress_create_vm()
    231  return vm;                                    in memstress_create_vm()
    234  void memstress_destroy_vm(struct kvm_vm *vm)  in memstress_destroy_vm() (argument)
    236  kvm_vm_free(vm);                              in memstress_destroy_vm()
    330  vm_mem_region_set_flags(vm, slot, flags);     in toggle_dirty_logging()
    336  toggle_dirty_logging(vm, slots, true);        in memstress_enable_dirty_logging()
    341  toggle_dirty_logging(vm, slots, false);       in memstress_disable_dirty_logging()
    [all …]
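
The last three hits show that enabling and disabling dirty logging are the same operation with a flipped flag, applied per memslot via vm_mem_region_set_flags(). A plausible shape for the toggler; KVM_MEM_LOG_DIRTY_PAGES is the standard KVM memslot flag, but the loop bounds and slot numbering are assumptions, not in the listing:

/*
 * Plausible reconstruction of toggle_dirty_logging(); only the
 * vm_mem_region_set_flags() call is shown by the listing, the slot
 * numbering here is hypothetical.
 */
static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
{
	uint32_t flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0;
	/* Hypothetical base index of the test's data memslots. */
	const int first_slot = 1;
	int i;

	for (i = 0; i < slots; i++)
		vm_mem_region_set_flags(vm, first_slot + i, flags);
}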
|
/tools/testing/selftests/kvm/

set_memory_region_test.c
    113  struct kvm_vm *vm;                                               in spawn_vm() (local)
    141  return vm;                                                       in spawn_vm()
    182  struct kvm_vm *vm;                                               in test_move_memory_region() (local)
    221  kvm_vm_free(vm);                                                 in test_move_memory_region()
    325  kvm_vm_free(vm);                                                 in test_delete_memory_region()
    342  kvm_vm_free(vm);                                                 in test_zero_memory_regions()
    461  kvm_vm_free(vm);                                                 in test_add_max_memory_regions()
    485  test_invalid_guest_memfd(vm, vm->fd, 0, "VM's fd should fail");  in test_add_private_memory_region()
    508  kvm_vm_free(vm);                                                 in test_add_private_memory_region()
    554  kvm_vm_free(vm);                                                 in test_add_overlapping_private_memory_regions()
    [all …]
|
/tools/testing/selftests/kvm/lib/riscv/

processor.c
     31  return (v + vm->page_size) & ~(vm->page_size - 1);                       in page_align()
     71  size_t nr_pages = page_align(vm, ptrs_per_pte(vm) * 8) / vm->page_size;  in virt_arch_pgd_alloc()
     76  vm->pgd = vm_phy_pages_alloc(vm, nr_pages,                               in virt_arch_pgd_alloc()
     96  TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,                    in virt_arch_pg_map()
     99  paddr, vm->max_gfn, vm->page_size);                                      in virt_arch_pg_map()
    101  ptep = addr_gpa2hva(vm, vm->pgd) + pte_index(vm, vaddr, level) * 8;      in virt_arch_pg_map()
    110  ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +                           in virt_arch_pg_map()
    134  ptep = addr_gpa2hva(vm, vm->pgd) + pte_index(vm, gva, level) * 8;        in addr_arch_gva2gpa()
    140  ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +                           in addr_arch_gva2gpa()
    185  for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pte(vm) * 8; pgd += 8) {    in virt_arch_dump()
    [all …]
|
/tools/testing/selftests/kvm/include/

kvm_util.h
     62  struct kvm_vm *vm;                                    (member)
    146  if (!((vcpu) = vm->vcpus[i])) \
    157  return memslot2region(vm, vm->memslots[type]);        in vm_get_mem_region()
    309  #define __vm_ioctl(vm, cmd, arg) \                    (argument)
    311  static_assert_is_vm(vm); \
    327  static_assert_is_vm(vm); \
    344  #define vm_ioctl(vm, cmd, arg) \                      (argument)
    532  kvm_irqfd(vm, gsi, eventfd, 0);                       in kvm_assign_irqfd()
    598  #define vm_get_stat(vm, stat) __get_stat(&(vm)->stats, stat)  (argument)
    714  return gpa & ~vm->gpa_tag_mask;                       in vm_untag_gpa()
    [all …]
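
The line-714 snippet is essentially the whole of vm_untag_gpa(): gpa_tag_mask holds physical-address bits that are attributes rather than address, such as a confidential-VM encryption bit, and clearing them recovers the usable GPA. The helper's shape below is taken from the listing; the bit-51 mask in the example is illustrative, not a real configuration:

/* Line 714 in context; assumes the harness's types. */
static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa)
{
	return gpa & ~vm->gpa_tag_mask;
}

/*
 * Example, assuming gpa_tag_mask = 1ULL << 51 (illustrative):
 *   vm_untag_gpa(vm, (1ULL << 51) | 0x1000) == 0x1000
 */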
|
/tools/testing/selftests/net/

test_vxlan_vnifiltering.sh
    196  setup-vm() {
    396  vm[11]=$vm_11
    397  vm[21]=$vm_21
    398  vm[12]=$vm_12
    399  vm[22]=$vm_22
    429  vm[11]=$vm_11
    430  vm[21]=$vm_21
    431  vm[12]=$vm_12
    432  vm[22]=$vm_22
    462  vm[11]=$vm_11
    [all …]
|