Lines matching refs:data
127 static void check_mmio_access(struct vm_data *data, struct kvm_run *run) in check_mmio_access() argument
129 TEST_ASSERT(data->mmio_ok, "Unexpected mmio exit"); in check_mmio_access()
133 TEST_ASSERT(run->mmio.phys_addr >= data->mmio_gpa_min && in check_mmio_access()
134 run->mmio.phys_addr <= data->mmio_gpa_max, in check_mmio_access()
141 struct vm_data *data = __data; in vcpu_worker() local
142 struct kvm_vcpu *vcpu = data->vcpu; in vcpu_worker()
158 check_mmio_access(data, run); in vcpu_worker()
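
The worker thread runs the vcpu and dispatches its exits; only the lines touching `data` appear above. A minimal sketch of the MMIO dispatch step, assuming the standard struct kvm_run layout from <linux/kvm.h> (handle_exit is a hypothetical name; the real worker also services the guest-sync exits elided here):

    #include <linux/kvm.h>

    struct vm_data;                 /* opaque here; defined by the test */
    void check_mmio_access(struct vm_data *data, struct kvm_run *run);

    /* Hypothetical helper: route one vcpu exit to the MMIO checker. */
    static void handle_exit(struct vm_data *data, struct kvm_run *run)
    {
            if (run->exit_reason == KVM_EXIT_MMIO)
                    check_mmio_access(data, run);  /* asserts mmio_ok + GPA window */
            /* other exit reasons (guest sync, shutdown) elided */
    }
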
188 static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages) in vm_gpa2hva() argument
193 uint32_t guest_page_size = data->vm->page_size; in vm_gpa2hva()
196 TEST_ASSERT(gpa < MEM_GPA + data->npages * guest_page_size, in vm_gpa2hva()
202 slot = min(gpage / data->pages_per_slot, (uint64_t)data->nslots - 1); in vm_gpa2hva()
203 slotoffs = gpage - (slot * data->pages_per_slot); in vm_gpa2hva()
208 if (slot == data->nslots - 1) in vm_gpa2hva()
209 slotpages = data->npages - slot * data->pages_per_slot; in vm_gpa2hva()
211 slotpages = data->pages_per_slot; in vm_gpa2hva()
218 base = data->hva_slots[slot]; in vm_gpa2hva()
222 static uint64_t vm_slot2gpa(struct vm_data *data, uint32_t slot) in vm_slot2gpa() argument
224 uint32_t guest_page_size = data->vm->page_size; in vm_slot2gpa()
226 TEST_ASSERT(slot < data->nslots, "Too high slot number"); in vm_slot2gpa()
228 return MEM_GPA + slot * data->pages_per_slot * guest_page_size; in vm_slot2gpa()
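
The two helpers above are inverses over the slot layout: vm_gpa2hva() resolves a GPA to a slot index plus an intra-slot page offset (the min() clamp routes pages past the equal split into the last slot, which holds the remainder), and vm_slot2gpa() maps a slot index back to its base GPA. A standalone sketch of just the arithmetic, with MEM_GPA, page size, page and slot counts as assumed example values:

    #include <stdint.h>
    #include <stdio.h>

    #define MEM_GPA        0x10000000ull  /* example base; the test's value may differ */
    #define PAGE_SIZE      4096ull
    #define NPAGES         1000ull
    #define NSLOTS         3ull
    #define PAGES_PER_SLOT (NPAGES / NSLOTS)

    int main(void)
    {
            uint64_t gpa = MEM_GPA + 700 * PAGE_SIZE;  /* a GPA inside the region */
            uint64_t gpage = (gpa - MEM_GPA) / PAGE_SIZE;

            /* Same clamp as vm_gpa2hva(): overflow pages land in the last slot. */
            uint64_t slot = gpage / PAGES_PER_SLOT;
            if (slot > NSLOTS - 1)
                    slot = NSLOTS - 1;
            uint64_t slotoffs = gpage - slot * PAGES_PER_SLOT;

            /* vm_slot2gpa()'s mapping: slot index -> base GPA. */
            uint64_t slot_base = MEM_GPA + slot * PAGES_PER_SLOT * PAGE_SIZE;

            printf("gpa=%#llx -> slot=%llu offs=%llu (slot base %#llx)\n",
                   (unsigned long long)gpa, (unsigned long long)slot,
                   (unsigned long long)slotoffs, (unsigned long long)slot_base);
            return 0;
    }
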
233 struct vm_data *data; in alloc_vm() local
235 data = malloc(sizeof(*data)); in alloc_vm()
236 TEST_ASSERT(data, "malloc(vmdata) failed"); in alloc_vm()
238 data->vm = NULL; in alloc_vm()
239 data->vcpu = NULL; in alloc_vm()
240 data->hva_slots = NULL; in alloc_vm()
242 return data; in alloc_vm()
261 static uint64_t get_max_slots(struct vm_data *data, uint32_t host_page_size) in get_max_slots() argument
263 uint32_t guest_page_size = data->vm->page_size; in get_max_slots()
267 mempages = data->npages; in get_max_slots()
268 slots = data->nslots; in get_max_slots()
283 static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots, in prepare_vm() argument
297 data->vm = __vm_create_with_one_vcpu(&data->vcpu, mempages, guest_code); in prepare_vm()
298 TEST_ASSERT(data->vm->page_size == guest_page_size, "Invalid VM page size"); in prepare_vm()
300 data->npages = mempages; in prepare_vm()
301 TEST_ASSERT(data->npages > 1, "Can't test without any memory"); in prepare_vm()
302 data->nslots = nslots; in prepare_vm()
303 data->pages_per_slot = data->npages / data->nslots; in prepare_vm()
304 rempages = data->npages % data->nslots; in prepare_vm()
306 data->pages_per_slot, rempages)) { in prepare_vm()
307 *maxslots = get_max_slots(data, host_page_size); in prepare_vm()
311 data->hva_slots = malloc(sizeof(*data->hva_slots) * data->nslots); in prepare_vm()
312 TEST_ASSERT(data->hva_slots, "malloc() fail"); in prepare_vm()
315 data->nslots, data->pages_per_slot, rempages); in prepare_vm()
318 for (slot = 1, guest_addr = MEM_GPA; slot <= data->nslots; slot++) { in prepare_vm()
321 npages = data->pages_per_slot; in prepare_vm()
322 if (slot == data->nslots) in prepare_vm()
325 vm_userspace_mem_region_add(data->vm, VM_MEM_SRC_ANONYMOUS, in prepare_vm()
332 for (slot = 1, guest_addr = MEM_GPA; slot <= data->nslots; slot++) { in prepare_vm()
336 npages = data->pages_per_slot; in prepare_vm()
337 if (slot == data->nslots) in prepare_vm()
340 gpa = vm_phy_pages_alloc(data->vm, npages, guest_addr, slot); in prepare_vm()
344 data->hva_slots[slot - 1] = addr_gpa2hva(data->vm, guest_addr); in prepare_vm()
345 memset(data->hva_slots[slot - 1], 0, npages * guest_page_size); in prepare_vm()
350 virt_map(data->vm, MEM_GPA, MEM_GPA, data->npages); in prepare_vm()
352 sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL); in prepare_vm()
353 sync->guest_page_size = data->vm->page_size; in prepare_vm()
358 data->mmio_ok = false; in prepare_vm()
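
prepare_vm() splits the test memory into nslots equal slots, and the `slot == data->nslots` special case above suggests the last slot also absorbs the division remainder (that adjustment line itself is elided, since it does not reference `data`). A standalone sketch of the resulting layout, with hypothetical sizes:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t npages = 1000, nslots = 3;
            uint64_t pages_per_slot = npages / nslots;  /* as in prepare_vm() */
            uint64_t rempages = npages % nslots;

            for (uint64_t slot = 1; slot <= nslots; slot++) {
                    uint64_t n = pages_per_slot;

                    /* Assumption: the last slot also takes the remainder,
                     * matching the slot == nslots special case in the test. */
                    if (slot == nslots)
                            n += rempages;
                    printf("slot %llu: %llu pages\n",
                           (unsigned long long)slot, (unsigned long long)n);
            }
            return 0;
    }
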
363 static void launch_vm(struct vm_data *data) in launch_vm() argument
367 pthread_create(&data->vcpu_thread, NULL, vcpu_worker, data); in launch_vm()
373 static void free_vm(struct vm_data *data) in free_vm() argument
375 kvm_vm_free(data->vm); in free_vm()
376 free(data->hva_slots); in free_vm()
377 free(data); in free_vm()
380 static void wait_guest_exit(struct vm_data *data) in wait_guest_exit() argument
382 pthread_join(data->vcpu_thread, NULL); in wait_guest_exit()
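
launch_vm(), wait_guest_exit() and free_vm() form a thin pthread lifecycle around the vcpu worker: create the thread with the vm_data as its argument, join it, then release the VM resources. A minimal standalone illustration of the same create/join pattern (the worker body is replaced by a stub):

    #include <pthread.h>
    #include <stdio.h>

    struct vm_data_stub { int dummy; };  /* stand-in for struct vm_data */

    static void *vcpu_worker_stub(void *__data)
    {
            struct vm_data_stub *data = __data;  /* same idiom as vcpu_worker() */
            (void)data;
            /* real worker: run the vcpu until the guest signals completion */
            return NULL;
    }

    int main(void)
    {
            struct vm_data_stub data = { 0 };
            pthread_t vcpu_thread;

            pthread_create(&vcpu_thread, NULL, vcpu_worker_stub, &data); /* launch_vm() */
            pthread_join(vcpu_thread, NULL);                             /* wait_guest_exit() */
            puts("worker joined; free_vm() would release VM state here");
            return 0;
    }
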
577 static bool test_memslot_move_prepare(struct vm_data *data, in test_memslot_move_prepare() argument
581 uint32_t guest_page_size = data->vm->page_size; in test_memslot_move_prepare()
586 vm_enable_cap(data->vm, KVM_CAP_DISABLE_QUIRKS2, KVM_X86_QUIRK_SLOT_ZAP_ALL); in test_memslot_move_prepare()
589 movesrcgpa = vm_slot2gpa(data, data->nslots - 1); in test_memslot_move_prepare()
594 vm_gpa2hva(data, movesrcgpa, &lastpages); in test_memslot_move_prepare()
605 data->mmio_ok = true; in test_memslot_move_prepare()
606 data->mmio_gpa_min = movesrcgpa; in test_memslot_move_prepare()
607 data->mmio_gpa_max = movesrcgpa + MEM_TEST_MOVE_SIZE / 2 - 1; in test_memslot_move_prepare()
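
The prepare step arms an inclusive MMIO window covering half of MEM_TEST_MOVE_SIZE from the moved slot's base; check_mmio_access() (line 133 above) then accepts only exits whose phys_addr falls inside [mmio_gpa_min, mmio_gpa_max]. A standalone sketch of the window arithmetic and the inclusive bounds test, with MEM_TEST_MOVE_SIZE and the slot base as assumed example values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MEM_TEST_MOVE_SIZE (64ull << 20)  /* example; real value is in the test */

    static bool mmio_in_window(uint64_t phys_addr, uint64_t min, uint64_t max)
    {
            return phys_addr >= min && phys_addr <= max;  /* inclusive, as asserted */
    }

    int main(void)
    {
            uint64_t movesrcgpa = 0x80000000ull;  /* hypothetical slot base */
            uint64_t min = movesrcgpa;
            uint64_t max = movesrcgpa + MEM_TEST_MOVE_SIZE / 2 - 1;

            printf("window [%#llx, %#llx]: probe %#llx -> %s\n",
                   (unsigned long long)min, (unsigned long long)max,
                   (unsigned long long)(max + 1),
                   mmio_in_window(max + 1, min, max) ? "ok" : "rejected");
            return 0;
    }
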
613 static bool test_memslot_move_prepare_active(struct vm_data *data, in test_memslot_move_prepare_active() argument
617 return test_memslot_move_prepare(data, sync, maxslots, true); in test_memslot_move_prepare_active()
620 static bool test_memslot_move_prepare_inactive(struct vm_data *data, in test_memslot_move_prepare_inactive() argument
624 return test_memslot_move_prepare(data, sync, maxslots, false); in test_memslot_move_prepare_inactive()
627 static void test_memslot_move_loop(struct vm_data *data, struct sync_area *sync) in test_memslot_move_loop() argument
631 movesrcgpa = vm_slot2gpa(data, data->nslots - 1); in test_memslot_move_loop()
632 vm_mem_region_move(data->vm, data->nslots - 1 + 1, in test_memslot_move_loop()
634 vm_mem_region_move(data->vm, data->nslots - 1 + 1, movesrcgpa); in test_memslot_move_loop()
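
The `data->nslots - 1 + 1` looks redundant but encodes a real offset: slot indices in vm_data are 0-based (see `data->hva_slots[slot - 1]` at line 344), while the KVM memslot IDs are registered starting at 1 (the `slot = 1` loops in prepare_vm), so index i corresponds to memslot ID i + 1. A one-line hypothetical helper makes the intent explicit:

    #include <stdint.h>

    /* Hypothetical helper: 0-based test slot index -> 1-based KVM memslot ID,
     * matching prepare_vm()'s registration loop that starts at slot 1. */
    static inline uint32_t slot_index_to_id(uint32_t idx)
    {
            return idx + 1;
    }
    /* e.g. the last slot: slot_index_to_id(nslots - 1) == nslots - 1 + 1 */
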
637 static void test_memslot_do_unmap(struct vm_data *data, in test_memslot_do_unmap() argument
641 uint32_t guest_page_size = data->vm->page_size; in test_memslot_do_unmap()
648 hva = vm_gpa2hva(data, gpa, &npages); in test_memslot_do_unmap()
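
test_memslot_do_unmap() translates the guest range to an HVA and then discards the backing pages; the discard call itself is elided above because it does not mention `data`. One plausible mechanism for anonymous memory is madvise(MADV_DONTNEED), sketched standalone here (the mapping and sizes are illustrative):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 16 * 4096;
            char *hva = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (hva == MAP_FAILED)
                    return 1;

            memset(hva, 0xaa, len);
            /* Drop the pages; anonymous memory reads back as zeroes afterwards. */
            if (madvise(hva, len, MADV_DONTNEED))
                    perror("madvise");
            printf("first byte after discard: %#x\n", hva[0]);  /* prints 0 */
            munmap(hva, len);
            return 0;
    }
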
662 static void test_memslot_map_unmap_check(struct vm_data *data, in test_memslot_map_unmap_check() argument
667 uint32_t guest_page_size = data->vm->page_size; in test_memslot_map_unmap_check()
673 val = (typeof(val))vm_gpa2hva(data, gpa, NULL); in test_memslot_map_unmap_check()
680 static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync) in test_memslot_map_loop() argument
682 uint32_t guest_page_size = data->vm->page_size; in test_memslot_map_loop()
689 test_memslot_do_unmap(data, guest_pages / 2, guest_pages / 2); in test_memslot_map_loop()
699 test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1); in test_memslot_map_loop()
700 test_memslot_map_unmap_check(data, guest_pages / 2 - 1, MEM_TEST_VAL_1); in test_memslot_map_loop()
701 test_memslot_do_unmap(data, 0, guest_pages / 2); in test_memslot_map_loop()
714 test_memslot_map_unmap_check(data, guest_pages / 2, MEM_TEST_VAL_2); in test_memslot_map_loop()
715 test_memslot_map_unmap_check(data, guest_pages - 1, MEM_TEST_VAL_2); in test_memslot_map_loop()
718 static void test_memslot_unmap_loop_common(struct vm_data *data, in test_memslot_unmap_loop_common() argument
722 uint32_t guest_page_size = data->vm->page_size; in test_memslot_unmap_loop_common()
734 test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1); in test_memslot_unmap_loop_common()
736 test_memslot_do_unmap(data, ctr, chunk); in test_memslot_unmap_loop_common()
740 test_memslot_map_unmap_check(data, guest_pages / 2, MEM_TEST_VAL_2); in test_memslot_unmap_loop_common()
742 test_memslot_do_unmap(data, ctr, chunk); in test_memslot_unmap_loop_common()
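
test_memslot_unmap_loop_common() tears down each half of the test area in `chunk`-page steps rather than one call, so the loops above advance `ctr` by `chunk` between test_memslot_do_unmap() calls. A standalone sketch of that chunked walk over one half, with assumed sizes:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t guest_pages = 64, chunk = 8;

            /* Walk the first half in chunk-page steps, as the unmap loop does. */
            for (uint64_t ctr = 0; ctr < guest_pages / 2; ctr += chunk)
                    printf("unmap pages [%llu, %llu)\n",
                           (unsigned long long)ctr,
                           (unsigned long long)(ctr + chunk));
            return 0;
    }
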
745 static void test_memslot_unmap_loop(struct vm_data *data, in test_memslot_unmap_loop() argument
749 uint32_t guest_page_size = data->vm->page_size; in test_memslot_unmap_loop()
753 test_memslot_unmap_loop_common(data, sync, guest_chunk_pages); in test_memslot_unmap_loop()
756 static void test_memslot_unmap_loop_chunked(struct vm_data *data, in test_memslot_unmap_loop_chunked() argument
759 uint32_t guest_page_size = data->vm->page_size; in test_memslot_unmap_loop_chunked()
762 test_memslot_unmap_loop_common(data, sync, guest_chunk_pages); in test_memslot_unmap_loop_chunked()
765 static void test_memslot_rw_loop(struct vm_data *data, struct sync_area *sync) in test_memslot_rw_loop() argument
768 uint32_t guest_page_size = data->vm->page_size; in test_memslot_rw_loop()
772 *(uint64_t *)vm_gpa2hva(data, gptr, NULL) = MEM_TEST_VAL_2; in test_memslot_rw_loop()
778 uint64_t *vptr = (typeof(vptr))vm_gpa2hva(data, gptr, NULL); in test_memslot_rw_loop()
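
In the read/write loop the host stores MEM_TEST_VAL_2 through vm_gpa2hva() and separately reads values back through a uint64_t pointer; the stride lines are elided above. A standalone sketch of stamping a sentinel word at a fixed page stride across a buffer, assuming an every-other-page stride and example constants (the sentinel value here is hypothetical):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE       4096u
    #define MEM_TEST_VAL_2  0x1122334455667788ull  /* hypothetical sentinel */

    int main(void)
    {
            unsigned int npages = 8;
            uint8_t *base = calloc(npages, PAGE_SIZE);

            if (!base)
                    return 1;
            /* Host side: stamp the first word of every other page. */
            for (unsigned int pg = 0; pg < npages; pg += 2)
                    *(uint64_t *)(base + (size_t)pg * PAGE_SIZE) = MEM_TEST_VAL_2;

            printf("page 0 word: %#llx\n", (unsigned long long)*(uint64_t *)base);
            free(base);
            return 0;
    }
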
794 bool (*prepare)(struct vm_data *data, struct sync_area *sync,
796 void (*loop)(struct vm_data *data, struct sync_area *sync);
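
Each test is described by a struct test_data whose prepare/loop function pointers take the vm_data and sync area, letting test_execute() drive every benchmark through one generic path. A minimal standalone sketch of the same ops-table pattern (types and fields here are illustrative stubs, not the test's full struct):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct vm_data_stub { int dummy; };
    struct sync_area_stub { int dummy; };

    struct test_ops {
            const char *name;
            bool (*prepare)(struct vm_data_stub *data, struct sync_area_stub *sync,
                            uint64_t *maxslots);
            void (*loop)(struct vm_data_stub *data, struct sync_area_stub *sync);
    };

    static bool demo_prepare(struct vm_data_stub *d, struct sync_area_stub *s,
                             uint64_t *maxslots)
    { (void)d; (void)s; (void)maxslots; return true; }

    static void demo_loop(struct vm_data_stub *d, struct sync_area_stub *s)
    { (void)d; (void)s; puts("one benchmark iteration"); }

    int main(void)
    {
            struct test_ops t = { "demo", demo_prepare, demo_loop };
            struct vm_data_stub data; struct sync_area_stub sync;

            if (t.prepare(&data, &sync, NULL))  /* optional prepare hook */
                    t.loop(&data, &sync);       /* timed loop body */
            return 0;
    }
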
807 struct vm_data *data; in test_execute() local
812 data = alloc_vm(); in test_execute()
813 if (!prepare_vm(data, nslots, maxslots, tdata->guest_code, in test_execute()
819 sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL); in test_execute()
821 !tdata->prepare(data, sync, maxslots)) { in test_execute()
826 launch_vm(data); in test_execute()
836 tdata->loop(data, sync); in test_execute()
842 wait_guest_exit(data); in test_execute()
845 free_vm(data); in test_execute()
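
test_execute() strings the helpers above into one lifecycle: allocate, prepare (bailing out with a maxslots hint if the slot count does not fit), resolve the sync area, run the test's optional prepare hook, launch the vcpu thread, invoke the test's loop until time is up, then join and free. A standalone sketch of that ordering with goto-based cleanup, using stubs in place of the real helpers (the timing loop is paraphrased, since those lines are elided):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct vm_stub { int launched; };

    static struct vm_stub *alloc_stub(void) { return calloc(1, sizeof(struct vm_stub)); }
    static bool prepare_stub(struct vm_stub *v) { (void)v; return true; }

    int main(void)
    {
            struct vm_stub *data = alloc_stub();  /* alloc_vm() */
            bool ok = false;

            if (!data || !prepare_stub(data))     /* prepare_vm() + prepare hook */
                    goto out_free;                /* on failure, report maxslots hint */

            data->launched = 1;                   /* launch_vm() */
            puts("repeat loop body until timeout"); /* tdata->loop(data, sync) */
            ok = true;
            /* wait_guest_exit(data) would join the vcpu thread here */

    out_free:
            free(data);                           /* free_vm() */
            return ok ? 0 : 1;
    }
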
1046 static bool test_loop(const struct test_data *data, in test_loop() argument
1054 if (!test_execute(targs->nslots, &maxslots, targs->seconds, data, in test_loop()
1087 if (!data->mem_size && in test_loop()
1117 const struct test_data *data = &tests[tctr]; in main() local
1125 data->name, targs.runs, targs.seconds); in main()
1128 if (!test_loop(data, &targs, in main()