Lines matching refs: region

These are identifier cross-reference hits from the KVM selftests memory-region code; each entry gives the source line number, the matched line, and the enclosing function ('local' and 'argument' mark declaration hits).

414 	struct userspace_mem_region *region;  in kvm_vm_restart()  local
420 hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) { in kvm_vm_restart()
421 int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in kvm_vm_restart()
426 ret, errno, region->region.slot, in kvm_vm_restart()
427 region->region.flags, in kvm_vm_restart()
428 region->region.guest_phys_addr, in kvm_vm_restart()
429 region->region.memory_size); in kvm_vm_restart()
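
The hits above are the region-related lines of kvm_vm_restart(), which walks the VM's slot hash and re-registers every remembered memslot with KVM. A minimal reconstruction consistent with those hits (signature and assert wording are assumptions, the surrounding re-creation of the VM fd is omitted, and all of the sketches below assume the usual selftest includes such as kvm_util.h and the rbtree/hash helpers are in scope):

    void kvm_vm_restart(struct kvm_vm *vmp)
    {
        int ctr;
        struct userspace_mem_region *region;

        /* Replay every tracked memslot into the freshly (re)created VM. */
        hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) {
            int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region);

            TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
                        "  rc: %i errno: %i\n"
                        "  slot: %u flags: 0x%x\n"
                        "  guest_phys_addr: 0x%llx size: 0x%llx",
                        ret, errno, region->region.slot,
                        region->region.flags,
                        region->region.guest_phys_addr,
                        region->region.memory_size);
        }
    }
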
486 struct userspace_mem_region *region = in userspace_mem_region_find() local
488 uint64_t existing_start = region->region.guest_phys_addr; in userspace_mem_region_find()
489 uint64_t existing_end = region->region.guest_phys_addr in userspace_mem_region_find()
490 + region->region.memory_size - 1; in userspace_mem_region_find()
492 return region; in userspace_mem_region_find()
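
The overlap test in the hits above sits inside userspace_mem_region_find(), which searches the GPA-sorted rb-tree for a region intersecting [start, end]. One plausible shape for the full search loop (the rb-tree descent is inferred; only the overlap arithmetic appears in the hits):

    static struct userspace_mem_region *
    userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
    {
        struct rb_node *node;

        for (node = vm->regions.gpa_tree.rb_node; node; ) {
            struct userspace_mem_region *region =
                container_of(node, struct userspace_mem_region, gpa_node);
            uint64_t existing_start = region->region.guest_phys_addr;
            uint64_t existing_end = region->region.guest_phys_addr
                                    + region->region.memory_size - 1;

            /* Any overlap between [start, end] and the existing region? */
            if (start <= existing_end && end >= existing_start)
                return region;

            if (start < existing_start)
                node = node->rb_left;
            else
                node = node->rb_right;
        }

        return NULL;
    }
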
523 struct userspace_mem_region *region; in kvm_userspace_memory_region_find() local
525 region = userspace_mem_region_find(vm, start, end); in kvm_userspace_memory_region_find()
526 if (!region) in kvm_userspace_memory_region_find()
529 return &region->region; in kvm_userspace_memory_region_find()
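
kvm_userspace_memory_region_find() is a thin public wrapper over that lookup, handing back the kernel-visible struct rather than the selftest bookkeeping record; the hits give essentially the whole body:

    struct kvm_userspace_memory_region *
    kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
                                     uint64_t end)
    {
        struct userspace_mem_region *region;

        region = userspace_mem_region_find(vm, start, end);
        if (!region)
            return NULL;

        /* Return the embedded struct kvm_userspace_memory_region. */
        return &region->region;
    }
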
612 struct userspace_mem_region *region, in __vm_mem_region_delete() argument
618 rb_erase(&region->gpa_node, &vm->regions.gpa_tree); in __vm_mem_region_delete()
619 rb_erase(&region->hva_node, &vm->regions.hva_tree); in __vm_mem_region_delete()
620 hash_del(&region->slot_node); in __vm_mem_region_delete()
623 region->region.memory_size = 0; in __vm_mem_region_delete()
624 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in __vm_mem_region_delete()
628 sparsebit_free(&region->unused_phy_pages); in __vm_mem_region_delete()
629 ret = munmap(region->mmap_start, region->mmap_size); in __vm_mem_region_delete()
632 free(region); in __vm_mem_region_delete()
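
__vm_mem_region_delete() tears a region down in roughly the reverse order it was built: unlink it from the gpa/hva trees and the slot hash, ask KVM to drop the slot by setting its size to zero, then release the host-side resources. A sketch consistent with the hits (the name of the third parameter, the alias/memfd cleanup, and the assert wording are inferred from the other groups in this listing):

    static void __vm_mem_region_delete(struct kvm_vm *vm,
                                       struct userspace_mem_region *region,
                                       bool unlink)
    {
        int ret;

        if (unlink) {
            rb_erase(&region->gpa_node, &vm->regions.gpa_tree);
            rb_erase(&region->hva_node, &vm->regions.hva_tree);
            hash_del(&region->slot_node);
        }

        /* A memory_size of zero tells KVM to delete the slot. */
        region->region.memory_size = 0;
        ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
        TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION failed, rc: %i errno: %i",
                    ret, errno);

        sparsebit_free(&region->unused_phy_pages);
        ret = munmap(region->mmap_start, region->mmap_size);
        TEST_ASSERT(ret == 0, "munmap failed, rc: %i errno: %i", ret, errno);

        if (region->fd >= 0) {
            /* The memfd-backed case also has an alias mapping to drop. */
            ret = munmap(region->mmap_alias, region->mmap_size);
            TEST_ASSERT(ret == 0, "munmap failed, rc: %i errno: %i", ret, errno);
            close(region->fd);
        }

        free(region);
    }
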
642 struct userspace_mem_region *region; in kvm_vm_free() local
648 hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node) in kvm_vm_free()
649 __vm_mem_region_delete(vmp, region, false); in kvm_vm_free()
732 struct userspace_mem_region *region) in vm_userspace_mem_region_gpa_insert() argument
741 if (region->region.guest_phys_addr < in vm_userspace_mem_region_gpa_insert()
742 cregion->region.guest_phys_addr) in vm_userspace_mem_region_gpa_insert()
745 TEST_ASSERT(region->region.guest_phys_addr != in vm_userspace_mem_region_gpa_insert()
746 cregion->region.guest_phys_addr, in vm_userspace_mem_region_gpa_insert()
753 rb_link_node(&region->gpa_node, parent, cur); in vm_userspace_mem_region_gpa_insert()
754 rb_insert_color(&region->gpa_node, gpa_tree); in vm_userspace_mem_region_gpa_insert()
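
vm_userspace_mem_region_gpa_insert() is a standard rb-tree insertion keyed on guest_phys_addr, asserting that no two regions share a base GPA. A reconstruction around the hits (the cursor handling is the usual rb_link_node idiom):

    static void vm_userspace_mem_region_gpa_insert(struct rb_root *gpa_tree,
                                                   struct userspace_mem_region *region)
    {
        struct rb_node **cur, *parent;

        for (cur = &gpa_tree->rb_node, parent = NULL; *cur; ) {
            struct userspace_mem_region *cregion =
                container_of(*cur, struct userspace_mem_region, gpa_node);

            parent = *cur;
            if (region->region.guest_phys_addr <
                cregion->region.guest_phys_addr) {
                cur = &(*cur)->rb_left;
            } else {
                TEST_ASSERT(region->region.guest_phys_addr !=
                            cregion->region.guest_phys_addr,
                            "Duplicate guest physical address in region tree");
                cur = &(*cur)->rb_right;
            }
        }

        rb_link_node(&region->gpa_node, parent, cur);
        rb_insert_color(&region->gpa_node, gpa_tree);
    }

vm_userspace_mem_region_hva_insert(), whose hits follow, has the same shape but keys on region->host_mem.
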
758 struct userspace_mem_region *region) in vm_userspace_mem_region_hva_insert() argument
767 if (region->host_mem < cregion->host_mem) in vm_userspace_mem_region_hva_insert()
770 TEST_ASSERT(region->host_mem != in vm_userspace_mem_region_hva_insert()
778 rb_link_node(&region->hva_node, parent, cur); in vm_userspace_mem_region_hva_insert()
779 rb_insert_color(&region->hva_node, hva_tree); in vm_userspace_mem_region_hva_insert()
810 struct userspace_mem_region *region; in vm_userspace_mem_region_add() local
833 region = (struct userspace_mem_region *) userspace_mem_region_find( in vm_userspace_mem_region_add()
835 if (region != NULL) in vm_userspace_mem_region_add()
842 (uint64_t) region->region.guest_phys_addr, in vm_userspace_mem_region_add()
843 (uint64_t) region->region.memory_size); in vm_userspace_mem_region_add()
846 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in vm_userspace_mem_region_add()
848 if (region->region.slot != slot) in vm_userspace_mem_region_add()
856 region->region.slot, in vm_userspace_mem_region_add()
857 (uint64_t) region->region.guest_phys_addr, in vm_userspace_mem_region_add()
858 (uint64_t) region->region.memory_size); in vm_userspace_mem_region_add()
862 region = calloc(1, sizeof(*region)); in vm_userspace_mem_region_add()
863 TEST_ASSERT(region != NULL, "Insufficient Memory"); in vm_userspace_mem_region_add()
864 region->mmap_size = npages * vm->page_size; in vm_userspace_mem_region_add()
886 region->mmap_size += alignment; in vm_userspace_mem_region_add()
888 region->fd = -1; in vm_userspace_mem_region_add()
895 region->fd = memfd_create("kvm_selftest", memfd_flags); in vm_userspace_mem_region_add()
896 TEST_ASSERT(region->fd != -1, in vm_userspace_mem_region_add()
899 ret = ftruncate(region->fd, region->mmap_size); in vm_userspace_mem_region_add()
902 ret = fallocate(region->fd, in vm_userspace_mem_region_add()
904 region->mmap_size); in vm_userspace_mem_region_add()
908 region->mmap_start = mmap(NULL, region->mmap_size, in vm_userspace_mem_region_add()
911 region->fd, 0); in vm_userspace_mem_region_add()
912 TEST_ASSERT(region->mmap_start != MAP_FAILED, in vm_userspace_mem_region_add()
914 region->mmap_start, errno); in vm_userspace_mem_region_add()
917 region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz), in vm_userspace_mem_region_add()
919 region->mmap_start, backing_src_pagesz); in vm_userspace_mem_region_add()
922 region->host_mem = align_ptr_up(region->mmap_start, alignment); in vm_userspace_mem_region_add()
927 ret = madvise(region->host_mem, npages * vm->page_size, in vm_userspace_mem_region_add()
930 region->host_mem, npages * vm->page_size, in vm_userspace_mem_region_add()
934 region->unused_phy_pages = sparsebit_alloc(); in vm_userspace_mem_region_add()
935 sparsebit_set_num(region->unused_phy_pages, in vm_userspace_mem_region_add()
937 region->region.slot = slot; in vm_userspace_mem_region_add()
938 region->region.flags = flags; in vm_userspace_mem_region_add()
939 region->region.guest_phys_addr = guest_paddr; in vm_userspace_mem_region_add()
940 region->region.memory_size = npages * vm->page_size; in vm_userspace_mem_region_add()
941 region->region.userspace_addr = (uintptr_t) region->host_mem; in vm_userspace_mem_region_add()
942 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_userspace_mem_region_add()
948 guest_paddr, (uint64_t) region->region.memory_size); in vm_userspace_mem_region_add()
951 vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region); in vm_userspace_mem_region_add()
952 vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region); in vm_userspace_mem_region_add()
953 hash_add(vm->regions.slot_hash, &region->slot_node, slot); in vm_userspace_mem_region_add()
956 if (region->fd >= 0) { in vm_userspace_mem_region_add()
957 region->mmap_alias = mmap(NULL, region->mmap_size, in vm_userspace_mem_region_add()
960 region->fd, 0); in vm_userspace_mem_region_add()
961 TEST_ASSERT(region->mmap_alias != MAP_FAILED, in vm_userspace_mem_region_add()
965 region->host_alias = align_ptr_up(region->mmap_alias, alignment); in vm_userspace_mem_region_add()
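
The long group above is vm_userspace_mem_region_add(): reject overlaps with existing regions and reused slot numbers, mmap (optionally memfd-backed) host memory, carve out an aligned host_mem window, seed the unused_phy_pages sparsebit, register the slot via KVM_SET_USER_MEMORY_REGION, and finally link the region into the gpa tree, the hva tree and the slot hash. From a test's point of view the interesting part is the call site; a hypothetical usage sketch (the GPA, slot and page-count values are made up, and the 4 KiB page assumption is only for the memset length):

    #include <string.h>
    #include "kvm_util.h"

    #define DEMO_GPA    0x10000000UL   /* arbitrary guest-physical base  */
    #define DEMO_SLOT   1              /* any otherwise unused memslot   */
    #define DEMO_PAGES  512            /* number of guest pages to back  */

    static void add_demo_memslot(struct kvm_vm *vm)
    {
        /* Back DEMO_PAGES guest pages at DEMO_GPA with anonymous memory. */
        vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
                                    DEMO_GPA, DEMO_SLOT, DEMO_PAGES, 0);

        /* The host can now reach the region through addr_gpa2hva(). */
        memset(addr_gpa2hva(vm, DEMO_GPA), 0, DEMO_PAGES * 4096);
    }
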
987 struct userspace_mem_region *region; in memslot2region() local
989 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in memslot2region()
991 if (region->region.slot == memslot) in memslot2region()
992 return region; in memslot2region()
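
memslot2region() maps a memslot number back to its bookkeeping record by probing the slot hash; roughly (the failure path is an assumption):

    static struct userspace_mem_region *
    memslot2region(struct kvm_vm *vm, uint32_t memslot)
    {
        struct userspace_mem_region *region;

        hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
                               memslot)
            if (region->region.slot == memslot)
                return region;

        TEST_FAIL("Memory region for slot %u not found", memslot);
        return NULL;
    }
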
1019 struct userspace_mem_region *region; in vm_mem_region_set_flags() local
1021 region = memslot2region(vm, slot); in vm_mem_region_set_flags()
1023 region->region.flags = flags; in vm_mem_region_set_flags()
1025 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_mem_region_set_flags()
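
vm_mem_region_set_flags() reuses that lookup to update a live slot: change the cached flags and push the whole struct back to KVM (assert wording assumed). Tests typically use it to toggle KVM_MEM_LOG_DIRTY_PAGES on an existing slot.

    void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
    {
        int ret;
        struct userspace_mem_region *region;

        region = memslot2region(vm, slot);

        region->region.flags = flags;

        ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
        TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
                    "  rc: %i errno: %i slot: %u flags: 0x%x",
                    ret, errno, slot, flags);
    }
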
1048 struct userspace_mem_region *region; in vm_mem_region_move() local
1051 region = memslot2region(vm, slot); in vm_mem_region_move()
1053 region->region.guest_phys_addr = new_gpa; in vm_mem_region_move()
1055 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_mem_region_move()
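
vm_mem_region_move() follows the same pattern with guest_phys_addr instead of flags, letting a test relocate an existing slot at runtime (sketch; error message wording assumed):

    void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
    {
        int ret;
        struct userspace_mem_region *region;

        region = memslot2region(vm, slot);

        region->region.guest_phys_addr = new_gpa;

        ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
        TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION failed\n"
                    "  ret: %i errno: %i slot: %u new_gpa: 0x%lx",
                    ret, errno, slot, new_gpa);
    }
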
1380 struct userspace_mem_region *region; in addr_gpa2hva() local
1382 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2hva()
1383 if (!region) { in addr_gpa2hva()
1388 return (void *)((uintptr_t)region->host_mem in addr_gpa2hva()
1389 + (gpa - region->region.guest_phys_addr)); in addr_gpa2hva()
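
addr_gpa2hva() translates a guest-physical address to a host-virtual one by finding the containing region and adding the offset; the hits cover nearly the entire function (only the failure message is assumed):

    void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
    {
        struct userspace_mem_region *region;

        region = userspace_mem_region_find(vm, gpa, gpa);
        if (!region) {
            TEST_FAIL("No vm physical memory at 0x%lx", gpa);
            return NULL;
        }

        return (void *)((uintptr_t)region->host_mem
                        + (gpa - region->region.guest_phys_addr));
    }
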
1414 struct userspace_mem_region *region = in addr_hva2gpa() local
1417 if (hva >= region->host_mem) { in addr_hva2gpa()
1418 if (hva <= (region->host_mem in addr_hva2gpa()
1419 + region->region.memory_size - 1)) in addr_hva2gpa()
1421 region->region.guest_phys_addr in addr_hva2gpa()
1422 + (hva - (uintptr_t)region->host_mem)); in addr_hva2gpa()
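
addr_hva2gpa() is the reverse translation, walking the hva-sorted rb-tree instead; a reconstruction (the rb-tree descent around the hits is inferred):

    vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
    {
        struct rb_node *node;

        for (node = vm->regions.hva_tree.rb_node; node; ) {
            struct userspace_mem_region *region =
                container_of(node, struct userspace_mem_region, hva_node);
            uintptr_t start = (uintptr_t)region->host_mem;
            uintptr_t end = start + region->region.memory_size - 1;

            if ((uintptr_t)hva >= start) {
                if ((uintptr_t)hva <= end)
                    return (vm_paddr_t)(region->region.guest_phys_addr
                                        + ((uintptr_t)hva - start));
                node = node->rb_right;
            } else {
                node = node->rb_left;
            }
        }

        TEST_FAIL("No mapping to a guest physical address, hva: %p", hva);
        return -1;
    }
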
1455 struct userspace_mem_region *region; in addr_gpa2alias() local
1458 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2alias()
1459 if (!region) in addr_gpa2alias()
1462 if (!region->host_alias) in addr_gpa2alias()
1465 offset = gpa - region->region.guest_phys_addr; in addr_gpa2alias()
1466 return (void *) ((uintptr_t) region->host_alias + offset); in addr_gpa2alias()
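
addr_gpa2alias() returns the host address of the same guest page through the region's alias mapping (the second mmap of the memfd set up in vm_userspace_mem_region_add()), or NULL when the region has no alias; the hits give almost the whole body:

    void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa)
    {
        struct userspace_mem_region *region;
        uintptr_t offset;

        region = userspace_mem_region_find(vm, gpa, gpa);
        if (!region)
            return NULL;

        if (!region->host_alias)
            return NULL;

        offset = gpa - region->region.guest_phys_addr;
        return (void *)((uintptr_t)region->host_alias + offset);
    }
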
2107 struct userspace_mem_region *region; in vm_dump() local
2114 hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) { in vm_dump()
2117 (uint64_t) region->region.guest_phys_addr, in vm_dump()
2118 (uint64_t) region->region.memory_size, in vm_dump()
2119 region->host_mem); in vm_dump()
2121 sparsebit_dump(stream, region->unused_phy_pages, 0); in vm_dump()
2220 struct userspace_mem_region *region; in vm_phy_pages_alloc() local
2230 region = memslot2region(vm, memslot); in vm_phy_pages_alloc()
2235 if (!sparsebit_is_set(region->unused_phy_pages, pg)) { in vm_phy_pages_alloc()
2236 base = pg = sparsebit_next_set(region->unused_phy_pages, pg); in vm_phy_pages_alloc()
2252 sparsebit_clear(region->unused_phy_pages, pg); in vm_phy_pages_alloc()
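
vm_phy_pages_alloc() hands out guest physical pages from a slot's unused_phy_pages sparsebit: starting at paddr_min it looks for num consecutive set (still free) page indices, then clears them to mark them allocated. A sketch built around the search loop visible in the hits (signature, assertions and the failure path are assumptions):

    vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
                                  vm_paddr_t paddr_min, uint32_t memslot)
    {
        struct userspace_mem_region *region;
        sparsebit_idx_t pg, base;

        TEST_ASSERT(num > 0, "Must allocate at least one page");
        TEST_ASSERT((paddr_min % vm->page_size) == 0,
                    "paddr_min 0x%lx not page aligned", paddr_min);

        region = memslot2region(vm, memslot);
        base = pg = paddr_min >> vm->page_shift;

        /* Look for 'num' consecutive set bits, i.e. 'num' free guest pages. */
        do {
            for (; pg < base + num; ++pg) {
                if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
                    base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
                    break;
                }
            }
        } while (pg && pg != base + num);

        TEST_ASSERT(pg != 0,
                    "No contiguous run of %lu free guest pages in slot %u",
                    (unsigned long)num, memslot);

        /* Mark the chosen pages as allocated. */
        for (pg = base; pg < base + num; ++pg)
            sparsebit_clear(region->unused_phy_pages, pg);

        return base * vm->page_size;
    }
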