Lines Matching refs:region
368 ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size); in __vm_create()
440 struct userspace_mem_region *region; in kvm_vm_restart() local
446 hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) { in kvm_vm_restart()
447 int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region); in kvm_vm_restart()
452 ret, errno, region->region.slot, in kvm_vm_restart()
453 region->region.flags, in kvm_vm_restart()
454 region->region.guest_phys_addr, in kvm_vm_restart()
455 region->region.memory_size); in kvm_vm_restart()
550 struct userspace_mem_region *region = in userspace_mem_region_find() local
552 uint64_t existing_start = region->region.guest_phys_addr; in userspace_mem_region_find()
553 uint64_t existing_end = region->region.guest_phys_addr in userspace_mem_region_find()
554 + region->region.memory_size - 1; in userspace_mem_region_find()
556 return region; in userspace_mem_region_find()
587 struct userspace_mem_region *region; in kvm_userspace_memory_region_find() local
589 region = userspace_mem_region_find(vm, start, end); in kvm_userspace_memory_region_find()
590 if (!region) in kvm_userspace_memory_region_find()
593 return &region->region; in kvm_userspace_memory_region_find()
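
The two find helpers above resolve a guest-physical range to, respectively, the library-internal struct userspace_mem_region and the raw struct kvm_userspace_memory_region it wraps. A minimal usage sketch, assuming the prototypes exported by kvm_util.h at this revision (the helper name show_slot_for_gpa is made up for illustration):

	#include "kvm_util.h"

	/* Sketch: report which memslot backs a given guest-physical address. */
	static void show_slot_for_gpa(struct kvm_vm *vm, uint64_t gpa)
	{
		struct kvm_userspace_memory_region *r;

		/* Find the slot whose [guest_phys_addr, +memory_size) covers gpa. */
		r = kvm_userspace_memory_region_find(vm, gpa, gpa);
		TEST_ASSERT(r, "No memslot maps GPA 0x%lx", gpa);

		pr_info("GPA 0x%lx -> slot %u (base 0x%llx, size 0x%llx)\n",
			gpa, r->slot, r->guest_phys_addr, r->memory_size);
	}
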
651 struct userspace_mem_region *region, in __vm_mem_region_delete() argument
657 rb_erase(&region->gpa_node, &vm->regions.gpa_tree); in __vm_mem_region_delete()
658 rb_erase(&region->hva_node, &vm->regions.hva_tree); in __vm_mem_region_delete()
659 hash_del(&region->slot_node); in __vm_mem_region_delete()
662 region->region.memory_size = 0; in __vm_mem_region_delete()
663 vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region); in __vm_mem_region_delete()
665 sparsebit_free(&region->unused_phy_pages); in __vm_mem_region_delete()
666 ret = munmap(region->mmap_start, region->mmap_size); in __vm_mem_region_delete()
668 if (region->fd >= 0) { in __vm_mem_region_delete()
670 ret = munmap(region->mmap_alias, region->mmap_size); in __vm_mem_region_delete()
672 close(region->fd); in __vm_mem_region_delete()
675 free(region); in __vm_mem_region_delete()
685 struct userspace_mem_region *region; in kvm_vm_free() local
697 hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node) in kvm_vm_free()
698 __vm_mem_region_delete(vmp, region, false); in kvm_vm_free()
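
__vm_mem_region_delete() (called for every slot by kvm_vm_free() above) shows the KVM convention for removing a memslot: re-issue KVM_SET_USER_MEMORY_REGION with memory_size forced to 0, then unmap the backing memory. A stand-alone sketch of that convention against a bare VM file descriptor (the helper name delete_memslot is hypothetical):

	#include <string.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Sketch: delete memslot 'slot' on the VM behind vm_fd. */
	static int delete_memslot(int vm_fd, uint32_t slot)
	{
		struct kvm_userspace_memory_region region;

		memset(&region, 0, sizeof(region));
		region.slot = slot;
		region.memory_size = 0;	/* a zero size removes the slot */

		return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
	}
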
801 struct userspace_mem_region *region) in vm_userspace_mem_region_gpa_insert() argument
810 if (region->region.guest_phys_addr < in vm_userspace_mem_region_gpa_insert()
811 cregion->region.guest_phys_addr) in vm_userspace_mem_region_gpa_insert()
814 TEST_ASSERT(region->region.guest_phys_addr != in vm_userspace_mem_region_gpa_insert()
815 cregion->region.guest_phys_addr, in vm_userspace_mem_region_gpa_insert()
822 rb_link_node(&region->gpa_node, parent, cur); in vm_userspace_mem_region_gpa_insert()
823 rb_insert_color(&region->gpa_node, gpa_tree); in vm_userspace_mem_region_gpa_insert()
827 struct userspace_mem_region *region) in vm_userspace_mem_region_hva_insert() argument
836 if (region->host_mem < cregion->host_mem) in vm_userspace_mem_region_hva_insert()
839 TEST_ASSERT(region->host_mem != in vm_userspace_mem_region_hva_insert()
847 rb_link_node(&region->hva_node, parent, cur); in vm_userspace_mem_region_hva_insert()
848 rb_insert_color(&region->hva_node, hva_tree); in vm_userspace_mem_region_hva_insert()
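
Both insert helpers follow the standard Linux rbtree idiom: walk the rb_node links to find the insertion point, then rb_link_node() plus rb_insert_color(). A condensed sketch of the GPA-keyed variant, using only the fields visible in the fragments above (the duplicate-GPA TEST_ASSERT is omitted):

	/* Sketch of the walk behind vm_userspace_mem_region_gpa_insert(). */
	static void gpa_tree_insert(struct rb_root *gpa_tree,
				    struct userspace_mem_region *region)
	{
		struct rb_node **cur = &gpa_tree->rb_node;
		struct rb_node *parent = NULL;

		while (*cur) {
			struct userspace_mem_region *cregion =
				rb_entry(*cur, struct userspace_mem_region, gpa_node);

			parent = *cur;
			if (region->region.guest_phys_addr <
			    cregion->region.guest_phys_addr)
				cur = &(*cur)->rb_left;
			else
				cur = &(*cur)->rb_right;
		}

		rb_link_node(&region->gpa_node, parent, cur);
		rb_insert_color(&region->gpa_node, gpa_tree);
	}
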
855 struct kvm_userspace_memory_region region = { in __vm_set_user_memory_region() local
863 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region); in __vm_set_user_memory_region()
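
__vm_set_user_memory_region() is the thin wrapper that fills a stack-local struct kvm_userspace_memory_region and passes it straight to the ioctl. From a test it reads roughly like this (a sketch, assuming the six-argument prototype in kvm_util.h; the helper name and the slot, GPA and size values are made up):

	#include <errno.h>
	#include <sys/mman.h>
	#include "kvm_util.h"

	/* Sketch: register 2 MiB of anonymous host memory as slot 10 at GPA 0x10000000. */
	static void add_raw_slot(struct kvm_vm *vm)
	{
		void *hva = mmap(NULL, 0x200000, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		int ret;

		TEST_ASSERT(hva != MAP_FAILED, "mmap() failed");

		ret = __vm_set_user_memory_region(vm, 10, 0, 0x10000000, 0x200000, hva);
		TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed, errno: %i", errno);
	}
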
903 struct userspace_mem_region *region; in vm_userspace_mem_region_add() local
926 region = (struct userspace_mem_region *) userspace_mem_region_find( in vm_userspace_mem_region_add()
928 if (region != NULL) in vm_userspace_mem_region_add()
935 (uint64_t) region->region.guest_phys_addr, in vm_userspace_mem_region_add()
936 (uint64_t) region->region.memory_size); in vm_userspace_mem_region_add()
939 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in vm_userspace_mem_region_add()
941 if (region->region.slot != slot) in vm_userspace_mem_region_add()
949 region->region.slot, in vm_userspace_mem_region_add()
950 (uint64_t) region->region.guest_phys_addr, in vm_userspace_mem_region_add()
951 (uint64_t) region->region.memory_size); in vm_userspace_mem_region_add()
955 region = calloc(1, sizeof(*region)); in vm_userspace_mem_region_add()
956 TEST_ASSERT(region != NULL, "Insufficient Memory"); in vm_userspace_mem_region_add()
957 region->mmap_size = npages * vm->page_size; in vm_userspace_mem_region_add()
979 region->mmap_size += alignment; in vm_userspace_mem_region_add()
981 region->fd = -1; in vm_userspace_mem_region_add()
983 region->fd = kvm_memfd_alloc(region->mmap_size, in vm_userspace_mem_region_add()
986 region->mmap_start = mmap(NULL, region->mmap_size, in vm_userspace_mem_region_add()
989 region->fd, 0); in vm_userspace_mem_region_add()
990 TEST_ASSERT(region->mmap_start != MAP_FAILED, in vm_userspace_mem_region_add()
994 region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz), in vm_userspace_mem_region_add()
996 region->mmap_start, backing_src_pagesz); in vm_userspace_mem_region_add()
999 region->host_mem = align_ptr_up(region->mmap_start, alignment); in vm_userspace_mem_region_add()
1004 ret = madvise(region->host_mem, npages * vm->page_size, in vm_userspace_mem_region_add()
1007 region->host_mem, npages * vm->page_size, in vm_userspace_mem_region_add()
1011 region->backing_src_type = src_type; in vm_userspace_mem_region_add()
1012 region->unused_phy_pages = sparsebit_alloc(); in vm_userspace_mem_region_add()
1013 sparsebit_set_num(region->unused_phy_pages, in vm_userspace_mem_region_add()
1015 region->region.slot = slot; in vm_userspace_mem_region_add()
1016 region->region.flags = flags; in vm_userspace_mem_region_add()
1017 region->region.guest_phys_addr = guest_paddr; in vm_userspace_mem_region_add()
1018 region->region.memory_size = npages * vm->page_size; in vm_userspace_mem_region_add()
1019 region->region.userspace_addr = (uintptr_t) region->host_mem; in vm_userspace_mem_region_add()
1020 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_userspace_mem_region_add()
1026 guest_paddr, (uint64_t) region->region.memory_size); in vm_userspace_mem_region_add()
1029 vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region); in vm_userspace_mem_region_add()
1030 vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region); in vm_userspace_mem_region_add()
1031 hash_add(vm->regions.slot_hash, &region->slot_node, slot); in vm_userspace_mem_region_add()
1034 if (region->fd >= 0) { in vm_userspace_mem_region_add()
1035 region->mmap_alias = mmap(NULL, region->mmap_size, in vm_userspace_mem_region_add()
1038 region->fd, 0); in vm_userspace_mem_region_add()
1039 TEST_ASSERT(region->mmap_alias != MAP_FAILED, in vm_userspace_mem_region_add()
1043 region->host_alias = align_ptr_up(region->mmap_alias, alignment); in vm_userspace_mem_region_add()
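
vm_userspace_mem_region_add() is the full-service path most tests use: it allocates and maps the backing source, records the region in the GPA/HVA trees and the slot hash, and registers the memslot with KVM. A typical call site, assuming the argument order shown by the fragments above (the DEMO_* constants and helper name are illustrative):

	#include "kvm_util.h"

	#define DEMO_SLOT	1
	#define DEMO_GPA	0xc0000000UL
	#define DEMO_NPAGES	512

	/* Sketch: back DEMO_NPAGES guest pages at DEMO_GPA with anonymous memory
	 * and enable dirty logging on the new slot. */
	static void add_logged_slot(struct kvm_vm *vm)
	{
		vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
					    DEMO_GPA, DEMO_SLOT, DEMO_NPAGES,
					    KVM_MEM_LOG_DIRTY_PAGES);
	}
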
1065 struct userspace_mem_region *region; in memslot2region() local
1067 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in memslot2region()
1069 if (region->region.slot == memslot) in memslot2region()
1070 return region; in memslot2region()
1097 struct userspace_mem_region *region; in vm_mem_region_set_flags() local
1099 region = memslot2region(vm, slot); in vm_mem_region_set_flags()
1101 region->region.flags = flags; in vm_mem_region_set_flags()
1103 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_mem_region_set_flags()
1126 struct userspace_mem_region *region; in vm_mem_region_move() local
1129 region = memslot2region(vm, slot); in vm_mem_region_move()
1131 region->region.guest_phys_addr = new_gpa; in vm_mem_region_move()
1133 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region); in vm_mem_region_move()
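
vm_mem_region_set_flags() and vm_mem_region_move() each patch one field of the cached descriptor and replay the ioctl. Continuing the sketch above (slot number and new GPA are illustrative):

	/* Sketch: toggle dirty logging on the demo slot, then relocate it. */
	static void tweak_demo_slot(struct kvm_vm *vm)
	{
		vm_mem_region_set_flags(vm, DEMO_SLOT, KVM_MEM_LOG_DIRTY_PAGES);
		vm_mem_region_set_flags(vm, DEMO_SLOT, 0);
		vm_mem_region_move(vm, DEMO_SLOT, 0xd0000000UL);
	}
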
1454 struct userspace_mem_region *region; in addr_gpa2hva() local
1456 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2hva()
1457 if (!region) { in addr_gpa2hva()
1462 return (void *)((uintptr_t)region->host_mem in addr_gpa2hva()
1463 + (gpa - region->region.guest_phys_addr)); in addr_gpa2hva()
1488 struct userspace_mem_region *region = in addr_hva2gpa() local
1491 if (hva >= region->host_mem) { in addr_hva2gpa()
1492 if (hva <= (region->host_mem in addr_hva2gpa()
1493 + region->region.memory_size - 1)) in addr_hva2gpa()
1495 region->region.guest_phys_addr in addr_hva2gpa()
1496 + (hva - (uintptr_t)region->host_mem)); in addr_hva2gpa()
1528 struct userspace_mem_region *region; in addr_gpa2alias() local
1531 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2alias()
1532 if (!region) in addr_gpa2alias()
1535 if (!region->host_alias) in addr_gpa2alias()
1538 offset = gpa - region->region.guest_phys_addr; in addr_gpa2alias()
1539 return (void *) ((uintptr_t) region->host_alias + offset); in addr_gpa2alias()
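
addr_gpa2hva(), addr_hva2gpa() and addr_gpa2alias() translate between guest-physical addresses and the host mappings recorded per region. A round-trip sketch, assuming 'gpa' already lies inside a registered slot (the helper name is made up):

	/* Sketch: write through the canonical mapping and verify the translations. */
	static void check_gpa_mappings(struct kvm_vm *vm, vm_paddr_t gpa)
	{
		uint64_t *hva = addr_gpa2hva(vm, gpa);
		uint64_t *alias;

		*hva = 0xdeadbeef;

		TEST_ASSERT(addr_hva2gpa(vm, hva) == gpa,
			    "HVA->GPA round trip mismatch for GPA 0x%lx", gpa);

		/* The alias mapping, when the backing source provides one, sees
		 * the same memory. */
		alias = addr_gpa2alias(vm, gpa);
		if (alias)
			TEST_ASSERT(*alias == 0xdeadbeef, "Alias mapping out of sync");
	}
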
1787 struct userspace_mem_region *region; in vm_dump() local
1794 hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) { in vm_dump()
1797 (uint64_t) region->region.guest_phys_addr, in vm_dump()
1798 (uint64_t) region->region.memory_size, in vm_dump()
1799 region->host_mem); in vm_dump()
1801 sparsebit_dump(stream, region->unused_phy_pages, 0); in vm_dump()
1902 struct userspace_mem_region *region; in vm_phy_pages_alloc() local
1912 region = memslot2region(vm, memslot); in vm_phy_pages_alloc()
1917 if (!sparsebit_is_set(region->unused_phy_pages, pg)) { in vm_phy_pages_alloc()
1918 base = pg = sparsebit_next_set(region->unused_phy_pages, pg); in vm_phy_pages_alloc()
1934 sparsebit_clear(region->unused_phy_pages, pg); in vm_phy_pages_alloc()
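
vm_phy_pages_alloc() hands out guest-physical pages by clearing bits in the owning region's unused_phy_pages sparsebit. A short usage sketch, assuming the (vm, num, paddr_min, memslot) argument order in kvm_util.h (the helper name and values are illustrative):

	#include <string.h>
	#include "kvm_util.h"

	/* Sketch: carve four contiguous guest-physical pages out of memslot 0,
	 * at or above 1 MiB, then zero them through their host mapping. */
	static vm_paddr_t grab_pages(struct kvm_vm *vm)
	{
		vm_paddr_t paddr = vm_phy_pages_alloc(vm, 4, 0x100000, 0);

		memset(addr_gpa2hva(vm, paddr), 0, 4 * vm->page_size);
		return paddr;
	}
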