Lines Matching refs:vm — cross-reference hits for the symbol 'vm' in the KVM selftests library (tools/testing/selftests/kvm/lib/kvm_util.c). Each entry gives the source line number, the matching line, and the enclosing function.

198 void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)  in vm_enable_dirty_ring()  argument
200 if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL)) in vm_enable_dirty_ring()
201 vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size); in vm_enable_dirty_ring()
203 vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size); in vm_enable_dirty_ring()
204 vm->dirty_ring_size = ring_size; in vm_enable_dirty_ring()
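The enable path above prefers the ACQ_REL flavor of the dirty-ring capability and falls back to the plain one, then records the ring size on the VM. A minimal usage sketch, assuming the selftests' kvm_util.h; 0x10000 is only an illustrative ring size:

#include "kvm_util.h"

static void example_dirty_ring_setup(struct kvm_vm *vm)
{
        /* KVM requires the dirty ring to be enabled before any
         * vCPU is created; the size here is an arbitrary example. */
        vm_enable_dirty_ring(vm, 0x10000);
}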
207 static void vm_open(struct kvm_vm *vm) in vm_open() argument
209 vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR); in vm_open()
213 vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type); in vm_open()
214 TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd)); in vm_open()
217 vm->stats.fd = vm_get_stats_fd(vm); in vm_open()
219 vm->stats.fd = -1; in vm_open()
282 __weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm) in vm_vaddr_populate_bitmap() argument
284 sparsebit_set_num(vm->vpages_valid, in vm_vaddr_populate_bitmap()
285 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift); in vm_vaddr_populate_bitmap()
286 sparsebit_set_num(vm->vpages_valid, in vm_vaddr_populate_bitmap()
287 (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift, in vm_vaddr_populate_bitmap()
288 (1ULL << (vm->va_bits - 1)) >> vm->page_shift); in vm_vaddr_populate_bitmap()
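vm_vaddr_populate_bitmap() marks both canonical halves of the guest virtual address space as usable. A standalone check of the arithmetic at lines 284-288, using the common x86-64 values (va_bits = 48, 4 KiB pages) purely as example inputs:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        uint64_t va_bits = 48, page_shift = 12;
        /* Pages in one canonical half of the VA space. */
        uint64_t half = (1ULL << (va_bits - 1)) >> page_shift;
        /* First page index of the high (sign-extended) half. */
        uint64_t high = (~((1ULL << (va_bits - 1)) - 1)) >> page_shift;

        printf("pages per half:      0x%" PRIx64 "\n", half);
        printf("high half starts at: 0x%" PRIx64 "\n", high);
        return 0;
}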
293 struct kvm_vm *vm; in ____vm_create() local
295 vm = calloc(1, sizeof(*vm)); in ____vm_create()
296 TEST_ASSERT(vm != NULL, "Insufficient Memory"); in ____vm_create()
298 INIT_LIST_HEAD(&vm->vcpus); in ____vm_create()
299 vm->regions.gpa_tree = RB_ROOT; in ____vm_create()
300 vm->regions.hva_tree = RB_ROOT; in ____vm_create()
301 hash_init(vm->regions.slot_hash); in ____vm_create()
303 vm->mode = shape.mode; in ____vm_create()
304 vm->type = shape.type; in ____vm_create()
306 vm->pa_bits = vm_guest_mode_params[vm->mode].pa_bits; in ____vm_create()
307 vm->va_bits = vm_guest_mode_params[vm->mode].va_bits; in ____vm_create()
308 vm->page_size = vm_guest_mode_params[vm->mode].page_size; in ____vm_create()
309 vm->page_shift = vm_guest_mode_params[vm->mode].page_shift; in ____vm_create()
312 switch (vm->mode) { in ____vm_create()
314 vm->pgtable_levels = 4; in ____vm_create()
317 vm->pgtable_levels = 3; in ____vm_create()
320 vm->pgtable_levels = 4; in ____vm_create()
323 vm->pgtable_levels = 3; in ____vm_create()
327 vm->pgtable_levels = 4; in ____vm_create()
331 vm->pgtable_levels = 3; in ____vm_create()
337 vm->pgtable_levels = 4; in ____vm_create()
341 vm->pgtable_levels = 3; in ____vm_create()
345 kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits); in ____vm_create()
346 kvm_init_vm_address_properties(vm); in ____vm_create()
352 TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57, in ____vm_create()
354 vm->va_bits); in ____vm_create()
356 vm->pa_bits); in ____vm_create()
357 vm->pgtable_levels = 4; in ____vm_create()
358 vm->va_bits = 48; in ____vm_create()
364 vm->pgtable_levels = 5; in ____vm_create()
367 vm->pgtable_levels = 5; in ____vm_create()
370 TEST_FAIL("Unknown guest mode: 0x%x", vm->mode); in ____vm_create()
374 TEST_ASSERT(!vm->type, "ARM doesn't support test-provided types"); in ____vm_create()
375 if (vm->pa_bits != 40) in ____vm_create()
376 vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits); in ____vm_create()
379 vm_open(vm); in ____vm_create()
382 vm->vpages_valid = sparsebit_alloc(); in ____vm_create()
383 vm_vaddr_populate_bitmap(vm); in ____vm_create()
386 vm->max_gfn = vm_compute_max_gfn(vm); in ____vm_create()
389 vm->vpages_mapped = sparsebit_alloc(); in ____vm_create()
391 return vm; in ____vm_create()
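The switch above derives page-table depth from the addressing mode; for native x86-64 the assertion at line 352 admits only 48 or 57 VA bits, which the following lines map to 4- and 5-level paging. A one-line restatement of that relationship (my naming, not a library helper):

/* 48 VA bits => 4-level paging; 57 VA bits => 5-level paging. */
static int x86_va_bits_to_pgtable_levels(int va_bits)
{
        return va_bits == 57 ? 5 : 4;
}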
480 struct kvm_vm *vm; in __vm_create() local
488 vm = ____vm_create(shape); in __vm_create()
498 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, nr_pages, flags); in __vm_create()
500 vm->memslots[i] = 0; in __vm_create()
502 kvm_vm_elf_load(vm, program_invocation_name); in __vm_create()
510 slot0 = memslot2region(vm, 0); in __vm_create()
511 ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size); in __vm_create()
518 sync_global_to_guest(vm, guest_rng); in __vm_create()
520 kvm_arch_vm_post_create(vm); in __vm_create()
522 return vm; in __vm_create()
548 struct kvm_vm *vm; in __vm_create_with_vcpus() local
553 vm = __vm_create(shape, nr_vcpus, extra_mem_pages); in __vm_create_with_vcpus()
556 vcpus[i] = vm_vcpu_add(vm, i, guest_code); in __vm_create_with_vcpus()
558 return vm; in __vm_create_with_vcpus()
567 struct kvm_vm *vm; in __vm_create_shape_with_one_vcpu() local
569 vm = __vm_create_with_vcpus(shape, 1, extra_mem_pages, guest_code, vcpus); in __vm_create_shape_with_one_vcpu()
572 return vm; in __vm_create_shape_with_one_vcpu()
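Together these form the usual construction path: __vm_create() adds memslot 0, loads the test ELF, and initializes ucall and the guest RNG; __vm_create_with_vcpus() layers vCPUs on top; the one-vCPU shape variant is the most common entry point. A hedged usage sketch, assuming the selftests' VM_SHAPE_DEFAULT initializer for the default guest mode:

#include "kvm_util.h"

static void guest_code(void)
{
        GUEST_DONE();
}

static struct kvm_vm *example_create(struct kvm_vcpu **vcpu)
{
        /* One vCPU, no extra memory pages beyond the defaults. */
        return __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, vcpu,
                                               0, guest_code);
}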
610 __weak struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, in vm_arch_vcpu_recreate() argument
613 return __vm_vcpu_add(vm, vcpu_id); in vm_arch_vcpu_recreate()
616 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm) in vm_recreate_with_one_vcpu() argument
618 kvm_vm_restart(vm); in vm_recreate_with_one_vcpu()
620 return vm_vcpu_recreate(vm, 0); in vm_recreate_with_one_vcpu()
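vm_recreate_with_one_vcpu() pairs kvm_vm_restart(), which re-opens the VM fd and re-registers the existing memory regions, with a freshly created vCPU 0. A sketch of the shape such a call takes:

#include "kvm_util.h"

static struct kvm_vcpu *example_recreate(struct kvm_vm *vm)
{
        /* KVM state is rebuilt; the test's memory layout survives. */
        return vm_recreate_with_one_vcpu(vm);
}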
712 userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end) in userspace_mem_region_find() argument
716 for (node = vm->regions.gpa_tree.rb_node; node; ) { in userspace_mem_region_find()
768 static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu) in vm_vcpu_rm() argument
773 ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size); in vm_vcpu_rm()
810 static void __vm_mem_region_delete(struct kvm_vm *vm, in __vm_mem_region_delete() argument
815 rb_erase(&region->gpa_node, &vm->regions.gpa_tree); in __vm_mem_region_delete()
816 rb_erase(&region->hva_node, &vm->regions.hva_tree); in __vm_mem_region_delete()
933 int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, in __vm_set_user_memory_region() argument
944 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region); in __vm_set_user_memory_region()
947 void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, in vm_set_user_memory_region() argument
950 int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva); in vm_set_user_memory_region()
960 int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, in __vm_set_user_memory_region2() argument
976 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION2, &region); in __vm_set_user_memory_region2()
979 void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, in vm_set_user_memory_region2() argument
983 int ret = __vm_set_user_memory_region2(vm, slot, flags, gpa, size, hva, in vm_set_user_memory_region2()
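The *2 variants drive KVM_SET_USER_MEMORY_REGION2, whose struct adds a guest_memfd and offset after the classic fields; the call at line 983 passes those two extra arguments after the hva. A hedged sketch binding a guest_memfd-backed slot (slot number and offset are arbitrary example values):

#include "kvm_util.h"

static void example_bind_guest_memfd(struct kvm_vm *vm, uint64_t gpa,
                                     uint64_t size, void *hva,
                                     int guest_memfd)
{
        vm_set_user_memory_region2(vm, 2, KVM_MEM_GUEST_MEMFD,
                                   gpa, size, hva, guest_memfd, 0);
}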
992 void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, in vm_mem_add() argument
999 size_t mem_size = npages * vm->page_size; in vm_mem_add()
1004 TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages, in vm_mem_add()
1006 "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages)); in vm_mem_add()
1008 TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical " in vm_mem_add()
1011 guest_paddr, vm->page_size); in vm_mem_add()
1012 TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1) in vm_mem_add()
1013 <= vm->max_gfn, "Physical range beyond maximum " in vm_mem_add()
1017 guest_paddr, npages, vm->max_gfn, vm->page_size); in vm_mem_add()
1024 vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1); in vm_mem_add()
1031 guest_paddr, npages, vm->page_size, in vm_mem_add()
1036 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in vm_mem_add()
1115 guest_memfd = vm_create_guest_memfd(vm, mem_size, guest_memfd_flags); in vm_mem_add()
1134 if (vm_arch_has_protected_memory(vm)) in vm_mem_add()
1137 guest_paddr >> vm->page_shift, npages); in vm_mem_add()
1141 region->region.memory_size = npages * vm->page_size; in vm_mem_add()
1143 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region); in vm_mem_add()
1153 vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region); in vm_mem_add()
1154 vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region); in vm_mem_add()
1155 hash_add(vm->regions.slot_hash, &region->slot_node, slot); in vm_mem_add()
1171 void vm_userspace_mem_region_add(struct kvm_vm *vm, in vm_userspace_mem_region_add() argument
1176 vm_mem_add(vm, src_type, guest_paddr, slot, npages, flags, -1, 0); in vm_userspace_mem_region_add()
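vm_userspace_mem_region_add() is the classic front end to vm_mem_add(): no guest_memfd, hence the -1, 0 tail at line 1176. A usage sketch adding an anonymous 64 MiB slot (GPA and slot number are arbitrary example values):

#include "kvm_util.h"

static void example_add_slot(struct kvm_vm *vm)
{
        uint64_t npages = (64ULL << 20) / vm->page_size;

        vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
                                    4ULL << 30, /* guest_paddr: 4 GiB */
                                    1,          /* slot */
                                    npages,
                                    0);         /* flags */
}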
1195 memslot2region(struct kvm_vm *vm, uint32_t memslot) in memslot2region() argument
1199 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in memslot2region()
1207 vm_dump(stderr, vm, 2); in memslot2region()
1226 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags) in vm_mem_region_set_flags() argument
1231 region = memslot2region(vm, slot); in vm_mem_region_set_flags()
1235 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region); in vm_mem_region_set_flags()
1256 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa) in vm_mem_region_move() argument
1261 region = memslot2region(vm, slot); in vm_mem_region_move()
1265 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region); in vm_mem_region_move()
1285 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot) in vm_mem_region_delete() argument
1287 struct userspace_mem_region *region = memslot2region(vm, slot); in vm_mem_region_delete()
1290 vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region); in vm_mem_region_delete()
1292 __vm_mem_region_delete(vm, region); in vm_mem_region_delete()
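set_flags, move, and delete all funnel into KVM_SET_USER_MEMORY_REGION2 on the cached region descriptor (delete sets memory_size to 0 before the ioctl). A sketch chaining the three on the slot added above:

#include "kvm_util.h"

static void example_slot_lifecycle(struct kvm_vm *vm)
{
        /* Enable dirty logging, relocate the slot, then drop it. */
        vm_mem_region_set_flags(vm, 1, KVM_MEM_LOG_DIRTY_PAGES);
        vm_mem_region_move(vm, 1, 8ULL << 30);
        vm_mem_region_delete(vm, 1);
}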
1295 void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size, in vm_guest_mem_fallocate() argument
1308 region = userspace_mem_region_find(vm, gpa, gpa); in vm_guest_mem_fallocate()
1339 static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id) in vcpu_exists() argument
1343 list_for_each_entry(vcpu, &vm->vcpus, list) { in vcpu_exists()
1355 struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) in __vm_vcpu_add() argument
1360 TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists", vcpu_id); in __vm_vcpu_add()
1366 vcpu->vm = vm; in __vm_vcpu_add()
1368 vcpu->fd = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(unsigned long)vcpu_id); in __vm_vcpu_add()
1369 TEST_ASSERT_VM_VCPU_IOCTL(vcpu->fd >= 0, KVM_CREATE_VCPU, vcpu->fd, vm); in __vm_vcpu_add()
1385 list_add(&vcpu->list, &vm->vcpus); in __vm_vcpu_add()
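__vm_vcpu_add() creates and registers a bare vCPU but sets no guest entry point; vm_vcpu_add(), seen at line 556 above, layers that on top. Sketch:

#include "kvm_util.h"

static struct kvm_vcpu *example_bare_vcpu(struct kvm_vm *vm)
{
        /* Asserts (fails the test) if vCPU 0 already exists. */
        return __vm_vcpu_add(vm, 0);
}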
1410 vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, in vm_vaddr_unused_gap() argument
1413 uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift; in vm_vaddr_unused_gap()
1416 uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift; in vm_vaddr_unused_gap()
1417 if ((pgidx_start * vm->page_size) < vaddr_min) in vm_vaddr_unused_gap()
1421 if (!sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1423 pgidx_start = sparsebit_next_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1432 if (sparsebit_is_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
1435 pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
1444 if (!sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1447 vm->vpages_valid, pgidx_start, pages); in vm_vaddr_unused_gap()
1460 TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1466 TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
1473 return pgidx_start * vm->page_size; in vm_vaddr_unused_gap()
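The gap search works in page units; line 1413 is the usual byte-to-page round-up. Restating that rounding on its own (with 4 KiB pages, 5000 bytes rounds up to 2 pages):

#include <stdint.h>

static uint64_t bytes_to_pages(uint64_t sz, uint64_t page_size,
                               unsigned int page_shift)
{
        return (sz + page_size - 1) >> page_shift;
}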
1476 static vm_vaddr_t ____vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, in ____vm_vaddr_alloc() argument
1481 uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); in ____vm_vaddr_alloc()
1483 virt_pgd_alloc(vm); in ____vm_vaddr_alloc()
1484 vm_paddr_t paddr = __vm_phy_pages_alloc(vm, pages, in ____vm_vaddr_alloc()
1485 KVM_UTIL_MIN_PFN * vm->page_size, in ____vm_vaddr_alloc()
1486 vm->memslots[type], protected); in ____vm_vaddr_alloc()
1492 vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min); in ____vm_vaddr_alloc()
1496 pages--, vaddr += vm->page_size, paddr += vm->page_size) { in ____vm_vaddr_alloc()
1498 virt_pg_map(vm, vaddr, paddr); in ____vm_vaddr_alloc()
1500 sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift); in ____vm_vaddr_alloc()
1506 vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, in __vm_vaddr_alloc() argument
1509 return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, in __vm_vaddr_alloc()
1510 vm_arch_has_protected_memory(vm)); in __vm_vaddr_alloc()
1513 vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz, in vm_vaddr_alloc_shared() argument
1517 return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, false); in vm_vaddr_alloc_shared()
1539 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min) in vm_vaddr_alloc() argument
1541 return __vm_vaddr_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA); in vm_vaddr_alloc()
1558 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages) in vm_vaddr_alloc_pages() argument
1560 return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR); in vm_vaddr_alloc_pages()
1563 vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type) in __vm_vaddr_alloc_page() argument
1565 return __vm_vaddr_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type); in __vm_vaddr_alloc_page()
1582 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm) in vm_vaddr_alloc_page() argument
1584 return vm_vaddr_alloc_pages(vm, 1); in vm_vaddr_alloc_page()
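The allocators nest: vm_vaddr_alloc_page() -> vm_vaddr_alloc_pages() -> vm_vaddr_alloc() -> __vm_vaddr_alloc(), defaulting to the MEM_REGION_TEST_DATA region type. Usage sketch:

#include "kvm_util.h"
#include <string.h>
#include <unistd.h>

static void example_vaddr_alloc(struct kvm_vm *vm)
{
        /* Two pages of guest VA, backed and mapped; zero them
         * through the host mapping. */
        vm_vaddr_t buf = vm_vaddr_alloc_pages(vm, 2);

        memset(addr_gva2hva(vm, buf), 0, 2 * getpagesize());
}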
1603 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, in virt_map() argument
1606 size_t page_size = vm->page_size; in virt_map()
1613 virt_pg_map(vm, vaddr, paddr); in virt_map()
1614 sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift); in virt_map()
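virt_map() only installs page-table entries: unlike the allocators above, it neither searches for free VA nor allocates backing GPAs, so the caller supplies both. Sketch, assuming the trailing parameter elided by the matcher at line 1603 is a page count:

#include "kvm_util.h"

static void example_identity_map(struct kvm_vm *vm, unsigned int npages)
{
        /* Identity-map npages at 1 MiB; the physical range must
         * already belong to a registered memslot. */
        virt_map(vm, 1 << 20, 1 << 20, npages);
}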
1638 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) in addr_gpa2hva() argument
1642 gpa = vm_untag_gpa(vm, gpa); in addr_gpa2hva()
1644 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2hva()
1671 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) in addr_hva2gpa() argument
1675 for (node = vm->regions.hva_tree.rb_node; node; ) { in addr_hva2gpa()
1714 void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa) in addr_gpa2alias() argument
1719 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2alias()
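The three translators cover GPA to HVA (lookup in the GPA rb-tree), HVA to GPA (lookup in the HVA rb-tree), and GPA to the host's alias mapping of the same region. A round-trip sketch:

#include "kvm_util.h"

static void example_round_trip(struct kvm_vm *vm, vm_paddr_t gpa)
{
        uint64_t *hva = addr_gpa2hva(vm, gpa);

        *hva = 0xdeadbeef; /* write through the host mapping */
        TEST_ASSERT(addr_hva2gpa(vm, hva) == gpa,
                    "GPA -> HVA -> GPA round trip mismatch");
}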
1731 void vm_create_irqchip(struct kvm_vm *vm) in vm_create_irqchip() argument
1740 r = __vm_ioctl(vm, KVM_CREATE_IRQCHIP, NULL); in vm_create_irqchip()
1742 vm_enable_cap(vm, KVM_CAP_SPLIT_IRQCHIP, 24); in vm_create_irqchip()
1744 TEST_ASSERT_VM_VCPU_IOCTL(!r, KVM_CREATE_IRQCHIP, r, vm); in vm_create_irqchip()
1746 vm->has_irqchip = true; in vm_create_irqchip()
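Line 1742 shows the x86-only fallback to a split irqchip (24 pins) when KVM_CREATE_IRQCHIP is unavailable; either way the VM is flagged has_irqchip. A sketch pairing creation with a level pulse via kvm_irq_line(), listed at line 1912 below:

#include "kvm_util.h"

static void example_irqchip(struct kvm_vm *vm)
{
        vm_create_irqchip(vm);

        /* Pulse IRQ line 4: assert, then deassert. */
        kvm_irq_line(vm, 4, 1);
        kvm_irq_line(vm, 4, 0);
}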
1809 uint32_t size = vcpu->vm->dirty_ring_size; in vcpu_map_dirty_ring()
1850 int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type) in __kvm_test_create_device() argument
1857 return __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev); in __kvm_test_create_device()
1860 int __kvm_create_device(struct kvm_vm *vm, uint64_t type) in __kvm_create_device() argument
1869 err = __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev); in __kvm_create_device()
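__kvm_test_create_device() issues KVM_CREATE_DEVICE with the test flag set, probing support without instantiating anything; __kvm_create_device() then returns the new device fd. Sketch, using the arm64 vGIC type purely as an example:

#include "kvm_util.h"

static int example_create_vgic(struct kvm_vm *vm)
{
        if (__kvm_test_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3))
                return -1; /* not supported on this host */

        return __kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3);
}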
1902 int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) in _kvm_irq_line() argument
1909 return __vm_ioctl(vm, KVM_IRQ_LINE, &irq_level); in _kvm_irq_line()
1912 void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) in kvm_irq_line() argument
1914 int ret = _kvm_irq_line(vm, irq, level); in kvm_irq_line()
1950 int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing) in _kvm_gsi_routing_write() argument
1955 ret = __vm_ioctl(vm, KVM_SET_GSI_ROUTING, routing); in _kvm_gsi_routing_write()
1961 void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing) in kvm_gsi_routing_write() argument
1965 ret = _kvm_gsi_routing_write(vm, routing); in kvm_gsi_routing_write()
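kvm_gsi_routing_write() commits a caller-built routing table, and the underlying _kvm_gsi_routing_write() frees it after the ioctl. A hedged sketch, assuming the selftests' companion helpers kvm_gsi_routing_create() and kvm_gsi_routing_irqchip_add() (not shown in this listing):

#include "kvm_util.h"

static void example_routing(struct kvm_vm *vm)
{
        struct kvm_irq_routing *routing;

        /* Route GSI 0 to irqchip pin 0; the write consumes and
         * frees the table. */
        routing = kvm_gsi_routing_create();
        kvm_gsi_routing_irqchip_add(routing, 0, 0);
        kvm_gsi_routing_write(vm, routing);
}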
1984 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) in vm_dump() argument
1990 fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode); in vm_dump()
1991 fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd); in vm_dump()
1992 fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size); in vm_dump()
1994 hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) { in vm_dump()
2008 sparsebit_dump(stream, vm->vpages_mapped, indent + 2); in vm_dump()
2010 vm->pgd_created); in vm_dump()
2011 if (vm->pgd_created) { in vm_dump()
2014 virt_dump(stream, vm, indent + 4); in vm_dump()
2018 list_for_each_entry(vcpu, &vm->vcpus, list) in vm_dump()
2118 vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, in __vm_phy_pages_alloc() argument
2127 TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " in __vm_phy_pages_alloc()
2130 paddr_min, vm->page_size); in __vm_phy_pages_alloc()
2132 region = memslot2region(vm, memslot); in __vm_phy_pages_alloc()
2136 base = pg = paddr_min >> vm->page_shift; in __vm_phy_pages_alloc()
2149 paddr_min, vm->page_size, memslot); in __vm_phy_pages_alloc()
2151 vm_dump(stderr, vm, 2); in __vm_phy_pages_alloc()
2161 return base * vm->page_size; in __vm_phy_pages_alloc()
2164 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, in vm_phy_page_alloc() argument
2167 return vm_phy_pages_alloc(vm, 1, paddr_min, memslot); in vm_phy_page_alloc()
2170 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm) in vm_alloc_page_table() argument
2172 return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, in vm_alloc_page_table()
2173 vm->memslots[MEM_REGION_PT]); in vm_alloc_page_table()
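Physical allocation bottoms out in __vm_phy_pages_alloc(), which scans the target memslot for a free run starting at paddr_min; vm_alloc_page_table() is the page-table wrapper with its own minimum address and memslot. Usage sketch (memslot indexing by region type follows lines 1486 and 2173):

#include "kvm_util.h"
#include <string.h>

static void example_phy_alloc(struct kvm_vm *vm)
{
        /* One page of test data, then one page-table page. */
        vm_paddr_t data_pa = vm_phy_page_alloc(vm,
                                KVM_UTIL_MIN_PFN * vm->page_size,
                                vm->memslots[MEM_REGION_TEST_DATA]);

        memset(addr_gpa2hva(vm, data_pa), 0, vm->page_size);
        vm_alloc_page_table(vm);
}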
2188 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva) in addr_gva2hva() argument
2190 return addr_gpa2hva(vm, addr_gva2gpa(vm, gva)); in addr_gva2hva()
2193 unsigned long __weak vm_compute_max_gfn(struct kvm_vm *vm) in vm_compute_max_gfn() argument
2195 return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1; in vm_compute_max_gfn()
2333 __weak void kvm_arch_vm_post_create(struct kvm_vm *vm) in kvm_arch_vm_post_create() argument
2352 bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr) in vm_is_gpa_protected() argument
2357 if (!vm_arch_has_protected_memory(vm)) in vm_is_gpa_protected()
2360 region = userspace_mem_region_find(vm, paddr, paddr); in vm_is_gpa_protected()
2363 pg = paddr >> vm->page_shift; in vm_is_gpa_protected()