/linux/arch/powerpc/kvm/
book3s_hv_uvmem.c
    394  unsigned long gfn = memslot->base_gfn;  in kvmppc_memslot_page_merge()
    434  kvmppc_uvmem_slot_free(kvm, memslot);  in __kvmppc_uvmem_memslot_delete()
    450  memslot->base_gfn << PAGE_SHIFT,  in __kvmppc_uvmem_memslot_create()
    451  memslot->npages * PAGE_SIZE,  in __kvmppc_uvmem_memslot_create()
    452  0, memslot->id);  in __kvmppc_uvmem_memslot_create()
    459  kvmppc_uvmem_slot_free(kvm, memslot);  in __kvmppc_uvmem_memslot_create()
    468  struct kvm_memory_slot *memslot, *m;  in kvmppc_h_svm_init_start() (local)
    498  if (m == memslot)  in kvmppc_h_svm_init_start()
    662  struct kvm_memory_slot *memslot;  in kvmppc_h_svm_init_abort() (local)
    797  unsigned long gfn = memslot->base_gfn;  in kvmppc_uv_migrate_mem_slot()
    [all …]
book3s_64_mmu_hv.c
    584   if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))  in kvmppc_book3s_hv_page_fault()
    706   rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn];  in kvmppc_book3s_hv_page_fault()
    772   memslot->npages * sizeof(*memslot->arch.rmap));  in kvmppc_rmap_reset()
    828   rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];  in kvm_unmap_rmapp()
    908   rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];  in kvm_age_rmapp()
    979   rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];  in kvm_test_age_rmapp()
    1113  gfn >= memslot->base_gfn + memslot->npages)  in kvmppc_harvest_vpa_dirty()
    1156  if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))  in kvmppc_pin_guest_page()
    1192  if (memslot && memslot->dirty_bitmap)  in kvmppc_unpin_guest_page()
    1193  set_bit_le(gfn - memslot->base_gfn, memslot->dirty_bitmap);  in kvmppc_unpin_guest_page()
    [all …]
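Several of the book3s_64_mmu_hv.c hits above repeat one idiom: the reverse-map entry for a guest frame is found by indexing the slot's rmap array with the gfn's offset from base_gfn. A minimal sketch of that lookup, using a simplified stand-in struct (toy_memslot and rmap_for_gfn are invented for illustration, not kernel names):

```c
#include <stddef.h>

/* Simplified stand-in for struct kvm_memory_slot. */
struct toy_memslot {
	unsigned long base_gfn;  /* first guest frame number in the slot */
	unsigned long npages;    /* number of guest pages the slot covers */
	unsigned long *rmap;     /* one reverse-map entry per guest page */
};

/*
 * Return the rmap entry for @gfn, or NULL when the slot does not cover
 * it.  This mirrors the kernel's
 *     rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
 * pattern, with the implicit bounds assumption made explicit.
 */
unsigned long *rmap_for_gfn(struct toy_memslot *slot, unsigned long gfn)
{
	if (!slot || gfn < slot->base_gfn ||
	    gfn >= slot->base_gfn + slot->npages)
		return NULL;
	return &slot->rmap[gfn - slot->base_gfn];
}
```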
book3s_64_mmu_radix.c
    441   if (!memslot) {  in kvmppc_unmap_pte()
    442   memslot = gfn_to_memslot(kvm, gfn);  in kvmppc_unmap_pte()
    443   if (!memslot)  in kvmppc_unmap_pte()
    823   struct kvm_memory_slot *memslot,  in kvmppc_book3s_instantiate_page() (argument)
    950   struct kvm_memory_slot *memslot;  in kvmppc_book3s_radix_page_fault() (local)
    980   memslot = gfn_to_memslot(kvm, gfn);  in kvmppc_book3s_radix_page_fault()
    983   if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {  in kvmppc_book3s_radix_page_fault()
    1069  rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];  in kvm_age_radix()
    1144  rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];  in kvm_radix_test_clear_dirty()
    1192  gpa = memslot->base_gfn << PAGE_SHIFT;  in kvmppc_radix_flush_memslot()
    [all …]
book3s_hv_rm_mmu.c
    104  gfn -= memslot->base_gfn;  in kvmppc_update_dirty_map()
    112  struct kvm_memory_slot *memslot;  in kvmppc_set_dirty_from_hpte() (local)
    119  if (memslot && memslot->dirty_bitmap)  in kvmppc_set_dirty_from_hpte()
    129  struct kvm_memory_slot *memslot;  in revmap_for_hpte() (local)
    136  *memslotp = memslot;  in revmap_for_hpte()
    139  if (!memslot)  in revmap_for_hpte()
    142  rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);  in revmap_for_hpte()
    155  struct kvm_memory_slot *memslot;  in remove_revmap_chain() (local)
    232  if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {  in kvmppc_do_h_enter()
    896  if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))  in kvmppc_get_hpa()
    [all …]
trace_hv.h
    294  struct kvm_memory_slot *memslot, unsigned long ea,
    297  TP_ARGS(vcpu, hptep, memslot, ea, dsisr),
    317  __entry->base_gfn = memslot ? memslot->base_gfn : -1UL;
    318  __entry->slot_flags = memslot ? memslot->flags : 0;
book3s_hv_nested.c
    797   struct kvm_memory_slot *memslot;  in kvmhv_release_all_nested() (local)
    818   kvmhv_free_memslot_nest_rmap(memslot);  in kvmhv_release_all_nested()
    1029  const struct kvm_memory_slot *memslot,  in kvmhv_remove_nest_rmap_range() (argument)
    1036  if (!memslot)  in kvmhv_remove_nest_rmap_range()
    1038  gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;  in kvmhv_remove_nest_rmap_range()
    1045  unsigned long *rmap = &memslot->arch.rmap[gfn];  in kvmhv_remove_nest_rmap_range()
    1519  struct kvm_memory_slot *memslot;  in __kvmhv_nested_page_fault() (local)
    1589  memslot = gfn_to_memslot(kvm, gfn);  in __kvmhv_nested_page_fault()
    1590  if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {  in __kvmhv_nested_page_fault()
    1602  if (memslot->flags & KVM_MEM_READONLY) {  in __kvmhv_nested_page_fault()
    [all …]
book3s.h
    11  struct kvm_memory_slot *memslot);
book3s_64_vio.c
    367  struct kvm_memory_slot *memslot;  in kvmppc_tce_to_ua() (local)
    369  memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);  in kvmppc_tce_to_ua()
    370  if (!memslot)  in kvmppc_tce_to_ua()
    373  *ua = __gfn_to_hva_memslot(memslot, gfn) |  in kvmppc_tce_to_ua()
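The kvmppc_tce_to_ua() hits show the usual gfn-to-userspace-address translation via __gfn_to_hva_memslot(). A hedged sketch of the underlying arithmetic, assuming 4 KiB pages and simplified stand-in names (toy_memslot, gfn_to_hva):

```c
#define TOY_PAGE_SHIFT 12UL  /* assume 4 KiB pages for this sketch */

/* Simplified stand-in for struct kvm_memory_slot. */
struct toy_memslot {
	unsigned long base_gfn;        /* first gfn covered by the slot */
	unsigned long npages;
	unsigned long userspace_addr;  /* host VA where the slot is mapped */
};

/*
 * gfn -> host virtual address: the slot maps guest frames
 * [base_gfn, base_gfn + npages) linearly onto its user mapping,
 * so the hva is just an offset into userspace_addr.
 */
unsigned long gfn_to_hva(const struct toy_memslot *slot, unsigned long gfn)
{
	return slot->userspace_addr +
	       ((gfn - slot->base_gfn) << TOY_PAGE_SHIFT);
}
```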
book3s_hv.c
    5215  struct kvm_memory_slot *memslot;  in kvm_vm_ioctl_get_dirty_log_hv() (local)
    5228  memslot = id_to_memslot(slots, log->slot);  in kvm_vm_ioctl_get_dirty_log_hv()
    5230  if (!memslot || !memslot->dirty_bitmap)  in kvm_vm_ioctl_get_dirty_log_hv()
    5237  n = kvm_dirty_bitmap_bytes(memslot);  in kvm_vm_ioctl_get_dirty_log_hv()
    5254  p = memslot->dirty_bitmap;  in kvm_vm_ioctl_get_dirty_log_hv()
    5424  struct kvm_memory_slot *memslot;  in kvmppc_hv_setup_htab_rma() (local)
    5452  memslot = gfn_to_memslot(kvm, 0);  in kvmppc_hv_setup_htab_rma()
    5456  if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))  in kvmppc_hv_setup_htab_rma()
    5460  hva = memslot->userspace_addr;  in kvmppc_hv_setup_htab_rma()
    5483  kvmppc_map_vrma(vcpu, memslot, porder);  in kvmppc_hv_setup_htab_rma()
    [all …]
book3s_pr.c
    1868  struct kvm_memory_slot *memslot;  in kvm_vm_ioctl_get_dirty_log_pr() (local)
    1877  r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);  in kvm_vm_ioctl_get_dirty_log_pr()
    1883  ga = memslot->base_gfn << PAGE_SHIFT;  in kvm_vm_ioctl_get_dirty_log_pr()
    1884  ga_end = ga + (memslot->npages << PAGE_SHIFT);  in kvm_vm_ioctl_get_dirty_log_pr()
    1889  n = kvm_dirty_bitmap_bytes(memslot);  in kvm_vm_ioctl_get_dirty_log_pr()
    1890  memset(memslot->dirty_bitmap, 0, n);  in kvm_vm_ioctl_get_dirty_log_pr()
    1900  struct kvm_memory_slot *memslot)  in kvmppc_core_flush_memslot_pr() (argument)
/linux/arch/arm64/kvm/
mmu.c
    160   return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);  in memslot_is_logging()
    343   struct kvm_memory_slot *memslot)  in stage2_flush_memslot() (argument)
    361   struct kvm_memory_slot *memslot;  in stage2_flush_vm() (local)
    369   stage2_flush_memslot(kvm, memslot);  in stage2_flush_vm()
    1035  struct kvm_memory_slot *memslot;  in stage2_unmap_vm() (local)
    1176  if (WARN_ON_ONCE(!memslot))  in kvm_mmu_wp_memory_region()
    1180  end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;  in kvm_mmu_wp_memory_region()
    1201  struct kvm_memory_slot *memslot;  in kvm_mmu_split_memory_region() (local)
    1210  end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;  in kvm_mmu_split_memory_region()
    1271  size = memslot->npages * PAGE_SIZE;  in fault_supports_stage2_huge_mapping()
    [all …]
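memslot_is_logging() at line 160 encodes a convention that also shows up in the riscv kvm_riscv_gstage_map() hits below: a slot is being dirty-logged when it has a dirty bitmap and is not read-only. A minimal sketch; the flag value is an assumption of this sketch, not taken from the listing:

```c
#include <stdbool.h>

#define TOY_MEM_READONLY (1u << 1)  /* assumed flag bit for this sketch */

struct toy_memslot {
	unsigned long *dirty_bitmap;  /* non-NULL while dirty logging is on */
	unsigned int flags;
};

/*
 * A slot is "logging" when it has a dirty bitmap and can be written;
 * read-only slots never produce dirty pages, so they are excluded.
 */
bool slot_is_logging(const struct toy_memslot *slot)
{
	return slot->dirty_bitmap && !(slot->flags & TOY_MEM_READONLY);
}
```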
/linux/arch/loongarch/kvm/
mmu.c
    630  if (kvm_slot_dirty_track_enabled(memslot) && write)  in fault_supports_huge_mapping()
    633  if (kvm_hugepage_capable(memslot))  in fault_supports_huge_mapping()
    636  if (kvm_hugepage_incapable(memslot))  in fault_supports_huge_mapping()
    639  start = memslot->userspace_addr;  in fault_supports_huge_mapping()
    640  end = start + memslot->npages * PAGE_SIZE;  in fault_supports_huge_mapping()
    797  struct kvm_memory_slot *memslot;  in kvm_map_page() (local)
    806  memslot = gfn_to_memslot(kvm, gfn);  in kvm_map_page()
    881  if (fault_supports_huge_mapping(memslot, hva, write)) {  in kvm_map_page()
    883  level = host_pfn_mapping_level(kvm, gfn, memslot);  in kvm_map_page()
    921  mark_page_dirty_in_slot(kvm, memslot, gfn);  in kvm_map_page()
    [all …]
/linux/virt/kvm/
dirty_ring.c
    55  struct kvm_memory_slot *memslot;  in kvm_reset_dirty_gfn() (local)
    67  memslot = id_to_memslot(__kvm_memslots(kvm, as_id), id);  in kvm_reset_dirty_gfn()
    69  if (!memslot || (offset + __fls(mask)) >= memslot->npages)  in kvm_reset_dirty_gfn()
    73  kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask);  in kvm_reset_dirty_gfn()
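The kvm_reset_dirty_gfn() hit at line 69 rejects a harvested (offset, mask) pair whose highest set bit would point past the slot's last page. A sketch of that bounds test, under the assumption that bit i of mask selects gfn offset + i within the slot; toy_fls() stands in for the kernel's __fls():

```c
#include <stdbool.h>

/* Index of the most-significant set bit; stand-in for __fls(). */
unsigned int toy_fls(unsigned long mask)
{
	unsigned int bit = 0;

	while (mask >>= 1)
		bit++;
	return bit;
}

/*
 * Bit i of @mask marks gfn (offset + i) within the slot as dirty
 * (assumed semantics).  The pair is only valid when the highest
 * marked gfn still lies inside the slot's npages pages.
 */
bool dirty_range_in_slot(unsigned long offset, unsigned long mask,
			 unsigned long npages)
{
	return mask && offset + toy_fls(mask) < npages;
}
```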
kvm_main.c
    379   kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);  in kvm_flush_remote_tlbs_memslot()
    972   if (!memslot->dirty_bitmap)  in kvm_destroy_dirty_bitmap()
    975   vfree(memslot->dirty_bitmap);  in kvm_destroy_dirty_bitmap()
    1422  if (!memslot->dirty_bitmap)  in kvm_alloc_dirty_bitmap()
    2148  *memslot = NULL;  in kvm_get_dirty_log()
    2158  if (!(*memslot) || !(*memslot)->dirty_bitmap)  in kvm_get_dirty_log()
    2220  if (!memslot || !memslot->dirty_bitmap)  in kvm_get_dirty_log_protect()
    2335  if (!memslot || !memslot->dirty_bitmap)  in kvm_clear_dirty_log_protect()
    3477  ghc->memslot = NULL;  in __kvm_gfn_to_hva_cache_init()
    3600  if (memslot && kvm_slot_dirty_track_enabled(memslot)) {  in mark_page_dirty_in_slot()
    [all …]
pfncache.c
    279  gpc->memslot = NULL;  in __kvm_gpc_refresh()
    295  gpc->memslot = __gfn_to_memslot(slots, gfn);  in __kvm_gpc_refresh()
    296  gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);  in __kvm_gpc_refresh()
/linux/arch/riscv/kvm/
vcpu_exit.c
    16  struct kvm_memory_slot *memslot;  in gstage_page_fault() (local)
    24  memslot = gfn_to_memslot(vcpu->kvm, gfn);  in gstage_page_fault()
    25  hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);  in gstage_page_fault()
    43  ret = kvm_riscv_gstage_map(vcpu, memslot, fault_addr, hva,  in gstage_page_fault()
mmu.c
    336  struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);  in gstage_wp_memory_region() (local)
    337  phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;  in gstage_wp_memory_region()
    338  phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;  in gstage_wp_memory_region()
    405  void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)  in kvm_arch_sync_dirty_log() (argument)
    590  struct kvm_memory_slot *memslot,  in kvm_riscv_gstage_map() (argument)
    601  bool logging = (memslot->dirty_bitmap &&  in kvm_riscv_gstage_map()
    602  !(memslot->flags & KVM_MEM_READONLY)) ? true : false;  in kvm_riscv_gstage_map()
/linux/arch/powerpc/include/asm/
kvm_book3s.h
    198  const struct kvm_memory_slot *memslot,
    205  struct kvm_memory_slot *memslot,
    214  extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
    216  extern bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
    218  extern bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
    221  struct kvm_memory_slot *memslot, unsigned long *map);
    223  const struct kvm_memory_slot *memslot);
    241  extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
    258  struct kvm_memory_slot *memslot, unsigned long *map);
    260  struct kvm_memory_slot *memslot,
kvm_book3s_64.h
    488  static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,  in slot_is_aligned() (argument)
    495  return !(memslot->base_gfn & mask) && !(memslot->npages & mask);  in slot_is_aligned()
    635  const struct kvm_memory_slot *memslot,
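slot_is_aligned() at lines 488-495 tests whether a slot can be mapped with a given huge-page size: both the slot's first gfn and its page count must be multiples of the huge page's span in base pages. A worked sketch assuming 4 KiB base pages (the mask derivation below is this sketch's assumption; the kernel derives it from the actual page-size order):

```c
#include <stdbool.h>

#define TOY_PAGE_SHIFT 12u  /* assume 4 KiB base pages for this sketch */

struct toy_memslot {
	unsigned long base_gfn;
	unsigned long npages;
};

/*
 * @pageshift: log2 of the candidate huge-page size in bytes, e.g. 24
 * for 16 MiB.  A 16 MiB page spans 2^(24 - 12) = 4096 base pages, so
 * mask = 4095 and both base_gfn and npages must have those bits clear.
 */
bool toy_slot_is_aligned(const struct toy_memslot *slot,
			 unsigned int pageshift)
{
	unsigned long mask = (1UL << (pageshift - TOY_PAGE_SHIFT)) - 1;

	return !(slot->base_gfn & mask) && !(slot->npages & mask);
}
```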
/linux/include/linux/
kvm_types.h
    57  struct kvm_memory_slot *memslot;  (member)
    64  struct kvm_memory_slot *memslot;  (member)
kvm_host.h
    623   return ALIGN(memslot->npages, BITS_PER_LONG) / 8;  in kvm_dirty_bitmap_bytes()
    628   unsigned long len = kvm_dirty_bitmap_bytes(memslot);  in kvm_second_dirty_bitmap()
    630   return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);  in kvm_second_dirty_bitmap()
    1066  #define kvm_for_each_memslot(memslot, bkt, slots) \  (argument)
    1067  hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \
    1068  if (WARN_ON_ONCE(!memslot->npages)) { \
    1449  const struct kvm_memory_slot *memslot);
    1481  int *is_dirty, struct kvm_memory_slot **memslot);
    1847  if (!gpc->memslot)  in kvm_gpc_mark_dirty_in_slot()
    2317  return (memslot && memslot->id < KVM_USER_MEM_SLOTS &&  in kvm_is_visible_memslot()
    [all …]
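The kvm_dirty_bitmap_bytes() hit at line 623 sizes the per-slot dirty bitmap at one bit per page, rounded up to a whole number of longs, and kvm_second_dirty_bitmap() at lines 628-630 locates a second bitmap stored directly after the first. A runnable sketch of that arithmetic (the macro names are this sketch's own):

```c
#include <stdio.h>

#define TOY_BITS_PER_LONG (8 * sizeof(unsigned long))
#define TOY_ALIGN(x, a)   (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

/* One dirty bit per guest page, rounded up to whole longs. */
static unsigned long dirty_bitmap_bytes(unsigned long npages)
{
	return TOY_ALIGN(npages, TOY_BITS_PER_LONG) / 8;
}

int main(void)
{
	/*
	 * 1000 pages -> 1024 bits -> 128 bytes on a 64-bit host; the
	 * second bitmap then starts 128 / 8 = 16 longs past the first.
	 */
	unsigned long len = dirty_bitmap_bytes(1000);

	printf("%lu bytes, second bitmap at long index %lu\n",
	       len, (unsigned long)(len / sizeof(unsigned long)));
	return 0;
}
```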
/linux/arch/x86/include/asm/uv/
uv_geo.h
    57  char memslot;  /* The memory slot on the bus */  (member)
/linux/tools/testing/selftests/kvm/lib/
kvm_util.c
    1108  memslot2region(struct kvm_vm *vm, uint32_t memslot)  in memslot2region() (argument)
    1113  memslot)  in memslot2region()
    1114  if (region->region.slot == memslot)  in memslot2region()
    1118  " requested slot: %u\n", memslot);  in memslot2region()
    2011  vm_paddr_t paddr_min, uint32_t memslot,  in __vm_phy_pages_alloc() (argument)
    2024  region = memslot2region(vm, memslot);  in __vm_phy_pages_alloc()
    2041  paddr_min, vm->page_size, memslot);  in __vm_phy_pages_alloc()
    2057  uint32_t memslot)  in vm_phy_page_alloc() (argument)
    2059  return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);  in vm_phy_page_alloc()
/linux/tools/testing/selftests/kvm/include/
kvm_util.h
    144  memslot2region(struct kvm_vm *vm, uint32_t memslot);
    867  uint32_t memslot);
    869  vm_paddr_t paddr_min, uint32_t memslot,
    874  vm_paddr_t paddr_min, uint32_t memslot)  in vm_phy_pages_alloc() (argument)
    881  return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
/linux/arch/x86/kvm/mmu/
mmu.c
    6573  const struct kvm_memory_slot *memslot;  in kvm_rmap_zap_gfn_range() (local)
    6587  memslot = iter.slot;  in kvm_rmap_zap_gfn_range()
    6588  start = max(gfn_start, memslot->base_gfn);  in kvm_rmap_zap_gfn_range()
    6589  end = min(gfn_end, memslot->base_gfn + memslot->npages);  in kvm_rmap_zap_gfn_range()
    6639  const struct kvm_memory_slot *memslot,  in kvm_mmu_slot_remove_write_access() (argument)
    6887  const struct kvm_memory_slot *memslot,  in kvm_mmu_try_split_huge_pages() (argument)
    6906  const struct kvm_memory_slot *memslot,  in kvm_mmu_slot_try_split_huge_pages() (argument)
    6909  u64 start = memslot->base_gfn;  in kvm_mmu_slot_try_split_huge_pages()
    6910  u64 end = start + memslot->npages;  in kvm_mmu_slot_try_split_huge_pages()
    7003  const struct kvm_memory_slot *memslot)  in kvm_mmu_slot_leaf_clear_dirty()
    [all …]
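The kvm_rmap_zap_gfn_range() hits at lines 6588-6589 clamp the requested gfn range to each overlapping slot before zapping. A sketch of that intersection (helper names hypothetical):

```c
#include <stdbool.h>

struct toy_memslot {
	unsigned long base_gfn;
	unsigned long npages;
};

/*
 * Intersect [gfn_start, gfn_end) with the slot's own
 * [base_gfn, base_gfn + npages); returns false when the ranges do
 * not overlap, mirroring the max()/min() clamp in the listing.
 */
bool clamp_range_to_slot(const struct toy_memslot *slot,
			 unsigned long gfn_start, unsigned long gfn_end,
			 unsigned long *start, unsigned long *end)
{
	unsigned long slot_end = slot->base_gfn + slot->npages;

	*start = gfn_start > slot->base_gfn ? gfn_start : slot->base_gfn;
	*end = gfn_end < slot_end ? gfn_end : slot_end;
	return *start < *end;
}
```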