Lines Matching refs:gfn
289 static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm, in kvmppc_mark_gfn() argument
295 if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) { in kvmppc_mark_gfn()
296 unsigned long index = gfn - p->base_pfn; in kvmppc_mark_gfn()
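The range walk at lines 295-296 is the core of the gfn state table: each registered slot covers [base_pfn, base_pfn + nr_pfns) and keeps one state word per gfn. Below is a minimal user-space sketch of that walk; the slot array, the GFN_* flag values, and the function names are this sketch's own assumptions, not the kernel's definitions (the kernel walks a list_head and uses KVMPPC_GFN_* bits).

#include <stddef.h>

/* Assumed state bits; the kernel's KVMPPC_GFN_* values may differ. */
#define GFN_UVMEM_PFN	(1UL << 63)	/* backed by a device (uvmem) pfn */
#define GFN_MEM_PFN	(1UL << 62)	/* secure, backed by normal memory */
#define GFN_SHARED	(1UL << 61)	/* shared between guest and host */

struct uvmem_slot {
	unsigned long base_pfn;	/* first gfn this slot covers */
	unsigned long nr_pfns;	/* number of gfns covered */
	unsigned long *pfns;	/* one state word per gfn */
};

/* Find the slot covering gfn and record its new state; for the
 * UVMEM case the backing device pfn is stored alongside the flag. */
static void mark_gfn(struct uvmem_slot *slots, size_t nslots,
		     unsigned long gfn, unsigned long flag,
		     unsigned long uvmem_pfn)
{
	for (size_t i = 0; i < nslots; i++) {
		struct uvmem_slot *p = &slots[i];

		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
			unsigned long index = gfn - p->base_pfn;

			if (flag == GFN_UVMEM_PFN)
				p->pfns[index] = uvmem_pfn | flag;
			else
				p->pfns[index] = flag;
			return;
		}
	}
}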
308 static void kvmppc_gfn_secure_uvmem_pfn(unsigned long gfn, in kvmppc_gfn_secure_uvmem_pfn() argument
311 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_UVMEM_PFN, uvmem_pfn); in kvmppc_gfn_secure_uvmem_pfn()
315 static void kvmppc_gfn_secure_mem_pfn(unsigned long gfn, struct kvm *kvm) in kvmppc_gfn_secure_mem_pfn() argument
317 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0); in kvmppc_gfn_secure_mem_pfn()
321 static void kvmppc_gfn_shared(unsigned long gfn, struct kvm *kvm) in kvmppc_gfn_shared() argument
323 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_SHARED, 0); in kvmppc_gfn_shared()
327 static void kvmppc_gfn_remove(unsigned long gfn, struct kvm *kvm) in kvmppc_gfn_remove() argument
329 kvmppc_mark_gfn(gfn, kvm, 0, 0); in kvmppc_gfn_remove()
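Lines 308-329 are four thin wrappers over kvmppc_mark_gfn(), one per state. Continuing the sketch above, with the same caveats, they reduce to:

static void gfn_secure_uvmem_pfn(struct uvmem_slot *s, size_t n,
				 unsigned long gfn, unsigned long uvmem_pfn)
{
	mark_gfn(s, n, gfn, GFN_UVMEM_PFN, uvmem_pfn);
}

static void gfn_secure_mem_pfn(struct uvmem_slot *s, size_t n,
			       unsigned long gfn)
{
	mark_gfn(s, n, gfn, GFN_MEM_PFN, 0);
}

static void gfn_shared(struct uvmem_slot *s, size_t n, unsigned long gfn)
{
	mark_gfn(s, n, gfn, GFN_SHARED, 0);
}

static void gfn_remove(struct uvmem_slot *s, size_t n, unsigned long gfn)
{
	mark_gfn(s, n, gfn, 0, 0);	/* flag 0 clears every state bit */
}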
333 static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm, in kvmppc_gfn_is_uvmem_pfn() argument
339 if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) { in kvmppc_gfn_is_uvmem_pfn()
340 unsigned long index = gfn - p->base_pfn; in kvmppc_gfn_is_uvmem_pfn()
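The lookup at lines 339-340 mirrors the marking walk, but also decodes the stored device pfn when the UVMEM bit is set. The sketch continues the model above; GFN_PFN_MASK is an assumed layout (pfn in the low bits, state in the high bits).

#include <stdbool.h>

#define GFN_PFN_MASK	((1UL << 61) - 1)	/* assumed: bits below the state bits */

static bool gfn_is_uvmem_pfn(struct uvmem_slot *slots, size_t nslots,
			     unsigned long gfn, unsigned long *uvmem_pfn)
{
	for (size_t i = 0; i < nslots; i++) {
		struct uvmem_slot *p = &slots[i];

		if (gfn < p->base_pfn || gfn >= p->base_pfn + p->nr_pfns)
			continue;

		unsigned long entry = p->pfns[gfn - p->base_pfn];

		if (!(entry & GFN_UVMEM_PFN))
			return false;
		if (uvmem_pfn)
			*uvmem_pfn = entry & GFN_PFN_MASK;
		return true;
	}
	return false;	/* gfn not covered by any registered slot */
}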
362 struct kvm *kvm, unsigned long *gfn) in kvmppc_next_nontransitioned_gfn() argument
369 if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) { in kvmppc_next_nontransitioned_gfn()
379 for (i = *gfn; i < p->base_pfn + p->nr_pfns; i++) { in kvmppc_next_nontransitioned_gfn()
383 *gfn = i; in kvmppc_next_nontransitioned_gfn()
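kvmppc_next_nontransitioned_gfn() (lines 362-383) first locates the slot covering *gfn, then scans forward for the first entry carrying no state flag, i.e. a page still eligible for migration. The shape of that scan, in the same sketch model:

#define GFN_FLAG_MASK	(GFN_UVMEM_PFN | GFN_MEM_PFN | GFN_SHARED)

/* Advance *gfn to the next unflagged entry in its covering slot;
 * returns false once the slot is exhausted or *gfn is uncovered. */
static bool next_nontransitioned_gfn(struct uvmem_slot *slots, size_t nslots,
				     unsigned long *gfn)
{
	struct uvmem_slot *p = NULL;

	for (size_t i = 0; i < nslots; i++)
		if (*gfn >= slots[i].base_pfn &&
		    *gfn < slots[i].base_pfn + slots[i].nr_pfns) {
			p = &slots[i];
			break;
		}
	if (!p)
		return false;

	for (unsigned long i = *gfn; i < p->base_pfn + p->nr_pfns; i++)
		if (!(p->pfns[i - p->base_pfn] & GFN_FLAG_MASK)) {
			*gfn = i;
			return true;
		}
	return false;
}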
394 unsigned long gfn = memslot->base_gfn; in kvmppc_memslot_page_merge() local
395 unsigned long end, start = gfn_to_hva(kvm, gfn); in kvmppc_memslot_page_merge()
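Line 395 translates the memslot's first gfn to a host virtual address; the rest of kvmppc_memslot_page_merge() derives end from npages and walks that VA range. A hedged model of the translation follows; the memslot fields track KVM's slot geometry, but the helper body is this sketch's assumption, not KVM's exact implementation.

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

struct memslot {
	unsigned long base_gfn;		/* first guest frame number */
	unsigned long npages;		/* slot length in pages */
	unsigned long userspace_addr;	/* host VA backing base_gfn */
};

/* gfn_to_hva(): byte offset of gfn within the slot, from its base VA. */
static unsigned long gfn_to_hva(const struct memslot *slot, unsigned long gfn)
{
	return slot->userspace_addr + ((gfn - slot->base_gfn) << PAGE_SHIFT);
}

With that, the [start, end) range covering the whole slot is start = gfn_to_hva(slot, slot->base_gfn) and end = start + (slot->npages << PAGE_SHIFT).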
616 unsigned long uvmem_pfn, gfn; in kvmppc_uvmem_drop_pages() local
623 gfn = slot->base_gfn; in kvmppc_uvmem_drop_pages()
624 for (i = slot->npages; i; --i, ++gfn, addr += PAGE_SIZE) { in kvmppc_uvmem_drop_pages()
630 pr_err("Can't find VMA for gfn:0x%lx\n", gfn); in kvmppc_uvmem_drop_pages()
637 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) { in kvmppc_uvmem_drop_pages()
649 kvmppc_gfn_remove(gfn, kvm); in kvmppc_uvmem_drop_pages()
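Lines 623-649 are the teardown walk: one pass over every gfn in the slot, restoring any page still resident in secure device memory, then clearing its state word. Its shape in the sketch model, where restore_page() is a hypothetical stand-in for the pfn lookup and uv_page_out() path:

static void restore_page(unsigned long uvmem_pfn, unsigned long gfn)
{
	(void)uvmem_pfn;	/* hypothetical: would copy the page */
	(void)gfn;		/* back out of secure memory here   */
}

static void drop_pages(struct uvmem_slot *slots, size_t nslots,
		       const struct memslot *slot)
{
	unsigned long gfn = slot->base_gfn;

	for (unsigned long i = slot->npages; i; --i, ++gfn) {
		unsigned long uvmem_pfn;

		if (gfn_is_uvmem_pfn(slots, nslots, gfn, &uvmem_pfn))
			restore_page(uvmem_pfn, gfn);
		gfn_remove(slots, nslots, gfn);	/* forget this gfn */
	}
}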
796 unsigned long gfn = memslot->base_gfn; in kvmppc_uv_migrate_mem_slot() local
803 while (kvmppc_next_nontransitioned_gfn(memslot, kvm, &gfn)) { in kvmppc_uv_migrate_mem_slot()
805 start = gfn_to_hva(kvm, gfn); in kvmppc_uv_migrate_mem_slot()
815 (gfn << PAGE_SHIFT), kvm, PAGE_SHIFT, false); in kvmppc_uv_migrate_mem_slot()
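The migration driver (lines 796-815) repeatedly asks for the next unmarked gfn and pushes one page at a time into the secure side; note at line 815 how the guest physical address is rebuilt as gfn << PAGE_SHIFT. The loop shape follows, with page_in() as a hypothetical stand-in for kvmppc_svm_page_in(); the real loop terminates because migration flags the gfn, so this sketch advances gfn explicitly instead.

static int page_in(unsigned long start, unsigned long end, unsigned long gpa)
{
	(void)start; (void)end; (void)gpa;
	return 0;	/* hypothetical: one-page HV -> UV migration */
}

static int migrate_mem_slot(struct uvmem_slot *slots, size_t nslots,
			    const struct memslot *memslot)
{
	unsigned long gfn = memslot->base_gfn;

	while (next_nontransitioned_gfn(slots, nslots, &gfn)) {
		unsigned long start = gfn_to_hva(memslot, gfn);
		unsigned long end = start + PAGE_SIZE;	/* one page */
		int ret = page_in(start, end, gfn << PAGE_SHIFT);

		if (ret)
			return ret;
		gfn++;	/* sketch-only: the kernel relies on the flag */
	}
	return 0;
}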
884 unsigned long gfn = gpa >> page_shift; in kvmppc_share_page() local
890 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) { in kvmppc_share_page()
903 pfn = gfn_to_pfn(kvm, gfn); in kvmppc_share_page()
908 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) { in kvmppc_share_page()
919 kvmppc_gfn_shared(gfn, kvm); in kvmppc_share_page()
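kvmppc_share_page() (lines 884-919) converts the hypercall's gpa to a gfn, and notably re-tests the state at line 908 after gfn_to_pfn() may have slept. A minimal sketch of that flow, eliding the locking and the actual uv_page_in() call:

static int share_page(struct uvmem_slot *slots, size_t nslots,
		      unsigned long gpa, unsigned long page_shift)
{
	unsigned long gfn = gpa >> page_shift;
	unsigned long uvmem_pfn;

	if (gfn_is_uvmem_pfn(slots, nslots, gfn, &uvmem_pfn)) {
		/* page lives in secure device memory: it must be
		 * paged out to a normal pfn before it can be shared */
	}

	/* ... gfn_to_pfn(), recheck the state, uv_page_in() ... */

	gfn_shared(slots, nslots, gfn);	/* record the final state */
	return 0;
}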
942 unsigned long gfn = gpa >> page_shift; in kvmppc_h_svm_page_in() local
961 start = gfn_to_hva(kvm, gfn); in kvmppc_h_svm_page_in()
967 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL)) in kvmppc_h_svm_page_in()
1048 unsigned long gfn = gpa >> page_shift; in kvmppc_h_svm_page_out() local
1066 start = gfn_to_hva(kvm, gfn); in kvmppc_h_svm_page_out()
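Both H_SVM_PAGE_IN (lines 942-967) and H_SVM_PAGE_OUT (lines 1048-1066) share the same prologue: decode the gfn from the gpa, then translate it to a host VA for the VMA lookup. Sketched below; H_PARAMETER is assumed to stand for the usual PAPR failure status, and the bounds check stands in for KVM's memslot lookup.

#define H_PARAMETER	(-4L)	/* assumed hcall status value */

static long svm_page_prologue(const struct memslot *slot,
			      unsigned long gpa, unsigned long page_shift,
			      unsigned long *start)
{
	if (page_shift != PAGE_SHIFT)
		return H_PARAMETER;	/* only base page size supported */

	unsigned long gfn = gpa >> page_shift;

	if (gfn < slot->base_gfn || gfn >= slot->base_gfn + slot->npages)
		return H_PARAMETER;	/* gpa outside this memslot */

	*start = gfn_to_hva(slot, gfn);
	return 0;
}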
1083 int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn) in kvmppc_send_page_to_uv() argument
1088 pfn = gfn_to_pfn(kvm, gfn); in kvmppc_send_page_to_uv()
1093 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL)) in kvmppc_send_page_to_uv()
1096 ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT, in kvmppc_send_page_to_uv()
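kvmppc_send_page_to_uv() (lines 1083-1096) skips gfns already resident in secure memory, then issues the UV copy with byte addresses built from both frame numbers. A sketch, with uv_page_in_stub() hypothetical and an identity pfn map standing in for gfn_to_pfn():

static int uv_page_in_stub(unsigned long src_ra, unsigned long dst_gpa,
			   unsigned long page_shift)
{
	(void)src_ra; (void)dst_gpa; (void)page_shift;
	return 0;	/* hypothetical: ultracall copying one page in */
}

static int send_page_to_uv(struct uvmem_slot *slots, size_t nslots,
			   unsigned long gfn)
{
	if (gfn_is_uvmem_pfn(slots, nslots, gfn, NULL))
		return 0;	/* already resident in secure memory */

	unsigned long pfn = gfn;	/* identity map stands in for gfn_to_pfn() */

	return uv_page_in_stub(pfn << PAGE_SHIFT, gfn << PAGE_SHIFT,
			       PAGE_SHIFT);
}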