Searched refs:pfns (Results 1 – 25 of 25) sorted by relevance

/linux-6.3-rc2/drivers/iommu/iommufd/
pages.c
  298    batch->pfns[0] = batch->pfns[batch->end - 1] +            in batch_clear_carry()
  1026   return pfns->batch_start_index == pfns->last_index + 1;   in pfn_reader_done()
  1034   pfns->batch_start_index = pfns->batch_end_index;          in pfn_reader_next()
  1036   while (pfns->batch_end_index != pfns->last_index + 1) {   in pfn_reader_next()
  1051   pfns->batch_start_index + pfns->batch.total_pfns;         in pfn_reader_next()
  1052   if (pfns->batch_end_index == pfns->span.last_used + 1)    in pfn_reader_next()
  1095   if (pfns->user.upages_end > pfns->batch_end_index) {      in pfn_reader_release_pins()
  1103   pfns->user.upages_end = pfns->batch_end_index;            in pfn_reader_release_pins()
  1105   if (pfns->batch_start_index != pfns->batch_end_index) {   in pfn_reader_release_pins()
  1107   pfns->batch_start_index = pfns->batch_end_index;          in pfn_reader_release_pins()
  [all …]
selftest.c
  87    struct xarray pfns;                                    member
  120   xa_init(&mock->pfns);                                  in mock_domain_alloc()
  129   WARN_ON(!xa_empty(&mock->pfns));                       in mock_domain_free()
  160   old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,  in mock_domain_map_pages()
  167   xa_erase(&mock->pfns,                                  in mock_domain_map_pages()
  200   ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE); in mock_domain_unmap_pages()
  234   ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);  in mock_domain_iova_to_phys()
  371   ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);  in iommufd_test_md_check_pa()
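The mock domain above keeps its IOVA-to-PFN mappings in an xarray indexed by IOVA page number. A minimal sketch of that pattern, assuming a hypothetical mock_dom type and a stand-in for the selftest's MOCK_IO_PAGE_SIZE, with error paths trimmed:

    #include <linux/xarray.h>

    #define IO_PAGE_SIZE 4096UL   /* stand-in for MOCK_IO_PAGE_SIZE */

    struct mock_dom {
            struct xarray pfns;   /* IOVA page index -> opaque map entry */
    };

    /* Store one entry per IO page; xa_store() returns the previous entry,
     * or an xa_err()-encoded pointer on allocation failure. */
    static int mock_map_one(struct mock_dom *dom, unsigned long iova, void *ent)
    {
            void *old = xa_store(&dom->pfns, iova / IO_PAGE_SIZE, ent, GFP_KERNEL);

            return xa_is_err(old) ? xa_err(old) : 0;
    }

    /* Lookup (and teardown via xa_erase()) uses the same index arithmetic. */
    static void *mock_lookup(struct mock_dom *dom, unsigned long iova)
    {
            return xa_load(&dom->pfns, iova / IO_PAGE_SIZE);
    }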
/linux-6.3-rc2/drivers/gpu/drm/amd/amdgpu/
amdgpu_hmm.c
  165   unsigned long *pfns;                                      in amdgpu_hmm_range_get_pages() local
  172   pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL); in amdgpu_hmm_range_get_pages()
  173   if (unlikely(!pfns)) {                                    in amdgpu_hmm_range_get_pages()
  182   hmm_range->hmm_pfns = pfns;                               in amdgpu_hmm_range_get_pages()
  219   hmm_range->hmm_pfns = pfns;                               in amdgpu_hmm_range_get_pages()
  227   pages[i] = hmm_pfn_to_page(pfns[i]);                      in amdgpu_hmm_range_get_pages()
  234   kvfree(pfns);                                             in amdgpu_hmm_range_get_pages()
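The amdgpu helper is an instance of the standard hmm_range_fault() flow: size a pfns array to the range, point hmm_range->hmm_pfns at it, fault, then convert entries with hmm_pfn_to_page(). A condensed sketch of that flow; the mmu_interval_notifier setup, sequence validation, and -EBUSY retry loop a real caller needs are deliberately elided:

    #include <linux/hmm.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    static int fault_range_pages(struct mm_struct *mm,
                                 struct mmu_interval_notifier *notifier,
                                 unsigned long start, unsigned long npages,
                                 struct page **pages)
    {
            struct hmm_range range = {
                    .notifier = notifier,
                    .start = start,
                    .end = start + npages * PAGE_SIZE,
                    .default_flags = HMM_PFN_REQ_FAULT,
            };
            unsigned long *pfns;
            unsigned long i;
            int ret;

            pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
            if (!pfns)
                    return -ENOMEM;
            range.hmm_pfns = pfns;

            mmap_read_lock(mm);
            ret = hmm_range_fault(&range);  /* may return -EBUSY; retry elided */
            mmap_read_unlock(mm);

            if (!ret)
                    for (i = 0; i < npages; i++)
                            pages[i] = hmm_pfn_to_page(pfns[i]);

            kvfree(pfns);
            return ret;
    }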
/linux-6.3-rc2/drivers/xen/
xlate_mmu.c
  193   xen_pfn_t *pfns;                                                member
  201   info->pfns[info->idx++] = gfn;                                  in setup_balloon_gfn()
  218   xen_pfn_t *pfns;                                                in xen_xlate_map_ballooned_pages() local
  230   pfns = kcalloc(nr_grant_frames, sizeof(pfns[0]), GFP_KERNEL);   in xen_xlate_map_ballooned_pages()
  231   if (!pfns) {                                                    in xen_xlate_map_ballooned_pages()
  240   kfree(pfns);                                                    in xen_xlate_map_ballooned_pages()
  244   data.pfns = pfns;                                               in xen_xlate_map_ballooned_pages()
  254   kfree(pfns);                                                    in xen_xlate_map_ballooned_pages()
  259   *gfns = pfns;                                                   in xen_xlate_map_ballooned_pages()
privcmd.c
  728   xen_pfn_t *pfns = NULL;                                              in privcmd_ioctl_mmap_resource() local
  763   pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN); in privcmd_ioctl_mmap_resource()
  764   if (!pfns) {                                                         in privcmd_ioctl_mmap_resource()
  784   pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);                              in privcmd_ioctl_mmap_resource()
  791   set_xen_guest_handle(xdata.frame_list, pfns);                        in privcmd_ioctl_mmap_resource()
  807   int num, *errs = (int *)pfns;                                        in privcmd_ioctl_mmap_resource()
  809   BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns));                         in privcmd_ioctl_mmap_resource()
  812   pfns, kdata.num, errs,                                               in privcmd_ioctl_mmap_resource()
  831   kfree(pfns);                                                         in privcmd_ioctl_mmap_resource()
/linux-6.3-rc2/arch/x86/hyperv/
ivm.c
  368   unsigned long *pfns = kcalloc(size / PAGE_SIZE,        in hv_map_memory() local
  373   if (!pfns)                                             in hv_map_memory()
  377   pfns[i] = vmalloc_to_pfn(addr + i * PAGE_SIZE) +       in hv_map_memory()
  380   vaddr = vmap_pfn(pfns, size / PAGE_SIZE, PAGE_KERNEL_IO); in hv_map_memory()
  381   kfree(pfns);                                           in hv_map_memory()
/linux-6.3-rc2/drivers/gpu/drm/i915/gem/
i915_gem_pages.c
  330   unsigned long stack[32], *pfns = stack, i;                          in i915_gem_object_map_pfn() local
  339   pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);            in i915_gem_object_map_pfn()
  340   if (!pfns)                                                          in i915_gem_object_map_pfn()
  346   pfns[i++] = (iomap + addr) >> PAGE_SHIFT;                           in i915_gem_object_map_pfn()
  347   vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO)); in i915_gem_object_map_pfn()
  348   if (pfns != stack)                                                  in i915_gem_object_map_pfn()
  349   kvfree(pfns);                                                       in i915_gem_object_map_pfn()
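Both hv_map_memory() and this i915 helper end the same way: build an array of raw PFNs, hand it to vmap_pfn(), and free the array immediately, since vmap_pfn() only reads it during the call (see the mm/vmalloc.c hits further down; it is meant for PFNs without struct page backing and WARNs on pfn_valid() PFNs). A minimal sketch of the i915-style stack-or-heap idiom; the contiguous PFN source here is just a placeholder:

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    static void *map_pfn_range(unsigned long base_pfn, unsigned int n_pfn)
    {
            unsigned long stack[32], *pfns = stack; /* small ranges skip the allocation */
            unsigned int i;
            void *vaddr;

            if (n_pfn > ARRAY_SIZE(stack)) {
                    pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
                    if (!pfns)
                            return NULL;
            }

            for (i = 0; i < n_pfn; i++)
                    pfns[i] = base_pfn + i;    /* placeholder PFN source */

            /* vmap_pfn() only reads the array during the call, so it can be
             * freed right away; the mapping itself is torn down with vunmap(). */
            vaddr = vmap_pfn(pfns, n_pfn, PAGE_KERNEL_IO);
            if (pfns != stack)
                    kvfree(pfns);
            return vaddr;
    }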
/linux-6.3-rc2/Documentation/translations/zh_CN/mm/
hmm.rst
  134   Both functions copy CPU page table entries into their pfns array argument. Each entry in that array corresponds to a virtual…
  171   /* Use pfns array content to update device page table,
  185   …fault or snapshot policy without having to set them for each entry in the pfns array.
  199   range->pfns[index_of_write] = HMM_PFN_REQ_WRITE;
memory-model.rst
  115   …performs enough memory hotplug for the given range of pfns to enable :c:func:`pfn_to_page`,
/linux-6.3-rc2/drivers/gpu/drm/nouveau/
nouveau_svm.h
  34    void nouveau_pfns_free(u64 *pfns);
  36    unsigned long addr, u64 *pfns, unsigned long npages);
nouveau_dmem.c
  665   dma_addr_t *dma_addrs, u64 *pfns)                               in nouveau_dmem_migrate_chunk() argument
  672   args->src[i], dma_addrs + nr_dma, pfns + i);                    in nouveau_dmem_migrate_chunk()
  681   nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i); in nouveau_dmem_migrate_chunk()
  707   u64 *pfns;                                                      in nouveau_dmem_migrate_vma() local
  724   pfns = nouveau_pfns_alloc(max);                                 in nouveau_dmem_migrate_vma()
  725   if (!pfns)                                                      in nouveau_dmem_migrate_vma()
  740   pfns);                                                          in nouveau_dmem_migrate_vma()
  746   nouveau_pfns_free(pfns);                                        in nouveau_dmem_migrate_vma()
nouveau_svm.c
  895   nouveau_pfns_to_args(void *pfns)                                in nouveau_pfns_to_args() argument
  897   return container_of(pfns, struct nouveau_pfnmap_args, p.phys);  in nouveau_pfns_to_args()
  917   nouveau_pfns_free(u64 *pfns)                                    in nouveau_pfns_free() argument
  919   struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);  in nouveau_pfns_free()
  926   unsigned long addr, u64 *pfns, unsigned long npages)            in nouveau_pfns_map() argument
  928   struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);  in nouveau_pfns_map()
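nouveau's allocator only ever hands callers the interior phys[] array of a nouveau_pfnmap_args allocation, and nouveau_pfns_to_args() recovers the enclosing struct with container_of(), taking a void * parameter so the macro's member type check degrades gracefully for the flexible array. A toy sketch of that idiom with hypothetical names:

    #include <linux/container_of.h>
    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct pfn_args {
            unsigned int npages;
            u64 phys[];             /* callers only ever see this member */
    };

    /* Hand out the interior array... */
    static u64 *pfn_alloc(unsigned int npages)
    {
            struct pfn_args *args = kzalloc(struct_size(args, phys, npages),
                                            GFP_KERNEL);

            if (!args)
                    return NULL;
            args->npages = npages;
            return args->phys;
    }

    /* ...and recover the container later, as nouveau_pfns_to_args() does.
     * The void * parameter sidesteps container_of()'s member type check,
     * since phys is a flexible array rather than a u64 *. */
    static void pfn_free(void *pfns)
    {
            kfree(container_of(pfns, struct pfn_args, phys));
    }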
/linux-6.3-rc2/drivers/virtio/
virtio_balloon.c
  108   __virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];              member
  150   sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns); in tell_host()
  189   __virtio32 pfns[], struct page *page)                        in set_page_pfns() argument
  200   pfns[i] = cpu_to_virtio32(vb->vdev,                          in set_page_pfns()
  212   num = min(num, ARRAY_SIZE(vb->pfns));                        in fill_balloon()
  237   set_page_pfns(vb, vb->pfns + vb->num_pfns, page);            in fill_balloon()
  276   num = min(num, ARRAY_SIZE(vb->pfns));                        in leak_balloon()
  286   set_page_pfns(vb, vb->pfns + vb->num_pfns, page);            in leak_balloon()
  784   set_page_pfns(vb, vb->pfns, newpage);                        in virtballoon_migratepage()
  792   set_page_pfns(vb, vb->pfns, page);                           in virtballoon_migratepage()
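The balloon protocol always counts in 4 KiB frames (VIRTIO_BALLOON_PFN_SHIFT), so on larger-page architectures set_page_pfns() expands one struct page into VIRTIO_BALLOON_PAGES_PER_PAGE consecutive entries. A sketch of just that expansion, leaving out the cpu_to_virtio32() endianness conversion the real driver applies:

    #include <linux/mm.h>
    #include <linux/virtio_balloon.h>   /* VIRTIO_BALLOON_PFN_SHIFT (12) */

    #define BALLOON_PAGES_PER_PAGE \
            ((unsigned int)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT))

    /* One kernel page -> PAGE_SIZE/4K consecutive balloon frame numbers. */
    static void page_to_balloon_pfns(u32 *pfns, struct page *page)
    {
            u32 first = page_to_pfn(page) <<
                        (PAGE_SHIFT - VIRTIO_BALLOON_PFN_SHIFT);
            unsigned int i;

            for (i = 0; i < BALLOON_PAGES_PER_PAGE; i++)
                    pfns[i] = first + i;
    }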
/linux-6.3-rc2/arch/powerpc/kvm/
book3s_hv_uvmem.c
  230   unsigned long *pfns;                                 member
  255   p->pfns = vcalloc(slot->npages, sizeof(*p->pfns));   in kvmppc_uvmem_slot_init()
  256   if (!p->pfns) {                                      in kvmppc_uvmem_slot_init()
  280   vfree(p->pfns);                                      in kvmppc_uvmem_slot_free()
  299   p->pfns[index] = uvmem_pfn | flag;                   in kvmppc_mark_gfn()
  301   p->pfns[index] = flag;                               in kvmppc_mark_gfn()
  342   if (p->pfns[index] & KVMPPC_GFN_UVMEM_PFN) {         in kvmppc_gfn_is_uvmem_pfn()
  344   *uvmem_pfn = p->pfns[index] &                        in kvmppc_gfn_is_uvmem_pfn()
  382   if (!(p->pfns[index] & KVMPPC_GFN_FLAG_MASK)) {      in kvmppc_next_nontransitioned_gfn()
/linux-6.3-rc2/mm/
sparse.c
  204   unsigned long pfns;                                      in subsection_map_init() local
  206   pfns = min(nr_pages, PAGES_PER_SECTION                   in subsection_map_init()
  209   subsection_mask_set(ms->usage->subsection_map, pfn, pfns); in subsection_map_init()
  212   pfns, subsection_map_index(pfn),                         in subsection_map_init()
  213   subsection_map_index(pfn + pfns - 1));                   in subsection_map_init()
  215   pfn += pfns;                                             in subsection_map_init()
  216   nr_pages -= pfns;                                        in subsection_map_init()
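subsection_map_init() walks its pfn range one memory section at a time: each pass takes min(nr_pages, distance to the next PAGES_PER_SECTION boundary), so only the first and last chunks can be partial. A small userspace C sketch of the arithmetic, with a made-up section size (the real value is architecture-dependent):

    #include <stdio.h>

    #define PAGES_PER_SECTION 32768UL   /* illustrative only */

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            unsigned long pfn = 30000, nr_pages = 40000;

            while (nr_pages) {
                    /* Clamp this chunk at the next section boundary. */
                    unsigned long pfns = min_ul(nr_pages,
                                    PAGES_PER_SECTION - pfn % PAGES_PER_SECTION);

                    printf("chunk: pfn=%lu..%lu (%lu pages)\n",
                           pfn, pfn + pfns - 1, pfns);
                    pfn += pfns;
                    nr_pages -= pfns;
            }
            return 0;
    }
    /* Prints three chunks: 30000..32767 (2768 pages),
     * 32768..65535 (32768 pages), 65536..69999 (4464 pages). */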
vmalloc.c
  2838   unsigned long *pfns;                                              member
  2847   if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx])))               in vmap_pfn_apply()
  2849   *pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot)); in vmap_pfn_apply()
  2862   void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot) in vmap_pfn() argument
  2864   struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) }; in vmap_pfn()
/linux-6.3-rc2/lib/
test_hmm.c
  209    unsigned long *pfns = range->hmm_pfns;   in dmirror_do_fault() local
  214    pfn++, pfns++) {                         in dmirror_do_fault()
  222    WARN_ON(*pfns & HMM_PFN_ERROR);          in dmirror_do_fault()
  223    WARN_ON(!(*pfns & HMM_PFN_VALID));       in dmirror_do_fault()
  225    page = hmm_pfn_to_page(*pfns);           in dmirror_do_fault()
  229    if (*pfns & HMM_PFN_WRITE)               in dmirror_do_fault()
  332    unsigned long pfns[64];                  in dmirror_fault() local
  335    .hmm_pfns = pfns,                        in dmirror_fault()
  1178   unsigned long pfns[64];                  in dmirror_snapshot() local
  1182   .hmm_pfns = pfns,                        in dmirror_snapshot()
  [all …]
/linux-6.3-rc2/drivers/iommu/
iova.c
  632   unsigned long pfns[IOVA_MAG_SIZE];                          member
  667   struct iova *iova = private_find_iova(iovad, mag->pfns[i]); in iova_magazine_free_pfns()
  698   for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)      in iova_magazine_pop()
  703   pfn = mag->pfns[i];                                         in iova_magazine_pop()
  704   mag->pfns[i] = mag->pfns[--mag->size];                      in iova_magazine_pop()
  711   mag->pfns[mag->size++] = pfn;                               in iova_magazine_push()
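iova_magazine_pop() above may not take the top slot: it scans down for a pfn that satisfies limit_pfn, then back-fills the hole with the last element, which is fine because entries within a magazine are unordered. A standalone sketch of that swap-remove, with hypothetical names and a bounds check that the hot-path kernel code leaves to its caller:

    #include <stdbool.h>

    #define MAG_SIZE 128    /* illustrative capacity */

    struct magazine {
            unsigned long size;
            unsigned long pfns[MAG_SIZE];
    };

    /* Pop any entry with pfn <= limit, back-filling the hole with the
     * last element so the array stays dense. Returns false if none fits. */
    static bool mag_pop(struct magazine *mag, unsigned long limit,
                        unsigned long *out)
    {
            long i;

            for (i = (long)mag->size - 1; i >= 0; i--) {
                    if (mag->pfns[i] > limit)
                            continue;
                    *out = mag->pfns[i];
                    mag->pfns[i] = mag->pfns[--mag->size];  /* swap-remove */
                    return true;
            }
            return false;
    }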
/linux-6.3-rc2/arch/s390/include/asm/
gmap.h
  152   void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns);
/linux-6.3-rc2/include/xen/
xen-ops.h
  189   int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr,
/linux-6.3-rc2/include/linux/
vmalloc.h
  169   void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
/linux-6.3-rc2/arch/s390/mm/
gmap.c
  2714   unsigned long pfns[GATHER_GET_PAGES];                    member
  2726   p->pfns[p->count] = phys_to_pfn(pte_val(pte));           in s390_gather_pages()
  2741   void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns) in s390_uv_destroy_pfns() argument
  2747   uv_destroy_owned_page(pfn_to_phys(pfns[i]));             in s390_uv_destroy_pfns()
  2749   put_page(pfn_to_page(pfns[i]));                          in s390_uv_destroy_pfns()
  2781   s390_uv_destroy_pfns(state.count, state.pfns);           in __s390_uv_destroy_range()
/linux-6.3-rc2/Documentation/mm/
hmm.rst
  168   Both functions copy CPU page table entries into their pfns array argument. Each
  208   /* Use pfns array content to update device page table,
  225   for each entry in the pfns array.
  241   range->pfns[index_of_write] = HMM_PFN_REQ_WRITE;
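hmm.rst describes two knobs: default_flags applies one fault/snapshot policy to the whole range, while writing an HMM_PFN_REQ_* value into an entry before the call requests stronger permissions for that page alone (the array field is named hmm_pfns in the current struct hmm_range, though the doc snippet still says range->pfns). A minimal sketch combining both, following the doc's own example:

    #include <linux/hmm.h>

    /* Fault the whole range readable, but insist on write access for one
     * specific page, before calling hmm_range_fault(). pfn_flags_mask
     * selects which per-entry request bits are honored. */
    static void request_one_writable(struct hmm_range *range,
                                     unsigned long index_of_write)
    {
            range->default_flags = HMM_PFN_REQ_FAULT;
            range->pfn_flags_mask = HMM_PFN_REQ_WRITE;
            range->hmm_pfns[index_of_write] = HMM_PFN_REQ_WRITE;
    }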
memory-model.rst
  147   :c:func:`get_user_pages` service for the given range of pfns. Since the
/linux-6.3-rc2/Documentation/virt/kvm/x86/
mmu.rst
  55   spte shadow pte (referring to pfns)
