/drivers/iommu/iommufd/
pages.c:
     298  batch->pfns[0] = batch->pfns[batch->end - 1] +   [in batch_clear_carry()]
    1080  return pfn_reader_user_update_pinned(&pfns->user, pfns->pages);   [in pfn_reader_update_pinned()]
    1121  batch_from_xarray(&pfns->batch, &pfns->pages->pinned_pfns,   [in pfn_reader_fill_span()]
    1165  return pfns->batch_start_index == pfns->last_index + 1;   [in pfn_reader_done()]
    1173  pfns->batch_start_index = pfns->batch_end_index;   [in pfn_reader_next()]
    1175  while (pfns->batch_end_index != pfns->last_index + 1) {   [in pfn_reader_next()]
    1190  pfns->batch_start_index + pfns->batch.total_pfns;   [in pfn_reader_next()]
    1191  if (pfns->batch_end_index == pfns->span.last_used + 1)   [in pfn_reader_next()]
    1253  if (pfns->batch_start_index != pfns->batch_end_index) {   [in pfn_reader_release_pins()]
    1255  pfns->batch_start_index = pfns->batch_end_index;   [in pfn_reader_release_pins()]
    [all …]
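
The pfn_reader excerpts above all revolve around one cursor invariant: a batch window [batch_start_index, batch_end_index) slides across the span up to last_index, and the reader is done when the window start reaches last_index + 1 (the pfn_reader_done() line). A minimal userspace sketch of that pattern, with hypothetical names (mini_reader is not the kernel structure):

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical miniature of the iommufd pfn_reader cursor: a batch
 * window [batch_start_index, batch_end_index) slides over [0, last_index];
 * "done" is start == last_index + 1, as in the excerpts above. */
struct mini_reader {
        unsigned long batch_start_index;
        unsigned long batch_end_index;
        unsigned long last_index;
};

static bool mini_reader_done(const struct mini_reader *r)
{
        return r->batch_start_index == r->last_index + 1;
}

static void mini_reader_next(struct mini_reader *r, unsigned long batch_max)
{
        /* Consume the previous batch, then refill up to batch_max pfns. */
        r->batch_start_index = r->batch_end_index;
        if (mini_reader_done(r))
                return;
        r->batch_end_index = r->batch_start_index + batch_max;
        if (r->batch_end_index > r->last_index + 1)
                r->batch_end_index = r->last_index + 1;
}

int main(void)
{
        struct mini_reader r = { 0, 0, 9 };     /* 10 pfns, batches of 4 */

        for (mini_reader_next(&r, 4); !mini_reader_done(&r);
             mini_reader_next(&r, 4))
                printf("batch [%lu, %lu)\n", r.batch_start_index,
                       r.batch_end_index);
        return 0;
}
```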
|
selftest.c:
     129  struct xarray pfns;   [member]
     356  ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);   [in mock_test_and_clear_dirty()]
     366  old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE,   [in mock_test_and_clear_dirty()]
     391  ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);   [in mock_domain_read_and_clear_dirty()]
     489  xa_init(&mock->pfns);   [in mock_domain_alloc_paging_flags()]
     500  WARN_ON(!xa_empty(&mock->pfns));   [in mock_domain_free()]
     533  old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,   [in mock_domain_map_pages()]
     540  xa_erase(&mock->pfns,   [in mock_domain_map_pages()]
     612  ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);   [in mock_domain_iova_to_phys()]
    1823  ent = xa_load(&mock->pfns, cur / page_size);   [in iommufd_test_dirty()]
    [all …]
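
The mock domain in selftest.c is essentially a software page table: an xarray indexed by iova / MOCK_IO_PAGE_SIZE whose entries record the backing pfn. A kernel-context sketch of that shape (mock_pt, IO_PG_SIZE, and both helpers are made-up names; this will not build outside a kernel tree, and the xarray must have been set up with xa_init() first, as line 489 shows):

```c
#include <linux/xarray.h>

#define IO_PG_SIZE 4096UL       /* stands in for MOCK_IO_PAGE_SIZE */

struct mock_pt {
        struct xarray pfns;     /* index: iova / IO_PG_SIZE, entry: pfn */
};

static int mock_pt_map(struct mock_pt *pt, unsigned long iova,
                       unsigned long pfn)
{
        /* Pack the pfn as an xarray value entry, keyed by IO page number. */
        void *old = xa_store(&pt->pfns, iova / IO_PG_SIZE,
                             xa_mk_value(pfn), GFP_KERNEL);

        return xa_is_err(old) ? xa_err(old) : 0;
}

static unsigned long mock_pt_iova_to_pfn(struct mock_pt *pt,
                                         unsigned long iova)
{
        void *ent = xa_load(&pt->pfns, iova / IO_PG_SIZE);

        return ent ? xa_to_value(ent) : 0;      /* 0: not mapped */
}
```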
|
/drivers/xen/
xlate_mmu.c:
     193  xen_pfn_t *pfns;   [member]
     201  info->pfns[info->idx++] = gfn;   [in setup_balloon_gfn()]
     218  xen_pfn_t *pfns;   [in xen_xlate_map_ballooned_pages(), local]
     230  pfns = kcalloc(nr_grant_frames, sizeof(pfns[0]), GFP_KERNEL);   [in xen_xlate_map_ballooned_pages()]
     231  if (!pfns) {   [in xen_xlate_map_ballooned_pages()]
     240  kfree(pfns);   [in xen_xlate_map_ballooned_pages()]
     244  data.pfns = pfns;   [in xen_xlate_map_ballooned_pages()]
     254  kfree(pfns);   [in xen_xlate_map_ballooned_pages()]
     259  *gfns = pfns;   [in xen_xlate_map_ballooned_pages()]
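
xen_xlate_map_ballooned_pages() shows a common ownership pattern: the pfn array is allocated and filled locally, freed on every failure path (lines 240 and 254), and handed to the caller through *gfns only on success (line 259). A portable sketch of the same discipline (map_frames and fill_frames are hypothetical; the real code fills the array from ballooned pages):

```c
#include <stdlib.h>

typedef unsigned long xen_pfn_t;        /* stand-in for the Xen type */

/* Hypothetical filler; the driver collects gfns via a balloon callback. */
static int fill_frames(xen_pfn_t *pfns, size_t n)
{
        for (size_t i = 0; i < n; i++)
                pfns[i] = i;            /* placeholder gfn values */
        return 0;
}

static int map_frames(xen_pfn_t **gfns, size_t nr_frames)
{
        xen_pfn_t *pfns = calloc(nr_frames, sizeof(pfns[0]));

        if (!pfns)
                return -1;
        if (fill_frames(pfns, nr_frames) < 0) {
                free(pfns);     /* every error path frees the buffer */
                return -1;
        }
        *gfns = pfns;           /* success: ownership moves to the caller */
        return 0;
}
```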
|
privcmd.c:
     741  xen_pfn_t *pfns = NULL;   [in privcmd_ioctl_mmap_resource(), local]
     776  pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN);   [in privcmd_ioctl_mmap_resource()]
     777  if (!pfns) {   [in privcmd_ioctl_mmap_resource()]
     798  pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);   [in privcmd_ioctl_mmap_resource()]
     805  set_xen_guest_handle(xdata.frame_list, pfns);   [in privcmd_ioctl_mmap_resource()]
     821  int num, *errs = (int *)pfns;   [in privcmd_ioctl_mmap_resource()]
     823  BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns));   [in privcmd_ioctl_mmap_resource()]
     826  pfns, kdata.num, errs,   [in privcmd_ioctl_mmap_resource()]
     845  kfree(pfns);   [in privcmd_ioctl_mmap_resource()]
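
Lines 821-823 reuse the pfns buffer in place: once the hypervisor has consumed the frame list, the same allocation is reinterpreted as an array of int error codes, and BUILD_BUG_ON() guarantees an error slot never outgrows a pfn slot. A portable sketch of that trick using C11 _Static_assert (the mapping call itself is elided):

```c
#include <stdlib.h>

typedef unsigned long xen_pfn_t;

static void reuse_as_errs(size_t num)
{
        xen_pfn_t *pfns = calloc(num, sizeof(*pfns));
        int *errs;

        if (!pfns)
                return;
        /* ... pfns consumed by the (elided) mapping hypercall ... */

        /* Reinterpret the spent buffer as per-frame error codes. */
        _Static_assert(sizeof(*errs) <= sizeof(*pfns),
                       "error codes must fit in pfn slots");
        errs = (int *)pfns;
        for (size_t i = 0; i < num; i++)
                errs[i] = 0;
        free(pfns);
}
```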
|
/drivers/gpu/drm/amd/amdgpu/
amdgpu_hmm.c:
     177  unsigned long *pfns;   [in amdgpu_hmm_range_get_pages(), local]
     184  pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);   [in amdgpu_hmm_range_get_pages()]
     185  if (unlikely(!pfns)) {   [in amdgpu_hmm_range_get_pages()]
     194  hmm_range->hmm_pfns = pfns;   [in amdgpu_hmm_range_get_pages()]
     223  hmm_range->hmm_pfns = pfns;   [in amdgpu_hmm_range_get_pages()]
     231  pages[i] = hmm_pfn_to_page(pfns[i]);   [in amdgpu_hmm_range_get_pages()]
     238  kvfree(pfns);   [in amdgpu_hmm_range_get_pages()]
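
All three HMM users in this listing (amdgpu here, drm_gpusvm and xe below) follow the same recipe: allocate one unsigned long of hmm_pfns per page, call hmm_range_fault(), then translate each entry with hmm_pfn_to_page(). A kernel-context sketch of the happy path only (sketch_get_pages is a made-up name, and the mandatory notifier-sequence retry loop and mmap locking are elided; see the real drivers for those):

```c
#include <linux/hmm.h>
#include <linux/mm.h>

static int sketch_get_pages(struct mmu_interval_notifier *notifier,
                            unsigned long start, unsigned long end,
                            struct page **pages)
{
        unsigned long npages = (end - start) >> PAGE_SHIFT;
        struct hmm_range range = {
                .notifier = notifier,
                .start = start,
                .end = end,
                .default_flags = HMM_PFN_REQ_FAULT,
        };
        unsigned long *pfns;
        unsigned long i;
        int ret;

        pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
        if (!pfns)
                return -ENOMEM;
        range.hmm_pfns = pfns;

        range.notifier_seq = mmu_interval_read_begin(notifier);
        ret = hmm_range_fault(&range);  /* needs mmap read lock held */
        if (!ret)
                for (i = 0; i < npages; i++)
                        pages[i] = hmm_pfn_to_page(pfns[i]);

        kvfree(pfns);
        return ret;
}
```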
|
amdgpu_umc.h:
     187  uint64_t pa_addr, uint64_t *pfns, int len);
|
amdgpu_umc.c:
     509  uint64_t pa_addr, uint64_t *pfns, int len)   [in amdgpu_umc_lookup_bad_pages_in_a_row(), argument]
     529  pfns[i] = err_data.err_addr[i].retired_page;   [in amdgpu_umc_lookup_bad_pages_in_a_row()]
|
/drivers/gpu/drm/
drm_gpusvm.c:
     742  pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);   [in drm_gpusvm_check_pages()]
     743  if (!pfns)   [in drm_gpusvm_check_pages()]
     773  kvfree(pfns);   [in drm_gpusvm_check_pages()]
    1276  pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);   [in drm_gpusvm_range_get_pages()]
    1277  if (!pfns)   [in drm_gpusvm_range_get_pages()]
    1322  kvfree(pfns);   [in drm_gpusvm_range_get_pages()]
    1421  kvfree(pfns);   [in drm_gpusvm_range_get_pages()]
    1431  kvfree(pfns);   [in drm_gpusvm_range_get_pages()]
    1501  pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);   [in drm_gpusvm_range_evict()]
    1502  if (!pfns)   [in drm_gpusvm_range_evict()]
    [all …]
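
Note that every allocation in these GPU drivers is kvmalloc_array(npages, sizeof(*pfns), ...) rather than kvmalloc(npages * sizeof(*pfns), ...). The difference is overflow safety: the _array form fails cleanly if the product would wrap, instead of returning a short buffer. A portable equivalent of that check:

```c
#include <stdint.h>
#include <stdlib.h>

/* Portable analogue of the kvmalloc_array() idiom: refuse n * size if
 * the multiplication would overflow, rather than allocating a silently
 * truncated buffer. */
static void *alloc_array(size_t n, size_t size)
{
        if (size && n > SIZE_MAX / size)
                return NULL;
        return malloc(n * size);
}
```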
|
/drivers/gpu/drm/xe/
xe_hmm.c:
     219  unsigned long *pfns;   [in xe_hmm_userptr_populate_range(), local]
     254  pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);   [in xe_hmm_userptr_populate_range()]
     255  if (unlikely(!pfns))   [in xe_hmm_userptr_populate_range()]
     266  hmm_range.hmm_pfns = pfns;   [in xe_hmm_userptr_populate_range()]
     315  kvfree(pfns);   [in xe_hmm_userptr_populate_range()]
     323  kvfree(pfns);   [in xe_hmm_userptr_populate_range()]
|
/drivers/virtio/
virtio_balloon.c:
     108  __virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];   [member]
     188  sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);   [in tell_host()]
     227  __virtio32 pfns[], struct page *page)   [in set_page_pfns(), argument]
     238  pfns[i] = cpu_to_virtio32(vb->vdev,   [in set_page_pfns()]
     250  num = min(num, ARRAY_SIZE(vb->pfns));   [in fill_balloon()]
     275  set_page_pfns(vb, vb->pfns + vb->num_pfns, page);   [in fill_balloon()]
     314  num = min(num, ARRAY_SIZE(vb->pfns));   [in leak_balloon()]
     324  set_page_pfns(vb, vb->pfns + vb->num_pfns, page);   [in leak_balloon()]
     865  set_page_pfns(vb, vb->pfns, newpage);   [in virtballoon_migratepage()]
     870  set_page_pfns(vb, vb->pfns, page);   [in virtballoon_migratepage()]
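
set_page_pfns() exists because the balloon protocol always speaks in 4 KiB units: on kernels with larger pages, one struct page expands into several consecutive balloon pfns, each converted to the device's byte order with cpu_to_virtio32() (line 238). A portable sketch of the expansion (the constants are illustrative and the byte-order conversion is elided):

```c
#include <stddef.h>
#include <stdint.h>

#define HOST_PAGE_SIZE          16384u  /* e.g. a 16 KiB kernel page */
#define BALLOON_PAGE_SIZE       4096u   /* the protocol's fixed unit */
#define PFNS_PER_PAGE           (HOST_PAGE_SIZE / BALLOON_PAGE_SIZE)

/* One host page contributes PFNS_PER_PAGE consecutive 4 KiB pfn
 * entries, mirroring the loop in the driver's set_page_pfns(). */
static void expand_page_pfns(uint32_t *pfns, unsigned long first_balloon_pfn)
{
        for (size_t i = 0; i < PFNS_PER_PAGE; i++)
                pfns[i] = (uint32_t)(first_balloon_pfn + i);
}
```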
|
/drivers/gpu/drm/i915/gem/
i915_gem_pages.c:
     336  unsigned long stack[32], *pfns = stack, i;   [in i915_gem_object_map_pfn(), local]
     345  pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);   [in i915_gem_object_map_pfn()]
     346  if (!pfns)   [in i915_gem_object_map_pfn()]
     352  pfns[i++] = (iomap + addr) >> PAGE_SHIFT;   [in i915_gem_object_map_pfn()]
     353  vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));   [in i915_gem_object_map_pfn()]
     354  if (pfns != stack)   [in i915_gem_object_map_pfn()]
     355  kvfree(pfns);   [in i915_gem_object_map_pfn()]
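
i915_gem_object_map_pfn() uses a classic small-buffer optimization: requests of up to 32 pfns live in an on-stack array, larger ones fall back to the heap, and only the heap copy is freed (lines 354-355). A portable sketch (the fill values and the vmap step are placeholders):

```c
#include <stdio.h>
#include <stdlib.h>

#define STACK_SLOTS 32

static void map_pfns(size_t n_pfn)
{
        unsigned long stack[STACK_SLOTS], *pfns = stack;

        if (n_pfn > STACK_SLOTS) {
                pfns = malloc(n_pfn * sizeof(*pfns));   /* heap fallback */
                if (!pfns)
                        return;
        }
        for (size_t i = 0; i < n_pfn; i++)
                pfns[i] = i;            /* placeholder pfn values */
        printf("would vmap %zu pfns here\n", n_pfn);
        if (pfns != stack)              /* free only the heap fallback */
                free(pfns);
}
```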
|
/drivers/gpu/drm/nouveau/
nouveau_dmem.c:
     658  dma_addr_t *dma_addrs, u64 *pfns)   [in nouveau_dmem_migrate_chunk(), argument]
     665  args->src[i], dma_addrs + nr_dma, pfns + i);   [in nouveau_dmem_migrate_chunk()]
     674  nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);   [in nouveau_dmem_migrate_chunk()]
     700  u64 *pfns;   [in nouveau_dmem_migrate_vma(), local]
     717  pfns = nouveau_pfns_alloc(max);   [in nouveau_dmem_migrate_vma()]
     718  if (!pfns)   [in nouveau_dmem_migrate_vma()]
     733  pfns);   [in nouveau_dmem_migrate_vma()]
     739  nouveau_pfns_free(pfns);   [in nouveau_dmem_migrate_vma()]
|
nouveau_svm.h:
      34  void nouveau_pfns_free(u64 *pfns);
      36  unsigned long addr, u64 *pfns, unsigned long npages);
|
nouveau_svm.c:
     893  nouveau_pfns_to_args(void *pfns)   [in nouveau_pfns_to_args(), argument]
     895  return container_of(pfns, struct nouveau_pfnmap_args, p.phys);   [in nouveau_pfns_to_args()]
     915  nouveau_pfns_free(u64 *pfns)   [in nouveau_pfns_free(), argument]
     917  struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);   [in nouveau_pfns_free()]
     924  unsigned long addr, u64 *pfns, unsigned long npages)   [in nouveau_pfns_map(), argument]
     926  struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);   [in nouveau_pfns_map()]
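
nouveau_pfns_to_args() is what lets nouveau pass around a bare u64 *pfns: the array is embedded inside struct nouveau_pfnmap_args (at p.phys), so container_of() recovers the full argument structure from the interior pointer. A portable sketch of that recovery (pfnmap_args is a hypothetical stand-in for the real layout):

```c
#include <stddef.h>
#include <stdint.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct pfnmap_args {            /* hypothetical stand-in */
        int version;
        int npages;
        uint64_t phys[];        /* callers only ever see this array */
};

static struct pfnmap_args *pfns_to_args(uint64_t *pfns)
{
        /* Walk back from the embedded array to its container. */
        return container_of(pfns, struct pfnmap_args, phys);
}
```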
|
/drivers/iommu/
iova.c:
     575  unsigned long pfns[IOVA_MAG_SIZE];   [member]
     626  struct iova *iova = private_find_iova(iovad, mag->pfns[i]);   [in iova_magazine_free_pfns()]
     657  for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)   [in iova_magazine_pop()]
     662  pfn = mag->pfns[i];   [in iova_magazine_pop()]
     663  mag->pfns[i] = mag->pfns[--mag->size];   [in iova_magazine_pop()]
     670  mag->pfns[mag->size++] = pfn;   [in iova_magazine_push()]
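
The iova magazine shows a deliberate data-structure choice: the per-CPU cache does not care about order, so iova_magazine_pop() removes slot i in O(1) by overwriting it with the last element (line 663) instead of shifting the array. A portable sketch of that pop/push pair (the size and names are illustrative):

```c
#include <stddef.h>

#define MAG_SIZE 128    /* stands in for IOVA_MAG_SIZE */

struct magazine {
        size_t size;
        unsigned long pfns[MAG_SIZE];
};

/* Unordered O(1) removal: swap-with-last, as on lines 662-663 above. */
static unsigned long mag_pop_at(struct magazine *mag, size_t i)
{
        unsigned long pfn = mag->pfns[i];

        mag->pfns[i] = mag->pfns[--mag->size];
        return pfn;
}

static void mag_push(struct magazine *mag, unsigned long pfn)
{
        mag->pfns[mag->size++] = pfn;   /* caller ensures it is not full */
}
```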
|
/drivers/hv/
mshv_root_hv_call.c:
     507  input->data[i].pfns = page_to_pfn(pages[i]);   [in hv_call_set_vp_state()]
|