/drivers/dax/
  device.c
       79  struct file *filp = vmf->vma->vm_file;  in dax_set_mapping()
       87  pgoff = linear_page_index(vmf->vma,  in dax_set_mapping()
      102  struct vm_fault *vmf)  in __dev_dax_pte_fault() argument
      132  vmf->flags & FAULT_FLAG_WRITE);  in __dev_dax_pte_fault()
      136  struct vm_fault *vmf)  in __dev_dax_pmd_fault() argument
      160  if (pmd_addr < vmf->vma->vm_start ||  in __dev_dax_pmd_fault()
      176  vmf->flags & FAULT_FLAG_WRITE);  in __dev_dax_pmd_fault()
      181  struct vm_fault *vmf)  in __dev_dax_pud_fault() argument
      222  vmf->flags & FAULT_FLAG_WRITE);  in __dev_dax_pud_fault()
      226  struct vm_fault *vmf)  in __dev_dax_pud_fault() argument
      [all …]
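
The dev_dax snippets above outline a PTE/PMD/PUD fan-out. Below is a minimal, hedged sketch of that dispatch shape, not the dev_dax code itself: the helper names are hypothetical, and the huge_fault prototype (an order in current kernels, an enum page_entry_size earlier) has changed across versions.

#include <linux/mm.h>

static vm_fault_t my_pte_fault(struct vm_fault *vmf);   /* hypothetical */
static vm_fault_t my_pmd_fault(struct vm_fault *vmf);   /* hypothetical */

static vm_fault_t my_huge_fault(struct vm_fault *vmf, unsigned int order)
{
        struct vm_area_struct *vma = vmf->vma;
        unsigned long pmd_addr = vmf->address & PMD_MASK;

        switch (order) {
        case 0:
                return my_pte_fault(vmf);
        case PMD_ORDER:
                /* The whole PMD range must lie inside the VMA. */
                if (pmd_addr < vma->vm_start ||
                    pmd_addr + PMD_SIZE > vma->vm_end)
                        return VM_FAULT_SIGBUS;
                return my_pmd_fault(vmf);
        default:
                /* No handler for this size: fall back to a PTE fault. */
                return VM_FAULT_FALLBACK;
        }
}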
|
/drivers/gpu/drm/ttm/
  ttm_bo_vm.c
       44  struct vm_fault *vmf)  in ttm_bo_vm_fault_idle() argument
       60  if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)  in ttm_bo_vm_fault_idle()
       64  mmap_read_unlock(vmf->vma->vm_mm);  in ttm_bo_vm_fault_idle()
      119  struct vm_fault *vmf)  in ttm_bo_vm_reserve() argument
      136  mmap_read_unlock(vmf->vma->vm_mm);  in ttm_bo_vm_reserve()
      187  struct vm_area_struct *vma = vmf->vma;  in ttm_bo_vm_fault_reserved()
      198  unsigned long address = vmf->address;  in ttm_bo_vm_fault_reserved()
      204  ret = ttm_bo_vm_fault_idle(bo, vmf);  in ttm_bo_vm_fault_reserved()
      294  struct vm_area_struct *vma = vmf->vma;  in ttm_bo_vm_dummy_page()
      324  struct vm_area_struct *vma = vmf->vma;  in ttm_bo_vm_fault()
      [all …]
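
These helpers are the building blocks that TTM-based drivers assemble their .fault handlers from; the amdgpu, i915, and radeon entries below all follow the same skeleton. A hedged sketch of that skeleton (helper arguments have varied slightly across kernel versions):

#include <drm/ttm/ttm_bo.h>
#include <linux/dma-resv.h>

static vm_fault_t my_ttm_fault(struct vm_fault *vmf)
{
        struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
        vm_fault_t ret;

        /* Locks bo->base.resv, or drops mmap_lock and requests a retry. */
        ret = ttm_bo_vm_reserve(bo, vmf);
        if (ret)
                return ret;

        ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
                                       TTM_BO_VM_NUM_PREFAULT);
        /* On a granted retry, the helper already released the locks. */
        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                return ret;

        dma_resv_unlock(bo->base.resv);
        return ret;
}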
|
/drivers/video/fbdev/core/
  fb_defio.c
      122  static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)  in fb_deferred_io_fault() argument
      126  struct fb_info *info = vmf->vma->vm_private_data;  in fb_deferred_io_fault()
      128  offset = vmf->pgoff << PAGE_SHIFT;  in fb_deferred_io_fault()
      136  if (!vmf->vma->vm_file)  in fb_deferred_io_fault()
      141  vmf->page = page;  in fb_deferred_io_fault()
      222  unsigned long offset = vmf->pgoff << PAGE_SHIFT;  in fb_deferred_io_page_mkwrite()
      223  struct page *page = vmf->page;  in fb_deferred_io_page_mkwrite()
      225  file_update_time(vmf->vma->vm_file);  in fb_deferred_io_page_mkwrite()
      231  static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)  in fb_deferred_io_mkwrite() argument
      233  struct fb_info *info = vmf->vma->vm_private_data;  in fb_deferred_io_mkwrite()
      [all …]
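
The two handlers listed here form the classic deferred-I/O pair: reads fault in the shadow-buffer page directly, while the first write to a clean page is trapped by .page_mkwrite so a delayed worker can flush it to the device later. A trimmed sketch, assuming a vmalloc'ed shadow buffer and reducing fb_defio's dirty-page bookkeeping to a comment:

#include <linux/fb.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>

static vm_fault_t my_defio_fault(struct vm_fault *vmf)
{
        struct fb_info *info = vmf->vma->vm_private_data;
        unsigned long offset = vmf->pgoff << PAGE_SHIFT;
        struct page *page;

        if (offset >= info->fix.smem_len)
                return VM_FAULT_SIGBUS;

        page = vmalloc_to_page(info->screen_buffer + offset);
        get_page(page);
        vmf->page = page;
        return 0;
}

static vm_fault_t my_defio_mkwrite(struct vm_fault *vmf)
{
        struct fb_info *info = vmf->vma->vm_private_data;

        file_update_time(vmf->vma->vm_file);
        lock_page(vmf->page);
        /* ... record vmf->page on the driver's dirty list here ... */
        schedule_delayed_work(&info->deferred_work, info->fbdefio->delay);
        return VM_FAULT_LOCKED;         /* page is returned locked */
}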
|
/drivers/gpu/drm/vmwgfx/
  vmwgfx_page_dirty.c
      396  vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)  in vmw_bo_vm_mkwrite() argument
      398  struct vm_area_struct *vma = vmf->vma;  in vmw_bo_vm_mkwrite()
      410  save_flags = vmf->flags;  in vmw_bo_vm_mkwrite()
      411  vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY;  in vmw_bo_vm_mkwrite()
      412  ret = ttm_bo_vm_reserve(bo, vmf);  in vmw_bo_vm_mkwrite()
      413  vmf->flags = save_flags;  in vmw_bo_vm_mkwrite()
      437  vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)  in vmw_bo_vm_fault() argument
      439  struct vm_area_struct *vma = vmf->vma;  in vmw_bo_vm_fault()
      447  ret = ttm_bo_vm_reserve(bo, vmf);  in vmw_bo_vm_fault()
      458  page_offset = vmf->pgoff -  in vmw_bo_vm_fault()
      [all …]
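
The save_flags lines above are the interesting part: .page_mkwrite is not allowed to drop mmap_lock, so the handler temporarily masks FAULT_FLAG_ALLOW_RETRY to force ttm_bo_vm_reserve() to block on the reservation instead of returning VM_FAULT_RETRY. A trimmed sketch with the dirty tracking reduced to a comment:

static vm_fault_t my_bo_mkwrite(struct vm_fault *vmf)
{
        struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
        unsigned int save_flags = vmf->flags;   /* flags type varies by version */
        vm_fault_t ret;

        vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY;  /* block, do not retry */
        ret = ttm_bo_vm_reserve(bo, vmf);
        vmf->flags = save_flags;
        if (ret)
                return ret;

        /* ... mark the written range dirty under the reservation ... */
        dma_resv_unlock(bo->base.resv);
        return 0;
}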
|
/drivers/xen/
  privcmd-buf.c
      118  static vm_fault_t privcmd_buf_vma_fault(struct vm_fault *vmf)  in privcmd_buf_vma_fault() argument
      121  vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,  in privcmd_buf_vma_fault()
      122  vmf->pgoff, (void *)vmf->address);  in privcmd_buf_vma_fault()
|
/drivers/misc/ocxl/
  sysfs.c
      109  static vm_fault_t global_mmio_fault(struct vm_fault *vmf)  in global_mmio_fault() argument
      111  struct vm_area_struct *vma = vmf->vma;  in global_mmio_fault()
      115  if (vmf->pgoff >= (afu->config.global_mmio_size >> PAGE_SHIFT))  in global_mmio_fault()
      118  offset = vmf->pgoff;  in global_mmio_fault()
      120  return vmf_insert_pfn(vma, vmf->address, offset);  in global_mmio_fault()
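
This is about the smallest useful .fault handler: bounds-check vmf->pgoff, then insert a PFN. A hedged sketch of both the handler and the .mmap registration it presumes (my_dev and its fields are hypothetical; older kernels assign vma->vm_flags directly instead of calling vm_flags_set()):

#include <linux/mm.h>

struct my_dev {                         /* hypothetical device state */
        phys_addr_t mmio_base;
        size_t mmio_size;
};

static vm_fault_t my_mmio_fault(struct vm_fault *vmf)
{
        struct my_dev *dev = vmf->vma->vm_private_data;

        if (vmf->pgoff >= (dev->mmio_size >> PAGE_SHIFT))
                return VM_FAULT_SIGBUS;

        return vmf_insert_pfn(vmf->vma, vmf->address,
                              (dev->mmio_base >> PAGE_SHIFT) + vmf->pgoff);
}

static const struct vm_operations_struct my_mmio_vm_ops = {
        .fault = my_mmio_fault,
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_private_data = file->private_data;
        vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_ops = &my_mmio_vm_ops;
        return 0;
}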
|
  context.c
      139  static vm_fault_t ocxl_mmap_fault(struct vm_fault *vmf)  in ocxl_mmap_fault() argument
      141  struct vm_area_struct *vma = vmf->vma;  in ocxl_mmap_fault()
      146  offset = vmf->pgoff << PAGE_SHIFT;  in ocxl_mmap_fault()
      148  ctx->pasid, vmf->address, offset);  in ocxl_mmap_fault()
      151  ret = map_pp_mmio(vma, vmf->address, offset, ctx);  in ocxl_mmap_fault()
      153  ret = map_afu_irq(vma, vmf->address, offset, ctx);  in ocxl_mmap_fault()
|
/drivers/char/agp/
  alpha-agp.c
       14  static vm_fault_t alpha_core_agp_vm_fault(struct vm_fault *vmf)  in alpha_core_agp_vm_fault() argument
       21  dma_addr = vmf->address - vmf->vma->vm_start + agp->aperture.bus_base;  in alpha_core_agp_vm_fault()
       32  vmf->page = page;  in alpha_core_agp_vm_fault()
|
/drivers/gpu/drm/gma500/
  fbdev.c
       22  static vm_fault_t psb_fbdev_vm_fault(struct vm_fault *vmf)  in psb_fbdev_vm_fault() argument
       24  struct vm_area_struct *vma = vmf->vma;  in psb_fbdev_vm_fault()
       26  unsigned long address = vmf->address - (vmf->pgoff << PAGE_SHIFT);  in psb_fbdev_vm_fault()
|
  gem.c
      109  static vm_fault_t psb_gem_fault(struct vm_fault *vmf);
      254  static vm_fault_t psb_gem_fault(struct vm_fault *vmf)  in psb_gem_fault() argument
      256  struct vm_area_struct *vma = vmf->vma;  in psb_gem_fault()
      290  page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;  in psb_gem_fault()
      297  ret = vmf_insert_pfn(vma, vmf->address, pfn);  in psb_gem_fault()
|
/drivers/infiniband/hw/hfi1/
  file_ops.c
       73  static vm_fault_t vma_fault(struct vm_fault *vmf);
      313  ctxt, subctxt, type, mapio, vmf, !!memdma,  in mmap_cdbg()
      328  u8 subctxt, mapio = 0, vmf = 0, type;  in hfi1_file_mmap() local
      487  vmf = 1;  in hfi1_file_mmap()
      520  vmf = 1;  in hfi1_file_mmap()
      526  vmf = 1;  in hfi1_file_mmap()
      533  vmf = 1;  in hfi1_file_mmap()
      545  vmf = 1;  in hfi1_file_mmap()
      564  if (vmf) {  in hfi1_file_mmap()
      595  static vm_fault_t vma_fault(struct vm_fault *vmf)  in vma_fault() argument
      [all …]
    (Note: most hits in this file are a local u8 flag named vmf inside
    hfi1_file_mmap(), not a struct vm_fault; only lines 73 and 595 involve
    the fault handler itself.)
|
/drivers/gpu/drm/armada/
  armada_gem.c
       20  static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)  in armada_gem_vm_fault() argument
       22  struct drm_gem_object *gobj = vmf->vma->vm_private_data;  in armada_gem_vm_fault()
       26  pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;  in armada_gem_vm_fault()
       27  return vmf_insert_pfn(vmf->vma, vmf->address, pfn);  in armada_gem_vm_fault()
|
/drivers/dma-buf/heaps/
  cma_heap.c
      167  static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)  in cma_heap_vm_fault() argument
      169  struct vm_area_struct *vma = vmf->vma;  in cma_heap_vm_fault()
      172  if (vmf->pgoff >= buffer->pagecount)  in cma_heap_vm_fault()
      175  return vmf_insert_pfn(vma, vmf->address, page_to_pfn(buffer->pages[vmf->pgoff]));  in cma_heap_vm_fault()
|
/drivers/gpu/drm/omapdrm/
  omap_gem.c
      353  struct vm_area_struct *vma, struct vm_fault *vmf)  in omap_gem_fault_1d() argument
      360  pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;  in omap_gem_fault_1d()
      370  VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,  in omap_gem_fault_1d()
      373  return vmf_insert_mixed(vma, vmf->address, pfn);  in omap_gem_fault_1d()
      378  struct vm_area_struct *vma, struct vm_fault *vmf)  in omap_gem_fault_2d() argument
      408  pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;  in omap_gem_fault_2d()
      464  VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,  in omap_gem_fault_2d()
      494  static vm_fault_t omap_gem_fault(struct vm_fault *vmf)  in omap_gem_fault() argument
      496  struct vm_area_struct *vma = vmf->vma;  in omap_gem_fault()
      521  ret = omap_gem_fault_2d(obj, vma, vmf);  in omap_gem_fault()
      [all …]
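
omap_gem is the only entry in this listing that uses vmf_insert_mixed() instead of vmf_insert_pfn(). A hedged sketch of the distinction (the pfn_t argument shown here matches most kernel versions; very recent ones take a plain pfn):

#include <linux/mm.h>
#include <linux/pfn_t.h>

static vm_fault_t my_insert(struct vm_fault *vmf, unsigned long pfn)
{
        struct vm_area_struct *vma = vmf->vma;

        if (vma->vm_flags & VM_MIXEDMAP)
                /* Entries may or may not have a struct page behind them;
                 * this suits omap_gem's mix of shmem pages and TILER
                 * aperture addresses. */
                return vmf_insert_mixed(vma, vmf->address,
                                        __pfn_to_pfn_t(pfn, PFN_DEV));

        /* VM_PFNMAP: raw PFN mappings, never refcounted. */
        return vmf_insert_pfn(vma, vmf->address, pfn);
}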
|
/drivers/gpu/drm/nouveau/
  nouveau_dmem.c
      162  static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)  in nouveau_dmem_migrate_to_ram() argument
      164  struct nouveau_drm *drm = page_to_drm(vmf->page);  in nouveau_dmem_migrate_to_ram()
      173  .vma = vmf->vma,  in nouveau_dmem_migrate_to_ram()
      174  .start = vmf->address,  in nouveau_dmem_migrate_to_ram()
      175  .end = vmf->address + PAGE_SIZE,  in nouveau_dmem_migrate_to_ram()
      179  .fault_page = vmf->page,  in nouveau_dmem_migrate_to_ram()
      197  dpage = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vmf->vma, vmf->address);  in nouveau_dmem_migrate_to_ram()
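
The snippets sketch the standard dev_pagemap_ops.migrate_to_ram shape: a CPU fault on a device-private page migrates just that page back to system RAM. Below is a hedged reconstruction; the copy-back step is a placeholder, my_pgmap_owner is hypothetical, and helper names (alloc_page_vma) follow the kernel version the snippets come from:

#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static void *my_pgmap_owner;    /* hypothetical: must match the pagemap owner */

static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
{
        unsigned long src = 0, dst = 0;
        struct migrate_vma args = {
                .vma            = vmf->vma,
                .start          = vmf->address,
                .end            = vmf->address + PAGE_SIZE,
                .src            = &src,
                .dst            = &dst,
                .pgmap_owner    = my_pgmap_owner,
                .flags          = MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
                .fault_page     = vmf->page,
        };
        struct page *dpage;

        if (migrate_vma_setup(&args))
                return VM_FAULT_SIGBUS;

        if (src & MIGRATE_PFN_MIGRATE) {
                dpage = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO,
                                       vmf->vma, vmf->address);
                if (dpage) {
                        lock_page(dpage);  /* destination must be locked */
                        /* ... DMA the device page's contents into dpage ... */
                        dst = migrate_pfn(page_to_pfn(dpage));
                }
        }

        migrate_vma_pages(&args);
        migrate_vma_finalize(&args);
        return 0;
}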
|
/drivers/infiniband/core/
  uverbs_main.c
      788  static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)  in rdma_umap_fault() argument
      790  struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data;  in rdma_umap_fault()
      791  struct rdma_umap_priv *priv = vmf->vma->vm_private_data;  in rdma_umap_fault()
      798  if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {  in rdma_umap_fault()
      799  vmf->page = ZERO_PAGE(vmf->address);  in rdma_umap_fault()
      800  get_page(vmf->page);  in rdma_umap_fault()
      807  alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);  in rdma_umap_fault()
      814  vmf->page = ufile->disassociate_page;  in rdma_umap_fault()
      815  get_page(vmf->page);  in rdma_umap_fault()
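
The lines above show the revoke trick spelled out below: once the device is disassociated, read faults get the shared zero page and write faults get one lazily allocated zeroed page, so stale user mappings stay harmless rather than taking SIGBUS. Sketch only; struct my_ufile is a hypothetical stand-in and the locking around the lazy allocation is elided:

struct my_ufile {                       /* hypothetical stand-in */
        struct page *disassociate_page;
};

static vm_fault_t my_umap_fault(struct vm_fault *vmf)
{
        struct my_ufile *ufile = vmf->vma->vm_file->private_data;

        /* Read-only mappings can all share the global zero page. */
        if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {
                vmf->page = ZERO_PAGE(vmf->address);
                get_page(vmf->page);
                return 0;
        }

        /* Writable mappings share one zeroed page, allocated lazily;
         * serialization against concurrent faults is elided here. */
        if (!ufile->disassociate_page)
                ufile->disassociate_page =
                        alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);
        if (!ufile->disassociate_page)
                return VM_FAULT_SIGBUS;

        vmf->page = ufile->disassociate_page;
        get_page(vmf->page);
        return 0;
}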
|
/drivers/dma-buf/
  udmabuf.c
       47  static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)  in udmabuf_vm_fault() argument
       49  struct vm_area_struct *vma = vmf->vma;  in udmabuf_vm_fault()
       51  pgoff_t pgoff = vmf->pgoff;  in udmabuf_vm_fault()
       61  ret = vmf_insert_pfn(vma, vmf->address, pfn);  in udmabuf_vm_fault()
       70  if (addr == vmf->address)  in udmabuf_vm_fault()
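
udmabuf's handler pre-faults aggressively: it maps the whole buffer on the first fault but lets only a failure at the faulting address itself decide the return value. A sketch under the assumption that the VMA maps the buffer from offset 0 (the pages array is a hypothetical stand-in for the pinned udmabuf pages):

static vm_fault_t my_prefault_all(struct vm_fault *vmf,
                                  struct page **pages, pgoff_t pagecount)
{
        struct vm_area_struct *vma = vmf->vma;
        vm_fault_t ret = VM_FAULT_SIGBUS;
        unsigned long addr;
        pgoff_t pgoff;

        if (vmf->pgoff >= pagecount)
                return VM_FAULT_SIGBUS;

        /* Assumes the VMA maps the buffer starting at offset 0. */
        for (pgoff = 0, addr = vma->vm_start; pgoff < pagecount;
             pgoff++, addr += PAGE_SIZE) {
                vm_fault_t r = vmf_insert_pfn(vma, addr,
                                              page_to_pfn(pages[pgoff]));
                /* Only the page that actually faulted may fail the fault. */
                if (addr == vmf->address)
                        ret = r;
        }
        return ret;
}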
|
/drivers/gpu/drm/
  drm_pagemap.c
      733  static vm_fault_t drm_pagemap_migrate_to_ram(struct vm_fault *vmf)  in drm_pagemap_migrate_to_ram() argument
      735  struct drm_pagemap_zdd *zdd = vmf->page->zone_device_data;  in drm_pagemap_migrate_to_ram()
      738  err = __drm_pagemap_migrate_to_ram(vmf->vma,  in drm_pagemap_migrate_to_ram()
      740  vmf->page, vmf->address,  in drm_pagemap_migrate_to_ram()
|
  drm_gem_shmem_helper.c
      538  static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)  in drm_gem_shmem_fault() argument
      540  struct vm_area_struct *vma = vmf->vma;  in drm_gem_shmem_fault()
      549  page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;  in drm_gem_shmem_fault()
      560  ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));  in drm_gem_shmem_fault()
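
One detail this helper shares with the gma500, armada, and etnaviv handlers above: vma->vm_pgoff holds the DRM fake mmap offset, not a page index, so the faulting page must be derived from the address. A hedged sketch with the dma_resv locking elided:

#include <drm/drm_gem_shmem_helper.h>

static vm_fault_t my_gem_shmem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
        pgoff_t page_offset;

        /* Page index relative to the start of this mapping; vm_pgoff is
         * the DRM fake offset and is useless for indexing. */
        page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
        if (page_offset >= obj->size >> PAGE_SHIFT || !shmem->pages)
                return VM_FAULT_SIGBUS;

        return vmf_insert_pfn(vma, vmf->address,
                              page_to_pfn(shmem->pages[page_offset]));
}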
|
/drivers/gpu/drm/amd/amdgpu/
  amdgpu_gem.c
      156  static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)  in amdgpu_gem_fault() argument
      158  struct ttm_buffer_object *bo = vmf->vma->vm_private_data;  in amdgpu_gem_fault()
      163  ret = ttm_bo_vm_reserve(bo, vmf);  in amdgpu_gem_fault()
      174  ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,  in amdgpu_gem_fault()
      179  ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);  in amdgpu_gem_fault()
      181  if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))  in amdgpu_gem_fault()
|
/drivers/gpu/drm/i915/gem/
  i915_gem_ttm.c
     1035  static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)  in vm_fault_ttm() argument
     1037  struct vm_area_struct *area = vmf->vma;  in vm_fault_ttm()
     1050  ret = ttm_bo_vm_reserve(bo, vmf);  in vm_fault_ttm()
     1111  ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,  in vm_fault_ttm()
     1115  ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);  in vm_fault_ttm()
     1118  if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))  in vm_fault_ttm()
|
/drivers/gpu/drm/radeon/
  radeon_gem.c
       47  static vm_fault_t radeon_gem_fault(struct vm_fault *vmf)  in radeon_gem_fault() argument
       49  struct ttm_buffer_object *bo = vmf->vma->vm_private_data;  in radeon_gem_fault()
       55  ret = ttm_bo_vm_reserve(bo, vmf);  in radeon_gem_fault()
       63  ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,  in radeon_gem_fault()
       65  if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))  in radeon_gem_fault()
|
/drivers/gpu/drm/etnaviv/
  etnaviv_gem.c
      162  static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)  in etnaviv_gem_fault() argument
      164  struct vm_area_struct *vma = vmf->vma;  in etnaviv_gem_fault()
      190  pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;  in etnaviv_gem_fault()
      194  VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,  in etnaviv_gem_fault()
      197  return vmf_insert_pfn(vma, vmf->address, pfn);  in etnaviv_gem_fault()
|
/drivers/gpu/drm/amd/amdkfd/
  kfd_migrate.c
      934  static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)  in svm_migrate_to_ram() argument
      937  unsigned long addr = vmf->address;  in svm_migrate_to_ram()
      944  svm_bo = vmf->page->zone_device_data;  in svm_migrate_to_ram()
      955  if (mm != vmf->vma->vm_mm)  in svm_migrate_to_ram()
      992  r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, start, last,  in svm_migrate_to_ram()
      993  KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, vmf->page);  in svm_migrate_to_ram()
|
/drivers/vdpa/vdpa_user/
  iova_domain.c
      509  static vm_fault_t vduse_domain_mmap_fault(struct vm_fault *vmf)  in vduse_domain_mmap_fault() argument
      511  struct vduse_iova_domain *domain = vmf->vma->vm_private_data;  in vduse_domain_mmap_fault()
      512  unsigned long iova = vmf->pgoff << PAGE_SHIFT;  in vduse_domain_mmap_fault()
      526  vmf->page = page;  in vduse_domain_mmap_fault()
|