| /arch/x86/entry/vdso/ |
| A D | vma.c |   53 struct vm_area_struct *vma, struct vm_fault *vmf) in vdso_fault() argument
                  57 if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size) in vdso_fault()
                  60 vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT)); in vdso_fault()
                  61 get_page(vmf->page); in vdso_fault()
                  92 struct vm_area_struct *vma, struct vm_fault *vmf) in vvar_vclock_fault() argument
                  94 switch (vmf->pgoff) { in vvar_vclock_fault()
                 102 return vmf_insert_pfn_prot(vma, vmf->address, in vvar_vclock_fault()
                 113 return vmf_insert_pfn(vma, vmf->address, pfn); in vvar_vclock_fault()
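The two vDSO handlers above show the two common ways a fault handler supplies memory: vdso_fault() backs the fault with a kernel page (set vmf->page, take a reference, return 0), while vvar_vclock_fault() installs a raw PFN itself via vmf_insert_pfn()/vmf_insert_pfn_prot(). The extra vma parameter appears because these are vm_special_mapping fault callbacks; a plain vm_operations_struct .fault receives only vmf and reaches the VMA through vmf->vma. A minimal sketch of the page-backed variant, with my_image/my_image_size as hypothetical stand-ins for the vDSO image:

    #include <linux/mm.h>

    /* Hypothetical kernel-resident backing buffer (stands in for the vDSO image). */
    static char my_image[4 * PAGE_SIZE] __aligned(PAGE_SIZE);
    static const size_t my_image_size = sizeof(my_image);

    static vm_fault_t my_fault(struct vm_fault *vmf)
    {
            unsigned long off = vmf->pgoff << PAGE_SHIFT;

            if (off >= my_image_size)
                    return VM_FAULT_SIGBUS;        /* fault beyond the backing object */

            /*
             * Hand the core MM a refcounted page; returning 0 tells it to
             * install the PTE itself.
             */
            vmf->page = virt_to_page(my_image + off);
            get_page(vmf->page);
            return 0;
    }

    static const struct vm_operations_struct my_vm_ops = {
            .fault = my_fault,
    };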
|
| /arch/x86/kernel/cpu/sgx/ |
| A D | virt.c |  74 static vm_fault_t sgx_vepc_fault(struct vm_fault *vmf) in sgx_vepc_fault() argument
                  76 struct vm_area_struct *vma = vmf->vma; in sgx_vepc_fault()
                  81 ret = __sgx_vepc_fault(vepc, vma, vmf->address); in sgx_vepc_fault()
                  87 if (ret == -EBUSY && (vmf->flags & FAULT_FLAG_ALLOW_RETRY)) { in sgx_vepc_fault()
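sgx_vepc_fault() shows the retry handshake with the core fault path: when the EPC page cannot be populated right now (-EBUSY) and the caller set FAULT_FLAG_ALLOW_RETRY, the handler drops the mmap lock and returns VM_FAULT_RETRY so the fault is re-issued later instead of spinning under the lock. A sketch of that shape, with my_try_populate() as a hypothetical stand-in for __sgx_vepc_fault():

    #include <linux/mm.h>

    static int my_try_populate(struct vm_area_struct *vma, unsigned long addr)
    {
            return 0;       /* hypothetical; a real helper may return -EBUSY */
    }

    static vm_fault_t my_fault(struct vm_fault *vmf)
    {
            struct vm_area_struct *vma = vmf->vma;
            int ret = my_try_populate(vma, vmf->address);

            if (!ret)
                    return VM_FAULT_NOPAGE;        /* PTE installed by the helper */

            /* Transient contention: drop mmap_lock and let the fault retry. */
            if (ret == -EBUSY && (vmf->flags & FAULT_FLAG_ALLOW_RETRY)) {
                    mmap_read_unlock(vma->vm_mm);
                    return VM_FAULT_RETRY;
            }

            return VM_FAULT_SIGBUS;
    }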
|
| A D | encl.c | 430 static vm_fault_t sgx_vma_fault(struct vm_fault *vmf) in sgx_vma_fault() argument
                 432 unsigned long addr = (unsigned long)vmf->address; in sgx_vma_fault()
                 433 struct vm_area_struct *vma = vmf->vma; in sgx_vma_fault()
|
| /arch/powerpc/platforms/cell/spufs/ |
| A D | file.c |  230 spufs_mem_mmap_fault(struct vm_fault *vmf) in spufs_mem_mmap_fault() argument
                  232 struct vm_area_struct *vma = vmf->vma; in spufs_mem_mmap_fault()
                  237 offset = vmf->pgoff << PAGE_SHIFT; in spufs_mem_mmap_fault()
                  242 vmf->address, offset); in spufs_mem_mmap_fault()
                  254 ret = vmf_insert_pfn(vma, vmf->address, pfn); in spufs_mem_mmap_fault()
                  353 ret = vmf_insert_pfn(vmf->vma, vmf->address, in spufs_ps_fault()
                 1016 spufs_signal1_mmap_fault(struct vm_fault *vmf) in spufs_signal1_mmap_fault() argument
                 1150 spufs_signal2_mmap_fault(struct vm_fault *vmf) in spufs_signal2_mmap_fault() argument
                 1277 spufs_mss_mmap_fault(struct vm_fault *vmf) in spufs_mss_mmap_fault() argument
                 1338 spufs_psmap_mmap_fault(struct vm_fault *vmf) in spufs_psmap_mmap_fault() argument
                 [all …]
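The spufs handlers above all follow one pattern: turn vmf->pgoff into a byte offset, resolve it to a physical frame of SPU local store or problem-state registers, and map it with vmf_insert_pfn(), which installs the PTE itself and returns a vm_fault_t directly. A condensed sketch, with my_phys_base/MY_REGION_SIZE as hypothetical placeholders for the SPU area:

    #include <linux/mm.h>

    #define MY_REGION_SIZE  (16 * PAGE_SIZE)    /* hypothetical mapped area size */
    static phys_addr_t my_phys_base;            /* hypothetical physical base */

    static vm_fault_t my_ps_fault(struct vm_fault *vmf)
    {
            unsigned long offset = vmf->pgoff << PAGE_SHIFT;

            if (offset >= MY_REGION_SIZE)
                    return VM_FAULT_SIGBUS;

            /* vmf_insert_pfn() maps the frame and already returns a vm_fault_t. */
            return vmf_insert_pfn(vmf->vma, vmf->address,
                                  (my_phys_base + offset) >> PAGE_SHIFT);
    }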
|
| /arch/hexagon/include/asm/ |
| A D | cacheflush.h | 61 static inline void update_mmu_cache_range(struct vm_fault *vmf, in update_mmu_cache_range() argument
|
| /arch/microblaze/include/asm/ |
| A D | tlbflush.h | 36 #define update_mmu_cache_range(vmf, vma, addr, ptep, nr) do { } while (0) argument
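The microblaze definition above is the degenerate case of the update_mmu_cache_range() hook that recurs through the rest of this listing: after the core MM installs nr consecutive PTEs at ptep for address, each architecture gets a chance to preload TLB entries or repair cache aliases. Architectures whose caches and TLBs need no such help simply stub it out; a no-op sketch of the hook's signature:

    #include <linux/mm_types.h>
    #include <linux/pgtable.h>

    static inline void update_mmu_cache_range(struct vm_fault *vmf,
                    struct vm_area_struct *vma, unsigned long address,
                    pte_t *ptep, unsigned int nr)
    {
            /* Nothing to do: caches and TLB are coherent with PTE updates here. */
    }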
|
| /arch/csky/abiv1/ |
| A D | cacheflush.c | 44 void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, in update_mmu_cache_range() argument
|
| /arch/powerpc/kvm/ |
| A D | book3s_xive_native.c | 228 static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf) in xive_native_esb_fault() argument
                               230 struct vm_area_struct *vma = vmf->vma; in xive_native_esb_fault()
                               246 page_offset = vmf->pgoff - vma->vm_pgoff; in xive_native_esb_fault()
                               280 vmf_insert_pfn(vma, vmf->address, page >> PAGE_SHIFT); in xive_native_esb_fault()
                               288 static vm_fault_t xive_native_tima_fault(struct vm_fault *vmf) in xive_native_tima_fault() argument
                               290 struct vm_area_struct *vma = vmf->vma; in xive_native_tima_fault()
                               292 switch (vmf->pgoff - vma->vm_pgoff) { in xive_native_tima_fault()
                               297 vmf_insert_pfn(vma, vmf->address, xive_tima_os >> PAGE_SHIFT); in xive_native_tima_fault()
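Both XIVE handlers convert the fault position into an offset within the mapping: vmf->pgoff is file-relative, so subtracting vma->vm_pgoff yields the page index from the start of this VMA, which then selects the ESB or TIMA page to map. A sketch of that offset handling, with my_pfn_for()/MY_NR_PAGES hypothetical:

    #include <linux/mm.h>

    #define MY_NR_PAGES     4               /* hypothetical region size in pages */
    static unsigned long my_base_pfn;       /* hypothetical */

    static unsigned long my_pfn_for(unsigned long page_idx)
    {
            return my_base_pfn + page_idx;  /* hypothetical page selection */
    }

    static vm_fault_t my_region_fault(struct vm_fault *vmf)
    {
            struct vm_area_struct *vma = vmf->vma;
            /* vmf->pgoff is file-relative; make it relative to this mapping. */
            unsigned long page_offset = vmf->pgoff - vma->vm_pgoff;

            if (page_offset >= MY_NR_PAGES)
                    return VM_FAULT_SIGBUS;

            return vmf_insert_pfn(vma, vmf->address, my_pfn_for(page_offset));
    }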
|
| A D | book3s_64_vio.c | 229 static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf) in kvm_spapr_tce_fault() argument
                          231 struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data; in kvm_spapr_tce_fault()
                          234 if (vmf->pgoff >= kvmppc_tce_pages(stt->size)) in kvm_spapr_tce_fault()
                          237 page = kvm_spapr_get_tce_page(stt, vmf->pgoff); in kvm_spapr_tce_fault()
                          242 vmf->page = page; in kvm_spapr_tce_fault()
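kvm_spapr_tce_fault() is the page-backed convention again: bounds-check vmf->pgoff, look up (and possibly allocate) the backing page, take a reference, store it in vmf->page and return 0 so the core MM installs the PTE. The two return conventions seen throughout this listing, side by side (my_lookup_page()/my_pfn hypothetical):

    #include <linux/mm.h>

    static struct page *my_lookup_page(pgoff_t pgoff)
    {
            return NULL;    /* hypothetical lookup/allocation */
    }
    static unsigned long my_pfn;    /* hypothetical */

    /* Convention 1: hand the core MM a refcounted page and return 0. */
    static vm_fault_t page_backed_fault(struct vm_fault *vmf)
    {
            struct page *page = my_lookup_page(vmf->pgoff);

            if (!page)
                    return VM_FAULT_SIGBUS;
            get_page(page);
            vmf->page = page;
            return 0;
    }

    /* Convention 2: install the PTE yourself; success is VM_FAULT_NOPAGE. */
    static vm_fault_t self_mapping_fault(struct vm_fault *vmf)
    {
            return vmf_insert_pfn(vmf->vma, vmf->address, my_pfn);
    }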
|
| A D | book3s_hv_uvmem.c |  998 static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf) in kvmppc_uvmem_migrate_to_ram() argument
                            1000 struct kvmppc_uvmem_page_pvt *pvt = vmf->page->zone_device_data; in kvmppc_uvmem_migrate_to_ram()
                            1002 if (kvmppc_svm_page_out(vmf->vma, vmf->address, in kvmppc_uvmem_migrate_to_ram()
                            1003 vmf->address + PAGE_SIZE, PAGE_SHIFT, in kvmppc_uvmem_migrate_to_ram()
                            1004 pvt->kvm, pvt->gpa, vmf->page)) in kvmppc_uvmem_migrate_to_ram()
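kvmppc_uvmem_migrate_to_ram() is not an ordinary VMA fault handler but the dev_pagemap_ops.migrate_to_ram callback: it runs when the CPU touches a device-private (here, secure-memory) page and must migrate that single page back to ordinary RAM. The driver state it needs travels in the page's zone_device_data, attached when the page was handed out. A sketch under those assumptions, with struct my_pvt/my_page_out() as hypothetical stand-ins for the kvmppc pieces:

    #include <linux/memremap.h>
    #include <linux/mm.h>

    struct my_pvt {
            unsigned long gpa;      /* hypothetical per-page driver state */
    };

    static int my_page_out(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end, struct my_pvt *pvt)
    {
            return 0;       /* hypothetical stand-in for kvmppc_svm_page_out() */
    }

    static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
    {
            /* State attached when the device-private page was allocated. */
            struct my_pvt *pvt = vmf->page->zone_device_data;

            if (my_page_out(vmf->vma, vmf->address,
                            vmf->address + PAGE_SIZE, pvt))
                    return VM_FAULT_SIGBUS;
            return 0;
    }

    static const struct dev_pagemap_ops my_pgmap_ops = {
            .migrate_to_ram = my_migrate_to_ram,
    };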
|
| /arch/csky/abiv2/ |
| A D | cacheflush.c | 10 void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, in update_mmu_cache_range() argument
|
| /arch/m68k/include/asm/ |
| A D | pgtable_mm.h | 139 static inline void update_mmu_cache_range(struct vm_fault *vmf, in update_mmu_cache_range() argument
|
| /arch/sh/include/asm/ |
| A D | pgtable.h | 105 static inline void update_mmu_cache_range(struct vm_fault *vmf, in update_mmu_cache_range() argument
|
| /arch/powerpc/platforms/book3s/ |
| A D | vas-api.c | 395 static vm_fault_t vas_mmap_fault(struct vm_fault *vmf) in vas_mmap_fault() argument
                    397 struct vm_area_struct *vma = vmf->vma; in vas_mmap_fault()
                    423 if (txwin->task_ref.vma != vmf->vma) { in vas_mmap_fault()
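vas_mmap_fault() adds a defensive twist: the driver records the VMA it validated at mmap time in its per-window state and rejects faults arriving through any other VMA. A sketch of that check, with struct my_win and its fields hypothetical:

    #include <linux/mm.h>

    struct my_win {                         /* hypothetical per-window state */
            struct vm_area_struct *vma;     /* recorded by the driver's mmap() */
            unsigned long paste_pfn;
    };

    static vm_fault_t my_fault(struct vm_fault *vmf)
    {
            struct my_win *win = vmf->vma->vm_file->private_data;

            /* Only honour faults on the exact VMA set up at mmap time. */
            if (win->vma != vmf->vma)
                    return VM_FAULT_SIGBUS;

            return vmf_insert_pfn(vmf->vma, vmf->address, win->paste_pfn);
    }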
|
| /arch/arc/include/asm/ |
| A D | pgtable-bits-arcv2.h | 104 void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
|
| /arch/powerpc/include/asm/ |
| A D | pgtable.h | 146 static inline void update_mmu_cache_range(struct vm_fault *vmf, in update_mmu_cache_range() argument
|
| /arch/arm/include/asm/ |
| A D | tlbflush.h | 625 void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
                     628 static inline void update_mmu_cache_range(struct vm_fault *vmf, in update_mmu_cache_range() argument
|
| /arch/arm/mm/ |
| A D | fault-armv.c | 187 void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, in update_mmu_cache_range() argument
|
| /arch/xtensa/mm/ |
| A D | cache.c | 216 void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, in update_mmu_cache_range() argument
|
| /arch/csky/include/asm/ |
| A D | pgtable.h | 261 void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
|
| /arch/nios2/include/asm/ |
| A D | pgtable.h | 282 void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
|
| /arch/nios2/mm/ |
| A D | cacheflush.c | 209 void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, in update_mmu_cache_range() argument
|
| /arch/um/include/asm/ |
| A D | pgtable.h | 293 #define update_mmu_cache_range(vmf, vma, address, ptep, nr) do {} while (0) argument
|
| /arch/openrisc/include/asm/ |
| A D | pgtable.h | 381 static inline void update_mmu_cache_range(struct vm_fault *vmf, in update_mmu_cache_range() argument
|
| /arch/alpha/include/asm/ |
| A D | pgtable.h | 299 static inline void update_mmu_cache_range(struct vm_fault *vmf, in update_mmu_cache_range() argument
|