
Searched refs:vma (Results 1 – 25 of 79) sorted by relevance


/include/linux/
userfaultfd_k.h
181 return vma->vm_flags & VM_UFFD_MISSING; in userfaultfd_missing()
186 return vma->vm_flags & VM_UFFD_WP; in userfaultfd_wp()
191 return vma->vm_flags & VM_UFFD_MINOR; in userfaultfd_minor()
217 if (vma->vm_flags & VM_DROPPABLE) in vma_can_userfault()
221 (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma))) in vma_can_userfault()
242 return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) || in vma_can_userfault()
243 vma_is_shmem(vma); in vma_can_userfault()
279 struct vm_area_struct *vma,
284 struct vm_area_struct *vma,
423 if (!userfaultfd_wp(vma)) in userfaultfd_wp_use_markers()
[all …]
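
The userfaultfd_k.h helpers above are the VM_UFFD_* flag tests that fault paths use to decide whether a fault should be handed to userspace. A minimal sketch of that decision; demo_should_notify_uffd() is an illustrative name, not a kernel symbol:

#include <linux/mm.h>
#include <linux/userfaultfd_k.h>

/* Illustration only: how a fault path consults the VM_UFFD_* flags. */
static bool demo_should_notify_uffd(struct vm_area_struct *vma, bool write)
{
        if (write && userfaultfd_wp(vma))       /* VM_UFFD_WP: write-protect mode */
                return true;
        return userfaultfd_missing(vma);        /* VM_UFFD_MISSING: missing pages */
}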
mmap_lock.h
122 vma->vm_lock_seq = UINT_MAX; in vma_lock_init()
210 vma_refcount_put(vma); in vma_start_read()
228 vma_refcount_put(vma); in vma_start_read()
232 return vma; in vma_start_read()
245 mmap_assert_locked(vma->vm_mm); in vma_start_read_locked_nested()
267 vma_refcount_put(vma); in vma_end_read()
304 VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma); in vma_assert_write_locked()
312 !__is_vma_write_locked(vma, &mm_lock_seq), vma); in vma_assert_locked()
332 vma_assert_write_locked(vma); in vma_mark_attached()
333 vma_assert_detached(vma); in vma_mark_attached()
[all …]
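
The mmap_lock.h hits are the per-VMA locking primitives behind the lockless page-fault path. A minimal sketch of the read-side pattern, assuming lock_vma_under_rcu() is available in this tree; demo_fault_locklessly() is illustrative:

#include <linux/mm.h>
#include <linux/mmap_lock.h>

/* Sketch: take only the per-VMA read lock instead of mmap_lock. */
static vm_fault_t demo_fault_locklessly(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma;

        vma = lock_vma_under_rcu(mm, addr);     /* vma_start_read() under RCU */
        if (!vma)
                return VM_FAULT_RETRY;          /* fall back to the mmap_lock path */

        /* ... handle the fault against a VMA that cannot change under us ... */

        vma_end_read(vma);                      /* drop the per-VMA read lock */
        return 0;
}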
huge_mm.h
17 struct vm_area_struct *vma);
224 if (!vma_is_anonymous(vma)) { in thp_vma_suitable_order()
225 if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff, in thp_vma_suitable_order()
232 if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end) in thp_vma_suitable_order()
257 if (thp_vma_suitable_order(vma, addr, order)) in thp_vma_suitable_orders()
450 struct vm_area_struct *vma) in pmd_trans_huge_lock() argument
453 return __pmd_trans_huge_lock(pmd, vma); in pmd_trans_huge_lock()
458 struct vm_area_struct *vma) in pud_trans_huge_lock() argument
461 return __pud_trans_huge_lock(pud, vma); in pud_trans_huge_lock()
620 struct vm_area_struct *vma) in pmd_trans_huge_lock() argument
[all …]
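
pmd_trans_huge_lock() above returns the page-table lock only when the PMD actually maps a huge page, giving callers a try-lock pattern. A short sketch; demo_touch_pmd() is illustrative:

#include <linux/mm.h>
#include <linux/huge_mm.h>

/* Sketch: act on a PMD-mapped THP only if one is really there. */
static void demo_touch_pmd(pmd_t *pmd, struct vm_area_struct *vma)
{
        spinlock_t *ptl;

        ptl = pmd_trans_huge_lock(pmd, vma);    /* NULL if the PMD is not huge */
        if (!ptl)
                return;                         /* fall back to the PTE level */

        /* ... the huge PMD is stable while ptl is held ... */

        spin_unlock(ptl);
}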
hugetlb.h
108 struct vm_area_struct *vma; member
135 struct vm_area_struct *vma,
153 struct vm_area_struct *vma,
170 struct vm_area_struct *vma,
257 if (is_vm_hugetlb_page(vma)) in hugetlb_zap_begin()
264 if (is_vm_hugetlb_page(vma)) in hugetlb_zap_end()
313 struct vm_area_struct *vma, in adjust_range_if_pmd_sharing_possible() argument
319 struct vm_area_struct *vma, in hugetlb_zap_begin() argument
325 struct vm_area_struct *vma, in hugetlb_zap_end() argument
1337 #define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) argument
[all …]
mm.h
710 memset(vma, 0, sizeof(*vma)); in vma_init()
794 return vma->vm_start < vma->vm_mm->brk && in vma_is_initial_heap()
795 vma->vm_end > vma->vm_mm->start_brk; in vma_is_initial_heap()
809 return vma->vm_start <= vma->vm_mm->start_stack && in vma_is_initial_stack()
810 vma->vm_end >= vma->vm_mm->start_stack; in vma_is_initial_stack()
2361 zap_page_range_single(vma, vma->vm_start, in zap_vma_pages()
2362 vma->vm_end - vma->vm_start, NULL); in zap_vma_pages()
3275 vma; vma = vma_interval_tree_iter_next(vma, start, last))
3481 if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end)) in find_exact_vma()
3490 return (vma && vma->vm_start <= start && end <= vma->vm_end); in range_in_vma()
[all …]
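
vma_is_initial_heap() and vma_is_initial_stack() above compare a VMA against the mm's brk and start_stack markers, which is how /proc-style dumpers decide to print [heap] and [stack]. A tiny illustrative helper (not a kernel function):

#include <linux/mm.h>

/* Illustration: label a VMA the way a /proc/<pid>/maps-style dumper would. */
static const char *demo_special_vma_name(struct vm_area_struct *vma)
{
        if (vma_is_initial_heap(vma))
                return "[heap]";
        if (vma_is_initial_stack(vma))
                return "[stack]";
        return NULL;
}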
hugetlb_inline.h
9 static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma) in is_vm_hugetlb_page() argument
11 return !!(vma->vm_flags & VM_HUGETLB); in is_vm_hugetlb_page()
16 static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma) in is_vm_hugetlb_page() argument
rmap.h
84 struct vm_area_struct *vma; member
162 if (likely(vma->anon_vma)) in anon_vma_prepare()
165 return __anon_vma_prepare(vma); in anon_vma_prepare()
171 VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma); in anon_vma_merge()
244 int diff, struct vm_area_struct *vma) in folio_add_return_large_mapcount() argument
345 struct vm_area_struct *vma) in folio_set_large_mapcount() argument
512 struct vm_area_struct *vma) in hugetlb_try_dup_anon_rmap() argument
935 struct vm_area_struct *vma; member
948 .vma = _vma, \
1002 struct vm_area_struct *vma);
[all …]
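
anon_vma_prepare() above is the fast-path wrapper that only calls __anon_vma_prepare() when the VMA has no anon_vma yet; anonymous fault handlers call it before installing the first anonymous page. The usual call-site shape, as a sketch:

#include <linux/mm.h>
#include <linux/rmap.h>

/* Sketch of the common call site in anonymous fault handlers. */
static vm_fault_t demo_prepare_anon(struct vm_area_struct *vma)
{
        if (unlikely(anon_vma_prepare(vma)))    /* allocates the anon_vma on first use */
                return VM_FAULT_OOM;
        return 0;
}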
pgtable.h
536 set_pte_at(vma->vm_mm, addr, ptep, pte); in clear_young_dirty_ptes()
834 update_mmu_tlb_range(vma, address, ptep, 1); in update_mmu_tlb()
1041 set_pmd_at(vma->vm_mm, address, pmdp, pmd); in generic_pmdp_establish()
1147 struct vm_area_struct *vma, in arch_do_swap_page_nr() argument
1164 struct vm_area_struct *vma, in arch_do_swap_page_nr() argument
1170 arch_do_swap_page(vma->vm_mm, vma, addr + i * PAGE_SIZE, in arch_do_swap_page_nr()
1187 struct vm_area_struct *vma, in arch_unmap_one() argument
1232 #define flush_tlb_fix_spurious_fault(vma, address, ptep) flush_tlb_page(vma, address) argument
1352 set_pte_at(vma->vm_mm, addr, ptep, pte); in __ptep_modify_prot_commit()
1890 #define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) argument
[all …]
cacheflush.h
21 static inline void flush_icache_pages(struct vm_area_struct *vma, in flush_icache_pages() argument
27 #define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1) argument
secretmem.h
14 bool vma_is_secretmem(struct vm_area_struct *vma);
19 static inline bool vma_is_secretmem(struct vm_area_struct *vma) in vma_is_secretmem() argument
mempolicy.h
125 struct vm_area_struct *vma, struct mempolicy *mpol);
131 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
133 struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
135 bool vma_policy_mof(struct vm_area_struct *vma);
142 extern int huge_node(struct vm_area_struct *vma,
169 extern bool vma_migratable(struct vm_area_struct *vma);
228 static inline struct mempolicy *get_vma_policy(struct vm_area_struct *vma, in get_vma_policy() argument
258 static inline int huge_node(struct vm_area_struct *vma, in huge_node() argument
buildid.h
10 int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size);
11 int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size);
mmdebug.h
15 void dump_vma(const struct vm_area_struct *vma);
36 #define VM_BUG_ON_VMA(cond, vma) \ argument
39 dump_vma(vma); \
92 #define VM_WARN_ON_ONCE_VMA(cond, vma) ({ \ argument
97 dump_vma(vma); \
121 #define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond) argument
129 #define VM_WARN_ON_ONCE_VMA(cond, vma) BUILD_BUG_ON_INVALID(cond) argument
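
VM_BUG_ON_VMA() and VM_WARN_ON_ONCE_VMA() expand to dump_vma() plus BUG()/WARN() when CONFIG_DEBUG_VM is set, and to the cheap fallbacks otherwise. A one-line usage sketch; demo_check_vma() is illustrative:

#include <linux/mm.h>
#include <linux/mmdebug.h>

/* Sketch: sanity-check a VMA, dumping it via dump_vma() under CONFIG_DEBUG_VM. */
static void demo_check_vma(struct vm_area_struct *vma)
{
        VM_BUG_ON_VMA(vma->vm_start >= vma->vm_end, vma);
}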
pagewalk.h
125 struct vm_area_struct *vma; member
137 int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
140 int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
189 struct vm_area_struct *vma; member
194 struct vm_area_struct *vma, unsigned long addr,
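
walk_page_vma() above drives the mm_walk_ops callbacks over a single VMA. A minimal sketch that counts present PTEs; the declarations are truncated in the hits, so the private-data argument and the pte_entry prototype below are assumed from the usual pagewalk API:

#include <linux/pagewalk.h>

static int demo_pte_entry(pte_t *pte, unsigned long addr,
                          unsigned long next, struct mm_walk *walk)
{
        unsigned long *present = walk->private;

        if (pte_present(ptep_get(pte)))
                (*present)++;
        return 0;
}

static const struct mm_walk_ops demo_walk_ops = {
        .pte_entry = demo_pte_entry,
};

/* Sketch: caller is expected to hold mmap_lock for read. */
static unsigned long demo_count_present_ptes(struct vm_area_struct *vma)
{
        unsigned long present = 0;

        walk_page_vma(vma, &demo_walk_ops, &present);
        return present;
}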
pkeys.h
14 #define arch_override_mprotect_pkey(vma, prot, pkey) (0) argument
18 static inline int vma_pkey(struct vm_area_struct *vma) in vma_pkey() argument
uprobes.h
191 extern int set_swbp(struct arch_uprobe *aup, struct vm_area_struct *vma, unsigned long vaddr);
192 extern int set_orig_insn(struct arch_uprobe *aup, struct vm_area_struct *vma, unsigned long vaddr);
197 extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct vm_area_struct *vma, unsigned lo…
202 extern int uprobe_mmap(struct vm_area_struct *vma);
203 extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end);
256 static inline int uprobe_mmap(struct vm_area_struct *vma) in uprobe_mmap() argument
261 uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) in uprobe_munmap() argument
ksm.h
18 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
89 struct vm_area_struct *vma, unsigned long addr);
135 static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, in ksm_madvise() argument
142 struct vm_area_struct *vma, unsigned long addr) in ksm_might_need_to_copy() argument
khugepaged.h
14 extern void khugepaged_enter_vma(struct vm_area_struct *vma,
39 static inline void khugepaged_enter_vma(struct vm_area_struct *vma, in khugepaged_enter_vma() argument
mm_inline.h
416 static inline void free_anon_vma_name(struct vm_area_struct *vma) in free_anon_vma_name() argument
422 anon_vma_name_put(vma->anon_name); in free_anon_vma_name()
440 static inline void free_anon_vma_name(struct vm_area_struct *vma) {} in free_anon_vma_name() argument
570 pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr, in pte_install_uffd_wp_if_needed() argument
585 if (vma_is_anonymous(vma) || !userfaultfd_wp(vma)) in pte_install_uffd_wp_if_needed()
600 set_pte_at(vma->vm_mm, addr, pte, in pte_install_uffd_wp_if_needed()
608 static inline bool vma_has_recency(struct vm_area_struct *vma) in vma_has_recency() argument
610 if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ)) in vma_has_recency()
613 if (vma->vm_file && (vma->vm_file->f_mode & FMODE_NOREUSE)) in vma_has_recency()
/include/xen/
xen-ops.h
45 int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
62 int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
68 int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
75 static inline int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, in xen_xlate_remap_gfn_array() argument
85 static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma, in xen_xlate_unmap_gfn_range() argument
92 int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr,
120 return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr, in xen_remap_domain_gfn_array()
128 return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid, in xen_remap_domain_gfn_array()
156 return xen_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid, in xen_remap_domain_mfn_array()
181 return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false); in xen_remap_domain_gfn_range()
[all …]
/include/asm-generic/
cacheflush.h
35 static inline void flush_cache_range(struct vm_area_struct *vma, in flush_cache_range() argument
43 static inline void flush_cache_page(struct vm_area_struct *vma, in flush_cache_page() argument
81 static inline void flush_icache_user_page(struct vm_area_struct *vma, in flush_icache_user_page() argument
107 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument
111 flush_icache_user_page(vma, page, vaddr, len); \
117 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
tlb.h
300 extern void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma);
443 struct vm_area_struct vma = { in tlb_flush() local
449 flush_tlb_range(&vma, tlb->start, tlb->end); in tlb_flush()
457 tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) in tlb_update_vma_flags() argument
470 tlb->vma_huge = is_vm_hugetlb_page(vma); in tlb_update_vma_flags()
471 tlb->vma_exec = !!(vma->vm_flags & VM_EXEC); in tlb_update_vma_flags()
477 tlb->vma_pfn |= !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)); in tlb_update_vma_flags()
548 static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) in tlb_start_vma() argument
553 tlb_update_vma_flags(tlb, vma); in tlb_start_vma()
555 flush_cache_range(vma, vma->vm_start, vma->vm_end); in tlb_start_vma()
[all …]
hugetlb.h
86 static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, in huge_ptep_clear_flush() argument
89 return ptep_clear_flush(vma, addr, ptep); in huge_ptep_clear_flush()
117 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, in huge_ptep_set_access_flags() argument
121 return ptep_set_access_flags(vma, addr, ptep, pte, dirty); in huge_ptep_set_access_flags()
mm_hooks.h
20 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, in arch_vma_access_permitted() argument
/include/drm/
drm_gem_dma_helper.h
43 int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *vma);
129 static inline int drm_gem_dma_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) in drm_gem_dma_object_mmap() argument
133 return drm_gem_dma_mmap(dma_obj, vma); in drm_gem_dma_object_mmap()
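
drm_gem_dma_object_mmap() above is the thin wrapper a driver plugs into its GEM object function table. A hookup sketch, assuming the standard drm_gem_object_funcs layout and the drm_gem_dma_vm_ops exported by the DMA helper library:

#include <drm/drm_gem.h>
#include <drm/drm_gem_dma_helper.h>

/* Sketch: wiring mmap support through the GEM DMA helpers. */
static const struct drm_gem_object_funcs demo_gem_funcs = {
        .free   = drm_gem_dma_object_free,
        .mmap   = drm_gem_dma_object_mmap,
        .vm_ops = &drm_gem_dma_vm_ops,
};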
