**/mm/**
**mmap.c**

- 83: `vm_flags_t vm_flags = vma->vm_flags;` in `vma_set_page_prot()` (local)
- 88: `vm_flags &= ~VM_SHARED;` in `vma_set_page_prot()`
- 470: `if (vm_flags & VM_EXEC)` in `do_mmap()`
- 472: `vm_flags &= ~VM_MAYEXEC;` in `do_mmap()`
- 519: `vm_flags |= VM_DROPPABLE;` in `do_mmap()`
- 728: `vm_flags);` in `arch_get_unmapped_area()`
- 796: `vm_flags);` in `arch_get_unmapped_area_topdown()`
- 1096: `vm_flags_t vm_flags;` in `SYSCALL_DEFINE5()` (local)
- 1138: `vm_flags = vma->vm_flags;` in `SYSCALL_DEFINE5()`
- 1164: `if (vma->vm_flags != vm_flags)` in `SYSCALL_DEFINE5()`
- [all …]
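The `do_mmap()` hits at lines 470 and 472 show a deny-then-degrade pattern for non-executable sources: an explicit exec request is rejected, and otherwise `VM_MAYEXEC` is dropped so the mapping can never later be made executable. A minimal userspace sketch of that pattern, with illustrative flag values and a hypothetical `restrict_exec()` helper (not a kernel function):

```c
/*
 * Sketch of the deny-then-degrade idiom behind the line 470/472 hits:
 * if the backing source must not be executable, an exec mapping fails
 * outright, and a non-exec mapping loses VM_MAYEXEC so a later
 * mprotect(PROT_EXEC) fails too. Constants are illustrative stand-ins.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long vm_flags_t;

#define VM_EXEC    0x4UL
#define VM_MAYEXEC 0x40UL /* illustrative */

static int restrict_exec(vm_flags_t *vm_flags, bool noexec_source)
{
	if (noexec_source) {
		if (*vm_flags & VM_EXEC)
			return -EPERM;          /* deny exec now */
		*vm_flags &= ~VM_MAYEXEC;       /* and forever after */
	}
	return 0;
}

int main(void)
{
	vm_flags_t flags = VM_MAYEXEC;

	restrict_exec(&flags, true);
	printf("%#lx\n", flags); /* VM_MAYEXEC cleared */
	return 0;
}
```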
|
**vma.c**

- 59: `.vm_flags = (map_)->vm_flags, \`
- 98: `if ((vma->vm_flags ^ vmg->vm_flags) & ~VM_SOFTDIRTY)` in `is_mergeable_vma()`
- 1647: `vmg.vm_flags = vm_flags;` in `vma_modify_flags()`
- 1692: `vmg.vm_flags = vm_flags;` in `vma_modify_flags_uffd()`
- 2431: `VM_WARN_ON_ONCE(map->vm_flags != vma->vm_flags &&` in `__mmap_new_file_vma()`
- 2436: `map->vm_flags = vma->vm_flags;` in `__mmap_new_file_vma()`
- 2527: `vm_flags_t vm_flags = vma->vm_flags;` in `__mmap_complete()` (local)
- 2580: `.vm_flags = map->vm_flags,` in `call_mmap_prepare()`
- 2592: `map->vm_flags = desc.vm_flags;` in `call_mmap_prepare()`
- 2723: `if (map_deny_write_exec(vm_flags, vm_flags))` in `mmap_region()`
- [all …]
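The `is_mergeable_vma()` hit at line 98 is the XOR-and-mask idiom: two flag words count as equal up to `VM_SOFTDIRTY`, so a merge is not blocked by soft-dirty state alone. A minimal standalone sketch, with made-up bit values and a hypothetical `flags_mergeable()` helper:

```c
/*
 * Userspace sketch of the line-98 test: XOR exposes every differing
 * bit, and masking off VM_SOFTDIRTY lets two VMAs merge when only
 * soft-dirty state differs. Flag values below are illustrative, not
 * the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long vm_flags_t;

#define VM_READ      0x0001UL
#define VM_WRITE     0x0002UL
#define VM_SOFTDIRTY 0x0008UL /* illustrative bit position */

static bool flags_mergeable(vm_flags_t a, vm_flags_t b)
{
	/* Any differing bit other than VM_SOFTDIRTY blocks a merge. */
	return ((a ^ b) & ~VM_SOFTDIRTY) == 0;
}

int main(void)
{
	printf("%d\n", flags_mergeable(VM_READ | VM_WRITE,
				       VM_READ | VM_WRITE | VM_SOFTDIRTY)); /* 1 */
	printf("%d\n", flags_mergeable(VM_READ, VM_READ | VM_WRITE));       /* 0 */
	return 0;
}
```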
|
**vma.h**

- 101: `vm_flags_t vm_flags;` (member)
- 173: `.vm_flags = vm_flags_, \`
- 187: `.vm_flags = vma_->vm_flags, \`
- 241: `desc->vm_flags = vma->vm_flags;` in `vma_to_desc()`
- 263: `if (vma->vm_flags != desc->vm_flags)` in `set_vma_from_desc()`
- 264: `vm_flags_set(vma, desc->vm_flags);` in `set_vma_from_desc()`
- 291: `vm_flags_t vm_flags);`
- 316: `vm_flags_t vm_flags,`
- 371: `if (vma->vm_flags & VM_SHARED)` in `vma_wants_manual_pte_write_upgrade()`
- 373: `return !!(vma->vm_flags & VM_WRITE);` in `vma_wants_manual_pte_write_upgrade()`
- [all …]
|
**userfaultfd.c**

- 51: `else if (!(vma->vm_flags & VM_SHARED) &&` in `find_vma_and_prepare_anon()`
- 1496: `if ((src_vma->vm_flags & VM_ACCESS_FLAGS) != (dst_vma->vm_flags & VM_ACCESS_FLAGS) ||` in `validate_move_areas()`
- 1501: `if ((src_vma->vm_flags & VM_LOCKED) != (dst_vma->vm_flags & VM_LOCKED))` in `validate_move_areas()`
- 1901: `vm_flags_t vm_flags)` in `userfaultfd_set_vm_flags()` (argument)
- 1903: `const bool uffd_wp_changed = (vma->vm_flags ^ vm_flags) & VM_UFFD_WP;` in `userfaultfd_set_vm_flags()`
- 1905: `vm_flags_reset(vma, vm_flags);` in `userfaultfd_set_vm_flags()`
- 1917: `vm_flags_t vm_flags)` in `userfaultfd_set_ctx()` (argument)
- 1922: `(vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags);` in `userfaultfd_set_ctx()`
- 1968: `vm_flags_t vm_flags,` in `userfaultfd_register_range()` (argument)
- 1993: `(vma->vm_flags & vm_flags) == vm_flags)` in `userfaultfd_register_range()`
- [all …]
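Lines 1903 and 1922 show two related idioms: XOR-and-mask to ask whether one particular bit changed, and clear-then-OR to swap out only the uffd-related bits of a flag word. A userspace sketch under assumed bit values (`__VM_UFFD_FLAGS` below is a stand-in, not the kernel's definition):

```c
/*
 * Sketch of the two userfaultfd idioms above. Bit values are
 * illustrative; the helper names mirror the hit sites but are not
 * the kernel functions themselves.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long vm_flags_t;

#define VM_UFFD_MISSING 0x0100UL /* illustrative */
#define VM_UFFD_WP      0x0200UL /* illustrative */
#define __VM_UFFD_FLAGS (VM_UFFD_MISSING | VM_UFFD_WP)

/* Mirrors uffd_wp_changed in userfaultfd_set_vm_flags(). */
static bool wp_bit_changed(vm_flags_t old, vm_flags_t new)
{
	return (old ^ new) & VM_UFFD_WP;
}

/*
 * Mirrors the update in userfaultfd_set_ctx(): keep every non-uffd
 * bit, then install the requested uffd bits.
 */
static vm_flags_t replace_uffd_bits(vm_flags_t old, vm_flags_t uffd)
{
	return (old & ~__VM_UFFD_FLAGS) | uffd;
}

int main(void)
{
	vm_flags_t f = 0x7UL | VM_UFFD_MISSING;

	f = replace_uffd_bits(f, VM_UFFD_WP); /* MISSING out, WP in */
	printf("flags=%#lx wp_changed=%d\n", f,
	       wp_bit_changed(f, f & ~VM_UFFD_WP)); /* wp_changed=1 */
	return 0;
}
```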
|
**nommu.c**

- 852: `vm_flags_t vm_flags;` in `determine_vm_flags()` (local)
- 878: `vm_flags |= VM_MAYOVERLAY;` in `determine_vm_flags()`
- 885: `return vm_flags;` in `determine_vm_flags()`
- 965: `region->vm_flags = vma->vm_flags;` in `do_mmap_private()`
- 1016: `vm_flags_t vm_flags,` in `do_mmap()` (argument)
- 1056: `region->vm_flags = vm_flags;` in `do_mmap()`
- 1059: `vm_flags_init(vma, vm_flags);` in `do_mmap()`
- 1615: `if (!(vma->vm_flags & VM_USERMAP))` in `remap_vmalloc_range()`
- 1743: `if (vma->vm_flags & VM_MAYREAD) {` in `__copy_remote_vm_str()`
- 1821: `if (vma->vm_flags & VM_SHARED) {` in `nommu_shrink_inode_mappings()`
- [all …]
|
**execmem.c**

- 29: `pgprot_t pgprot, unsigned long vm_flags)` in `execmem_vmalloc()` (argument)
- 39: `vm_flags |= VM_DEFER_KMEMLEAK;` in `execmem_vmalloc()`
- 41: `if (vm_flags & VM_ALLOW_HUGE_VMAP)` in `execmem_vmalloc()`
- 45: `pgprot, vm_flags, NUMA_NO_NODE,` in `execmem_vmalloc()`
- 51: `pgprot, vm_flags, NUMA_NO_NODE,` in `execmem_vmalloc()`
- 85: `pgprot_t pgprot, unsigned long vm_flags)` in `execmem_vmalloc()` (argument)
- 286: `unsigned long vm_flags = VM_ALLOW_HUGE_VMAP;` in `execmem_cache_populate()` (local)
- 293: `p = execmem_vmalloc(range, alloc_size, PAGE_KERNEL, vm_flags);` in `execmem_cache_populate()`
- 296: `p = execmem_vmalloc(range, alloc_size, PAGE_KERNEL, vm_flags);` in `execmem_cache_populate()`
- 468: `unsigned long vm_flags = VM_FLUSH_RESET_PERMS;` in `execmem_alloc()` (local)
- [all …]
|
**mlock.c**

- 332: `if (!(vma->vm_flags & VM_LOCKED))` in `allow_mlock_munlock()`
- 371: `if (vma->vm_flags & VM_LOCKED)` in `mlock_pte_range()`
- 396: `if (vma->vm_flags & VM_LOCKED)` in `mlock_pte_range()`
- 473: `vm_flags_t oldflags = vma->vm_flags;` in `mlock_fixup()`
- 545: `newflags = vma->vm_flags & ~VM_LOCKED_MASK;` in `apply_vma_lock_flags()`
- 586: `if (vma->vm_flags & VM_LOCKED) {` in `count_mm_mlocked_page_nr()`
- 666: `vm_flags_t vm_flags = VM_LOCKED;` in `SYSCALL_DEFINE3()` (local)
- 672: `vm_flags |= VM_LOCKONFAULT;` in `SYSCALL_DEFINE3()`
- 674: `return do_mlock(start, len, vm_flags);` in `SYSCALL_DEFINE3()`
- 731: `newflags = vma->vm_flags & ~VM_LOCKED_MASK;` in `apply_mlockall_flags()`
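Lines 545, 666, and 672 outline the mlock2() flag flow: build `VM_LOCKED` (plus `VM_LOCKONFAULT` for `MLOCK_ONFAULT`), and strip the whole `VM_LOCKED_MASK` from a VMA's flags before applying the new state so stale lock bits never survive. A minimal sketch with illustrative constants and hypothetical helper names:

```c
/*
 * Sketch of the mlock flag computation suggested by the hits above.
 * Constants are illustrative stand-ins for the kernel's values.
 */
#include <stdio.h>

typedef unsigned long vm_flags_t;

#define VM_LOCKED      0x2000UL /* illustrative */
#define VM_LOCKONFAULT 0x4000UL /* illustrative */
#define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT)
#define MLOCK_ONFAULT  0x01

static vm_flags_t mlock2_vm_flags(int mlock_flags)
{
	vm_flags_t vm_flags = VM_LOCKED;

	if (mlock_flags & MLOCK_ONFAULT)
		vm_flags |= VM_LOCKONFAULT;
	return vm_flags;
}

static vm_flags_t apply_lock_flags(vm_flags_t old, vm_flags_t to_add)
{
	/* Drop any previous lock state before applying the new one. */
	vm_flags_t newflags = old & ~VM_LOCKED_MASK;

	return newflags | to_add;
}

int main(void)
{
	vm_flags_t vma_flags = 0x3UL | VM_LOCKONFAULT;

	vma_flags = apply_lock_flags(vma_flags, mlock2_vm_flags(0));
	printf("%#lx\n", vma_flags); /* LOCKONFAULT cleared, LOCKED set */
	return 0;
}
```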
|
**mremap.c**

- 949: `if (vma->vm_flags & VM_MAYSHARE)` in `vrm_set_new_addr()`
- 971: `if (!(vrm->vma->vm_flags & VM_ACCOUNT))` in `vrm_calc_charge()`
- 998: `if (!(vrm->vma->vm_flags & VM_ACCOUNT))` in `vrm_uncharge()`
- 1017: `vm_stat_account(mm, vma->vm_flags, pages);` in `vrm_stat_account()`
- 1018: `if (vma->vm_flags & VM_LOCKED)` in `vrm_stat_account()`
- 1032: `vm_flags_t dummy = vma->vm_flags;` in `prep_move_vma()`
- 1399: `vm_flags_t vm_flags = vrm->vma->vm_flags;` in `mremap_to()` (local)
- 1402: `if (!may_expand_vm(mm, vm_flags, pages))` in `mremap_to()`
- 1693: `(vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))` in `check_prep_vma()`
- 1723: `if (vma->vm_flags & VM_LOCKED)` in `check_prep_vma()`
- [all …]
|
**gup.c**

- 610: `if (!(vma->vm_flags & VM_MAYWRITE))` in `can_follow_write_common()`
- 614: `if (vma->vm_flags & VM_WRITE)` in `can_follow_write_common()`
- 1220: `vm_flags_t vm_flags = vma->vm_flags;` in `check_vma_flags()` (local)
- 1225: `if (vm_flags & (VM_IO | VM_PFNMAP))` in `check_vma_flags()`
- 1245: `if (!(vm_flags & VM_WRITE) || (vm_flags & VM_SHADOW_STACK)) {` in `check_vma_flags()`
- 1257: `if (!is_cow_mapping(vm_flags))` in `check_vma_flags()`
- 1260: `} else if (!(vm_flags & VM_READ)) {` in `check_vma_flags()`
- 1267: `if (!(vm_flags & VM_MAYREAD))` in `check_vma_flags()`
- 1539: `if (!(vm_flags & vma->vm_flags))` in `vma_permits_fault()`
- 2005: `vm_flags_t vm_flags;` in `__get_user_pages_locked()` (local)
- [all …]
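The `check_vma_flags()` hits outline the GUP permission gate: `VM_IO`/`VM_PFNMAP` areas are refused, a write needs `VM_WRITE` or (with force) a COW mapping, and a read needs `VM_READ` or (with force) `VM_MAYREAD`. A toy model with illustrative bit values; the real function handles many more cases:

```c
/*
 * Simplified sketch of the access checks visible in the hits above.
 * Flag values are illustrative; gup_flags_ok() is a toy stand-in for
 * check_vma_flags(), not the kernel function.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long vm_flags_t;

#define VM_READ     0x0001UL
#define VM_WRITE    0x0002UL
#define VM_SHARED   0x0008UL /* illustrative */
#define VM_MAYREAD  0x0010UL /* illustrative */
#define VM_MAYWRITE 0x0020UL /* illustrative */
#define VM_PFNMAP   0x0400UL /* illustrative */
#define VM_IO       0x4000UL /* illustrative */

static bool is_cow_mapping(vm_flags_t flags)
{
	/* Private mapping that may become writable: classic COW. */
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

static bool gup_flags_ok(vm_flags_t vm_flags, bool write, bool force)
{
	if (vm_flags & (VM_IO | VM_PFNMAP))
		return false;
	if (write) {
		if (!(vm_flags & VM_WRITE))
			return force && is_cow_mapping(vm_flags);
	} else if (!(vm_flags & VM_READ)) {
		if (!force || !(vm_flags & VM_MAYREAD))
			return false;
	}
	return true;
}

int main(void)
{
	printf("%d\n", gup_flags_ok(VM_READ | VM_MAYWRITE, true, true)); /* 1 */
	printf("%d\n", gup_flags_ok(VM_READ | VM_SHARED, true, true));   /* 0 */
	return 0;
}
```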
|
**mprotect.c**

- 45: `if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))` in `maybe_change_pte_writable()`
- 102: `if (!(vma->vm_flags & VM_SHARED))` in `can_change_pte_writable()`
- 140: `if (is_cow_mapping(vma->vm_flags) &&` in `prot_numa_skip()`
- 255: `if (vma->vm_flags & VM_SHARED) {` in `set_write_prot_commit_flush_ptes()`
- 291: `if (prot_numa && !(vma->vm_flags & VM_SHARED) &&` in `change_pte_range()`
- 760: `vm_flags_t oldflags = READ_ONCE(vma->vm_flags);` in `mprotect_fixup()`
- 914: `if (!(vma->vm_flags & VM_GROWSDOWN))` in `do_mprotect_pkey()`
- 922: `if (!(vma->vm_flags & VM_GROWSUP))` in `do_mprotect_pkey()`
- 945: `if (rier && (vma->vm_flags & VM_MAYEXEC))` in `do_mprotect_pkey()`
- 957: `newflags |= (vma->vm_flags & ~mask_off_old_flags);` in `do_mprotect_pkey()`
- [all …]
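Line 957 is the bit-preserving merge at the heart of an mprotect() flag update: the new protection bits are OR-ed with every old bit outside `mask_off_old_flags`, so attributes such as `VM_LOCKED` survive the call. A standalone sketch with illustrative values and a hypothetical `merge_prot_flags()` helper:

```c
/*
 * Sketch of the line-957 merge: keep everything the protection change
 * must not clobber, then add the new protection bits. Constants are
 * illustrative stand-ins.
 */
#include <stdio.h>

typedef unsigned long vm_flags_t;

#define VM_READ   0x1UL
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_LOCKED 0x2000UL /* illustrative */

static vm_flags_t merge_prot_flags(vm_flags_t old, vm_flags_t newprot,
				   vm_flags_t mask_off_old_flags)
{
	vm_flags_t newflags = newprot;

	/* Preserve every old bit outside the protection mask. */
	newflags |= old & ~mask_off_old_flags;
	return newflags;
}

int main(void)
{
	vm_flags_t old = VM_READ | VM_WRITE | VM_LOCKED;
	vm_flags_t mask = VM_READ | VM_WRITE | VM_EXEC;

	/* PROT_READ only: the write bit drops, VM_LOCKED survives. */
	printf("%#lx\n", merge_prot_flags(old, VM_READ, mask));
	return 0;
}
```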
|
**memory.c**

- 620: `if (vma->vm_flags & VM_MIXEDMAP) {` in `vm_normal_page()`
- 754: `if ((vma->vm_flags & VM_WRITE) &&` in `restore_exclusive_pte()`
- 799: `vm_flags_t vm_flags = dst_vma->vm_flags;` in `copy_nonpresent_pte()` (local)
- 830: `is_cow_mapping(vm_flags)) {` in `copy_nonpresent_pte()`
- 871: `is_cow_mapping(vm_flags)) {` in `copy_nonpresent_pte()`
- 965: `if (src_vma->vm_flags & VM_SHARED)` in `__copy_present_ptes()`
- 2537: `BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));` in `vmf_insert_pfn_prot()`
- 2586: `if (vma->vm_flags & VM_MIXEDMAP)` in `vm_mixed_ok()`
- 5015: `if (vma->vm_flags & VM_SHARED)` in `do_anonymous_page()`
- 5072: `if (vma->vm_flags & VM_WRITE)` in `do_anonymous_page()`
- [all …]
|
**ksm.c**

- 690: `if (vm_flags & VM_SAO)` in `ksm_compatible()`
- 694: `if (vm_flags & VM_SPARC_ADI)` in `ksm_compatible()`
- 2705: `if (vm_flags & VM_MERGEABLE)` in `__ksm_should_add_vma()`
- 2743: `vm_flags_t vm_flags)` in `ksm_vma_flags()` (argument)
- 2747: `vm_flags |= VM_MERGEABLE;` in `ksm_vma_flags()`
- 2749: `return vm_flags;` in `ksm_vma_flags()`
- 2850: `if (vma->vm_flags & VM_MERGEABLE)` in `ksm_madvise()`
- 2861: `*vm_flags |= VM_MERGEABLE;` in `ksm_madvise()`
- 2865: `if (!(*vm_flags & VM_MERGEABLE))` in `ksm_madvise()`
- 2874: `*vm_flags &= ~VM_MERGEABLE;` in `ksm_madvise()`
- [all …]
|
**rmap.c**

- 842: `vm_flags_t vm_flags;` (member)
- 860: `if (vma->vm_flags & VM_LOCKED) {` in `folio_referenced_one()`
- 865: `pra->vm_flags |= VM_LOCKED;` in `folio_referenced_one()`
- 917: `if ((vma->vm_flags & VM_LOCKED) &&` in `folio_referenced_one()`
- 929: `pra->vm_flags |= VM_LOCKED;` in `folio_referenced_one()`
- 941: `pra->vm_flags |= vma->vm_flags & ~VM_LOCKED;` in `folio_referenced_one()`
- 1002: `*vm_flags = 0;` in `folio_referenced()`
- 1016: `*vm_flags = pra.vm_flags;` in `folio_referenced()`
- 1107: `if (vma->vm_flags & VM_SHARED)` in `invalid_mkclean_vma()`
- 2215: `if (vma->vm_flags & VM_LOCKED)` in `try_to_unmap_one()`
- [all …]
|
**huge_memory.c**

- 101: `vm_flags_t vm_flags,` in `__thp_vma_allowable_orders()` (argument)
- 1054: `if (likely(vma->vm_flags & VM_WRITE))` in `maybe_pmd_mkwrite()`
- 1093: `vm_flags_t vm_flags)` in `__thp_get_unmapped_area()` (argument)
- 1137: `vm_flags_t vm_flags)` in `thp_get_unmapped_area_vmflags()` (argument)
- 1147: `vm_flags);` in `thp_get_unmapped_area_vmflags()`
- 1328: `khugepaged_enter_vma(vma, vma->vm_flags);` in `do_huge_pmd_anonymous_page()`
- 1465: `BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));` in `vmf_insert_pfn_pmd()`
- 1529: `if (likely(vma->vm_flags & VM_WRITE))` in `maybe_pud_mkwrite()`
- 1602: `BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));` in `vmf_insert_pfn_pud()`
- 2012: `if (!(vma->vm_flags & VM_SHARED)) {` in `can_change_pmd_writable()`
- [all …]
|
**hugetlb.c**

- 1181: `if (vma->vm_flags & VM_MAYSHARE) {` in `vma_resv_map()`
- 1238: `if (vma->vm_flags & VM_MAYSHARE) {` in `hugetlb_dup_vma_private()`
- 2616: `if (vma->vm_flags & VM_MAYSHARE) {` in `__vma_reservation_common()`
- 2626: `if (vma->vm_flags & VM_MAYSHARE) {` in `__vma_reservation_common()`
- 5359: `if (vma->vm_flags & VM_MAYSHARE) {` in `hugetlb_vm_op_open()`
- 5506: `if (vma->vm_flags & VM_WRITE)` in `set_huge_ptep_maybe_writable()`
- 6159: `if (vma->vm_flags & VM_MAYSHARE) {` in `hugetlb_wp()`
- 7245: `vm_flags_t vm_flags)` in `hugetlb_reserve_pages()` (argument)
- 7271: `if (vm_flags & VM_NORESERVE)` in `hugetlb_reserve_pages()`
- 7462: `vm_flags_t vm_flags = vma->vm_flags & ~VM_LOCKED_MASK;` in `page_table_shareable()` (local)
- [all …]
|
**mapping_dirty_helpers.c**

- 221: `vm_flags_t vm_flags = READ_ONCE(walk->vma->vm_flags);` in `wp_clean_test_walk()` (local)
- 224: `if ((vm_flags & (VM_SHARED | VM_MAYWRITE | VM_HUGETLB)) !=` in `wp_clean_test_walk()`
|
**secretmem.c**

- 125: `if ((desc->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)` in `secretmem_mmap_prepare()`
- 128: `if (!mlock_future_ok(desc->mm, desc->vm_flags | VM_LOCKED, len))` in `secretmem_mmap_prepare()`
- 131: `desc->vm_flags |= VM_LOCKED | VM_DONTDUMP;` in `secretmem_mmap_prepare()`
|
**mseal.c**

- 71: `if (!(vma->vm_flags & VM_SEALED)) {` in `mseal_apply()`
- 74: `vma->vm_flags | VM_SEALED);` in `mseal_apply()`
|
**msync.c**

- 83: `(vma->vm_flags & VM_LOCKED)) {` in `SYSCALL_DEFINE3()`
- 93: `(vma->vm_flags & VM_SHARED)) {` in `SYSCALL_DEFINE3()`
|
**debug.c**

- 171: `vma->vm_flags, &vma->vm_flags);` in `dump_vma()`
- 259: `vmg->start, vmg->end, vmg->vm_flags,` in `dump_vmg()`
|
**khugepaged.c**

- 350: `vm_flags_t *vm_flags, int advice)` in `hugepage_madvise()` (argument)
- 363: `*vm_flags &= ~VM_NOHUGEPAGE;` in `hugepage_madvise()`
- 364: `*vm_flags |= VM_HUGEPAGE;` in `hugepage_madvise()`
- 370: `khugepaged_enter_vma(vma, *vm_flags);` in `hugepage_madvise()`
- 373: `*vm_flags &= ~VM_HUGEPAGE;` in `hugepage_madvise()`
- 374: `*vm_flags |= VM_NOHUGEPAGE;` in `hugepage_madvise()`
- 473: `vm_flags_t vm_flags)` in `khugepaged_enter_vma()` (argument)
- 477: `if (thp_vma_allowable_order(vma, vm_flags, TVA_ENFORCE_SYSFS,` in `khugepaged_enter_vma()`
- 1538: `if (!thp_vma_allowable_order(vma, vma->vm_flags, 0, PMD_ORDER))` in `collapse_pte_mapped_thp()`
- 1615: `if (userfaultfd_armed(vma) && !(vma->vm_flags & VM_SHARED))` in `collapse_pte_mapped_thp()`
- [all …]
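Lines 363, 364, 373, and 374 show `hugepage_madvise()` treating `VM_HUGEPAGE` and `VM_NOHUGEPAGE` as mutually exclusive: each advice value clears its opposite before setting its own bit. A compact sketch (the flag bit values are stand-ins; the `MADV_*` numbers match the uapi):

```c
/*
 * Sketch of the paired-flag toggle from the hits above. Bit values
 * are illustrative; hugepage_hint() is a stand-in, not the kernel
 * function.
 */
#include <stdio.h>

typedef unsigned long vm_flags_t;

#define VM_HUGEPAGE   0x01000UL /* illustrative */
#define VM_NOHUGEPAGE 0x02000UL /* illustrative */

#define MADV_HUGEPAGE   14
#define MADV_NOHUGEPAGE 15

static void hugepage_hint(vm_flags_t *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		break;
	}
}

int main(void)
{
	vm_flags_t flags = VM_NOHUGEPAGE;

	hugepage_hint(&flags, MADV_HUGEPAGE);
	printf("%#lx\n", flags); /* only VM_HUGEPAGE set */
	return 0;
}
```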
|
**vma_init.c**

- 51: `vm_flags_init(dest, src->vm_flags);` in `vm_area_init_from()`
- 127: `ASSERT_EXCLUSIVE_WRITER(orig->vm_flags);` in `vm_area_dup()`
|
**internal.h**

- 964: `extern bool mlock_future_ok(struct mm_struct *mm, vm_flags_t vm_flags,`
- 1037: `if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED))` in `mlock_vma_folio()`
- 1054: `if (unlikely(vma->vm_flags & VM_LOCKED))` in `munlock_vma_folio()`
- 1394: `unsigned long vm_flags, unsigned long start,`
- 1506: `return is_cow_mapping(vma->vm_flags);` in `gup_must_unshare()`
- 1548: `return !(vma->vm_flags & VM_SOFTDIRTY);` in `vma_soft_dirty_enabled()`
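The `mlock_vma_folio()` hit at line 1037 packs two tests into one mask-and-compare: `VM_LOCKED` must be set and every `VM_SPECIAL` bit must be clear. A small sketch with stand-in constants and a hypothetical helper:

```c
/*
 * Sketch of the mask-and-compare idiom: (flags & (A|B)) == A asks
 * "is A set and B fully clear" in a single test. Constants are
 * illustrative stand-ins for the kernel's values.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long vm_flags_t;

#define VM_LOCKED  0x2000UL /* illustrative */
#define VM_IO      0x4000UL /* illustrative */
#define VM_PFNMAP  0x0400UL /* illustrative */
#define VM_SPECIAL (VM_IO | VM_PFNMAP)

static bool should_mlock_folio(vm_flags_t flags)
{
	return (flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
}

int main(void)
{
	printf("%d\n", should_mlock_folio(VM_LOCKED));         /* 1 */
	printf("%d\n", should_mlock_folio(VM_LOCKED | VM_IO)); /* 0 */
	return 0;
}
```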
|
**madvise.c**

- 161: `if (new_flags == vma->vm_flags && (!set_new_anon_name ||` in `madvise_update_vma()`
- 595: `return !(vma->vm_flags & (VM_LOCKED|VM_PFNMAP|VM_HUGETLB));` in `can_madv_lru_vma()`
- 644: `(vma->vm_flags & VM_MAYSHARE)))` in `madvise_pageout()`
- 888: `return !(vma->vm_flags & forbidden);` in `madvise_dontneed_free_valid_vma()`
- 1023: `if (vma->vm_flags & VM_LOCKED)` in `madvise_remove()`
- 1069: `return !(vma->vm_flags & disallowed);` in `is_valid_guard_vma()`
- 1313: `if ((vma->vm_flags & VM_WRITE) &&` in `can_madvise_modify()`
- 1337: `vm_flags_t new_flags = vma->vm_flags;` in `madvise_vma_behavior()`
|
**mmap_lock.c**

- 348: `if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) {` in `lock_mm_and_find_vma()`
- 371: `if (!(vma->vm_flags & VM_GROWSDOWN))` in `lock_mm_and_find_vma()`
|