Lines matching refs:mm (each entry gives the source line number, the matching source line, the enclosing function, and whether mm is an argument or a local there)
79 static void unmap_region(struct mm_struct *mm,
197 struct mm_struct *mm = current->mm; in SYSCALL_DEFINE1() local
204 if (mmap_write_lock_killable(mm)) in SYSCALL_DEFINE1()
207 origbrk = mm->brk; in SYSCALL_DEFINE1()
216 min_brk = mm->start_brk; in SYSCALL_DEFINE1()
218 min_brk = mm->end_data; in SYSCALL_DEFINE1()
220 min_brk = mm->start_brk; in SYSCALL_DEFINE1()
231 if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk, in SYSCALL_DEFINE1()
232 mm->end_data, mm->start_data)) in SYSCALL_DEFINE1()
236 oldbrk = PAGE_ALIGN(mm->brk); in SYSCALL_DEFINE1()
238 mm->brk = brk; in SYSCALL_DEFINE1()
246 if (brk <= mm->brk) { in SYSCALL_DEFINE1()
254 mm->brk = brk; in SYSCALL_DEFINE1()
255 ret = __do_munmap(mm, newbrk, oldbrk-newbrk, &uf, true); in SYSCALL_DEFINE1()
257 mm->brk = origbrk; in SYSCALL_DEFINE1()
266 next = find_vma(mm, oldbrk); in SYSCALL_DEFINE1()
273 mm->brk = brk; in SYSCALL_DEFINE1()
276 populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0; in SYSCALL_DEFINE1()
278 mmap_read_unlock(mm); in SYSCALL_DEFINE1()
280 mmap_write_unlock(mm); in SYSCALL_DEFINE1()
281 userfaultfd_unmap_complete(mm, &uf); in SYSCALL_DEFINE1()
287 mmap_write_unlock(mm); in SYSCALL_DEFINE1()
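
The block above is the body of the brk(2) syscall (SYSCALL_DEFINE1). As an illustrative aside, not part of the listing, the user-space sketch below drives that path through glibc's sbrk()/brk() wrappers (assumes a typical glibc/Linux build): growing the break takes the expansion path, and restoring the old break takes the "brk <= mm->brk" shrink branch that calls __do_munmap() above.

#define _DEFAULT_SOURCE         /* for sbrk()/brk() declarations */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        setvbuf(stdout, NULL, _IONBF, 0);  /* keep stdio from allocating on the heap/brk */

        void *old_brk = sbrk(0);           /* current program break */
        printf("program break: %p\n", old_brk);

        if (sbrk(4096) == (void *)-1) {    /* grow the heap by one page */
                perror("sbrk");
                return 1;
        }
        printf("after +4096:   %p\n", sbrk(0));

        if (brk(old_brk) != 0) {           /* shrink back: the "brk <= mm->brk" branch above */
                perror("brk");
                return 1;
        }
        printf("restored:      %p\n", sbrk(0));
        return 0;
}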
331 static int browse_rb(struct mm_struct *mm) in browse_rb() argument
333 struct rb_root *root = &mm->mm_rb; in browse_rb()
356 spin_lock(&mm->page_table_lock); in browse_rb()
363 spin_unlock(&mm->page_table_lock); in browse_rb()
392 static void validate_mm(struct mm_struct *mm) in validate_mm() argument
397 struct vm_area_struct *vma = mm->mmap; in validate_mm()
414 if (i != mm->map_count) { in validate_mm()
415 pr_emerg("map_count %d vm_next %d\n", mm->map_count, i); in validate_mm()
418 if (highest_address != mm->highest_vm_end) { in validate_mm()
420 mm->highest_vm_end, highest_address); in validate_mm()
423 i = browse_rb(mm); in validate_mm()
424 if (i != mm->map_count) { in validate_mm()
426 pr_emerg("map_count %d rb %d\n", mm->map_count, i); in validate_mm()
429 VM_BUG_ON_MM(bug, mm); in validate_mm()
433 #define validate_mm(mm) do { } while (0) argument
529 static int find_vma_links(struct mm_struct *mm, unsigned long addr, in find_vma_links() argument
535 mmap_assert_locked(mm); in find_vma_links()
536 __rb_link = &mm->mm_rb.rb_node; in find_vma_links()
573 static inline struct vm_area_struct *vma_next(struct mm_struct *mm, in vma_next() argument
577 return mm->mmap; in vma_next()
597 munmap_vma_range(struct mm_struct *mm, unsigned long start, unsigned long len, in munmap_vma_range() argument
602 while (find_vma_links(mm, start, start + len, pprev, link, parent)) in munmap_vma_range()
603 if (do_munmap(mm, start, len, uf)) in munmap_vma_range()
608 static unsigned long count_vma_pages_range(struct mm_struct *mm, in count_vma_pages_range() argument
615 vma = find_vma_intersection(mm, addr, end); in count_vma_pages_range()
636 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, in __vma_link_rb() argument
643 mm->highest_vm_end = vm_end_gap(vma); in __vma_link_rb()
657 vma_rb_insert(vma, &mm->mm_rb); in __vma_link_rb()
678 __vma_link(struct mm_struct *mm, struct vm_area_struct *vma, in __vma_link() argument
682 __vma_link_list(mm, vma, prev); in __vma_link()
683 __vma_link_rb(mm, vma, rb_link, rb_parent); in __vma_link()
686 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma, in vma_link() argument
697 __vma_link(mm, vma, prev, rb_link, rb_parent); in vma_link()
703 mm->map_count++; in vma_link()
704 validate_mm(mm); in vma_link()
711 static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) in __insert_vm_struct() argument
716 if (find_vma_links(mm, vma->vm_start, vma->vm_end, in __insert_vm_struct()
719 __vma_link(mm, vma, prev, rb_link, rb_parent); in __insert_vm_struct()
720 mm->map_count++; in __insert_vm_struct()
723 static __always_inline void __vma_unlink(struct mm_struct *mm, in __vma_unlink() argument
727 vma_rb_erase_ignore(vma, &mm->mm_rb, ignore); in __vma_unlink()
728 __vma_unlink_list(mm, vma); in __vma_unlink()
730 vmacache_invalidate(mm); in __vma_unlink()
744 struct mm_struct *mm = vma->vm_mm; in __vma_adjust() local
905 __vma_unlink(mm, next, next); in __vma_adjust()
916 __vma_unlink(mm, next, vma); in __vma_adjust()
925 __insert_vm_struct(mm, insert); in __vma_adjust()
931 mm->highest_vm_end = vm_end_gap(vma); in __vma_adjust()
959 mm->map_count--; in __vma_adjust()
1015 VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma)); in __vma_adjust()
1021 validate_mm(mm); in __vma_adjust()
1158 struct vm_area_struct *vma_merge(struct mm_struct *mm, in vma_merge() argument
1176 next = vma_next(mm, prev); in vma_merge()
1352 int mlock_future_check(struct mm_struct *mm, unsigned long flags, in mlock_future_check() argument
1360 locked += mm->locked_vm; in mlock_future_check()
1409 struct mm_struct *mm = current->mm; in do_mmap() local
1445 if (mm->map_count > sysctl_max_map_count) in do_mmap()
1456 if (find_vma_intersection(mm, addr, addr + len)) in do_mmap()
1461 pkey = execute_only_pkey(mm); in do_mmap()
1471 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; in do_mmap()
1477 if (mlock_future_check(mm, vm_flags, len)) in do_mmap()
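
The do_mmap() entries above include the mlock_future_check() call, which accounts locked pages against RLIMIT_MEMLOCK. A hedged user-space sketch (Linux-specific MAP_LOCKED, typical glibc environment assumed): requesting a locked anonymous mapping exercises that check; it may fail (typically EAGAIN) if the locked-memory limit would be exceeded.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
        size_t len = 4096;
        struct rlimit rl;

        getrlimit(RLIMIT_MEMLOCK, &rl);
        printf("RLIMIT_MEMLOCK cur=%llu\n", (unsigned long long)rl.rlim_cur);

        /* Anonymous, private, locked mapping: VM_LOCKED in vm_flags makes
         * do_mmap() call mlock_future_check() against the limit above. */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_LOCKED, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap(MAP_LOCKED)");
                return 1;
        }
        memset(p, 0, len);      /* touch the page; it is already resident */
        munmap(p, len);
        return 0;
}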
1719 struct mm_struct *mm = current->mm; in mmap_region() local
1726 if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) { in mmap_region()
1733 nr_pages = count_vma_pages_range(mm, addr, addr + len); in mmap_region()
1735 if (!may_expand_vm(mm, vm_flags, in mmap_region()
1741 if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf)) in mmap_region()
1748 if (security_vm_enough_memory_mm(mm, charged)) in mmap_region()
1756 vma = vma_merge(mm, prev, addr, addr + len, vm_flags, in mmap_region()
1766 vma = vm_area_alloc(mm); in mmap_region()
1805 merge = vma_merge(mm, prev, vma->vm_start, vma->vm_end, vma->vm_flags, in mmap_region()
1839 vma_link(mm, vma, prev, rb_link, rb_parent); in mmap_region()
1848 vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT); in mmap_region()
1852 vma == get_gate_vma(current->mm)) in mmap_region()
1855 mm->locked_vm += (len >> PAGE_SHIFT); in mmap_region()
1879 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); in mmap_region()
1901 struct mm_struct *mm = current->mm; in unmapped_area() local
1920 if (RB_EMPTY_ROOT(&mm->mm_rb)) in unmapped_area()
1922 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); in unmapped_area()
1976 gap_start = mm->highest_vm_end; in unmapped_area()
1996 struct mm_struct *mm = current->mm; in unmapped_area_topdown() local
2019 gap_start = mm->highest_vm_end; in unmapped_area_topdown()
2024 if (RB_EMPTY_ROOT(&mm->mm_rb)) in unmapped_area_topdown()
2026 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); in unmapped_area_topdown()
2139 struct mm_struct *mm = current->mm; in arch_get_unmapped_area() local
2152 vma = find_vma_prev(mm, addr, &prev); in arch_get_unmapped_area()
2161 info.low_limit = mm->mmap_base; in arch_get_unmapped_area()
2180 struct mm_struct *mm = current->mm; in arch_get_unmapped_area_topdown() local
2194 vma = find_vma_prev(mm, addr, &prev); in arch_get_unmapped_area_topdown()
2204 info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); in arch_get_unmapped_area_topdown()
2242 get_area = current->mm->get_unmapped_area; in get_unmapped_area()
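
get_unmapped_area() and the arch helpers listed above choose an address when mmap(2) is given only a hint. Illustrative sketch (the hint value 0x40000000 is an arbitrary assumption): the kernel is free to move the hint, which is visible by comparing the requested and returned addresses.

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        void *hint = (void *)0x40000000UL;   /* arbitrary, page-aligned hint */
        void *p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        printf("hint %p -> got %p%s\n", hint, p,
               p == hint ? " (hint honoured)" : " (kernel chose elsewhere)");
        munmap(p, 4096);
        return 0;
}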
2272 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) in find_vma() argument
2277 mmap_assert_locked(mm); in find_vma()
2279 vma = vmacache_find(mm, addr); in find_vma()
2283 rb_node = mm->mm_rb.rb_node; in find_vma()
2310 find_vma_prev(struct mm_struct *mm, unsigned long addr, in find_vma_prev() argument
2315 vma = find_vma(mm, addr); in find_vma_prev()
2319 struct rb_node *rb_node = rb_last(&mm->mm_rb); in find_vma_prev()
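
find_vma() above returns the first VMA whose vm_end lies above the given address. There is no direct user-space call for this, but as a rough analogue (an assumption for illustration, not a kernel interface) one can scan /proc/self/maps for the first range ending above an address:

#include <stdio.h>

int main(void)
{
        static int probe;                           /* some address in our data segment */
        unsigned long addr = (unsigned long)&probe;
        FILE *f = fopen("/proc/self/maps", "r");
        char line[512];

        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f)) {
                unsigned long start, end;
                /* maps lines start with "start-end"; stop at the first
                 * mapping with end > addr, like find_vma(). */
                if (sscanf(line, "%lx-%lx", &start, &end) == 2 && addr < end) {
                        printf("first vma ending above %lx:\n%s", addr, line);
                        break;
                }
        }
        fclose(f);
        return 0;
}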
2334 struct mm_struct *mm = vma->vm_mm; in acct_stack_growth() local
2338 if (!may_expand_vm(mm, vma->vm_flags, grow)) in acct_stack_growth()
2349 locked = mm->locked_vm + grow; in acct_stack_growth()
2366 if (security_vm_enough_memory_mm(mm, grow)) in acct_stack_growth()
2379 struct mm_struct *mm = vma->vm_mm; in expand_upwards() local
2440 spin_lock(&mm->page_table_lock); in expand_upwards()
2442 mm->locked_vm += grow; in expand_upwards()
2443 vm_stat_account(mm, vma->vm_flags, grow); in expand_upwards()
2450 mm->highest_vm_end = vm_end_gap(vma); in expand_upwards()
2451 spin_unlock(&mm->page_table_lock); in expand_upwards()
2459 validate_mm(mm); in expand_upwards()
2470 struct mm_struct *mm = vma->vm_mm; in expand_downwards() local
2520 spin_lock(&mm->page_table_lock); in expand_downwards()
2522 mm->locked_vm += grow; in expand_downwards()
2523 vm_stat_account(mm, vma->vm_flags, grow); in expand_downwards()
2529 spin_unlock(&mm->page_table_lock); in expand_downwards()
2537 validate_mm(mm); in expand_downwards()
2564 find_extend_vma(struct mm_struct *mm, unsigned long addr) in find_extend_vma() argument
2569 vma = find_vma_prev(mm, addr, &prev); in find_extend_vma()
2586 find_extend_vma(struct mm_struct *mm, unsigned long addr) in find_extend_vma() argument
2592 vma = find_vma(mm, addr); in find_extend_vma()
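
The expand_upwards()/expand_downwards() and find_extend_vma() entries above implement automatic stack growth, bounded by acct_stack_growth() and RLIMIT_STACK. A rough user-space sketch (assumes no tail-call optimisation, e.g. compiled with -O0, and a default stack limit of several MiB): each new frame that touches memory below the current stack faults and is satisfied by expand_downwards() until the limit is hit.

#include <stdio.h>
#include <sys/resource.h>

static void grow(int depth)
{
        volatile char pad[4096];        /* roughly one page per frame */
        pad[0] = (char)depth;           /* touch it so the page really faults in */
        if (depth > 0)
                grow(depth - 1);
}

int main(void)
{
        struct rlimit rl;

        getrlimit(RLIMIT_STACK, &rl);
        printf("RLIMIT_STACK cur=%llu bytes\n", (unsigned long long)rl.rlim_cur);
        grow(64);                       /* ~256 KiB of extra stack, well under typical limits */
        puts("stack grew without hitting the limit");
        return 0;
}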
2616 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) in remove_vma_list() argument
2621 update_hiwater_vm(mm); in remove_vma_list()
2627 vm_stat_account(mm, vma->vm_flags, -nrpages); in remove_vma_list()
2631 validate_mm(mm); in remove_vma_list()
2639 static void unmap_region(struct mm_struct *mm, in unmap_region() argument
2643 struct vm_area_struct *next = vma_next(mm, prev); in unmap_region()
2647 tlb_gather_mmu(&tlb, mm); in unmap_region()
2648 update_hiwater_rss(mm); in unmap_region()
2660 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, in detach_vmas_to_be_unmapped() argument
2666 insertion_point = (prev ? &prev->vm_next : &mm->mmap); in detach_vmas_to_be_unmapped()
2669 vma_rb_erase(vma, &mm->mm_rb); in detach_vmas_to_be_unmapped()
2670 mm->map_count--; in detach_vmas_to_be_unmapped()
2679 mm->highest_vm_end = prev ? vm_end_gap(prev) : 0; in detach_vmas_to_be_unmapped()
2683 vmacache_invalidate(mm); in detach_vmas_to_be_unmapped()
2701 int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, in __split_vma() argument
2765 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, in split_vma() argument
2768 if (mm->map_count >= sysctl_max_map_count) in split_vma()
2771 return __split_vma(mm, vma, addr, new_below); in split_vma()
2777 struct mm_struct *mm = start->vm_mm; in unlock_range() local
2782 mm->locked_vm -= vma_pages(tmp); in unlock_range()
2795 int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, in __do_munmap() argument
2814 arch_unmap(mm, start, end); in __do_munmap()
2817 vma = find_vma_intersection(mm, start, end); in __do_munmap()
2837 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) in __do_munmap()
2840 error = __split_vma(mm, vma, start, 0); in __do_munmap()
2847 last = find_vma(mm, end); in __do_munmap()
2849 int error = __split_vma(mm, last, end, 1); in __do_munmap()
2853 vma = vma_next(mm, prev); in __do_munmap()
2873 if (mm->locked_vm) in __do_munmap()
2877 if (!detach_vmas_to_be_unmapped(mm, vma, prev, end)) in __do_munmap()
2881 mmap_write_downgrade(mm); in __do_munmap()
2883 unmap_region(mm, vma, prev, start, end); in __do_munmap()
2886 remove_vma_list(mm, vma); in __do_munmap()
2891 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, in do_munmap() argument
2894 return __do_munmap(mm, start, len, uf, false); in do_munmap()
2900 struct mm_struct *mm = current->mm; in __vm_munmap() local
2903 if (mmap_write_lock_killable(mm)) in __vm_munmap()
2906 ret = __do_munmap(mm, start, len, &uf, downgrade); in __vm_munmap()
2913 mmap_read_unlock(mm); in __vm_munmap()
2916 mmap_write_unlock(mm); in __vm_munmap()
2918 userfaultfd_unmap_complete(mm, &uf); in __vm_munmap()
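
__do_munmap() above may call __split_vma() at both ends of the requested range. Illustrative user-space sketch (page size taken from sysconf): unmapping the middle page of a three-page anonymous mapping punches a hole, leaving the original mapping as two separate VMAs (visible as two lines in /proc/self/maps).

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        /* Punch a hole in the middle: both the start and the end of the
         * hole land inside the VMA, so each side needs a split. */
        if (munmap(p + page, page) != 0) {
                perror("munmap");
                return 1;
        }
        printf("kept [%p,%p) and [%p,%p)\n",
               (void *)p, (void *)(p + page),
               (void *)(p + 2 * page), (void *)(p + 3 * page));
        munmap(p, page);
        munmap(p + 2 * page, page);
        return 0;
}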
2943 struct mm_struct *mm = current->mm; in SYSCALL_DEFINE5() local
2964 if (mmap_write_lock_killable(mm)) in SYSCALL_DEFINE5()
2967 vma = vma_lookup(mm, start); in SYSCALL_DEFINE5()
3008 mmap_write_unlock(mm); in SYSCALL_DEFINE5()
3023 struct mm_struct *mm = current->mm; in do_brk_flags() local
3033 flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; in do_brk_flags()
3039 error = mlock_future_check(mm, mm->def_flags, len); in do_brk_flags()
3044 if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf)) in do_brk_flags()
3048 if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT)) in do_brk_flags()
3051 if (mm->map_count > sysctl_max_map_count) in do_brk_flags()
3054 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) in do_brk_flags()
3058 vma = vma_merge(mm, prev, addr, addr + len, flags, in do_brk_flags()
3066 vma = vm_area_alloc(mm); in do_brk_flags()
3078 vma_link(mm, vma, prev, rb_link, rb_parent); in do_brk_flags()
3081 mm->total_vm += len >> PAGE_SHIFT; in do_brk_flags()
3082 mm->data_vm += len >> PAGE_SHIFT; in do_brk_flags()
3084 mm->locked_vm += (len >> PAGE_SHIFT); in do_brk_flags()
3091 struct mm_struct *mm = current->mm; in vm_brk_flags() local
3103 if (mmap_write_lock_killable(mm)) in vm_brk_flags()
3107 populate = ((mm->def_flags & VM_LOCKED) != 0); in vm_brk_flags()
3108 mmap_write_unlock(mm); in vm_brk_flags()
3109 userfaultfd_unmap_complete(mm, &uf); in vm_brk_flags()
3123 void exit_mmap(struct mm_struct *mm) in exit_mmap() argument
3130 mmu_notifier_release(mm); in exit_mmap()
3132 if (unlikely(mm_is_oom_victim(mm))) { in exit_mmap()
3149 (void)__oom_reap_task_mm(mm); in exit_mmap()
3151 set_bit(MMF_OOM_SKIP, &mm->flags); in exit_mmap()
3152 mmap_write_lock(mm); in exit_mmap()
3153 mmap_write_unlock(mm); in exit_mmap()
3156 if (mm->locked_vm) in exit_mmap()
3157 unlock_range(mm->mmap, ULONG_MAX); in exit_mmap()
3159 arch_exit_mmap(mm); in exit_mmap()
3161 vma = mm->mmap; in exit_mmap()
3166 flush_cache_mm(mm); in exit_mmap()
3167 tlb_gather_mmu_fullmm(&tlb, mm); in exit_mmap()
3191 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) in insert_vm_struct() argument
3196 if (find_vma_links(mm, vma->vm_start, vma->vm_end, in insert_vm_struct()
3200 security_vm_enough_memory_mm(mm, vma_pages(vma))) in insert_vm_struct()
3220 vma_link(mm, vma, prev, rb_link, rb_parent); in insert_vm_struct()
3234 struct mm_struct *mm = vma->vm_mm; in copy_vma() local
3248 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) in copy_vma()
3250 new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, in copy_vma()
3290 vma_link(mm, new_vma, prev, rb_link, rb_parent); in copy_vma()
3307 bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages) in may_expand_vm() argument
3309 if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT) in may_expand_vm()
3313 mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) { in may_expand_vm()
3316 mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT) in may_expand_vm()
3321 (mm->data_vm + npages) << PAGE_SHIFT, in may_expand_vm()
3332 void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages) in vm_stat_account() argument
3334 WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages); in vm_stat_account()
3337 mm->exec_vm += npages; in vm_stat_account()
3339 mm->stack_vm += npages; in vm_stat_account()
3341 mm->data_vm += npages; in vm_stat_account()
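
may_expand_vm() above is where RLIMIT_AS and RLIMIT_DATA are enforced, and vm_stat_account() maintains the counters they are checked against. Hedged sketch (assumes the process currently uses well under 64 MiB of address space, as a small C program normally does): lowering RLIMIT_AS makes a large mmap() fail with ENOMEM from that check.

#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
        struct rlimit rl;

        getrlimit(RLIMIT_AS, &rl);
        rl.rlim_cur = 64UL * 1024 * 1024;          /* cap total address space at 64 MiB */
        if (setrlimit(RLIMIT_AS, &rl) != 0) {
                perror("setrlimit");
                return 1;
        }

        void *p = mmap(NULL, 256UL * 1024 * 1024, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED && errno == ENOMEM)
                puts("mmap rejected: total_vm would exceed RLIMIT_AS");
        else if (p != MAP_FAILED)
                munmap(p, 256UL * 1024 * 1024);
        return 0;
}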
3362 if (WARN_ON_ONCE(current->mm != new_vma->vm_mm)) in special_mapping_mremap()
3428 struct mm_struct *mm, in __install_special_mapping() argument
3436 vma = vm_area_alloc(mm); in __install_special_mapping()
3443 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY; in __install_special_mapping()
3449 ret = insert_vm_struct(mm, vma); in __install_special_mapping()
3453 vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); in __install_special_mapping()
3482 struct mm_struct *mm, in _install_special_mapping() argument
3486 return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec, in _install_special_mapping()
3490 int install_special_mapping(struct mm_struct *mm, in install_special_mapping() argument
3495 mm, addr, len, vm_flags, (void *)pages, in install_special_mapping()
3503 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) in vm_lock_anon_vma() argument
3510 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock); in vm_lock_anon_vma()
3526 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) in vm_lock_mapping() argument
3540 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock); in vm_lock_mapping()
3581 int mm_take_all_locks(struct mm_struct *mm) in mm_take_all_locks() argument
3586 BUG_ON(mmap_read_trylock(mm)); in mm_take_all_locks()
3590 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_take_all_locks()
3595 vm_lock_mapping(mm, vma->vm_file->f_mapping); in mm_take_all_locks()
3598 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_take_all_locks()
3603 vm_lock_mapping(mm, vma->vm_file->f_mapping); in mm_take_all_locks()
3606 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_take_all_locks()
3611 vm_lock_anon_vma(mm, avc->anon_vma); in mm_take_all_locks()
3617 mm_drop_all_locks(mm); in mm_take_all_locks()
3661 void mm_drop_all_locks(struct mm_struct *mm) in mm_drop_all_locks() argument
3666 BUG_ON(mmap_read_trylock(mm)); in mm_drop_all_locks()
3669 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_drop_all_locks()