Lines matching refs:mm (kernel/fork.c)

The fragments below are the mm lifecycle plumbing: allocation and setup (mm_init(), mm_alloc()), duplication at fork (dup_mm(), copy_mm()), and the two-stage teardown (mmput()/__mmput() for the address space, mmdrop()/__mmdrop() for struct mm_struct itself).

516 void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)  in dup_mm_exe_file()  argument
521 RCU_INIT_POINTER(mm->exe_file, exe_file); in dup_mm_exe_file()
531 static inline int mm_alloc_pgd(struct mm_struct *mm) in mm_alloc_pgd() argument
533 mm->pgd = pgd_alloc(mm); in mm_alloc_pgd()
534 if (unlikely(!mm->pgd)) in mm_alloc_pgd()
539 static inline void mm_free_pgd(struct mm_struct *mm) in mm_free_pgd() argument
541 pgd_free(mm, mm->pgd); in mm_free_pgd()
544 #define mm_alloc_pgd(mm) (0) argument
545 #define mm_free_pgd(mm) argument
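
The four fragments above are the two halves of a CONFIG_MMU conditional: MMU builds allocate a real page-global directory for the mm, while nommu builds compile the hooks away. Reassembled, with the return statements the matcher elides filled in from the stock kernel/fork.c stanza:

#ifdef CONFIG_MMU
static inline int mm_alloc_pgd(struct mm_struct *mm)
{
        mm->pgd = pgd_alloc(mm);
        if (unlikely(!mm->pgd))
                return -ENOMEM;
        return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
        pgd_free(mm, mm->pgd);
}
#else
#define mm_alloc_pgd(mm)        (0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */
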
551 static inline int mm_alloc_id(struct mm_struct *mm) in mm_alloc_id() argument
558 mm->mm_id = ret; in mm_alloc_id()
562 static inline void mm_free_id(struct mm_struct *mm) in mm_free_id() argument
564 const mm_id_t id = mm->mm_id; in mm_free_id()
566 mm->mm_id = MM_ID_DUMMY; in mm_free_id()
574 static inline int mm_alloc_id(struct mm_struct *mm) { return 0; } in mm_alloc_id() argument
575 static inline void mm_free_id(struct mm_struct *mm) {} in mm_free_id() argument
578 static void check_mm(struct mm_struct *mm) in check_mm() argument
586 long x = percpu_counter_sum(&mm->rss_stat[i]); in check_mm()
590 mm, resident_page_types[i], x, in check_mm()
596 if (mm_pgtables_bytes(mm)) in check_mm()
598 mm_pgtables_bytes(mm)); in check_mm()
601 VM_BUG_ON_MM(mm->pmd_huge_pte, mm); in check_mm()
606 #define free_mm(mm) (kmem_cache_free(mm_cachep, (mm))) argument
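
check_mm() is the leak detector run before the struct is freed: each rss counter must sum to zero across all CPUs, as must mm_pgtables_bytes(). The exact-but-slow sum it uses is percpu_counter_sum(); a minimal illustration of the two read modes (report_counter is a hypothetical helper, not kernel code):

#include <linux/percpu_counter.h>
#include <linux/printk.h>

static void report_counter(struct percpu_counter *fbc)
{
        /* cheap, racy snapshot vs. the exact cross-CPU sum check_mm() needs */
        pr_info("approx=%lld exact=%lld\n",
                percpu_counter_read(fbc), percpu_counter_sum(fbc));
}
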
610 struct mm_struct *mm = arg; in do_check_lazy_tlb() local
612 WARN_ON_ONCE(current->active_mm == mm); in do_check_lazy_tlb()
617 struct mm_struct *mm = arg; in do_shoot_lazy_tlb() local
619 if (current->active_mm == mm) { in do_shoot_lazy_tlb()
620 WARN_ON_ONCE(current->mm); in do_shoot_lazy_tlb()
622 switch_mm(mm, &init_mm, current); in do_shoot_lazy_tlb()
626 static void cleanup_lazy_tlbs(struct mm_struct *mm) in cleanup_lazy_tlbs() argument
664 on_each_cpu_mask(mm_cpumask(mm), do_shoot_lazy_tlb, (void *)mm, 1); in cleanup_lazy_tlbs()
666 on_each_cpu(do_check_lazy_tlb, (void *)mm, 1); in cleanup_lazy_tlbs()
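
cleanup_lazy_tlbs() deals with CPUs that still hold this mm as a lazy active_mm: it IPIs every CPU in mm_cpumask(mm) so each one switches itself over to init_mm, then sweeps all CPUs to assert none still reference it. A minimal sketch of that on_each_cpu_mask() pattern, with a hypothetical callback that only reports lazy holders rather than switching them away:

#include <linux/mm_types.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/smp.h>

static void note_lazy_user(void *info)
{
        struct mm_struct *mm = info;

        /* runs on each targeted CPU in IPI context: no sleeping here */
        if (current->active_mm == mm && !current->mm)
                pr_info("cpu %d holds the mm lazily\n", smp_processor_id());
}

static void scan_lazy_users(struct mm_struct *mm)
{
        on_each_cpu_mask(mm_cpumask(mm), note_lazy_user, mm, 1 /* wait */);
}
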
674 void __mmdrop(struct mm_struct *mm) in __mmdrop() argument
676 BUG_ON(mm == &init_mm); in __mmdrop()
677 WARN_ON_ONCE(mm == current->mm); in __mmdrop()
680 cleanup_lazy_tlbs(mm); in __mmdrop()
682 WARN_ON_ONCE(mm == current->active_mm); in __mmdrop()
683 mm_free_pgd(mm); in __mmdrop()
684 mm_free_id(mm); in __mmdrop()
685 destroy_context(mm); in __mmdrop()
686 mmu_notifier_subscriptions_destroy(mm); in __mmdrop()
687 check_mm(mm); in __mmdrop()
688 put_user_ns(mm->user_ns); in __mmdrop()
689 mm_pasid_drop(mm); in __mmdrop()
690 mm_destroy_cid(mm); in __mmdrop()
691 percpu_counter_destroy_many(mm->rss_stat, NR_MM_COUNTERS); in __mmdrop()
692 futex_hash_free(mm); in __mmdrop()
694 free_mm(mm); in __mmdrop()
700 struct mm_struct *mm; in mmdrop_async_fn() local
702 mm = container_of(work, struct mm_struct, async_put_work); in mmdrop_async_fn()
703 __mmdrop(mm); in mmdrop_async_fn()
706 static void mmdrop_async(struct mm_struct *mm) in mmdrop_async() argument
708 if (unlikely(atomic_dec_and_test(&mm->mm_count))) { in mmdrop_async()
709 INIT_WORK(&mm->async_put_work, mmdrop_async_fn); in mmdrop_async()
710 schedule_work(&mm->async_put_work); in mmdrop_async()
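
Two counters govern an mm's lifetime: mm_users counts tasks actively using the address space, while mm_count pins struct mm_struct itself (lazy-TLB holders plus one collective reference on behalf of all users). __mmdrop() runs when mm_count reaches zero; mmdrop_async() pushes that final teardown onto a workqueue for callers that cannot run it inline. A sketch of the grab/drop pairing (inspect_mm is hypothetical):

#include <linux/printk.h>
#include <linux/sched/mm.h>

static void inspect_mm(struct mm_struct *mm)
{
        mmgrab(mm);             /* pin mm_count: the struct cannot be freed */
        pr_info("map_count=%d\n", mm->map_count);
        mmdrop(mm);             /* the last drop invokes __mmdrop() */
}

Note the asymmetry: a pinned mm_count keeps only the structure alive. The VMAs and page tables disappear once mm_users hits zero, so touching user mappings additionally requires mmget_not_zero().
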
990 static void mm_init_aio(struct mm_struct *mm) in mm_init_aio() argument
993 spin_lock_init(&mm->ioctx_lock); in mm_init_aio()
994 mm->ioctx_table = NULL; in mm_init_aio()
998 static __always_inline void mm_clear_owner(struct mm_struct *mm, in mm_clear_owner() argument
1002 if (mm->owner == p) in mm_clear_owner()
1003 WRITE_ONCE(mm->owner, NULL); in mm_clear_owner()
1007 static void mm_init_owner(struct mm_struct *mm, struct task_struct *p) in mm_init_owner() argument
1010 mm->owner = p; in mm_init_owner()
1014 static void mm_init_uprobes_state(struct mm_struct *mm) in mm_init_uprobes_state() argument
1017 mm->uprobes_state.xol_area = NULL; in mm_init_uprobes_state()
1021 static void mmap_init_lock(struct mm_struct *mm) in mmap_init_lock() argument
1023 init_rwsem(&mm->mmap_lock); in mmap_init_lock()
1024 mm_lock_seqcount_init(mm); in mmap_init_lock()
1026 rcuwait_init(&mm->vma_writer_wait); in mmap_init_lock()
1030 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, in mm_init() argument
1033 mt_init_flags(&mm->mm_mt, MM_MT_FLAGS); in mm_init()
1034 mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock); in mm_init()
1035 atomic_set(&mm->mm_users, 1); in mm_init()
1036 atomic_set(&mm->mm_count, 1); in mm_init()
1037 seqcount_init(&mm->write_protect_seq); in mm_init()
1038 mmap_init_lock(mm); in mm_init()
1039 INIT_LIST_HEAD(&mm->mmlist); in mm_init()
1040 mm_pgtables_bytes_init(mm); in mm_init()
1041 mm->map_count = 0; in mm_init()
1042 mm->locked_vm = 0; in mm_init()
1043 atomic64_set(&mm->pinned_vm, 0); in mm_init()
1044 memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); in mm_init()
1045 spin_lock_init(&mm->page_table_lock); in mm_init()
1046 spin_lock_init(&mm->arg_lock); in mm_init()
1047 mm_init_cpumask(mm); in mm_init()
1048 mm_init_aio(mm); in mm_init()
1049 mm_init_owner(mm, p); in mm_init()
1050 mm_pasid_init(mm); in mm_init()
1051 RCU_INIT_POINTER(mm->exe_file, NULL); in mm_init()
1052 mmu_notifier_subscriptions_init(mm); in mm_init()
1053 init_tlb_flush_pending(mm); in mm_init()
1055 mm->pmd_huge_pte = NULL; in mm_init()
1057 mm_init_uprobes_state(mm); in mm_init()
1058 hugetlb_count_init(mm); in mm_init()
1060 if (current->mm) { in mm_init()
1061 mm->flags = mmf_init_flags(current->mm->flags); in mm_init()
1062 mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; in mm_init()
1064 mm->flags = default_dump_filter; in mm_init()
1065 mm->def_flags = 0; in mm_init()
1068 if (futex_mm_init(mm)) in mm_init()
1071 if (mm_alloc_pgd(mm)) in mm_init()
1074 if (mm_alloc_id(mm)) in mm_init()
1077 if (init_new_context(p, mm)) in mm_init()
1080 if (mm_alloc_cid(mm, p)) in mm_init()
1083 if (percpu_counter_init_many(mm->rss_stat, 0, GFP_KERNEL_ACCOUNT, in mm_init()
1087 mm->user_ns = get_user_ns(user_ns); in mm_init()
1088 lru_gen_init_mm(mm); in mm_init()
1089 return mm; in mm_init()
1092 mm_destroy_cid(mm); in mm_init()
1094 destroy_context(mm); in mm_init()
1096 mm_free_id(mm); in mm_init()
1098 mm_free_pgd(mm); in mm_init()
1100 futex_hash_free(mm); in mm_init()
1102 free_mm(mm); in mm_init()
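
mm_init() is a chain of allocations (futex hash, pgd, mm id, arch context, cid, rss counters), and the trailing fragments above are its unwind ladder: each failure jumps to a label that releases everything acquired so far, in reverse order. The shape, reduced to a self-contained sketch (struct thing and its fields are illustrative, not kernel code):

#include <linux/errno.h>
#include <linux/slab.h>

struct thing { void *a, *b; };

static int setup_thing(struct thing *t)
{
        t->a = kmalloc(64, GFP_KERNEL);
        if (!t->a)
                goto fail_a;
        t->b = kmalloc(64, GFP_KERNEL);
        if (!t->b)
                goto fail_b;
        return 0;

fail_b:
        kfree(t->a);
fail_a:
        return -ENOMEM;
}
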
1111 struct mm_struct *mm; in mm_alloc() local
1113 mm = allocate_mm(); in mm_alloc()
1114 if (!mm) in mm_alloc()
1117 memset(mm, 0, sizeof(*mm)); in mm_alloc()
1118 return mm_init(mm, current, current_user_ns()); in mm_alloc()
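
mm_alloc() therefore hands back a zeroed, fully initialized mm holding one user reference (mm_users == 1) and one structure reference (mm_count == 1), or NULL on failure; the caller balances it with mmput(). Minimal usage (demo_mm_alloc is hypothetical):

#include <linux/errno.h>
#include <linux/sched/mm.h>

static int demo_mm_alloc(void)
{
        struct mm_struct *mm = mm_alloc();

        if (!mm)
                return -ENOMEM;
        /* ... populate or install the mm ... */
        mmput(mm);
        return 0;
}
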
1122 static inline void __mmput(struct mm_struct *mm) in __mmput() argument
1124 VM_BUG_ON(atomic_read(&mm->mm_users)); in __mmput()
1126 uprobe_clear_state(mm); in __mmput()
1127 exit_aio(mm); in __mmput()
1128 ksm_exit(mm); in __mmput()
1129 khugepaged_exit(mm); /* must run before exit_mmap */ in __mmput()
1130 exit_mmap(mm); in __mmput()
1131 mm_put_huge_zero_folio(mm); in __mmput()
1132 set_mm_exe_file(mm, NULL); in __mmput()
1133 if (!list_empty(&mm->mmlist)) { in __mmput()
1135 list_del(&mm->mmlist); in __mmput()
1138 if (mm->binfmt) in __mmput()
1139 module_put(mm->binfmt->module); in __mmput()
1140 lru_gen_del_mm(mm); in __mmput()
1141 mmdrop(mm); in __mmput()
1147 void mmput(struct mm_struct *mm) in mmput() argument
1151 if (atomic_dec_and_test(&mm->mm_users)) in mmput()
1152 __mmput(mm); in mmput()
1159 struct mm_struct *mm = container_of(work, struct mm_struct, in mmput_async_fn() local
1162 __mmput(mm); in mmput_async_fn()
1165 void mmput_async(struct mm_struct *mm) in mmput_async() argument
1167 if (atomic_dec_and_test(&mm->mm_users)) { in mmput_async()
1168 INIT_WORK(&mm->async_put_work, mmput_async_fn); in mmput_async()
1169 schedule_work(&mm->async_put_work); in mmput_async()
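
__mmput() is the heavyweight half — exit_aio(), exit_mmap(), dropping the binfmt module reference — and it can sleep, which is why mmput() may only be called from sleepable context and why mmput_async() exists to shift the final put onto a workqueue. Taking a temporary user reference from the outside looks like this (touch_mm is hypothetical):

#include <linux/sched/mm.h>

static bool touch_mm(struct mm_struct *mm)
{
        if (!mmget_not_zero(mm))
                return false;   /* address space already torn down */
        /* ... walk VMAs under mmap_read_lock(mm) ... */
        mmput(mm);              /* may free: only where sleeping is allowed */
        return true;
}
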
1188 int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) in set_mm_exe_file() argument
1197 old_exe_file = rcu_dereference_raw(mm->exe_file); in set_mm_exe_file()
1208 rcu_assign_pointer(mm->exe_file, new_exe_file); in set_mm_exe_file()
1225 int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) in replace_mm_exe_file() argument
1232 old_exe_file = get_mm_exe_file(mm); in replace_mm_exe_file()
1234 VMA_ITERATOR(vmi, mm, 0); in replace_mm_exe_file()
1235 mmap_read_lock(mm); in replace_mm_exe_file()
1245 mmap_read_unlock(mm); in replace_mm_exe_file()
1257 mmap_write_lock(mm); in replace_mm_exe_file()
1258 old_exe_file = rcu_dereference_raw(mm->exe_file); in replace_mm_exe_file()
1259 rcu_assign_pointer(mm->exe_file, new_exe_file); in replace_mm_exe_file()
1260 mmap_write_unlock(mm); in replace_mm_exe_file()
1276 struct file *get_mm_exe_file(struct mm_struct *mm) in get_mm_exe_file() argument
1281 exe_file = get_file_rcu(&mm->exe_file); in get_mm_exe_file()
1297 struct mm_struct *mm; in get_task_exe_file() local
1303 mm = task->mm; in get_task_exe_file()
1304 if (mm) in get_task_exe_file()
1305 exe_file = get_mm_exe_file(mm); in get_task_exe_file()
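
The exe_file accessors follow the usual RCU-plus-refcount pattern: writers publish with rcu_assign_pointer() (under mmap_write_lock() in replace_mm_exe_file()), and get_mm_exe_file() takes a reference via get_file_rcu(). A reader is then simply (print_exe is hypothetical):

#include <linux/file.h>
#include <linux/mm.h>
#include <linux/printk.h>

static void print_exe(struct mm_struct *mm)
{
        struct file *exe = get_mm_exe_file(mm); /* referenced, or NULL */

        if (exe) {
                pr_info("exe: %pD\n", exe);     /* %pD prints the file's name */
                fput(exe);
        }
}
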
1322 struct mm_struct *mm; in get_task_mm() local
1328 mm = task->mm; in get_task_mm()
1329 if (mm) in get_task_mm()
1330 mmget(mm); in get_task_mm()
1332 return mm; in get_task_mm()
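
get_task_mm() returns the task's mm with mm_users raised, or NULL for kernel threads and already-exited tasks; every successful call must be paired with mmput(). For example (show_task_rss is hypothetical):

#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched/mm.h>

static void show_task_rss(struct task_struct *task)
{
        struct mm_struct *mm = get_task_mm(task);

        if (!mm)
                return;         /* kernel thread, or task has exited */
        pr_info("%s rss=%lu pages\n", task->comm, get_mm_rss(mm));
        mmput(mm);
}
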
1336 static bool may_access_mm(struct mm_struct *mm, struct task_struct *task, unsigned int mode) in may_access_mm() argument
1338 if (mm == current->mm) in may_access_mm()
1349 struct mm_struct *mm; in mm_access() local
1356 mm = get_task_mm(task); in mm_access()
1357 if (!mm) { in mm_access()
1358 mm = ERR_PTR(-ESRCH); in mm_access()
1359 } else if (!may_access_mm(mm, task, mode)) { in mm_access()
1360 mmput(mm); in mm_access()
1361 mm = ERR_PTR(-EACCES); in mm_access()
1365 return mm; in mm_access()
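
mm_access() bundles get_task_mm() with a ptrace-style credential check — may_access_mm() short-circuits when the target is the caller's own mm — and, per the fragments above, reports failure as ERR_PTR(-ESRCH) or ERR_PTR(-EACCES). Typical /proc-style usage (peek_other_mm is hypothetical):

#include <linux/err.h>
#include <linux/ptrace.h>
#include <linux/sched/mm.h>

static int peek_other_mm(struct task_struct *task)
{
        struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);

        if (IS_ERR(mm))
                return PTR_ERR(mm);
        /* ... access_remote_vm() or a VMA walk goes here ... */
        mmput(mm);
        return 0;
}
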
1414 static void mm_release(struct task_struct *tsk, struct mm_struct *mm) in mm_release() argument
1419 deactivate_mm(tsk, mm); in mm_release()
1427 if (atomic_read(&mm->mm_users) > 1) { in mm_release()
1447 void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm) in exit_mm_release() argument
1450 mm_release(tsk, mm); in exit_mm_release()
1453 void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm) in exec_mm_release() argument
1456 mm_release(tsk, mm); in exec_mm_release()
1472 struct mm_struct *mm; in dup_mm() local
1475 mm = allocate_mm(); in dup_mm()
1476 if (!mm) in dup_mm()
1479 memcpy(mm, oldmm, sizeof(*mm)); in dup_mm()
1481 if (!mm_init(mm, tsk, mm->user_ns)) in dup_mm()
1485 err = dup_mmap(mm, oldmm); in dup_mm()
1490 mm->hiwater_rss = get_mm_rss(mm); in dup_mm()
1491 mm->hiwater_vm = mm->total_vm; in dup_mm()
1493 if (mm->binfmt && !try_module_get(mm->binfmt->module)) in dup_mm()
1496 return mm; in dup_mm()
1500 mm->binfmt = NULL; in dup_mm()
1501 mm_init_owner(mm, NULL); in dup_mm()
1502 mmput(mm); in dup_mm()
1512 struct mm_struct *mm, *oldmm; in copy_mm() local
1521 tsk->mm = NULL; in copy_mm()
1529 oldmm = current->mm; in copy_mm()
1535 mm = oldmm; in copy_mm()
1537 mm = dup_mm(tsk, current->mm); in copy_mm()
1538 if (!mm) in copy_mm()
1542 tsk->mm = mm; in copy_mm()
1543 tsk->active_mm = mm; in copy_mm()
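
copy_mm() is where clone() picks between sharing and copying: with CLONE_VM the child takes another mm_users reference on the parent's mm (a thread), otherwise dup_mm() builds a full copy via dup_mmap(). Paraphrased as a helper (pick_mm is hypothetical; dup_mm() itself is static to this file):

#include <linux/sched.h>
#include <linux/sched/mm.h>

static struct mm_struct *pick_mm(unsigned long clone_flags,
                                 struct task_struct *tsk,
                                 struct mm_struct *oldmm)
{
        if (clone_flags & CLONE_VM) {
                mmget(oldmm);           /* thread: share the address space */
                return oldmm;
        }
        return dup_mm(tsk, oldmm);      /* fork: duplicate it */
}
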
1878 if (!tsk->mm) in copy_oom_score_adj()
1887 set_bit(MMF_MULTIPROCESS, &tsk->mm->flags); in copy_oom_score_adj()
2452 if (p->mm) { in copy_process()
2453 mm_clear_owner(p->mm, p); in copy_process()
2454 mmput(p->mm); in copy_process()
2632 lru_gen_add_mm(p->mm); in kernel_clone()