Lines Matching refs:mm

2768 mm_set_cpus_allowed(p->mm, ctx->new_mask); in __do_set_cpus_allowed()
3589 if (p->mm && printk_ratelimit()) { in select_fallback_rq()
5186 struct mm_struct *mm = rq->prev_mm; in finish_task_switch() local
5248 if (mm) { in finish_task_switch()
5249 membarrier_mm_sync_core_before_usermode(mm); in finish_task_switch()
5250 mmdrop_lazy_tlb_sched(mm); in finish_task_switch()
5323 if (!next->mm) { // to kernel in context_switch()
5327 if (prev->mm) // from user in context_switch()
5332 membarrier_switch_mm(rq, prev->active_mm, next->mm); in context_switch()
5341 switch_mm_irqs_off(prev->active_mm, next->mm, next); in context_switch()
5342 lru_gen_use_mm(next->mm); in context_switch()
5344 if (!prev->mm) { // from kernel in context_switch()
8154 struct mm_struct *mm = current->active_mm; in sched_force_init_mm() local
8156 if (mm != &init_mm) { in sched_force_init_mm()
8160 switch_mm_irqs_off(mm, &init_mm, current); in sched_force_init_mm()
8163 mmdrop_lazy_tlb(mm); in sched_force_init_mm()
10528 struct mm_struct *mm = t->mm; in __sched_mm_cid_migrate_from_fetch_cid() local
10532 if (!mm) in __sched_mm_cid_migrate_from_fetch_cid()
10554 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) { in __sched_mm_cid_migrate_from_fetch_cid()
10569 struct mm_struct *mm = t->mm; in __sched_mm_cid_migrate_from_try_steal_cid() local
10603 if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) { in __sched_mm_cid_migrate_from_try_steal_cid()
10630 struct mm_struct *mm = t->mm; in sched_mm_cid_migrate_to() local
10637 if (!mm) in sched_mm_cid_migrate_to()
10658 dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq)); in sched_mm_cid_migrate_to()
10661 if (dst_cid_is_set && atomic_read(&mm->mm_users) >= READ_ONCE(mm->nr_cpus_allowed)) in sched_mm_cid_migrate_to()
10663 src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu); in sched_mm_cid_migrate_to()
10673 __mm_cid_put(mm, src_cid); in sched_mm_cid_migrate_to()
10677 mm_cid_snapshot_time(dst_rq, mm); in sched_mm_cid_migrate_to()
10682 static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid, in sched_mm_cid_remote_clear() argument
10723 if (READ_ONCE(t->mm_cid_active) && t->mm == mm) in sched_mm_cid_remote_clear()
10734 __mm_cid_put(mm, cid); in sched_mm_cid_remote_clear()
10738 static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu) in sched_mm_cid_remote_clear_old() argument
10750 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu); in sched_mm_cid_remote_clear_old()
10759 if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) { in sched_mm_cid_remote_clear_old()
10767 sched_mm_cid_remote_clear(mm, pcpu_cid, cpu); in sched_mm_cid_remote_clear_old()
10770 static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu, in sched_mm_cid_remote_clear_weight() argument
10776 pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu); in sched_mm_cid_remote_clear_weight()
10780 sched_mm_cid_remote_clear(mm, pcpu_cid, cpu); in sched_mm_cid_remote_clear_weight()
10788 struct mm_struct *mm; in task_mm_cid_work() local
10796 mm = t->mm; in task_mm_cid_work()
10797 if (!mm) in task_mm_cid_work()
10799 old_scan = READ_ONCE(mm->mm_cid_next_scan); in task_mm_cid_work()
10804 res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan); in task_mm_cid_work()
10812 if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan)) in task_mm_cid_work()
10814 cidmask = mm_cidmask(mm); in task_mm_cid_work()
10817 sched_mm_cid_remote_clear_old(mm, cpu); in task_mm_cid_work()
10824 sched_mm_cid_remote_clear_weight(mm, cpu, weight); in task_mm_cid_work()
10829 struct mm_struct *mm = t->mm; in init_sched_mm_cid() local
10832 if (mm) { in init_sched_mm_cid()
10833 mm_users = atomic_read(&mm->mm_users); in init_sched_mm_cid()
10835 mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY); in init_sched_mm_cid()
10846 if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) || in task_tick_mm_cid()
10849 if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan))) in task_tick_mm_cid()
10858 struct mm_struct *mm = t->mm; in sched_mm_cid_exit_signals() local
10861 if (!mm) in sched_mm_cid_exit_signals()
10874 mm_cid_put(mm); in sched_mm_cid_exit_signals()
10880 struct mm_struct *mm = t->mm; in sched_mm_cid_before_execve() local
10883 if (!mm) in sched_mm_cid_before_execve()
10896 mm_cid_put(mm); in sched_mm_cid_before_execve()
10902 struct mm_struct *mm = t->mm; in sched_mm_cid_after_execve() local
10905 if (!mm) in sched_mm_cid_after_execve()
10918 t->last_mm_cid = t->mm_cid = mm_cid_get(rq, t, mm); in sched_mm_cid_after_execve()
10924 WARN_ON_ONCE(!t->mm || t->mm_cid != -1); in sched_mm_cid_fork()
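The context_switch() and finish_task_switch() hits above (source lines 5186-5344) reflect the scheduler's lazy-TLB handling of mm: a kernel thread has no mm of its own, so it borrows the outgoing task's active_mm, and the borrowed reference is only dropped once the switch has completed. The sketch below is a minimal, self-contained illustration of that borrowing pattern, not kernel code; apart from the mm and active_mm field names, every identifier (fake_mm, fake_task, fake_context_switch, fake_mmgrab_lazy, fake_mmdrop_lazy) is invented for the example.

/*
 * Illustrative sketch (not kernel code) of the lazy-TLB "active_mm"
 * borrowing pattern that the context_switch() lines above refer to:
 *  - a user task has mm == active_mm;
 *  - a kernel thread has mm == NULL and borrows the previous task's
 *    active_mm, taking a lazy reference on it;
 *  - when switching back to a user task, the borrowed mm is handed
 *    back and its reference dropped after the switch completes.
 */
#include <stdio.h>

struct fake_mm {
        int lazy_refs;              /* stands in for the lazy-TLB refcount */
};

struct fake_task {
        const char *comm;
        struct fake_mm *mm;         /* NULL for kernel threads */
        struct fake_mm *active_mm;  /* what the CPU is actually using */
};

static void fake_mmgrab_lazy(struct fake_mm *mm)
{
        mm->lazy_refs++;
}

static void fake_mmdrop_lazy(struct fake_mm *mm)
{
        mm->lazy_refs--;
}

/* Mirrors the shape of the prev->mm / next->mm checks in the listing. */
static struct fake_mm *fake_context_switch(struct fake_task *prev,
                                           struct fake_task *next)
{
        struct fake_mm *to_drop = NULL;

        if (!next->mm) {                     /* to kernel thread */
                next->active_mm = prev->active_mm;
                if (prev->mm)                /* from user: borrow lazily */
                        fake_mmgrab_lazy(prev->active_mm);
        } else {                             /* to user task */
                next->active_mm = next->mm;  /* a real switch_mm() goes here */
                if (!prev->mm) {             /* from kernel: return the loan */
                        to_drop = prev->active_mm;
                        prev->active_mm = NULL;
                }
        }
        return to_drop;                      /* dropped after the switch */
}

int main(void)
{
        struct fake_mm user_mm = { .lazy_refs = 0 };
        struct fake_task user = { "user", &user_mm, &user_mm };
        struct fake_task kthread = { "kthread", NULL, NULL };
        struct fake_mm *drop;

        drop = fake_context_switch(&user, &kthread);      /* user -> kernel */
        printf("%s -> %s: lazy_refs=%d\n", user.comm, kthread.comm,
               user_mm.lazy_refs);

        drop = fake_context_switch(&kthread, &user);      /* kernel -> user */
        if (drop)
                fake_mmdrop_lazy(drop);
        printf("%s -> %s: lazy_refs=%d\n", kthread.comm, user.comm,
               user_mm.lazy_refs);
        return 0;
}

When run, it prints lazy_refs=1 after switching from the user task to the kernel thread and lazy_refs=0 after switching back, mirroring the grab/drop pairing that the mmdrop_lazy_tlb_sched() call at source line 5250 completes in the real code.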