Lines matching refs: t — cross-reference listing for kernel/rcu/tasks.h. Each entry below gives the source line number, the matching source line, and the enclosing function ("local" marks a local declaration, "argument" a parameter).

18 typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
684 struct task_struct *t; in rcu_spawn_tasks_kthread_generic() local
686 t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname); in rcu_spawn_tasks_kthread_generic()
687 …if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavio… in rcu_spawn_tasks_kthread_generic()
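Lines 684-687 above are the generic kthread spawn path: kthread_run() reports failure as an ERR_PTR value rather than NULL, so the result is filtered through IS_ERR() inside WARN_ONCE() before anything dereferences it. A minimal self-contained sketch of that spawn-and-check pattern (the module scaffolding and demo_* names are illustrative, not from tasks.h):

	#include <linux/err.h>
	#include <linux/kthread.h>
	#include <linux/module.h>
	#include <linux/sched.h>

	static struct task_struct *demo_task;

	static int demo_kthread(void *arg)
	{
		/* Idle until someone calls kthread_stop(). */
		while (!kthread_should_stop())
			schedule_timeout_interruptible(HZ);
		return 0;
	}

	static int __init demo_init(void)
	{
		demo_task = kthread_run(demo_kthread, NULL, "%s_kthread", "demo");
		/* kthread_run() never returns NULL: failure is an ERR_PTR value. */
		if (WARN_ONCE(IS_ERR(demo_task), "could not start demo kthread\n"))
			return PTR_ERR(demo_task);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		kthread_stop(demo_task);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");
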
804 static void exit_tasks_rcu_finish_trace(struct task_struct *t);
823 struct task_struct *t; in rcu_tasks_wait_gp() local
837 for_each_process_thread(g, t) in rcu_tasks_wait_gp()
838 rtp->pertask_func(t, &holdouts); in rcu_tasks_wait_gp()
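Lines 823-838 seed the grace-period machinery by handing every thread in the system to the flavor's pertask_func. The traversal needs tasklist protection; a reduced sketch of the same walk under an RCU read-side critical section (scan_all_threads is a made-up name):

	#include <linux/list.h>
	#include <linux/rcupdate.h>
	#include <linux/sched/signal.h>

	/* Apply @pertask to every thread of every process. */
	static void scan_all_threads(void (*pertask)(struct task_struct *t,
						     struct list_head *hop),
				     struct list_head *hop)
	{
		struct task_struct *g, *t;

		rcu_read_lock();	/* Protects the tasklist walk. */
		for_each_process_thread(g, t)
			pertask(t, hop);
		rcu_read_unlock();
	}
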
981 static bool rcu_tasks_is_holdout(struct task_struct *t) in rcu_tasks_is_holdout() argument
986 if (!READ_ONCE(t->on_rq)) in rcu_tasks_is_holdout()
1003 if (is_idle_task(t)) in rcu_tasks_is_holdout()
1006 cpu = task_cpu(t); in rcu_tasks_is_holdout()
1009 if (t == idle_task(cpu) && !rcu_cpu_online(cpu)) in rcu_tasks_is_holdout()
1016 static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop) in rcu_tasks_pertask() argument
1018 if (t != current && rcu_tasks_is_holdout(t)) { in rcu_tasks_pertask()
1019 get_task_struct(t); in rcu_tasks_pertask()
1020 t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw); in rcu_tasks_pertask()
1021 WRITE_ONCE(t->rcu_tasks_holdout, true); in rcu_tasks_pertask()
1022 list_add(&t->rcu_tasks_holdout_list, hop); in rcu_tasks_pertask()
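Lines 1016-1022 are the enqueue half of holdout tracking, and the ordering is the point: the task is pinned with get_task_struct() before it goes on the list, so it cannot be freed while queued, and its voluntary-context-switch count (nvcsw) is snapshotted so a later switch can be recognized as a quiescent state. The same steps with comments spelled out (an annotated restatement for illustration, not the verbatim function):

	#include <linux/compiler.h>
	#include <linux/list.h>
	#include <linux/sched.h>
	#include <linux/sched/task.h>

	/* Queue @t on @hop as a grace-period holdout. */
	static void demo_queue_holdout(struct task_struct *t, struct list_head *hop)
	{
		get_task_struct(t);	/* Pin first; dropped at dequeue time. */
		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);	/* QS baseline. */
		WRITE_ONCE(t->rcu_tasks_holdout, true);
		list_add(&t->rcu_tasks_holdout_list, hop);
	}
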
1061 struct task_struct *t; in rcu_tasks_postscan() local
1066 list_for_each_entry_safe(t, t1, &rtpcp->rtp_exit_list, rcu_tasks_exit_list) { in rcu_tasks_postscan()
1067 if (list_empty(&t->rcu_tasks_holdout_list)) in rcu_tasks_postscan()
1068 rcu_tasks_pertask(t, hop); in rcu_tasks_postscan()
1078 list_add(&tmp, &t->rcu_tasks_exit_list); in rcu_tasks_postscan()
1094 static void check_holdout_task(struct task_struct *t, in check_holdout_task() argument
1099 if (!READ_ONCE(t->rcu_tasks_holdout) || in check_holdout_task()
1100 t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) || in check_holdout_task()
1101 !rcu_tasks_is_holdout(t) || in check_holdout_task()
1103 !is_idle_task(t) && READ_ONCE(t->rcu_tasks_idle_cpu) >= 0)) { in check_holdout_task()
1104 WRITE_ONCE(t->rcu_tasks_holdout, false); in check_holdout_task()
1105 list_del_init(&t->rcu_tasks_holdout_list); in check_holdout_task()
1106 put_task_struct(t); in check_holdout_task()
1109 rcu_request_urgent_qs_task(t); in check_holdout_task()
1116 cpu = task_cpu(t); in check_holdout_task()
1118 t, ".I"[is_idle_task(t)], in check_holdout_task()
1120 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout, in check_holdout_task()
1121 data_race(t->rcu_tasks_idle_cpu), cpu); in check_holdout_task()
1122 sched_show_task(t); in check_holdout_task()
1129 struct task_struct *t, *t1; in check_all_holdout_tasks() local
1131 list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) { in check_all_holdout_tasks()
1132 check_holdout_task(t, needreport, firstreport); in check_all_holdout_tasks()
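check_all_holdout_tasks() (lines 1129-1132) must use list_for_each_entry_safe() because check_holdout_task() unlinks entries mid-walk (lines 1104-1106). A condensed sketch of that self-deleting sweep, keeping only the nvcsw comparison as the quiescent-state test (the real check at lines 1099-1103 has more conditions):

	#include <linux/compiler.h>
	#include <linux/list.h>
	#include <linux/sched.h>
	#include <linux/sched/task.h>

	static void demo_sweep_holdouts(struct list_head *hop)
	{
		struct task_struct *t, *t1;

		/* The _safe iterator caches the next node, so the current
		 * entry may remove itself without breaking the walk. */
		list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
			/* A changed nvcsw means a voluntary context switch,
			 * i.e. the task passed through a quiescent state. */
			if (!READ_ONCE(t->rcu_tasks_holdout) ||
			    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw)) {
				WRITE_ONCE(t->rcu_tasks_holdout, false);
				list_del_init(&t->rcu_tasks_holdout_list);
				put_task_struct(t);	/* Pairs with enqueue. */
			}
		}
	}
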
1297 struct task_struct *t = current; in exit_tasks_rcu_start() local
1299 WARN_ON_ONCE(!list_empty(&t->rcu_tasks_exit_list)); in exit_tasks_rcu_start()
1302 t->rcu_tasks_exit_cpu = smp_processor_id(); in exit_tasks_rcu_start()
1305 list_add(&t->rcu_tasks_exit_list, &rtpcp->rtp_exit_list); in exit_tasks_rcu_start()
1318 struct task_struct *t = current; in exit_tasks_rcu_finish() local
1320 WARN_ON_ONCE(list_empty(&t->rcu_tasks_exit_list)); in exit_tasks_rcu_finish()
1321 rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, t->rcu_tasks_exit_cpu); in exit_tasks_rcu_finish()
1323 list_del_init(&t->rcu_tasks_exit_list); in exit_tasks_rcu_finish()
1326 exit_tasks_rcu_finish_trace(t); in exit_tasks_rcu_finish()
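exit_tasks_rcu_start()/exit_tasks_rcu_finish() (lines 1297-1326) show a recorded-CPU handoff: the exiting task notes smp_processor_id() at enqueue time so the dequeue, which may run on a different CPU, can locate the matching per-CPU list and lock. The same handoff reappears for blocked trace-RCU readers via trc_blkd_cpu (lines 1582-1605). A self-contained model of the idea, with all demo_* names hypothetical and irq/rcu_node locking details of the real code omitted:

	#include <linux/init.h>
	#include <linux/list.h>
	#include <linux/percpu.h>
	#include <linux/smp.h>
	#include <linux/spinlock.h>

	struct demo_pcpu {
		raw_spinlock_t lock;
		struct list_head list;
	};
	static DEFINE_PER_CPU(struct demo_pcpu, demo_pcpu);

	struct demo_node {
		struct list_head entry;
		int cpu;	/* CPU the node was enqueued on. */
	};

	static void demo_enqueue(struct demo_node *n)
	{
		struct demo_pcpu *p;

		preempt_disable();	/* Stay on one CPU across the snapshot. */
		n->cpu = smp_processor_id();
		p = this_cpu_ptr(&demo_pcpu);
		raw_spin_lock(&p->lock);
		list_add(&n->entry, &p->list);
		raw_spin_unlock(&p->lock);
		preempt_enable();
	}

	static void demo_dequeue(struct demo_node *n)
	{
		/* May run on any CPU: the recorded index finds the right lock. */
		struct demo_pcpu *p = per_cpu_ptr(&demo_pcpu, n->cpu);

		raw_spin_lock(&p->lock);
		list_del_init(&n->entry);
		raw_spin_unlock(&p->lock);
	}

	static int __init demo_pcpu_init(void)
	{
		int cpu;

		for_each_possible_cpu(cpu) {
			struct demo_pcpu *p = per_cpu_ptr(&demo_pcpu, cpu);

			raw_spin_lock_init(&p->lock);
			INIT_LIST_HEAD(&p->list);
		}
		return 0;
	}
	early_initcall(demo_pcpu_init);
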
1533 static u8 rcu_ld_need_qs(struct task_struct *t) in rcu_ld_need_qs() argument
1536 return smp_load_acquire(&t->trc_reader_special.b.need_qs); in rcu_ld_need_qs()
1540 static void rcu_st_need_qs(struct task_struct *t, u8 v) in rcu_st_need_qs() argument
1542 smp_store_release(&t->trc_reader_special.b.need_qs, v); in rcu_st_need_qs()
1552 u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new) in rcu_trc_cmpxchg_need_qs() argument
1554 return cmpxchg(&t->trc_reader_special.b.need_qs, old, new); in rcu_trc_cmpxchg_need_qs()
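Lines 1533-1554 wrap every access to the one-byte need_qs field in explicit memory-ordering primitives: loads use acquire semantics, stores use release semantics, and state transitions go through a full-barrier cmpxchg() so that each transition is atomic and ordered against the reader's critical section. A reduced model of that byte-flag protocol (the demo_* names and struct are stand-ins):

	#include <linux/atomic.h>
	#include <linux/types.h>

	struct demo_reader {
		u8 need_qs;
	};

	static u8 demo_ld_need_qs(struct demo_reader *r)
	{
		return smp_load_acquire(&r->need_qs);	/* Pairs with release store. */
	}

	static void demo_st_need_qs(struct demo_reader *r, u8 v)
	{
		smp_store_release(&r->need_qs, v);
	}

	/* Returns the prior value: the caller owns the transition only if
	 * the return value equals @old. */
	static u8 demo_cmpxchg_need_qs(struct demo_reader *r, u8 old, u8 new)
	{
		return cmpxchg(&r->need_qs, old, new);
	}
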
1562 void rcu_read_unlock_trace_special(struct task_struct *t) in rcu_read_unlock_trace_special() argument
1570 trs = smp_load_acquire(&t->trc_reader_special); in rcu_read_unlock_trace_special()
1572 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb) in rcu_read_unlock_trace_special()
1576 u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS, in rcu_read_unlock_trace_special()
1582 rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu); in rcu_read_unlock_trace_special()
1584 list_del_init(&t->trc_blkd_node); in rcu_read_unlock_trace_special()
1585 WRITE_ONCE(t->trc_reader_special.b.blocked, false); in rcu_read_unlock_trace_special()
1588 WRITE_ONCE(t->trc_reader_nesting, 0); in rcu_read_unlock_trace_special()
1593 void rcu_tasks_trace_qs_blkd(struct task_struct *t) in rcu_tasks_trace_qs_blkd() argument
1601 t->trc_blkd_cpu = smp_processor_id(); in rcu_tasks_trace_qs_blkd()
1604 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks); in rcu_tasks_trace_qs_blkd()
1605 WRITE_ONCE(t->trc_reader_special.b.blocked, true); in rcu_tasks_trace_qs_blkd()
1611 static void trc_add_holdout(struct task_struct *t, struct list_head *bhp) in trc_add_holdout() argument
1613 if (list_empty(&t->trc_holdout_list)) { in trc_add_holdout()
1614 get_task_struct(t); in trc_add_holdout()
1615 list_add(&t->trc_holdout_list, bhp); in trc_add_holdout()
1621 static void trc_del_holdout(struct task_struct *t) in trc_del_holdout() argument
1623 if (!list_empty(&t->trc_holdout_list)) { in trc_del_holdout()
1624 list_del_init(&t->trc_holdout_list); in trc_del_holdout()
1625 put_task_struct(t); in trc_del_holdout()
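trc_add_holdout()/trc_del_holdout() (lines 1611-1625) lean on a list.h idiom: because removal always uses list_del_init(), which re-links the node to itself, list_empty() on the node doubles as a "not currently queued" test, making both operations idempotent, and the task reference is held exactly while the task sits on the list. The same idiom with the reasoning as comments (an annotated restatement, not new mechanism):

	#include <linux/list.h>
	#include <linux/sched.h>
	#include <linux/sched/task.h>

	static void demo_add_holdout(struct task_struct *t, struct list_head *bhp)
	{
		/* A self-linked node (list_del_init/INIT_LIST_HEAD) reads as
		 * empty, so a double add is a harmless no-op. */
		if (list_empty(&t->trc_holdout_list)) {
			get_task_struct(t);	/* Held while queued. */
			list_add(&t->trc_holdout_list, bhp);
		}
	}

	static void demo_del_holdout(struct task_struct *t)
	{
		if (!list_empty(&t->trc_holdout_list)) {
			list_del_init(&t->trc_holdout_list);	/* Re-arm the test. */
			put_task_struct(t);
		}
	}
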
1634 struct task_struct *t = current; in trc_read_check_handler() local
1638 if (unlikely(texp != t)) in trc_read_check_handler()
1643 nesting = READ_ONCE(t->trc_reader_nesting); in trc_read_check_handler()
1645 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); in trc_read_check_handler()
1655 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED); in trc_read_check_handler()
1666 static int trc_inspect_reader(struct task_struct *t, void *bhp_in) in trc_inspect_reader() argument
1669 int cpu = task_cpu(t); in trc_inspect_reader()
1673 if (task_curr(t) && !ofl) { in trc_inspect_reader()
1683 if (!rcu_watching_zero_in_eqs(cpu, &t->trc_reader_nesting)) in trc_inspect_reader()
1689 nesting = t->trc_reader_nesting; in trc_inspect_reader()
1690 WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t)))); in trc_inspect_reader()
1699 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); in trc_inspect_reader()
1708 if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED)) in trc_inspect_reader()
1709 trc_add_holdout(t, bhp); in trc_inspect_reader()
1714 static void trc_wait_for_one_reader(struct task_struct *t, in trc_wait_for_one_reader() argument
1720 if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI in trc_wait_for_one_reader()
1724 if (t == current) { in trc_wait_for_one_reader()
1725 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); in trc_wait_for_one_reader()
1726 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); in trc_wait_for_one_reader()
1731 get_task_struct(t); in trc_wait_for_one_reader()
1732 if (!task_call_func(t, trc_inspect_reader, bhp)) { in trc_wait_for_one_reader()
1733 put_task_struct(t); in trc_wait_for_one_reader()
1736 put_task_struct(t); in trc_wait_for_one_reader()
1745 trc_add_holdout(t, bhp); in trc_wait_for_one_reader()
1746 if (task_curr(t) && in trc_wait_for_one_reader()
1749 cpu = task_cpu(t); in trc_wait_for_one_reader()
1752 if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0) in trc_wait_for_one_reader()
1756 t->trc_ipi_to_cpu = cpu; in trc_wait_for_one_reader()
1758 if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) { in trc_wait_for_one_reader()
1765 t->trc_ipi_to_cpu = -1; in trc_wait_for_one_reader()
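trc_wait_for_one_reader() (lines 1714-1765) escalates in stages: the current task is checked directly, a non-running task is inspected with its runqueue locked via task_call_func(), and only a task actually on a CPU gets an IPI through smp_call_function_single(); the handler (lines 1634-1638) then confirms it really interrupted the intended task, since the target may have migrated before the IPI landed. A minimal sketch of that last, IPI-based stage (demo_* names are placeholders):

	#include <linux/sched.h>
	#include <linux/smp.h>

	/* Runs on the target CPU in interrupt context. */
	static void demo_check_handler(void *texp)
	{
		struct task_struct *t = current;

		if (unlikely(texp != t))
			return;	/* Target moved on; the caller will retry. */
		/* ... sample current's reader-nesting state here ... */
	}

	/* Ask @cpu, believed to be running @t, to report on @t. */
	static int demo_poke_reader(struct task_struct *t, int cpu)
	{
		/* A nonzero return means @cpu was offline: no IPI was sent. */
		return smp_call_function_single(cpu, demo_check_handler, t, 0);
	}
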
1774 static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself) in rcu_tasks_trace_pertask_prep() argument
1780 if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list)) in rcu_tasks_trace_pertask_prep()
1783 rcu_st_need_qs(t, 0); in rcu_tasks_trace_pertask_prep()
1784 t->trc_ipi_to_cpu = -1; in rcu_tasks_trace_pertask_prep()
1789 static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop) in rcu_tasks_trace_pertask() argument
1791 if (rcu_tasks_trace_pertask_prep(t, true)) in rcu_tasks_trace_pertask()
1792 trc_wait_for_one_reader(t, hop); in rcu_tasks_trace_pertask()
1802 struct task_struct *t; in rcu_tasks_trace_pregp_step() local
1827 t = cpu_curr_snapshot(cpu); in rcu_tasks_trace_pregp_step()
1828 if (rcu_tasks_trace_pertask_prep(t, true)) in rcu_tasks_trace_pregp_step()
1829 trc_add_holdout(t, hop); in rcu_tasks_trace_pregp_step()
1843 t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node); in rcu_tasks_trace_pregp_step()
1844 list_del_init(&t->trc_blkd_node); in rcu_tasks_trace_pregp_step()
1845 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks); in rcu_tasks_trace_pregp_step()
1847 rcu_tasks_trace_pertask(t, hop); in rcu_tasks_trace_pregp_step()
1880 static int trc_check_slow_task(struct task_struct *t, void *arg) in trc_check_slow_task() argument
1884 if (task_curr(t) && cpu_online(task_cpu(t))) in trc_check_slow_task()
1886 trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting); in trc_check_slow_task()
1887 trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu); in trc_check_slow_task()
1888 trc_rdrp->needqs = rcu_ld_need_qs(t); in trc_check_slow_task()
1893 static void show_stalled_task_trace(struct task_struct *t, bool *firstreport) in show_stalled_task_trace() argument
1897 bool is_idle_tsk = is_idle_task(t); in show_stalled_task_trace()
1903 cpu = task_cpu(t); in show_stalled_task_trace()
1904 if (!task_call_func(t, trc_check_slow_task, &trc_rdr)) in show_stalled_task_trace()
1906 t->pid, in show_stalled_task_trace()
1907 ".I"[t->trc_ipi_to_cpu >= 0], in show_stalled_task_trace()
1911 t->pid, in show_stalled_task_trace()
1915 ".B"[!!data_race(t->trc_reader_special.b.blocked)], in show_stalled_task_trace()
1920 sched_show_task(t); in show_stalled_task_trace()
1937 struct task_struct *g, *t; in check_all_holdout_tasks_trace() local
1942 list_for_each_entry_safe(t, g, hop, trc_holdout_list) { in check_all_holdout_tasks_trace()
1944 if (READ_ONCE(t->trc_ipi_to_cpu) == -1 && in check_all_holdout_tasks_trace()
1945 !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED)) in check_all_holdout_tasks_trace()
1946 trc_wait_for_one_reader(t, hop); in check_all_holdout_tasks_trace()
1949 if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 && in check_all_holdout_tasks_trace()
1950 rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED) in check_all_holdout_tasks_trace()
1951 trc_del_holdout(t); in check_all_holdout_tasks_trace()
1953 show_stalled_task_trace(t, firstreport); in check_all_holdout_tasks_trace()
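check_all_holdout_tasks_trace() (lines 1937-1953) makes a three-way decision per holdout: a task not yet inspected (no IPI in flight, CHECKED bit clear) is re-probed; a task that has checked in clean (need_qs exactly TRC_NEED_QS_CHECKED) is released; anything else becomes a candidate for a stall report. A condensed sketch of that sweep, assuming the helpers listed above are in scope as they are within tasks.h:

	static void demo_sweep_trace_holdouts(struct list_head *hop, bool needreport,
					      bool *firstreport)
	{
		struct task_struct *g, *t;

		list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
			if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
			    !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED)) {
				/* Never inspected: try again. */
				trc_wait_for_one_reader(t, hop);
			} else if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
				   rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED) {
				/* Inspected, no quiescent-state debt: release. */
				trc_del_holdout(t);
			} else if (needreport) {
				show_stalled_task_trace(t, firstreport);
			}
		}
	}
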
1990 static void exit_tasks_rcu_finish_trace(struct task_struct *t) in exit_tasks_rcu_finish_trace() argument
1992 union rcu_special trs = READ_ONCE(t->trc_reader_special); in exit_tasks_rcu_finish_trace()
1994 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); in exit_tasks_rcu_finish_trace()
1995 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); in exit_tasks_rcu_finish_trace()
1996 if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked)) in exit_tasks_rcu_finish_trace()
1997 rcu_read_unlock_trace_special(t); in exit_tasks_rcu_finish_trace()
1999 WRITE_ONCE(t->trc_reader_nesting, 0); in exit_tasks_rcu_finish_trace()
2119 static void exit_tasks_rcu_finish_trace(struct task_struct *t) { } in exit_tasks_rcu_finish_trace() argument