Lines matching references to t

117 static void rcu_read_unlock_special(struct task_struct *t);
169 struct task_struct *t = current; in rcu_preempt_ctxt_queue() local
196 list_add(&t->rcu_node_entry, &rnp->blkd_tasks); in rcu_preempt_ctxt_queue()
214 list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks); in rcu_preempt_ctxt_queue()
227 list_add(&t->rcu_node_entry, rnp->exp_tasks); in rcu_preempt_ctxt_queue()
238 list_add(&t->rcu_node_entry, rnp->gp_tasks); in rcu_preempt_ctxt_queue()
255 WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry); in rcu_preempt_ctxt_queue()
259 WRITE_ONCE(rnp->exp_tasks, &t->rcu_node_entry); in rcu_preempt_ctxt_queue()
326 struct task_struct *t = current; in rcu_note_context_switch() local
334 !t->rcu_read_unlock_special.b.blocked) { in rcu_note_context_switch()
339 t->rcu_read_unlock_special.b.blocked = true; in rcu_note_context_switch()
340 t->rcu_blocked_node = rnp; in rcu_note_context_switch()
348 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry)); in rcu_note_context_switch()
350 t->pid, in rcu_note_context_switch()
356 rcu_preempt_deferred_qs(t); in rcu_note_context_switch()
432 struct task_struct *t = current; in __rcu_read_unlock() local
437 if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s))) in __rcu_read_unlock()
438 rcu_read_unlock_special(t); in __rcu_read_unlock()
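
The __rcu_read_unlock() entries above show the unlock fast path: the slow path, rcu_read_unlock_special(), runs only when the packed t->rcu_read_unlock_special word is nonzero. Below is a small user-space sketch of that pattern: individual flag bits overlaid with a single word so the common case is one cheap test. The field layout and names are illustrative, not the kernel's union rcu_special.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative analogue of union rcu_special: per-condition flags plus a
 * whole-word view that lets the unlock fast path test them all at once. */
union special {
	struct {
		bool blocked;   /* preempted inside a read-side critical section */
		bool need_qs;   /* a quiescent state has been requested          */
	} b;
	unsigned short s;       /* whole-word view for the fast-path test        */
};

static void unlock_slowpath(union special *sp)
{
	printf("slow path: blocked=%d need_qs=%d\n", sp->b.blocked, sp->b.need_qs);
	sp->s = 0;              /* clear all deferred work at once */
}

static void read_unlock(union special *sp)
{
	if (sp->s)              /* mirrors the READ_ONCE(...special.s) check above */
		unlock_slowpath(sp);
}

int main(void)
{
	union special sp = { .s = 0 };

	read_unlock(&sp);       /* fast path: nothing pending          */
	sp.b.blocked = true;    /* e.g. the reader has been preempted  */
	read_unlock(&sp);       /* slow path runs                      */
	return 0;
}
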
452 static struct list_head *rcu_next_node_entry(struct task_struct *t, in rcu_next_node_entry() argument
457 np = t->rcu_node_entry.next; in rcu_next_node_entry()
478 rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) in rcu_preempt_deferred_qs_irqrestore() argument
498 special = t->rcu_read_unlock_special; in rcu_preempt_deferred_qs_irqrestore()
503 t->rcu_read_unlock_special.s = 0; in rcu_preempt_deferred_qs_irqrestore()
532 rnp = t->rcu_blocked_node; in rcu_preempt_deferred_qs_irqrestore()
534 WARN_ON_ONCE(rnp != t->rcu_blocked_node); in rcu_preempt_deferred_qs_irqrestore()
540 np = rcu_next_node_entry(t, rnp); in rcu_preempt_deferred_qs_irqrestore()
541 list_del_init(&t->rcu_node_entry); in rcu_preempt_deferred_qs_irqrestore()
542 t->rcu_blocked_node = NULL; in rcu_preempt_deferred_qs_irqrestore()
544 rnp->gp_seq, t->pid); in rcu_preempt_deferred_qs_irqrestore()
545 if (&t->rcu_node_entry == rnp->gp_tasks) in rcu_preempt_deferred_qs_irqrestore()
547 if (&t->rcu_node_entry == rnp->exp_tasks) in rcu_preempt_deferred_qs_irqrestore()
551 drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx.rtmutex) == t; in rcu_preempt_deferred_qs_irqrestore()
552 if (&t->rcu_node_entry == rnp->boost_tasks) in rcu_preempt_deferred_qs_irqrestore()
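
The rcu_preempt_ctxt_queue() and rcu_preempt_deferred_qs_irqrestore() entries show the two halves of the blocked-reader bookkeeping: a preempted reader is linked onto rnp->blkd_tasks, with rnp->gp_tasks and rnp->exp_tasks pointing at the first entry blocking the normal or expedited grace period, and on removal those pointers are advanced past the departing task. The following is a minimal user-space model of that idea; the names and the singly linked list are hypothetical stand-ins for the kernel's list_head machinery, and only the normal-grace-period pointer is modeled.

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-ins for task_struct and rcu_node; hypothetical names. */
struct task {
	const char *name;
	struct task *next;       /* models t->rcu_node_entry                      */
};

struct node {
	struct task *blkd_tasks; /* models rnp->blkd_tasks                        */
	struct task *gp_tasks;   /* first task blocking the current grace period  */
};

/* Queue a preempted reader; if nothing was blocking the grace period yet,
 * this reader becomes the blocker (mirrors WRITE_ONCE(rnp->gp_tasks, ...)). */
static void queue_blocked_reader(struct node *n, struct task *t)
{
	t->next = n->blkd_tasks;
	n->blkd_tasks = t;
	if (!n->gp_tasks)
		n->gp_tasks = t;
}

/* Remove a reader; if it was the grace-period blocker, its successor (or
 * NULL) takes over, the same idea as rcu_next_node_entry() followed by
 * list_del_init() and the gp_tasks/exp_tasks updates in the kernel code. */
static void unqueue_blocked_reader(struct node *n, struct task *t)
{
	struct task **pp = &n->blkd_tasks;

	while (*pp && *pp != t)
		pp = &(*pp)->next;
	if (!*pp)
		return;                  /* not queued on this node */
	if (n->gp_tasks == t)
		n->gp_tasks = t->next;   /* may become NULL: grace period unblocked */
	*pp = t->next;
	t->next = NULL;
}

int main(void)
{
	struct node n = { 0 };
	struct task a = { .name = "reader-a" }, b = { .name = "reader-b" };

	queue_blocked_reader(&n, &a);
	queue_blocked_reader(&n, &b);    /* list: b -> a, gp_tasks = a */
	printf("blocking GP: %s\n", n.gp_tasks->name);
	unqueue_blocked_reader(&n, &a);
	printf("blocking GP: %s\n", n.gp_tasks ? n.gp_tasks->name : "(none)");
	return 0;
}
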
600 static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t) in rcu_preempt_need_deferred_qs() argument
603 READ_ONCE(t->rcu_read_unlock_special.s)) && in rcu_preempt_need_deferred_qs()
614 notrace void rcu_preempt_deferred_qs(struct task_struct *t) in rcu_preempt_deferred_qs() argument
618 if (!rcu_preempt_need_deferred_qs(t)) in rcu_preempt_deferred_qs()
621 rcu_preempt_deferred_qs_irqrestore(t, flags); in rcu_preempt_deferred_qs()
671 static bool rcu_unlock_needs_exp_handling(struct task_struct *t, in rcu_unlock_needs_exp_handling() argument
684 if (t->rcu_blocked_node && READ_ONCE(t->rcu_blocked_node->exp_tasks)) in rcu_unlock_needs_exp_handling()
705 ((rdp->grpmask & READ_ONCE(rnp->qsmask)) || t->rcu_blocked_node)) in rcu_unlock_needs_exp_handling()
717 if (IS_ENABLED(CONFIG_RCU_BOOST) && irqs_were_disabled && t->rcu_blocked_node) in rcu_unlock_needs_exp_handling()
728 static void rcu_read_unlock_special(struct task_struct *t) in rcu_read_unlock_special() argument
746 needs_exp = rcu_unlock_needs_exp_handling(t, rdp, rnp, irqs_were_disabled); in rcu_read_unlock_special()
773 rcu_preempt_deferred_qs_irqrestore(t, flags); in rcu_read_unlock_special()
787 struct task_struct *t; in rcu_preempt_check_blocked_tasks() local
796 t = container_of(rnp->gp_tasks, struct task_struct, in rcu_preempt_check_blocked_tasks()
799 rnp->gp_seq, t->pid); in rcu_preempt_check_blocked_tasks()
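
Both rcu_preempt_check_blocked_tasks() here and rcu_boost() further down recover the task_struct from a list pointer with container_of(). A self-contained sketch of that idiom follows; the offsetof()-based macro is the textbook form, shown for illustration rather than copied from the kernel headers (which add type checking).

#include <stdio.h>
#include <stddef.h>

/* The classic container_of() idiom: given a pointer to a member, step back
 * to the enclosing structure using the member's offset. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_entry {
	struct list_entry *next;
};

struct task {
	int pid;
	struct list_entry node_entry;   /* plays the role of t->rcu_node_entry */
};

int main(void)
{
	struct task t = { .pid = 42 };
	struct list_entry *entry = &t.node_entry;   /* what a list walk hands back */
	struct task *owner = container_of(entry, struct task, node_entry);

	printf("pid %d\n", owner->pid);             /* prints 42 */
	return 0;
}
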
813 struct task_struct *t = current; in rcu_flavor_sched_clock_irq() local
819 if (rcu_preempt_need_deferred_qs(t)) { in rcu_flavor_sched_clock_irq()
820 set_tsk_need_resched(t); in rcu_flavor_sched_clock_irq()
823 } else if (rcu_preempt_need_deferred_qs(t)) { in rcu_flavor_sched_clock_irq()
824 rcu_preempt_deferred_qs(t); /* Report deferred QS. */ in rcu_flavor_sched_clock_irq()
835 !t->rcu_read_unlock_special.b.need_qs && in rcu_flavor_sched_clock_irq()
837 t->rcu_read_unlock_special.b.need_qs = true; in rcu_flavor_sched_clock_irq()
850 struct task_struct *t = current; in exit_rcu() local
855 WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true); in exit_rcu()
1035 static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t) in rcu_preempt_need_deferred_qs() argument
1047 notrace void rcu_preempt_deferred_qs(struct task_struct *t) in rcu_preempt_deferred_qs() argument
1160 struct task_struct *t; in rcu_boost() local
1205 t = container_of(tb, struct task_struct, rcu_node_entry); in rcu_boost()
1206 rt_mutex_init_proxy_locked(&rnp->boost_mtx.rtmutex, t); in rcu_boost()
1306 struct task_struct *t; in rcu_spawn_one_boost_kthread() local
1311 t = kthread_create(rcu_boost_kthread, (void *)rnp, in rcu_spawn_one_boost_kthread()
1313 if (WARN_ON_ONCE(IS_ERR(t))) in rcu_spawn_one_boost_kthread()
1317 rnp->boost_kthread_task = t; in rcu_spawn_one_boost_kthread()
1321 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); in rcu_spawn_one_boost_kthread()
1322 rcu_thread_affine_rnp(t, rnp); in rcu_spawn_one_boost_kthread()
1323 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */ in rcu_spawn_one_boost_kthread()
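
The rcu_spawn_one_boost_kthread() entries show the spawn sequence: create the kthread, record it in rnp->boost_kthread_task, switch it to SCHED_FIFO, set its CPU affinity, and wake it. A rough user-space analogue of the create-then-set-realtime-policy step is sketched below, using pthreads rather than the kthread API; kthread_create() leaves the new task stopped until wake_up_process(), which plain pthread_create() does not model.

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

/* Minimal worker standing in for rcu_boost_kthread(); illustrative only. */
static void *boost_worker(void *arg)
{
	(void)arg;
	puts("boost worker running");
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct sched_param sp = { .sched_priority = 2 };  /* plays the role of kthread_prio */

	if (pthread_create(&t, NULL, boost_worker, NULL))
		return 1;
	/* SCHED_FIFO usually needs privileges; failure is ignored in this sketch. */
	pthread_setschedparam(t, SCHED_FIFO, &sp);
	pthread_join(t, NULL);
	return 0;
}
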