Lines Matching refs:rnp

107 static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake);
153 static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_preempt_ctxt_queue() argument
154 __releases(rnp->lock) /* But leaves rrupts disabled. */ in rcu_preempt_ctxt_queue()
156 int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) + in rcu_preempt_ctxt_queue()
157 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) + in rcu_preempt_ctxt_queue()
158 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) + in rcu_preempt_ctxt_queue()
159 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0); in rcu_preempt_ctxt_queue()
162 raw_lockdep_assert_held_rcu_node(rnp); in rcu_preempt_ctxt_queue()
163 WARN_ON_ONCE(rdp->mynode != rnp); in rcu_preempt_ctxt_queue()
164 WARN_ON_ONCE(!rcu_is_leaf_node(rnp)); in rcu_preempt_ctxt_queue()
166 WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask & in rcu_preempt_ctxt_queue()
187 list_add(&t->rcu_node_entry, &rnp->blkd_tasks); in rcu_preempt_ctxt_queue()
205 list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks); in rcu_preempt_ctxt_queue()
218 list_add(&t->rcu_node_entry, rnp->exp_tasks); in rcu_preempt_ctxt_queue()
229 list_add(&t->rcu_node_entry, rnp->gp_tasks); in rcu_preempt_ctxt_queue()
245 if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) { in rcu_preempt_ctxt_queue()
246 WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry); in rcu_preempt_ctxt_queue()
247 WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq); in rcu_preempt_ctxt_queue()
249 if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD)) in rcu_preempt_ctxt_queue()
250 WRITE_ONCE(rnp->exp_tasks, &t->rcu_node_entry); in rcu_preempt_ctxt_queue()
252 !(rnp->qsmask & rdp->grpmask)); in rcu_preempt_ctxt_queue()
254 !(rnp->expmask & rdp->grpmask)); in rcu_preempt_ctxt_queue()
255 raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */ in rcu_preempt_ctxt_queue()
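
The rcu_preempt_ctxt_queue() lines above combine four state bits into blkd_state and use the result to decide where on rnp->blkd_tasks the preempted reader is queued. As a rough orientation, here is a user-space sketch of just the bit-combination step; the flag values, helper name, and boolean parameters are illustrative stand-ins, not the kernel's definitions.

/*
 * User-space sketch (not kernel code): combining the four queuing-state
 * bits seen in rcu_preempt_ctxt_queue().  Flag values are illustrative;
 * see kernel/rcu/tree_plugin.h for the real definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define RCU_GP_TASKS	0x1	/* rnp->gp_tasks already points into the list */
#define RCU_EXP_TASKS	0x2	/* rnp->exp_tasks already points into the list */
#define RCU_GP_BLKD	0x4	/* this CPU still blocks the normal GP */
#define RCU_EXP_BLKD	0x8	/* this CPU still blocks the expedited GP */

static int blkd_state(bool gp_tasks, bool exp_tasks, bool gp_blkd, bool exp_blkd)
{
	return (gp_tasks ? RCU_GP_TASKS : 0) +
	       (exp_tasks ? RCU_EXP_TASKS : 0) +
	       (gp_blkd ? RCU_GP_BLKD : 0) +
	       (exp_blkd ? RCU_EXP_BLKD : 0);
}

int main(void)
{
	/* A reader preempted while blocking both normal and expedited GPs. */
	printf("blkd_state = %#x\n", blkd_state(false, false, true, true));
	return 0;
}
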
312 struct rcu_node *rnp; in rcu_note_context_switch() local
321 rnp = rdp->mynode; in rcu_note_context_switch()
322 raw_spin_lock_rcu_node(rnp); in rcu_note_context_switch()
324 t->rcu_blocked_node = rnp; in rcu_note_context_switch()
331 WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0); in rcu_note_context_switch()
335 (rnp->qsmask & rdp->grpmask) in rcu_note_context_switch()
336 ? rnp->gp_seq in rcu_note_context_switch()
337 : rcu_seq_snap(&rnp->gp_seq)); in rcu_note_context_switch()
338 rcu_preempt_ctxt_queue(rnp, rdp); in rcu_note_context_switch()
365 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) in rcu_preempt_blocked_readers_cgp() argument
367 return READ_ONCE(rnp->gp_tasks) != NULL; in rcu_preempt_blocked_readers_cgp()
437 struct rcu_node *rnp) in rcu_next_node_entry() argument
442 if (np == &rnp->blkd_tasks) in rcu_next_node_entry()
451 static bool rcu_preempt_has_tasks(struct rcu_node *rnp) in rcu_preempt_has_tasks() argument
453 return !list_empty(&rnp->blkd_tasks); in rcu_preempt_has_tasks()
470 struct rcu_node *rnp; in rcu_preempt_deferred_qs_irqrestore() local
512 rnp = t->rcu_blocked_node; in rcu_preempt_deferred_qs_irqrestore()
513 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ in rcu_preempt_deferred_qs_irqrestore()
514 WARN_ON_ONCE(rnp != t->rcu_blocked_node); in rcu_preempt_deferred_qs_irqrestore()
515 WARN_ON_ONCE(!rcu_is_leaf_node(rnp)); in rcu_preempt_deferred_qs_irqrestore()
516 empty_norm = !rcu_preempt_blocked_readers_cgp(rnp); in rcu_preempt_deferred_qs_irqrestore()
517 WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq && in rcu_preempt_deferred_qs_irqrestore()
518 (!empty_norm || rnp->qsmask)); in rcu_preempt_deferred_qs_irqrestore()
519 empty_exp = sync_rcu_exp_done(rnp); in rcu_preempt_deferred_qs_irqrestore()
521 np = rcu_next_node_entry(t, rnp); in rcu_preempt_deferred_qs_irqrestore()
525 rnp->gp_seq, t->pid); in rcu_preempt_deferred_qs_irqrestore()
526 if (&t->rcu_node_entry == rnp->gp_tasks) in rcu_preempt_deferred_qs_irqrestore()
527 WRITE_ONCE(rnp->gp_tasks, np); in rcu_preempt_deferred_qs_irqrestore()
528 if (&t->rcu_node_entry == rnp->exp_tasks) in rcu_preempt_deferred_qs_irqrestore()
529 WRITE_ONCE(rnp->exp_tasks, np); in rcu_preempt_deferred_qs_irqrestore()
532 drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx.rtmutex) == t; in rcu_preempt_deferred_qs_irqrestore()
533 if (&t->rcu_node_entry == rnp->boost_tasks) in rcu_preempt_deferred_qs_irqrestore()
534 WRITE_ONCE(rnp->boost_tasks, np); in rcu_preempt_deferred_qs_irqrestore()
543 empty_exp_now = sync_rcu_exp_done(rnp); in rcu_preempt_deferred_qs_irqrestore()
544 if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) { in rcu_preempt_deferred_qs_irqrestore()
546 rnp->gp_seq, in rcu_preempt_deferred_qs_irqrestore()
547 0, rnp->qsmask, in rcu_preempt_deferred_qs_irqrestore()
548 rnp->level, in rcu_preempt_deferred_qs_irqrestore()
549 rnp->grplo, in rcu_preempt_deferred_qs_irqrestore()
550 rnp->grphi, in rcu_preempt_deferred_qs_irqrestore()
551 !!rnp->gp_tasks); in rcu_preempt_deferred_qs_irqrestore()
552 rcu_report_unblock_qs_rnp(rnp, flags); in rcu_preempt_deferred_qs_irqrestore()
554 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_preempt_deferred_qs_irqrestore()
559 rt_mutex_futex_unlock(&rnp->boost_mtx.rtmutex); in rcu_preempt_deferred_qs_irqrestore()
566 rcu_report_exp_rnp(rnp, true); in rcu_preempt_deferred_qs_irqrestore()
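
In rcu_preempt_deferred_qs_irqrestore(), the departing task is removed from rnp->blkd_tasks only after any of the gp_tasks/exp_tasks/boost_tasks cursors that pointed at its entry have been advanced to np (obtained from rcu_next_node_entry()). A minimal user-space sketch of that advance-then-delete ordering, using simplified stand-in types rather than the kernel's list_head API:

/*
 * Sketch with simplified stand-ins: a circular doubly-linked list with a
 * dedicated head, plus a cursor that must be advanced (or cleared) before
 * the entry it points at is deleted.
 */
#include <stdio.h>
#include <stddef.h>

struct node {
	struct node *prev, *next;
};

static void list_init(struct node *head) { head->prev = head->next = head; }

static void list_add_tail_node(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void list_del_node(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

/* Analogue of rcu_next_node_entry(): NULL once we are back at the head. */
static struct node *next_or_null(struct node *n, struct node *head)
{
	return n->next == head ? NULL : n->next;
}

int main(void)
{
	struct node head, a, b;
	struct node *gp_tasks;

	list_init(&head);
	list_add_tail_node(&a, &head);
	list_add_tail_node(&b, &head);
	gp_tasks = &a;			/* oldest blocked reader */

	/* Reader 'a' exits its critical section: advance the cursor first. */
	if (gp_tasks == &a)
		gp_tasks = next_or_null(&a, &head);
	list_del_node(&a);

	printf("gp_tasks now %s\n", gp_tasks == &b ? "points at b" : "is NULL");
	return 0;
}
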
637 struct rcu_node *rnp = rdp->mynode; in rcu_read_unlock_special() local
640 (rdp->grpmask & READ_ONCE(rnp->expmask)) || in rcu_read_unlock_special()
681 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) in rcu_preempt_check_blocked_tasks() argument
686 raw_lockdep_assert_held_rcu_node(rnp); in rcu_preempt_check_blocked_tasks()
687 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp))) in rcu_preempt_check_blocked_tasks()
688 dump_blkd_tasks(rnp, 10); in rcu_preempt_check_blocked_tasks()
689 if (rcu_preempt_has_tasks(rnp) && in rcu_preempt_check_blocked_tasks()
690 (rnp->qsmaskinit || rnp->wait_blkd_tasks)) { in rcu_preempt_check_blocked_tasks()
691 WRITE_ONCE(rnp->gp_tasks, rnp->blkd_tasks.next); in rcu_preempt_check_blocked_tasks()
692 t = container_of(rnp->gp_tasks, struct task_struct, in rcu_preempt_check_blocked_tasks()
695 rnp->gp_seq, t->pid); in rcu_preempt_check_blocked_tasks()
697 WARN_ON_ONCE(rnp->qsmask); in rcu_preempt_check_blocked_tasks()
769 dump_blkd_tasks(struct rcu_node *rnp, int ncheck) in dump_blkd_tasks() argument
778 raw_lockdep_assert_held_rcu_node(rnp); in dump_blkd_tasks()
780 __func__, rnp->grplo, rnp->grphi, rnp->level, in dump_blkd_tasks()
781 (long)READ_ONCE(rnp->gp_seq), (long)rnp->completedqs); in dump_blkd_tasks()
782 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent) in dump_blkd_tasks()
786 __func__, READ_ONCE(rnp->gp_tasks), data_race(rnp->boost_tasks), in dump_blkd_tasks()
787 READ_ONCE(rnp->exp_tasks)); in dump_blkd_tasks()
790 list_for_each(lhp, &rnp->blkd_tasks) { in dump_blkd_tasks()
796 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) { in dump_blkd_tasks()
798 onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp)); in dump_blkd_tasks()
907 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) in rcu_preempt_blocked_readers_cgp() argument
915 static bool rcu_preempt_has_tasks(struct rcu_node *rnp) in rcu_preempt_has_tasks() argument
935 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) in rcu_preempt_check_blocked_tasks() argument
937 WARN_ON_ONCE(rnp->qsmask); in rcu_preempt_check_blocked_tasks()
976 dump_blkd_tasks(struct rcu_node *rnp, int ncheck) in dump_blkd_tasks() argument
978 WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks)); in dump_blkd_tasks()
1006 static int rcu_boost(struct rcu_node *rnp) in rcu_boost() argument
1012 if (READ_ONCE(rnp->exp_tasks) == NULL && in rcu_boost()
1013 READ_ONCE(rnp->boost_tasks) == NULL) in rcu_boost()
1016 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_boost()
1022 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) { in rcu_boost()
1023 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_boost()
1033 if (rnp->exp_tasks != NULL) in rcu_boost()
1034 tb = rnp->exp_tasks; in rcu_boost()
1036 tb = rnp->boost_tasks; in rcu_boost()
1055 rt_mutex_init_proxy_locked(&rnp->boost_mtx.rtmutex, t); in rcu_boost()
1056 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_boost()
1058 rt_mutex_lock(&rnp->boost_mtx); in rcu_boost()
1059 rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */ in rcu_boost()
1060 rnp->n_boosts++; in rcu_boost()
1062 return READ_ONCE(rnp->exp_tasks) != NULL || in rcu_boost()
1063 READ_ONCE(rnp->boost_tasks) != NULL; in rcu_boost()
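
In rcu_boost(), expedited blockers on rnp->exp_tasks are boosted in preference to normal-grace-period blockers on rnp->boost_tasks. A trivial sketch of that preference, with placeholder pointer types and a hypothetical helper name:

#include <stdio.h>

/* Sketch only: the kernel picks from ->exp_tasks first, else ->boost_tasks. */
static const void *pick_boost_queue(const void *exp_tasks, const void *boost_tasks)
{
	return exp_tasks ? exp_tasks : boost_tasks;
}

int main(void)
{
	int exp, norm;

	printf("prefer %s\n",
	       pick_boost_queue(&exp, &norm) == &exp ? "exp_tasks" : "boost_tasks");
	return 0;
}
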
1071 struct rcu_node *rnp = (struct rcu_node *)arg; in rcu_boost_kthread() local
1077 WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_WAITING); in rcu_boost_kthread()
1079 rcu_wait(READ_ONCE(rnp->boost_tasks) || in rcu_boost_kthread()
1080 READ_ONCE(rnp->exp_tasks)); in rcu_boost_kthread()
1082 WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_RUNNING); in rcu_boost_kthread()
1083 more2boost = rcu_boost(rnp); in rcu_boost_kthread()
1089 WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_YIELDING); in rcu_boost_kthread()
1111 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) in rcu_initiate_boost() argument
1112 __releases(rnp->lock) in rcu_initiate_boost()
1114 raw_lockdep_assert_held_rcu_node(rnp); in rcu_initiate_boost()
1115 if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) { in rcu_initiate_boost()
1116 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_initiate_boost()
1119 if (rnp->exp_tasks != NULL || in rcu_initiate_boost()
1120 (rnp->gp_tasks != NULL && in rcu_initiate_boost()
1121 rnp->boost_tasks == NULL && in rcu_initiate_boost()
1122 rnp->qsmask == 0 && in rcu_initiate_boost()
1123 (!time_after(rnp->boost_time, jiffies) || rcu_state.cbovld))) { in rcu_initiate_boost()
1124 if (rnp->exp_tasks == NULL) in rcu_initiate_boost()
1125 WRITE_ONCE(rnp->boost_tasks, rnp->gp_tasks); in rcu_initiate_boost()
1126 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_initiate_boost()
1127 rcu_wake_cond(rnp->boost_kthread_task, in rcu_initiate_boost()
1128 READ_ONCE(rnp->boost_kthread_status)); in rcu_initiate_boost()
1130 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_initiate_boost()
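
The test in rcu_initiate_boost() (rnp->exp_tasks non-NULL, or gp_tasks set while boosting has not yet started, qsmask clear, and either the boost delay expired or callbacks overloaded) can be read as a stand-alone predicate. A sketch with illustrative parameter names, assuming this reading of the listed condition:

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the boost-initiation test; names are stand-ins, not kernel fields. */
static bool should_start_boosting(bool have_exp_tasks, bool have_gp_tasks,
				  bool already_boosting, bool cpus_still_pending,
				  bool boost_delay_elapsed, bool cb_overloaded)
{
	if (have_exp_tasks)
		return true;	/* Expedited blockers: boost immediately. */
	return have_gp_tasks && !already_boosting && !cpus_still_pending &&
	       (boost_delay_elapsed || cb_overloaded);
}

int main(void)
{
	/* Normal-GP blockers only, delay expired, no CPUs still pending. */
	printf("%d\n", should_start_boosting(false, true, false, false, true, false));
	return 0;
}
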
1148 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) in rcu_preempt_boost_start_gp() argument
1150 rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES; in rcu_preempt_boost_start_gp()
1158 static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp) in rcu_spawn_one_boost_kthread() argument
1161 int rnp_index = rnp - rcu_get_root(); in rcu_spawn_one_boost_kthread()
1165 if (rnp->boost_kthread_task || !rcu_scheduler_fully_active) in rcu_spawn_one_boost_kthread()
1170 t = kthread_create(rcu_boost_kthread, (void *)rnp, in rcu_spawn_one_boost_kthread()
1175 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_spawn_one_boost_kthread()
1176 rnp->boost_kthread_task = t; in rcu_spawn_one_boost_kthread()
1177 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_spawn_one_boost_kthread()
1192 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) in rcu_boost_kthread_setaffinity() argument
1194 struct task_struct *t = rnp->boost_kthread_task; in rcu_boost_kthread_setaffinity()
1195 unsigned long mask = rcu_rnp_online_cpus(rnp); in rcu_boost_kthread_setaffinity()
1203 for_each_leaf_node_possible_cpu(rnp, cpu) in rcu_boost_kthread_setaffinity()
1204 if ((mask & leaf_node_cpu_bit(rnp, cpu)) && in rcu_boost_kthread_setaffinity()
1218 struct rcu_node *rnp; in rcu_spawn_boost_kthreads() local
1220 rcu_for_each_leaf_node(rnp) in rcu_spawn_boost_kthreads()
1221 if (rcu_rnp_online_cpus(rnp)) in rcu_spawn_boost_kthreads()
1222 rcu_spawn_one_boost_kthread(rnp); in rcu_spawn_boost_kthreads()
1227 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) in rcu_initiate_boost() argument
1228 __releases(rnp->lock) in rcu_initiate_boost()
1230 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_initiate_boost()
1238 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) in rcu_preempt_boost_start_gp() argument
1242 static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp) in rcu_spawn_one_boost_kthread() argument
1246 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) in rcu_boost_kthread_setaffinity() argument
1325 struct rcu_node *rnp; in rcu_try_advance_all_cbs() local
1332 rnp = rdp->mynode; in rcu_try_advance_all_cbs()
1340 rcu_seq_current(&rnp->gp_seq)) || in rcu_try_advance_all_cbs()
1398 struct rcu_node *rnp; in rcu_prepare_for_idle() local
1424 rnp = rdp->mynode; in rcu_prepare_for_idle()
1425 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ in rcu_prepare_for_idle()
1426 needwake = rcu_accelerate_cbs(rnp, rdp); in rcu_prepare_for_idle()
1427 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ in rcu_prepare_for_idle()