Lines matching refs:rnp (every use of the rcu_node pointer rnp, each shown with its source line number and enclosing function)

105 static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake);
151 static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_preempt_ctxt_queue() argument
152 __releases(rnp->lock) /* But leaves interrupts disabled. */ in rcu_preempt_ctxt_queue()
154 int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) + in rcu_preempt_ctxt_queue()
155 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) + in rcu_preempt_ctxt_queue()
156 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) + in rcu_preempt_ctxt_queue()
157 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0); in rcu_preempt_ctxt_queue()
160 raw_lockdep_assert_held_rcu_node(rnp); in rcu_preempt_ctxt_queue()
161 WARN_ON_ONCE(rdp->mynode != rnp); in rcu_preempt_ctxt_queue()
162 WARN_ON_ONCE(!rcu_is_leaf_node(rnp)); in rcu_preempt_ctxt_queue()
164 WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask & in rcu_preempt_ctxt_queue()
185 list_add(&t->rcu_node_entry, &rnp->blkd_tasks); in rcu_preempt_ctxt_queue()
203 list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks); in rcu_preempt_ctxt_queue()
216 list_add(&t->rcu_node_entry, rnp->exp_tasks); in rcu_preempt_ctxt_queue()
227 list_add(&t->rcu_node_entry, rnp->gp_tasks); in rcu_preempt_ctxt_queue()
243 if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) { in rcu_preempt_ctxt_queue()
244 WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry); in rcu_preempt_ctxt_queue()
245 WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq); in rcu_preempt_ctxt_queue()
247 if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD)) in rcu_preempt_ctxt_queue()
248 WRITE_ONCE(rnp->exp_tasks, &t->rcu_node_entry); in rcu_preempt_ctxt_queue()
250 !(rnp->qsmask & rdp->grpmask)); in rcu_preempt_ctxt_queue()
252 !(rnp->expmask & rdp->grpmask)); in rcu_preempt_ctxt_queue()
253 raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */ in rcu_preempt_ctxt_queue()
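
The four conditions on lines 154-157 fold into a single blkd_state nibble that an (unlisted) switch then uses to pick the queue position shown on lines 185-227. Below is a standalone, simplified model of that encoding; the struct and helper names are hypothetical stand-ins, and the kernel's full 16-case placement switch is not reproduced.

#include <stdbool.h>
#include <stdio.h>

#define RCU_GP_TASKS    0x1  /* ->gp_tasks already points into the list */
#define RCU_EXP_TASKS   0x2  /* ->exp_tasks already points into the list */
#define RCU_GP_BLKD     0x4  /* this CPU still owes the normal GP a QS */
#define RCU_EXP_BLKD    0x8  /* this CPU still owes the expedited GP a QS */

struct fake_rnp {                  /* hypothetical stand-in for rcu_node */
    bool has_gp_tasks, has_exp_tasks;
    unsigned long qsmask, expmask;
};

static int blkd_state(const struct fake_rnp *rnp, unsigned long grpmask)
{
    return (rnp->has_gp_tasks ? RCU_GP_TASKS : 0) +
           (rnp->has_exp_tasks ? RCU_EXP_TASKS : 0) +
           ((rnp->qsmask & grpmask) ? RCU_GP_BLKD : 0) +
           ((rnp->expmask & grpmask) ? RCU_EXP_BLKD : 0);
}

int main(void)
{
    struct fake_rnp rnp = { .qsmask = 0x2 };   /* CPU 1 owes a normal QS */
    int state = blkd_state(&rnp, 0x2);

    /* First task to block the current GP goes to the tail and becomes
     * the new ->gp_tasks marker (lines 243-244 above). */
    printf("blkd_state = %#x, blocks normal GP = %d\n",
           state, !!(state & RCU_GP_BLKD));
    return 0;
}
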
314 struct rcu_node *rnp; in rcu_note_context_switch() local
323 rnp = rdp->mynode; in rcu_note_context_switch()
324 raw_spin_lock_rcu_node(rnp); in rcu_note_context_switch()
326 t->rcu_blocked_node = rnp; in rcu_note_context_switch()
337 (rnp->qsmask & rdp->grpmask) in rcu_note_context_switch()
338 ? rnp->gp_seq in rcu_note_context_switch()
339 : rcu_seq_snap(&rnp->gp_seq)); in rcu_note_context_switch()
340 rcu_preempt_ctxt_queue(rnp, rdp); in rcu_note_context_switch()
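
Lines 337-339 trace either the current ->gp_seq (when this CPU's qsmask bit is set, so the task blocks the grace period in progress) or rcu_seq_snap(), the first gp_seq value by which a full grace period starting after this instant has completed. Here is a userspace sketch of that snapshot arithmetic; the constants mirror kernel/rcu/rcu.h, but treat this as an illustration rather than the kernel source.

#include <stdio.h>

#define RCU_SEQ_CTR_SHIFT   2                               /* low bits = phase */
#define RCU_SEQ_STATE_MASK  ((1UL << RCU_SEQ_CTR_SHIFT) - 1)

/* Smallest gp_seq value at which a grace period starting after this
 * call is guaranteed to have completed. */
static unsigned long seq_snap(unsigned long gp_seq)
{
    return (gp_seq + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
}

int main(void)
{
    unsigned long gp_seq = (5UL << RCU_SEQ_CTR_SHIFT) | 1;  /* a GP is running */

    /* The in-progress GP must end, then one more full GP must complete,
     * before the counter reaches the snapshot. */
    printf("gp_seq = %#lx, snap = %#lx\n", gp_seq, seq_snap(gp_seq));
    return 0;
}
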
367 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) in rcu_preempt_blocked_readers_cgp() argument
369 return READ_ONCE(rnp->gp_tasks) != NULL; in rcu_preempt_blocked_readers_cgp()
439 struct rcu_node *rnp) in rcu_next_node_entry() argument
444 if (np == &rnp->blkd_tasks) in rcu_next_node_entry()
453 static bool rcu_preempt_has_tasks(struct rcu_node *rnp) in rcu_preempt_has_tasks() argument
455 return !list_empty(&rnp->blkd_tasks); in rcu_preempt_has_tasks()
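
rcu_next_node_entry() on lines 439-444 steps past the outgoing task and maps "wrapped back to the list head" to NULL, since the head itself is not a task. A minimal standalone rendering with stand-in types:

struct list_head { struct list_head *next, *prev; };

static struct list_head *next_or_null(struct list_head *entry,
                                      struct list_head *head)
{
    struct list_head *np = entry->next;

    return np == head ? NULL : np;  /* the head is not a queued task */
}
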
472 struct rcu_node *rnp; in rcu_preempt_deferred_qs_irqrestore() local
515 rnp = t->rcu_blocked_node; in rcu_preempt_deferred_qs_irqrestore()
516 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ in rcu_preempt_deferred_qs_irqrestore()
517 WARN_ON_ONCE(rnp != t->rcu_blocked_node); in rcu_preempt_deferred_qs_irqrestore()
518 WARN_ON_ONCE(!rcu_is_leaf_node(rnp)); in rcu_preempt_deferred_qs_irqrestore()
519 empty_norm = !rcu_preempt_blocked_readers_cgp(rnp); in rcu_preempt_deferred_qs_irqrestore()
520 WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq && in rcu_preempt_deferred_qs_irqrestore()
521 (!empty_norm || rnp->qsmask)); in rcu_preempt_deferred_qs_irqrestore()
522 empty_exp = sync_rcu_exp_done(rnp); in rcu_preempt_deferred_qs_irqrestore()
524 np = rcu_next_node_entry(t, rnp); in rcu_preempt_deferred_qs_irqrestore()
528 rnp->gp_seq, t->pid); in rcu_preempt_deferred_qs_irqrestore()
529 if (&t->rcu_node_entry == rnp->gp_tasks) in rcu_preempt_deferred_qs_irqrestore()
530 WRITE_ONCE(rnp->gp_tasks, np); in rcu_preempt_deferred_qs_irqrestore()
531 if (&t->rcu_node_entry == rnp->exp_tasks) in rcu_preempt_deferred_qs_irqrestore()
532 WRITE_ONCE(rnp->exp_tasks, np); in rcu_preempt_deferred_qs_irqrestore()
535 drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx.rtmutex) == t; in rcu_preempt_deferred_qs_irqrestore()
536 if (&t->rcu_node_entry == rnp->boost_tasks) in rcu_preempt_deferred_qs_irqrestore()
537 WRITE_ONCE(rnp->boost_tasks, np); in rcu_preempt_deferred_qs_irqrestore()
546 empty_exp_now = sync_rcu_exp_done(rnp); in rcu_preempt_deferred_qs_irqrestore()
547 if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) { in rcu_preempt_deferred_qs_irqrestore()
549 rnp->gp_seq, in rcu_preempt_deferred_qs_irqrestore()
550 0, rnp->qsmask, in rcu_preempt_deferred_qs_irqrestore()
551 rnp->level, in rcu_preempt_deferred_qs_irqrestore()
552 rnp->grplo, in rcu_preempt_deferred_qs_irqrestore()
553 rnp->grphi, in rcu_preempt_deferred_qs_irqrestore()
554 !!rnp->gp_tasks); in rcu_preempt_deferred_qs_irqrestore()
555 rcu_report_unblock_qs_rnp(rnp, flags); in rcu_preempt_deferred_qs_irqrestore()
557 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_preempt_deferred_qs_irqrestore()
565 rcu_report_exp_rnp(rnp, true); in rcu_preempt_deferred_qs_irqrestore()
569 rt_mutex_futex_unlock(&rnp->boost_mtx.rtmutex); in rcu_preempt_deferred_qs_irqrestore()
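
The unqueue path above must keep ->gp_tasks, ->exp_tasks, and ->boost_tasks valid: whichever marker points at the entry being removed is first advanced to its successor np, which may be NULL (lines 529-537). A single-marker userspace sketch of that pattern, with hypothetical list helpers:

#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void list_del(struct list_head *e)
{
    e->prev->next = e->next;
    e->next->prev = e->prev;
}

/* Remove entry while keeping an external marker pointer valid. */
static void remove_keep_marker(struct list_head *entry,
                               struct list_head *head,
                               struct list_head **marker)
{
    struct list_head *np = entry->next == head ? NULL : entry->next;

    if (*marker == entry)   /* marker pointed at the victim: advance it */
        *marker = np;
    list_del(entry);
}
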
640 struct rcu_node *rnp = rdp->mynode; in rcu_read_unlock_special() local
643 (rdp->grpmask & READ_ONCE(rnp->expmask)) || in rcu_read_unlock_special()
645 ((rdp->grpmask & READ_ONCE(rnp->qsmask)) || t->rcu_blocked_node)) || in rcu_read_unlock_special()
691 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) in rcu_preempt_check_blocked_tasks() argument
696 raw_lockdep_assert_held_rcu_node(rnp); in rcu_preempt_check_blocked_tasks()
697 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp))) in rcu_preempt_check_blocked_tasks()
698 dump_blkd_tasks(rnp, 10); in rcu_preempt_check_blocked_tasks()
699 if (rcu_preempt_has_tasks(rnp) && in rcu_preempt_check_blocked_tasks()
700 (rnp->qsmaskinit || rnp->wait_blkd_tasks)) { in rcu_preempt_check_blocked_tasks()
701 WRITE_ONCE(rnp->gp_tasks, rnp->blkd_tasks.next); in rcu_preempt_check_blocked_tasks()
702 t = container_of(rnp->gp_tasks, struct task_struct, in rcu_preempt_check_blocked_tasks()
705 rnp->gp_seq, t->pid); in rcu_preempt_check_blocked_tasks()
707 WARN_ON_ONCE(rnp->qsmask); in rcu_preempt_check_blocked_tasks()
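
Line 702 recovers the owning task_struct from the embedded list pointer with container_of(). A standalone rendering with a hypothetical struct, showing just that pointer arithmetic:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next, *prev; };

struct fake_task {              /* stand-in for task_struct */
    int pid;
    struct list_head rcu_node_entry;
};

int main(void)
{
    struct fake_task t = { .pid = 42 };
    struct list_head *gp_tasks = &t.rcu_node_entry;

    /* Walk back from the embedded member to the enclosing structure. */
    printf("pid = %d\n",
           container_of(gp_tasks, struct fake_task, rcu_node_entry)->pid);
    return 0;
}
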
776 dump_blkd_tasks(struct rcu_node *rnp, int ncheck) in dump_blkd_tasks() argument
784 raw_lockdep_assert_held_rcu_node(rnp); in dump_blkd_tasks()
786 __func__, rnp->grplo, rnp->grphi, rnp->level, in dump_blkd_tasks()
787 (long)READ_ONCE(rnp->gp_seq), (long)rnp->completedqs); in dump_blkd_tasks()
788 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent) in dump_blkd_tasks()
792 __func__, READ_ONCE(rnp->gp_tasks), data_race(rnp->boost_tasks), in dump_blkd_tasks()
793 READ_ONCE(rnp->exp_tasks)); in dump_blkd_tasks()
796 list_for_each(lhp, &rnp->blkd_tasks) { in dump_blkd_tasks()
802 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) { in dump_blkd_tasks()
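
The list_for_each() walk on line 796 is bounded by the ncheck parameter (line 776), so a corrupted ->blkd_tasks list cannot wedge the dumping CPU. A standalone sketch of that defensive bound over a toy circular list:

#include <stdio.h>

struct node { struct node *next; int id; };

static void dump_bounded(struct node *head, int ncheck)
{
    int i = 0;

    for (struct node *p = head->next; p != head; p = p->next) {
        printf(" %d", p->id);
        if (++i >= ncheck)      /* give up after ncheck entries */
            break;
    }
    printf("\n");
}
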
911 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) in rcu_preempt_blocked_readers_cgp() argument
919 static bool rcu_preempt_has_tasks(struct rcu_node *rnp) in rcu_preempt_has_tasks() argument
953 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) in rcu_preempt_check_blocked_tasks() argument
955 WARN_ON_ONCE(rnp->qsmask); in rcu_preempt_check_blocked_tasks()
993 dump_blkd_tasks(struct rcu_node *rnp, int ncheck) in dump_blkd_tasks() argument
995 WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks)); in dump_blkd_tasks()
1045 static int rcu_boost(struct rcu_node *rnp) in rcu_boost() argument
1051 if (READ_ONCE(rnp->exp_tasks) == NULL && in rcu_boost()
1052 READ_ONCE(rnp->boost_tasks) == NULL) in rcu_boost()
1055 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_boost()
1061 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) { in rcu_boost()
1062 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_boost()
1072 if (rnp->exp_tasks != NULL) in rcu_boost()
1073 tb = rnp->exp_tasks; in rcu_boost()
1075 tb = rnp->boost_tasks; in rcu_boost()
1094 rt_mutex_init_proxy_locked(&rnp->boost_mtx.rtmutex, t); in rcu_boost()
1095 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_boost()
1097 rt_mutex_lock(&rnp->boost_mtx); in rcu_boost()
1098 rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */ in rcu_boost()
1099 rnp->n_boosts++; in rcu_boost()
1101 return READ_ONCE(rnp->exp_tasks) != NULL || in rcu_boost()
1102 READ_ONCE(rnp->boost_tasks) != NULL; in rcu_boost()
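
rcu_boost() initializes ->boost_mtx as proxy-locked by the victim reader (line 1094) and then blocks on it (line 1097), so priority inheritance lends the booster's priority to the reader until its outermost rcu_read_unlock() releases the mutex (line 569 above). Below is a userspace analogue using a POSIX priority-inheritance mutex (build with -pthread); it models the handshake only, not the kernel's rt_mutex proxy-locking.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t boost_mtx;

static void *reader(void *arg)
{
    pthread_mutex_lock(&boost_mtx);     /* analogue of the proxy lock */
    usleep(10000);                      /* long read-side critical section */
    pthread_mutex_unlock(&boost_mtx);   /* outermost rcu_read_unlock() */
    return NULL;
}

static void *booster(void *arg)
{
    /* Sleeps until the reader is done; with PRIO_INHERIT the reader
     * runs at the booster's priority while we wait. */
    pthread_mutex_lock(&boost_mtx);
    pthread_mutex_unlock(&boost_mtx);
    return NULL;
}

int main(void)
{
    pthread_mutexattr_t attr;
    pthread_t r, b;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
    pthread_mutex_init(&boost_mtx, &attr);

    pthread_create(&r, NULL, reader, NULL);
    usleep(1000);       /* sketch convenience: let the reader lock first */
    pthread_create(&b, NULL, booster, NULL);
    pthread_join(r, NULL);
    pthread_join(b, NULL);
    return 0;
}
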
1110 struct rcu_node *rnp = (struct rcu_node *)arg; in rcu_boost_kthread() local
1116 WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_WAITING); in rcu_boost_kthread()
1118 rcu_wait(READ_ONCE(rnp->boost_tasks) || in rcu_boost_kthread()
1119 READ_ONCE(rnp->exp_tasks)); in rcu_boost_kthread()
1121 WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_RUNNING); in rcu_boost_kthread()
1122 more2boost = rcu_boost(rnp); in rcu_boost_kthread()
1128 WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_YIELDING); in rcu_boost_kthread()
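
The kthread loop on lines 1116-1128 parks until some queued task needs boosting and publishes its phase through ->boost_kthread_status so that rcu_initiate_boost() can decide whether a wakeup is needed. A condition-variable analogue of that wait/publish loop; all names are hypothetical:

#include <pthread.h>
#include <stdbool.h>

enum kstatus { KT_WAITING, KT_RUNNING, KT_YIELDING };

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static bool boost_needed;       /* boost_tasks || exp_tasks analogue */
static enum kstatus status;     /* ->boost_kthread_status analogue */

static void *boost_loop(void *arg)
{
    for (;;) {
        pthread_mutex_lock(&m);
        status = KT_WAITING;
        while (!boost_needed)           /* rcu_wait() analogue */
            pthread_cond_wait(&cv, &m);
        status = KT_RUNNING;
        boost_needed = false;
        pthread_mutex_unlock(&m);
        /* ... boost readers until rcu_boost() reports no more ... */
    }
    return NULL;
}
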
1150 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) in rcu_initiate_boost() argument
1151 __releases(rnp->lock) in rcu_initiate_boost()
1153 raw_lockdep_assert_held_rcu_node(rnp); in rcu_initiate_boost()
1154 if (!rnp->boost_kthread_task || in rcu_initiate_boost()
1155 (!rcu_preempt_blocked_readers_cgp(rnp) && !rnp->exp_tasks)) { in rcu_initiate_boost()
1156 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_initiate_boost()
1159 if (rnp->exp_tasks != NULL || in rcu_initiate_boost()
1160 (rnp->gp_tasks != NULL && in rcu_initiate_boost()
1161 rnp->boost_tasks == NULL && in rcu_initiate_boost()
1162 rnp->qsmask == 0 && in rcu_initiate_boost()
1163 (!time_after(rnp->boost_time, jiffies) || rcu_state.cbovld || in rcu_initiate_boost()
1165 if (rnp->exp_tasks == NULL) in rcu_initiate_boost()
1166 WRITE_ONCE(rnp->boost_tasks, rnp->gp_tasks); in rcu_initiate_boost()
1167 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_initiate_boost()
1168 rcu_wake_cond(rnp->boost_kthread_task, in rcu_initiate_boost()
1169 READ_ONCE(rnp->boost_kthread_status)); in rcu_initiate_boost()
1171 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_initiate_boost()
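
Lines 1154-1163 gate boosting: bail out when there is no kthread or nothing is queued; otherwise boost immediately for expedited blockers, and for normal blockers only once every CPU of the node has reported a quiescent state (qsmask == 0), no boosting is already in flight, and either the deadline has passed or callbacks are overloaded. A boolean distillation of that test; the struct is hypothetical, and one further condition continuing on the unlisted line 1164 is omitted.

#include <stdbool.h>

struct boost_view {                 /* hypothetical snapshot of one rcu_node */
    bool have_kthread;              /* ->boost_kthread_task != NULL */
    bool gp_tasks, exp_tasks, boost_tasks;
    unsigned long qsmask;
    bool deadline_passed;           /* !time_after(->boost_time, jiffies) */
    bool cbovld;                    /* rcu_state.cbovld */
};

static bool should_start_boosting(const struct boost_view *v)
{
    if (!v->have_kthread || (!v->gp_tasks && !v->exp_tasks))
        return false;               /* no booster, or nothing blocked */
    return v->exp_tasks ||          /* expedited blockers: boost now */
           (v->gp_tasks && !v->boost_tasks && !v->qsmask &&
            (v->deadline_passed || v->cbovld));
}
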
1180 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) in rcu_preempt_boost_start_gp() argument
1182 rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES; in rcu_preempt_boost_start_gp()
1189 static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp) in rcu_spawn_one_boost_kthread() argument
1192 int rnp_index = rnp - rcu_get_root(); in rcu_spawn_one_boost_kthread()
1196 mutex_lock(&rnp->boost_kthread_mutex); in rcu_spawn_one_boost_kthread()
1197 if (rnp->boost_kthread_task || !rcu_scheduler_fully_active) in rcu_spawn_one_boost_kthread()
1200 t = kthread_create(rcu_boost_kthread, (void *)rnp, in rcu_spawn_one_boost_kthread()
1205 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_spawn_one_boost_kthread()
1206 rnp->boost_kthread_task = t; in rcu_spawn_one_boost_kthread()
1207 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_spawn_one_boost_kthread()
1213 mutex_unlock(&rnp->boost_kthread_mutex); in rcu_spawn_one_boost_kthread()
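
Lines 1196-1207 serialize spawning with ->boost_kthread_mutex and publish the created thread under the rnp lock, so concurrent callers cannot double-create and readers of ->boost_kthread_task never see a half-initialized thread. A pthread analogue of that create-then-publish pattern, with hypothetical names:

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t spawn_mutex = PTHREAD_MUTEX_INITIALIZER; /* boost_kthread_mutex */
static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;   /* rnp->lock */
static pthread_t worker_tid;
static pthread_t *worker;           /* ->boost_kthread_task analogue */

static void *work(void *arg) { return NULL; }

static void spawn_once(void)
{
    pthread_mutex_lock(&spawn_mutex);           /* serialize spawners */
    if (!worker && pthread_create(&worker_tid, NULL, work, NULL) == 0) {
        pthread_mutex_lock(&node_lock);         /* publish under the node lock */
        worker = &worker_tid;
        pthread_mutex_unlock(&node_lock);
    }
    pthread_mutex_unlock(&spawn_mutex);
}
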
1227 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) in rcu_boost_kthread_setaffinity() argument
1229 struct task_struct *t = rnp->boost_kthread_task; in rcu_boost_kthread_setaffinity()
1238 mutex_lock(&rnp->boost_kthread_mutex); in rcu_boost_kthread_setaffinity()
1239 mask = rcu_rnp_online_cpus(rnp); in rcu_boost_kthread_setaffinity()
1240 for_each_leaf_node_possible_cpu(rnp, cpu) in rcu_boost_kthread_setaffinity()
1241 if ((mask & leaf_node_cpu_bit(rnp, cpu)) && in rcu_boost_kthread_setaffinity()
1251 mutex_unlock(&rnp->boost_kthread_mutex); in rcu_boost_kthread_setaffinity()
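
Lines 1239-1241 build the kthread's affinity mask from the node's online CPUs while skipping the outgoing CPU. A plain-bitmask sketch of that construction follows; the kernel uses the cpumask API, and the unlisted remainder of the function handles corner cases such as the mask coming up empty.

/* Stand-in for the cpumask construction; grplo/grphi bound the node's CPUs. */
static unsigned long boost_affinity(unsigned long online_mask,
                                    int grplo, int grphi, int outgoingcpu)
{
    unsigned long cm = 0;

    for (int cpu = grplo; cpu <= grphi; cpu++)
        if ((online_mask & (1UL << (cpu - grplo))) && cpu != outgoingcpu)
            cm |= 1UL << (cpu - grplo);
    return cm;
}
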
1257 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) in rcu_initiate_boost() argument
1258 __releases(rnp->lock) in rcu_initiate_boost()
1260 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_initiate_boost()
1263 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) in rcu_preempt_boost_start_gp() argument
1267 static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp) in rcu_spawn_one_boost_kthread() argument
1271 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) in rcu_boost_kthread_setaffinity() argument