Lines Matching refs:rnp

13 static int rcu_print_task_exp_stall(struct rcu_node *rnp);
14 static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp);
83 struct rcu_node *rnp; in sync_exp_reset_tree_hotplug() local
95 rcu_for_each_leaf_node(rnp) { in sync_exp_reset_tree_hotplug()
96 raw_spin_lock_irqsave_rcu_node(rnp, flags); in sync_exp_reset_tree_hotplug()
97 if (rnp->expmaskinit == rnp->expmaskinitnext) { in sync_exp_reset_tree_hotplug()
98 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in sync_exp_reset_tree_hotplug()
103 oldmask = rnp->expmaskinit; in sync_exp_reset_tree_hotplug()
104 rnp->expmaskinit = rnp->expmaskinitnext; in sync_exp_reset_tree_hotplug()
105 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in sync_exp_reset_tree_hotplug()
112 mask = rnp->grpmask; in sync_exp_reset_tree_hotplug()
113 rnp_up = rnp->parent; in sync_exp_reset_tree_hotplug()
136 struct rcu_node *rnp; in sync_exp_reset_tree() local
139 rcu_for_each_node_breadth_first(rnp) { in sync_exp_reset_tree()
140 raw_spin_lock_irqsave_rcu_node(rnp, flags); in sync_exp_reset_tree()
141 WARN_ON_ONCE(rnp->expmask); in sync_exp_reset_tree()
142 WRITE_ONCE(rnp->expmask, rnp->expmaskinit); in sync_exp_reset_tree()
143 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in sync_exp_reset_tree()
153 static bool sync_rcu_exp_done(struct rcu_node *rnp) in sync_rcu_exp_done() argument
155 raw_lockdep_assert_held_rcu_node(rnp); in sync_rcu_exp_done()
156 return READ_ONCE(rnp->exp_tasks) == NULL && in sync_rcu_exp_done()
157 READ_ONCE(rnp->expmask) == 0; in sync_rcu_exp_done()
164 static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp) in sync_rcu_exp_done_unlocked() argument
169 raw_spin_lock_irqsave_rcu_node(rnp, flags); in sync_rcu_exp_done_unlocked()
170 ret = sync_rcu_exp_done(rnp); in sync_rcu_exp_done_unlocked()
171 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in sync_rcu_exp_done_unlocked()
185 static void __rcu_report_exp_rnp(struct rcu_node *rnp, in __rcu_report_exp_rnp() argument
187 __releases(rnp->lock) in __rcu_report_exp_rnp()
191 raw_lockdep_assert_held_rcu_node(rnp); in __rcu_report_exp_rnp()
193 if (!sync_rcu_exp_done(rnp)) { in __rcu_report_exp_rnp()
194 if (!rnp->expmask) in __rcu_report_exp_rnp()
195 rcu_initiate_boost(rnp, flags); in __rcu_report_exp_rnp()
197 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in __rcu_report_exp_rnp()
200 if (rnp->parent == NULL) { in __rcu_report_exp_rnp()
201 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in __rcu_report_exp_rnp()
208 mask = rnp->grpmask; in __rcu_report_exp_rnp()
209 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */ in __rcu_report_exp_rnp()
210 rnp = rnp->parent; in __rcu_report_exp_rnp()
211 raw_spin_lock_rcu_node(rnp); /* irqs already disabled */ in __rcu_report_exp_rnp()
212 WARN_ON_ONCE(!(rnp->expmask & mask)); in __rcu_report_exp_rnp()
213 WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask); in __rcu_report_exp_rnp()
221 static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake) in rcu_report_exp_rnp() argument
225 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_report_exp_rnp()
226 __rcu_report_exp_rnp(rnp, wake, flags); in rcu_report_exp_rnp()
233 static void rcu_report_exp_cpu_mult(struct rcu_node *rnp, in rcu_report_exp_cpu_mult() argument
240 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_report_exp_cpu_mult()
241 if (!(rnp->expmask & mask)) { in rcu_report_exp_cpu_mult()
242 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_exp_cpu_mult()
245 WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask); in rcu_report_exp_cpu_mult()
246 for_each_leaf_node_cpu_mask(rnp, cpu, mask) { in rcu_report_exp_cpu_mult()
253 __rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */ in rcu_report_exp_cpu_mult()
286 struct rcu_node *rnp = rdp->mynode; in exp_funnel_lock() local
290 if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) && in exp_funnel_lock()
291 (rnp == rnp_root || in exp_funnel_lock()
303 for (; rnp != NULL; rnp = rnp->parent) { in exp_funnel_lock()
308 spin_lock(&rnp->exp_lock); in exp_funnel_lock()
309 if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) { in exp_funnel_lock()
312 spin_unlock(&rnp->exp_lock); in exp_funnel_lock()
313 trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level, in exp_funnel_lock()
314 rnp->grplo, rnp->grphi, in exp_funnel_lock()
316 wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3], in exp_funnel_lock()
320 WRITE_ONCE(rnp->exp_seq_rq, s); /* Followers can wait on us. */ in exp_funnel_lock()
321 spin_unlock(&rnp->exp_lock); in exp_funnel_lock()
322 trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level, in exp_funnel_lock()
323 rnp->grplo, rnp->grphi, TPS("nxtlvl")); in exp_funnel_lock()
347 struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew); in __sync_rcu_exp_select_node_cpus() local
349 raw_spin_lock_irqsave_rcu_node(rnp, flags); in __sync_rcu_exp_select_node_cpus()
353 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) { in __sync_rcu_exp_select_node_cpus()
359 !(rnp->qsmaskinitnext & mask)) { in __sync_rcu_exp_select_node_cpus()
369 mask_ofl_ipi = rnp->expmask & ~mask_ofl_test; in __sync_rcu_exp_select_node_cpus()
376 if (rcu_preempt_has_tasks(rnp)) in __sync_rcu_exp_select_node_cpus()
377 WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next); in __sync_rcu_exp_select_node_cpus()
378 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in __sync_rcu_exp_select_node_cpus()
381 for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) { in __sync_rcu_exp_select_node_cpus()
402 raw_spin_lock_irqsave_rcu_node(rnp, flags); in __sync_rcu_exp_select_node_cpus()
403 if ((rnp->qsmaskinitnext & mask) && in __sync_rcu_exp_select_node_cpus()
404 (rnp->expmask & mask)) { in __sync_rcu_exp_select_node_cpus()
406 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in __sync_rcu_exp_select_node_cpus()
412 if (rnp->expmask & mask) in __sync_rcu_exp_select_node_cpus()
414 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in __sync_rcu_exp_select_node_cpus()
418 rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false); in __sync_rcu_exp_select_node_cpus()
437 static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp) in sync_rcu_exp_select_cpus_queue_work() argument
439 kthread_init_work(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus); in sync_rcu_exp_select_cpus_queue_work()
445 kthread_queue_work(rcu_exp_par_gp_kworker, &rnp->rew.rew_work); in sync_rcu_exp_select_cpus_queue_work()
448 static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp) in sync_rcu_exp_select_cpus_flush_work() argument
450 kthread_flush_work(&rnp->rew.rew_work); in sync_rcu_exp_select_cpus_flush_work()
487 static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp) in sync_rcu_exp_select_cpus_queue_work() argument
489 int cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1); in sync_rcu_exp_select_cpus_queue_work()
491 INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus); in sync_rcu_exp_select_cpus_queue_work()
493 if (unlikely(cpu > rnp->grphi - rnp->grplo)) in sync_rcu_exp_select_cpus_queue_work()
496 cpu += rnp->grplo; in sync_rcu_exp_select_cpus_queue_work()
497 queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work); in sync_rcu_exp_select_cpus_queue_work()
500 static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp) in sync_rcu_exp_select_cpus_flush_work() argument
502 flush_work(&rnp->rew.rew_work); in sync_rcu_exp_select_cpus_flush_work()
534 struct rcu_node *rnp; in sync_rcu_exp_select_cpus() local
541 rcu_for_each_leaf_node(rnp) { in sync_rcu_exp_select_cpus()
542 rnp->exp_need_flush = false; in sync_rcu_exp_select_cpus()
543 if (!READ_ONCE(rnp->expmask)) in sync_rcu_exp_select_cpus()
547 rcu_is_last_leaf_node(rnp)) { in sync_rcu_exp_select_cpus()
549 sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work); in sync_rcu_exp_select_cpus()
552 sync_rcu_exp_select_cpus_queue_work(rnp); in sync_rcu_exp_select_cpus()
553 rnp->exp_need_flush = true; in sync_rcu_exp_select_cpus()
557 rcu_for_each_leaf_node(rnp) in sync_rcu_exp_select_cpus()
558 if (rnp->exp_need_flush) in sync_rcu_exp_select_cpus()
559 sync_rcu_exp_select_cpus_flush_work(rnp); in sync_rcu_exp_select_cpus()
595 struct rcu_node *rnp; in synchronize_rcu_expedited_wait() local
604 rcu_for_each_leaf_node(rnp) { in synchronize_rcu_expedited_wait()
605 mask = READ_ONCE(rnp->expmask); in synchronize_rcu_expedited_wait()
606 for_each_leaf_node_cpu_mask(rnp, cpu, mask) { in synchronize_rcu_expedited_wait()
631 rcu_for_each_leaf_node(rnp) { in synchronize_rcu_expedited_wait()
632 ndetected += rcu_print_task_exp_stall(rnp); in synchronize_rcu_expedited_wait()
633 for_each_leaf_node_possible_cpu(rnp, cpu) { in synchronize_rcu_expedited_wait()
636 mask = leaf_node_cpu_bit(rnp, cpu); in synchronize_rcu_expedited_wait()
637 if (!(READ_ONCE(rnp->expmask) & mask)) in synchronize_rcu_expedited_wait()
643 "o."[!!(rdp->grpmask & rnp->expmaskinit)], in synchronize_rcu_expedited_wait()
644 "N."[!!(rdp->grpmask & rnp->expmaskinitnext)], in synchronize_rcu_expedited_wait()
654 rcu_for_each_node_breadth_first(rnp) { in synchronize_rcu_expedited_wait()
655 if (rnp == rnp_root) in synchronize_rcu_expedited_wait()
657 if (sync_rcu_exp_done_unlocked(rnp)) in synchronize_rcu_expedited_wait()
660 rnp->level, rnp->grplo, rnp->grphi, in synchronize_rcu_expedited_wait()
661 data_race(rnp->expmask), in synchronize_rcu_expedited_wait()
662 ".T"[!!data_race(rnp->exp_tasks)]); in synchronize_rcu_expedited_wait()
666 rcu_for_each_leaf_node(rnp) { in synchronize_rcu_expedited_wait()
667 for_each_leaf_node_possible_cpu(rnp, cpu) { in synchronize_rcu_expedited_wait()
668 mask = leaf_node_cpu_bit(rnp, cpu); in synchronize_rcu_expedited_wait()
669 if (!(READ_ONCE(rnp->expmask) & mask)) in synchronize_rcu_expedited_wait()
675 rcu_exp_print_detail_task_stall_rnp(rnp); in synchronize_rcu_expedited_wait()
690 struct rcu_node *rnp; in rcu_exp_wait_wake() local
701 rcu_for_each_node_breadth_first(rnp) { in rcu_exp_wait_wake()
702 if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) { in rcu_exp_wait_wake()
703 spin_lock(&rnp->exp_lock); in rcu_exp_wait_wake()
705 if (ULONG_CMP_LT(rnp->exp_seq_rq, s)) in rcu_exp_wait_wake()
706 WRITE_ONCE(rnp->exp_seq_rq, s); in rcu_exp_wait_wake()
707 spin_unlock(&rnp->exp_lock); in rcu_exp_wait_wake()
710 wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]); in rcu_exp_wait_wake()
743 struct rcu_node *rnp = rdp->mynode; in rcu_exp_handler() local
776 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_exp_handler()
777 if (rnp->expmask & rdp->grpmask) { in rcu_exp_handler()
781 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_exp_handler()
799 static int rcu_print_task_exp_stall(struct rcu_node *rnp) in rcu_print_task_exp_stall() argument
805 if (!READ_ONCE(rnp->exp_tasks)) in rcu_print_task_exp_stall()
807 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_print_task_exp_stall()
808 t = list_entry(rnp->exp_tasks->prev, in rcu_print_task_exp_stall()
810 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { in rcu_print_task_exp_stall()
814 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_print_task_exp_stall()
823 static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp) in rcu_exp_print_detail_task_stall_rnp() argument
830 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_exp_print_detail_task_stall_rnp()
831 if (!READ_ONCE(rnp->exp_tasks)) { in rcu_exp_print_detail_task_stall_rnp()
832 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_exp_print_detail_task_stall_rnp()
835 t = list_entry(rnp->exp_tasks->prev, in rcu_exp_print_detail_task_stall_rnp()
837 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { in rcu_exp_print_detail_task_stall_rnp()
845 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_exp_print_detail_task_stall_rnp()
864 struct rcu_node *rnp = rdp->mynode; in rcu_exp_handler() local
867 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) || in rcu_exp_handler()
885 struct rcu_node *rnp; in sync_sched_exp_online_cleanup() local
888 rnp = rdp->mynode; in sync_sched_exp_online_cleanup()
891 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) || in sync_sched_exp_online_cleanup()
915 static int rcu_print_task_exp_stall(struct rcu_node *rnp) in rcu_print_task_exp_stall() argument
925 static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp) in rcu_exp_print_detail_task_stall_rnp() argument
956 struct rcu_node *rnp; in synchronize_rcu_expedited() local
1003 rnp = rcu_get_root(); in synchronize_rcu_expedited()
1004 wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3], in synchronize_rcu_expedited()
1024 struct rcu_node *rnp = container_of(wp, struct rcu_node, exp_poll_wq); in sync_rcu_do_polled_gp() local
1027 raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags); in sync_rcu_do_polled_gp()
1028 s = rnp->exp_seq_poll_rq; in sync_rcu_do_polled_gp()
1029 rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED; in sync_rcu_do_polled_gp()
1030 raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags); in sync_rcu_do_polled_gp()
1039 raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags); in sync_rcu_do_polled_gp()
1040 s = rnp->exp_seq_poll_rq; in sync_rcu_do_polled_gp()
1042 rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED; in sync_rcu_do_polled_gp()
1043 raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags); in sync_rcu_do_polled_gp()
1059 struct rcu_node *rnp; in start_poll_synchronize_rcu_expedited() local
1064 rnp = rdp->mynode; in start_poll_synchronize_rcu_expedited()
1066 raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags); in start_poll_synchronize_rcu_expedited()
1068 rnp->exp_seq_poll_rq = s; in start_poll_synchronize_rcu_expedited()
1070 queue_work(rcu_gp_wq, &rnp->exp_poll_wq); in start_poll_synchronize_rcu_expedited()
1073 raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags); in start_poll_synchronize_rcu_expedited()