Lines matching refs: rdp (kernel/rcu/tree.c)
154 static void rcu_report_exp_rdp(struct rcu_data *rdp);
156 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
157 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
158 static bool rcu_rdp_cpu_online(struct rcu_data *rdp);
243 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_get_n_cbs_cpu() local
245 if (rcu_segcblist_is_enabled(&rdp->cblist)) in rcu_get_n_cbs_cpu()
246 return rcu_segcblist_n_cbs(&rdp->cblist); in rcu_get_n_cbs_cpu()
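
The three matches above are nearly the whole of rcu_get_n_cbs_cpu(). Reconstructed for context (the zero fallback for a disabled ->cblist is filled in from the upstream source, so treat it as an assumption about the exact revision being browsed):

static long rcu_get_n_cbs_cpu(int cpu)
{
        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

        /* The cblist is disabled once rcutree_migrate_callbacks()
         * (line 5249 below) empties a dead CPU's list. */
        if (rcu_segcblist_is_enabled(&rdp->cblist))
                return rcu_segcblist_n_cbs(&rdp->cblist);
        return 0;
}
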
324 static bool rcu_watching_snap_stopped_since(struct rcu_data *rdp, int snap) in rcu_watching_snap_stopped_since() argument
337 return snap != ct_rcu_watching_cpu_acquire(rdp->cpu); in rcu_watching_snap_stopped_since()
520 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
586 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_irq_work_resched() local
595 if (do_nocb_deferred_wakeup(rdp) && need_resched()) { in rcu_irq_work_resched()
649 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in __rcu_irq_enter_check_tick() local
658 if (!tick_nohz_full_cpu(rdp->cpu) || in __rcu_irq_enter_check_tick()
659 !READ_ONCE(rdp->rcu_urgent_qs) || in __rcu_irq_enter_check_tick()
660 READ_ONCE(rdp->rcu_forced_tick)) { in __rcu_irq_enter_check_tick()
672 raw_spin_lock_rcu_node(rdp->mynode); in __rcu_irq_enter_check_tick()
673 if (READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) { in __rcu_irq_enter_check_tick()
676 WRITE_ONCE(rdp->rcu_forced_tick, true); in __rcu_irq_enter_check_tick()
677 tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU); in __rcu_irq_enter_check_tick()
679 raw_spin_unlock_rcu_node(rdp->mynode); in __rcu_irq_enter_check_tick()
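
Assembled from the fragments above, __rcu_irq_enter_check_tick() is a textbook double-checked update (sketch only; the real function's intermediate lockdep checks are elided):

        /* Lockless fast path: skip CPUs that are not nohz_full, are not
         * urgently needed for a quiescent state, or that already have
         * the tick forced on. */
        if (!tick_nohz_full_cpu(rdp->cpu) ||
            !READ_ONCE(rdp->rcu_urgent_qs) ||
            READ_ONCE(rdp->rcu_forced_tick))
                return;

        /* Re-test under the leaf rcu_node lock before committing, so
         * two racing irq entries cannot set the dependency twice. */
        raw_spin_lock_rcu_node(rdp->mynode);
        if (READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) {
                WRITE_ONCE(rdp->rcu_forced_tick, true);
                tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
        }
        raw_spin_unlock_rcu_node(rdp->mynode);
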
706 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp) in rcu_disable_urgency_upon_qs() argument
708 raw_lockdep_assert_held_rcu_node(rdp->mynode); in rcu_disable_urgency_upon_qs()
709 WRITE_ONCE(rdp->rcu_urgent_qs, false); in rcu_disable_urgency_upon_qs()
710 WRITE_ONCE(rdp->rcu_need_heavy_qs, false); in rcu_disable_urgency_upon_qs()
711 if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) { in rcu_disable_urgency_upon_qs()
712 tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU); in rcu_disable_urgency_upon_qs()
713 WRITE_ONCE(rdp->rcu_forced_tick, false); in rcu_disable_urgency_upon_qs()
768 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_gpnum_ovf() argument
771 if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4, in rcu_gpnum_ovf()
773 WRITE_ONCE(rdp->gpwrap, true); in rcu_gpnum_ovf()
774 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq)) in rcu_gpnum_ovf()
775 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4; in rcu_gpnum_ovf()
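
rcu_gpnum_ovf() guards against ->gp_seq counter wrap using the kernel's modular comparison macro (defined in the RCU headers; the exact definition below is quoted from memory and should be checked against the revision being browsed). A minimal userspace demo of the idiom:

#include <limits.h>
#include <stdio.h>

/* Wraparound-safe "a < b" for unsigned long sequence counters. */
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))

int main(void)
{
        unsigned long rdp_seq = 10;     /* stale per-CPU snapshot */
        unsigned long rnp_seq = rdp_seq + ULONG_MAX / 4 + 1;

        /* Mirrors line 771: once the node's gp_seq is more than a
         * quarter of the counter space ahead, flag rdp->gpwrap. */
        printf("%d\n", ULONG_CMP_LT(rdp_seq + ULONG_MAX / 4, rnp_seq));
        return 0;       /* prints 1 */
}
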
783 static int rcu_watching_snap_save(struct rcu_data *rdp) in rcu_watching_snap_save() argument
796 rdp->watching_snap = ct_rcu_watching_cpu_acquire(rdp->cpu); in rcu_watching_snap_save()
797 if (rcu_watching_snap_in_eqs(rdp->watching_snap)) { in rcu_watching_snap_save()
798 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); in rcu_watching_snap_save()
799 rcu_gpnum_ovf(rdp->mynode, rdp); in rcu_watching_snap_save()
815 static int rcu_watching_snap_recheck(struct rcu_data *rdp) in rcu_watching_snap_recheck() argument
819 struct rcu_node *rnp = rdp->mynode; in rcu_watching_snap_recheck()
829 if (rcu_watching_snap_stopped_since(rdp, rdp->watching_snap)) { in rcu_watching_snap_recheck()
830 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); in rcu_watching_snap_recheck()
831 rcu_gpnum_ovf(rnp, rdp); in rcu_watching_snap_recheck()
853 if (WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp))) { in rcu_watching_snap_recheck()
863 __func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)], in rcu_watching_snap_recheck()
864 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_state, in rcu_watching_snap_recheck()
865 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_state); in rcu_watching_snap_recheck()
881 if (!READ_ONCE(rdp->rcu_need_heavy_qs) && in rcu_watching_snap_recheck()
885 WRITE_ONCE(rdp->rcu_need_heavy_qs, true); in rcu_watching_snap_recheck()
887 smp_store_release(&rdp->rcu_urgent_qs, true); in rcu_watching_snap_recheck()
889 WRITE_ONCE(rdp->rcu_urgent_qs, true); in rcu_watching_snap_recheck()
900 if (tick_nohz_full_cpu(rdp->cpu) && in rcu_watching_snap_recheck()
901 (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) || in rcu_watching_snap_recheck()
903 WRITE_ONCE(rdp->rcu_urgent_qs, true); in rcu_watching_snap_recheck()
904 WRITE_ONCE(rdp->last_fqs_resched, jiffies); in rcu_watching_snap_recheck()
917 READ_ONCE(rdp->last_fqs_resched) + jtsq)) { in rcu_watching_snap_recheck()
918 WRITE_ONCE(rdp->last_fqs_resched, jiffies); in rcu_watching_snap_recheck()
922 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && in rcu_watching_snap_recheck()
923 (rnp->ffmask & rdp->grpmask)) { in rcu_watching_snap_recheck()
924 rdp->rcu_iw_pending = true; in rcu_watching_snap_recheck()
925 rdp->rcu_iw_gp_seq = rnp->gp_seq; in rcu_watching_snap_recheck()
926 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu); in rcu_watching_snap_recheck()
929 if (rcu_cpu_stall_cputime && rdp->snap_record.gp_seq != rdp->gp_seq) { in rcu_watching_snap_recheck()
930 int cpu = rdp->cpu; in rcu_watching_snap_recheck()
936 rsrp = &rdp->snap_record; in rcu_watching_snap_recheck()
940 rsrp->nr_hardirqs = kstat_cpu_irqs_sum(rdp->cpu); in rcu_watching_snap_recheck()
941 rsrp->nr_softirqs = kstat_cpu_softirqs_sum(rdp->cpu); in rcu_watching_snap_recheck()
942 rsrp->nr_csw = nr_context_switches_cpu(rdp->cpu); in rcu_watching_snap_recheck()
944 rsrp->gp_seq = rdp->gp_seq; in rcu_watching_snap_recheck()
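
The save/recheck pair above (rcu_watching_snap_save() and rcu_watching_snap_recheck()) boils down to: snapshot a per-CPU context-tracking counter, and treat any later change as proof that the CPU passed through an extended quiescent state (EQS). A toy userspace analogue, with the counter convention stated as an assumption of the model rather than a claim about the kernel's encoding:

#include <stdbool.h>

#define TOY_NR_CPUS 4

/* Bumped on every EQS entry and exit; in this toy, an odd value
 * means the CPU is currently inside an EQS. */
static unsigned long eqs_count[TOY_NR_CPUS];

static unsigned long snap_save(int cpu)  /* rcu_watching_snap_save() analogue */
{
        return eqs_count[cpu];
}

static bool stopped_since(int cpu, unsigned long snap)
{
        /* Mirrors line 337: any movement since the snapshot means at
         * least one quiescent state was passed through on this CPU. */
        return snap != eqs_count[cpu];
}
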
952 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp, in trace_rcu_this_gp() argument
976 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp, in rcu_start_this_gp() argument
992 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf")); in rcu_start_this_gp()
1000 trace_rcu_this_gp(rnp, rdp, gp_seq_req, in rcu_start_this_gp()
1012 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, in rcu_start_this_gp()
1024 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot")); in rcu_start_this_gp()
1027 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot")); in rcu_start_this_gp()
1031 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread")); in rcu_start_this_gp()
1040 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); in rcu_start_this_gp()
1054 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_future_gp_cleanup() local
1059 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq, in rcu_future_gp_cleanup()
1135 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_accelerate_cbs() argument
1140 rcu_lockdep_assert_cblist_protected(rdp); in rcu_accelerate_cbs()
1144 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) in rcu_accelerate_cbs()
1147 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc")); in rcu_accelerate_cbs()
1160 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req)) in rcu_accelerate_cbs()
1161 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req); in rcu_accelerate_cbs()
1164 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL)) in rcu_accelerate_cbs()
1169 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc")); in rcu_accelerate_cbs()
1182 struct rcu_data *rdp) in rcu_accelerate_cbs_unlocked() argument
1187 rcu_lockdep_assert_cblist_protected(rdp); in rcu_accelerate_cbs_unlocked()
1189 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) { in rcu_accelerate_cbs_unlocked()
1191 (void)rcu_segcblist_accelerate(&rdp->cblist, c); in rcu_accelerate_cbs_unlocked()
1195 needwake = rcu_accelerate_cbs(rnp, rdp); in rcu_accelerate_cbs_unlocked()
1211 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_advance_cbs() argument
1213 rcu_lockdep_assert_cblist_protected(rdp); in rcu_advance_cbs()
1217 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) in rcu_advance_cbs()
1224 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq); in rcu_advance_cbs()
1227 return rcu_accelerate_cbs(rnp, rdp); in rcu_advance_cbs()
1235 struct rcu_data *rdp) in rcu_advance_cbs_nowake() argument
1237 rcu_lockdep_assert_cblist_protected(rdp); in rcu_advance_cbs_nowake()
1242 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp)); in rcu_advance_cbs_nowake()
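
The accelerate/advance helpers above manipulate the four segments of rdp->cblist; three of the segment names (RCU_DONE_TAIL, RCU_WAIT_TAIL, RCU_NEXT_READY_TAIL) appear elsewhere in this listing. The layout, for orientation:

/* Segments of an rcu_segcblist, oldest callbacks first:
 *
 *   RCU_DONE_TAIL        grace period ended; ready for rcu_do_batch()
 *   RCU_WAIT_TAIL        waiting on the current grace period
 *   RCU_NEXT_READY_TAIL  waiting on the next grace period
 *   RCU_NEXT_TAIL        not yet assigned a grace period
 *
 * rcu_segcblist_advance() slides completed segments into
 * RCU_DONE_TAIL as rnp->gp_seq moves on; rcu_segcblist_accelerate()
 * stamps the unassigned tail with the earliest gp_seq that can
 * possibly cover it, which is why it may need to start a new grace
 * period via rcu_start_this_gp(). */
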
1265 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) in __note_gp_changes() argument
1269 const bool offloaded = rcu_rdp_is_offloaded(rdp); in __note_gp_changes()
1273 if (rdp->gp_seq == rnp->gp_seq) in __note_gp_changes()
1277 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1278 unlikely(READ_ONCE(rdp->gpwrap))) { in __note_gp_changes()
1280 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */ in __note_gp_changes()
1281 rdp->core_needs_qs = false; in __note_gp_changes()
1282 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend")); in __note_gp_changes()
1285 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */ in __note_gp_changes()
1286 if (rdp->core_needs_qs) in __note_gp_changes()
1287 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1291 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1292 unlikely(READ_ONCE(rdp->gpwrap))) { in __note_gp_changes()
1299 need_qs = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1300 rdp->cpu_no_qs.b.norm = need_qs; in __note_gp_changes()
1301 rdp->core_needs_qs = need_qs; in __note_gp_changes()
1302 zero_cpu_stall_ticks(rdp); in __note_gp_changes()
1304 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ in __note_gp_changes()
1305 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap) in __note_gp_changes()
1306 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); in __note_gp_changes()
1307 if (IS_ENABLED(CONFIG_PROVE_RCU) && READ_ONCE(rdp->gpwrap)) in __note_gp_changes()
1308 WRITE_ONCE(rdp->last_sched_clock, jiffies); in __note_gp_changes()
1309 WRITE_ONCE(rdp->gpwrap, false); in __note_gp_changes()
1310 rcu_gpnum_ovf(rnp, rdp); in __note_gp_changes()
1314 static void note_gp_changes(struct rcu_data *rdp) in note_gp_changes() argument
1321 rnp = rdp->mynode; in note_gp_changes()
1322 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) && in note_gp_changes()
1323 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */ in note_gp_changes()
1328 needwake = __note_gp_changes(rnp, rdp); in note_gp_changes()
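
note_gp_changes() wraps __note_gp_changes() in a lockless short-circuit plus trylock, visible in lines 1322-1323. A sketch of the full shape (reconstructed; the trylock fallback and the wakeup are filled in from the upstream source):

        local_irq_save(flags);
        rnp = rdp->mynode;
        if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
             !unlikely(READ_ONCE(rdp->gpwrap))) ||      /* w/out lock */
            !raw_spin_trylock_rcu_node(rnp)) {
                local_irq_restore(flags);
                return;         /* nothing new, or retry on a later pass */
        }
        needwake = __note_gp_changes(rnp, rdp);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        if (needwake)
                rcu_gp_kthread_wake();
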
1801 struct rcu_data *rdp; in rcu_gp_init() local
1923 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_init()
1927 if (rnp == rdp->mynode) in rcu_gp_init()
1928 (void)__note_gp_changes(rnp, rdp); in rcu_gp_init()
2107 struct rcu_data *rdp; in rcu_gp_cleanup() local
2148 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_cleanup()
2149 if (rnp == rdp->mynode) in rcu_gp_cleanup()
2150 needgp = __note_gp_changes(rnp, rdp) || needgp; in rcu_gp_cleanup()
2156 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_gp_cleanup()
2157 check_cb_ovld_locked(rdp, rnp); in rcu_gp_cleanup()
2175 rdp = this_cpu_ptr(&rcu_data); in rcu_gp_cleanup()
2177 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed, in rcu_gp_cleanup()
2182 offloaded = rcu_rdp_is_offloaded(rdp); in rcu_gp_cleanup()
2183 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) { in rcu_gp_cleanup()
2393 rcu_report_qs_rdp(struct rcu_data *rdp) in rcu_report_qs_rdp() argument
2399 WARN_ON_ONCE(rdp->cpu != smp_processor_id()); in rcu_report_qs_rdp()
2400 rnp = rdp->mynode; in rcu_report_qs_rdp()
2402 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq || in rcu_report_qs_rdp()
2403 rdp->gpwrap) { in rcu_report_qs_rdp()
2411 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */ in rcu_report_qs_rdp()
2415 mask = rdp->grpmask; in rcu_report_qs_rdp()
2416 rdp->core_needs_qs = false; in rcu_report_qs_rdp()
2426 if (!rcu_rdp_is_offloaded(rdp)) { in rcu_report_qs_rdp()
2432 WARN_ON_ONCE(rcu_accelerate_cbs(rnp, rdp)); in rcu_report_qs_rdp()
2435 rcu_disable_urgency_upon_qs(rdp); in rcu_report_qs_rdp()
2448 rcu_check_quiescent_state(struct rcu_data *rdp) in rcu_check_quiescent_state() argument
2451 note_gp_changes(rdp); in rcu_check_quiescent_state()
2457 if (!rdp->core_needs_qs) in rcu_check_quiescent_state()
2464 if (rdp->cpu_no_qs.b.norm) in rcu_check_quiescent_state()
2471 rcu_report_qs_rdp(rdp); in rcu_check_quiescent_state()
2490 static void rcu_do_batch(struct rcu_data *rdp) in rcu_do_batch() argument
2505 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) { in rcu_do_batch()
2507 rcu_segcblist_n_cbs(&rdp->cblist), 0); in rcu_do_batch()
2509 !rcu_segcblist_empty(&rdp->cblist), in rcu_do_batch()
2511 rcu_is_callbacks_kthread(rdp)); in rcu_do_batch()
2526 rcu_nocb_lock_irqsave(rdp, flags); in rcu_do_batch()
2528 pending = rcu_segcblist_get_seglen(&rdp->cblist, RCU_DONE_TAIL); in rcu_do_batch()
2531 bl = max(rdp->blimit, pending >> div); in rcu_do_batch()
2532 if ((in_serving_softirq() || rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING) && in rcu_do_batch()
2543 rcu_segcblist_n_cbs(&rdp->cblist), bl); in rcu_do_batch()
2544 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl); in rcu_do_batch()
2545 if (rcu_rdp_is_offloaded(rdp)) in rcu_do_batch()
2546 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); in rcu_do_batch()
2548 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued")); in rcu_do_batch()
2549 rcu_nocb_unlock_irqrestore(rdp, flags); in rcu_do_batch()
2593 if (rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING && in rcu_do_batch()
2595 rdp->rcu_cpu_has_work = 1; in rcu_do_batch()
2601 rcu_nocb_lock_irqsave(rdp, flags); in rcu_do_batch()
2602 rdp->n_cbs_invoked += count; in rcu_do_batch()
2604 is_idle_task(current), rcu_is_callbacks_kthread(rdp)); in rcu_do_batch()
2607 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); in rcu_do_batch()
2608 rcu_segcblist_add_len(&rdp->cblist, -count); in rcu_do_batch()
2611 count = rcu_segcblist_n_cbs(&rdp->cblist); in rcu_do_batch()
2612 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark) in rcu_do_batch()
2613 rdp->blimit = blimit; in rcu_do_batch()
2616 if (count == 0 && rdp->qlen_last_fqs_check != 0) { in rcu_do_batch()
2617 rdp->qlen_last_fqs_check = 0; in rcu_do_batch()
2618 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); in rcu_do_batch()
2619 } else if (count < rdp->qlen_last_fqs_check - qhimark) in rcu_do_batch()
2620 rdp->qlen_last_fqs_check = count; in rcu_do_batch()
2626 empty = rcu_segcblist_empty(&rdp->cblist); in rcu_do_batch()
2630 WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0); in rcu_do_batch()
2631 WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0); in rcu_do_batch()
2633 rcu_nocb_unlock_irqrestore(rdp, flags); in rcu_do_batch()
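
Line 2531 sets the per-invocation batch limit to max(rdp->blimit, pending >> div). A worked example (the defaults blimit = 10 and rcu_divisor = 7 are assumptions; both are module parameters):

        long blimit = 10, div = 7, pending = 4096;
        long bl = blimit > (pending >> div) ? blimit : (pending >> div);
        /* pending >> 7 == 32, so bl == 32: a deep backlog scales the
         * batch up, while a shallow one keeps the 10-callback floor
         * that bounds softirq latency. */
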
2684 static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) in force_qs_rnp() argument
2714 struct rcu_data *rdp; in force_qs_rnp() local
2717 rdp = per_cpu_ptr(&rcu_data, cpu); in force_qs_rnp()
2718 ret = f(rdp); in force_qs_rnp()
2720 mask |= rdp->grpmask; in force_qs_rnp()
2721 rcu_disable_urgency_upon_qs(rdp); in force_qs_rnp()
2724 rsmask |= rdp->grpmask; in force_qs_rnp()
2790 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); in rcu_core() local
2791 struct rcu_node *rnp = rdp->mynode; in rcu_core()
2796 WARN_ON_ONCE(!rdp->beenonline); in rcu_core()
2807 rcu_check_quiescent_state(rdp); in rcu_core()
2811 rcu_segcblist_is_enabled(&rdp->cblist) && !rcu_rdp_is_offloaded(rdp)) { in rcu_core()
2813 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) in rcu_core()
2814 rcu_accelerate_cbs_unlocked(rnp, rdp); in rcu_core()
2818 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check()); in rcu_core()
2821 if (!rcu_rdp_is_offloaded(rdp) && rcu_segcblist_ready_cbs(&rdp->cblist) && in rcu_core()
2823 rcu_do_batch(rdp); in rcu_core()
2825 if (rcu_segcblist_ready_cbs(&rdp->cblist)) in rcu_core()
2830 do_nocb_deferred_wakeup(rdp); in rcu_core()
2835 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work); in rcu_core()
2952 static void rcutree_enqueue(struct rcu_data *rdp, struct rcu_head *head, rcu_callback_t func) in rcutree_enqueue() argument
2954 rcu_segcblist_enqueue(&rdp->cblist, head); in rcutree_enqueue()
2958 rcu_segcblist_n_cbs(&rdp->cblist)); in rcutree_enqueue()
2961 rcu_segcblist_n_cbs(&rdp->cblist)); in rcutree_enqueue()
2962 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued")); in rcutree_enqueue()
2968 static void call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, in call_rcu_core() argument
2971 rcutree_enqueue(rdp, head, func); in call_rcu_core()
2990 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) > in call_rcu_core()
2991 rdp->qlen_last_fqs_check + qhimark)) { in call_rcu_core()
2994 note_gp_changes(rdp); in call_rcu_core()
2998 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp); in call_rcu_core()
3001 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT; in call_rcu_core()
3002 if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap && in call_rcu_core()
3003 rcu_segcblist_first_pend_cb(&rdp->cblist) != head) in call_rcu_core()
3005 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); in call_rcu_core()
3006 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); in call_rcu_core()
3024 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp) in check_cb_ovld_locked() argument
3029 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) in check_cb_ovld_locked()
3030 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask); in check_cb_ovld_locked()
3032 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask); in check_cb_ovld_locked()
3047 static void check_cb_ovld(struct rcu_data *rdp) in check_cb_ovld() argument
3049 struct rcu_node *const rnp = rdp->mynode; in check_cb_ovld()
3052 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) == in check_cb_ovld()
3053 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask))) in check_cb_ovld()
3056 check_cb_ovld_locked(rdp, rnp); in check_cb_ovld()
3066 struct rcu_data *rdp; in __call_rcu_common() local
3088 rdp = this_cpu_ptr(&rcu_data); in __call_rcu_common()
3092 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) { in __call_rcu_common()
3098 if (rcu_segcblist_empty(&rdp->cblist)) in __call_rcu_common()
3099 rcu_segcblist_init(&rdp->cblist); in __call_rcu_common()
3102 check_cb_ovld(rdp); in __call_rcu_common()
3104 if (unlikely(rcu_rdp_is_offloaded(rdp))) in __call_rcu_common()
3105 call_rcu_nocb(rdp, head, func, flags, lazy); in __call_rcu_common()
3107 call_rcu_core(rdp, head, func, flags); in __call_rcu_common()
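
__call_rcu_common() is the funnel under call_rcu(). For orientation, a typical caller looks like this (struct foo and foo_free_cb() are hypothetical names, not from tree.c):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        struct rcu_head rh;
        int data;
};

static void foo_free_cb(struct rcu_head *rhp)
{
        kfree(container_of(rhp, struct foo, rh));
}

/* After unlinking p from every RCU-visible structure: */
static void foo_release(struct foo *p)
{
        call_rcu(&p->rh, foo_free_cb);  /* enqueues onto rdp->cblist */
}
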
4194 struct rcu_data *rdp; in start_poll_synchronize_rcu_common() local
4199 rdp = this_cpu_ptr(&rcu_data); in start_poll_synchronize_rcu_common()
4200 rnp = rdp->mynode; in start_poll_synchronize_rcu_common()
4208 needwake = rcu_start_this_gp(rnp, rdp, rcu_seq_snap(&rcu_state.gp_seq)); in start_poll_synchronize_rcu_common()
4412 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_pending() local
4413 struct rcu_node *rnp = rdp->mynode; in rcu_pending()
4418 check_cpu_stall(rdp); in rcu_pending()
4421 if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE)) in rcu_pending()
4434 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress) in rcu_pending()
4438 if (!rcu_rdp_is_offloaded(rdp) && in rcu_pending()
4439 rcu_segcblist_ready_cbs(&rdp->cblist)) in rcu_pending()
4443 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) && in rcu_pending()
4444 !rcu_rdp_is_offloaded(rdp) && in rcu_pending()
4445 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) in rcu_pending()
4449 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq || in rcu_pending()
4450 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */ in rcu_pending()
4493 static void rcu_barrier_entrain(struct rcu_data *rdp) in rcu_barrier_entrain() argument
4496 unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap); in rcu_barrier_entrain()
4504 rdp->barrier_head.func = rcu_barrier_callback; in rcu_barrier_entrain()
4505 debug_rcu_head_queue(&rdp->barrier_head); in rcu_barrier_entrain()
4506 rcu_nocb_lock(rdp); in rcu_barrier_entrain()
4512 was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_barrier_entrain()
4513 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false)); in rcu_barrier_entrain()
4514 wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist); in rcu_barrier_entrain()
4515 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) { in rcu_barrier_entrain()
4518 debug_rcu_head_unqueue(&rdp->barrier_head); in rcu_barrier_entrain()
4521 rcu_nocb_unlock(rdp); in rcu_barrier_entrain()
4523 wake_nocb_gp(rdp, false); in rcu_barrier_entrain()
4524 smp_store_release(&rdp->barrier_seq_snap, gseq); in rcu_barrier_entrain()
4533 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_barrier_handler() local
4536 WARN_ON_ONCE(cpu != rdp->cpu); in rcu_barrier_handler()
4539 rcu_barrier_entrain(rdp); in rcu_barrier_handler()
4556 struct rcu_data *rdp; in rcu_barrier() local
4595 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_barrier()
4597 if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq) in rcu_barrier()
4600 if (!rcu_segcblist_n_cbs(&rdp->cblist)) { in rcu_barrier()
4601 WRITE_ONCE(rdp->barrier_seq_snap, gseq); in rcu_barrier()
4606 if (!rcu_rdp_cpu_online(rdp)) { in rcu_barrier()
4607 rcu_barrier_entrain(rdp); in rcu_barrier()
4608 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq); in rcu_barrier()
4618 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq); in rcu_barrier()
4637 rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_barrier()
4639 WRITE_ONCE(rdp->barrier_seq_snap, gseq); in rcu_barrier()
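
Note the ordering pair in this block: the smp_store_release() of ->barrier_seq_snap at line 4524 publishes the entrained barrier callback, and the smp_load_acquire() at line 4597 lets rcu_barrier() skip a CPU locklessly once its snapshot matches the current barrier sequence. Generic shape of the pattern (sketch; the helper name is a placeholder):

        /* writer side, rcu_barrier_entrain() analogue: */
        entrain_barrier_callback(rdp);                  /* all of this ... */
        smp_store_release(&rdp->barrier_seq_snap, gseq);/* ... is published */

        /* reader side, rcu_barrier() fast-path analogue: */
        if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq)
                continue;       /* this CPU already handled this round */
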
4739 static bool rcu_rdp_cpu_online(struct rcu_data *rdp) in rcu_rdp_cpu_online() argument
4741 return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode)); in rcu_rdp_cpu_online()
4746 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_cpu_online() local
4748 return rcu_rdp_cpu_online(rdp); in rcu_cpu_online()
4768 struct rcu_data *rdp; in rcu_lockdep_current_cpu_online() local
4774 rdp = this_cpu_ptr(&rcu_data); in rcu_lockdep_current_cpu_online()
4782 if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock)) in rcu_lockdep_current_cpu_online()
4878 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_boot_init_percpu_data() local
4881 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu); in rcu_boot_init_percpu_data()
4882 INIT_WORK(&rdp->strict_work, strict_work_handler); in rcu_boot_init_percpu_data()
4885 rdp->barrier_seq_snap = rcu_state.barrier_sequence; in rcu_boot_init_percpu_data()
4886 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq; in rcu_boot_init_percpu_data()
4887 rdp->rcu_ofl_gp_state = RCU_GP_CLEANED; in rcu_boot_init_percpu_data()
4888 rdp->rcu_onl_gp_seq = rcu_state.gp_seq; in rcu_boot_init_percpu_data()
4889 rdp->rcu_onl_gp_state = RCU_GP_CLEANED; in rcu_boot_init_percpu_data()
4890 rdp->last_sched_clock = jiffies; in rcu_boot_init_percpu_data()
4891 rdp->cpu = cpu; in rcu_boot_init_percpu_data()
4892 rcu_boot_init_nocb_percpu_data(rdp); in rcu_boot_init_percpu_data()
4969 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_prepare_cpu() local
4974 rdp->qlen_last_fqs_check = 0; in rcutree_prepare_cpu()
4975 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); in rcutree_prepare_cpu()
4976 rdp->blimit = blimit; in rcutree_prepare_cpu()
4984 if (!rcu_segcblist_is_enabled(&rdp->cblist)) in rcutree_prepare_cpu()
4985 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */ in rcutree_prepare_cpu()
4992 rnp = rdp->mynode; in rcutree_prepare_cpu()
4994 rdp->gp_seq = READ_ONCE(rnp->gp_seq); in rcutree_prepare_cpu()
4995 rdp->gp_seq_needed = rdp->gp_seq; in rcutree_prepare_cpu()
4996 rdp->cpu_no_qs.b.norm = true; in rcutree_prepare_cpu()
4997 rdp->core_needs_qs = false; in rcutree_prepare_cpu()
4998 rdp->rcu_iw_pending = false; in rcutree_prepare_cpu()
4999 rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler); in rcutree_prepare_cpu()
5000 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1; in rcutree_prepare_cpu()
5001 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); in rcutree_prepare_cpu()
5028 struct rcu_data *rdp; in rcutree_affinity_setting() local
5032 rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_affinity_setting()
5033 rnp = rdp->mynode; in rcutree_affinity_setting()
5077 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcu_cpu_beenfullyonline() local
5079 return smp_load_acquire(&rdp->beenonline); in rcu_cpu_beenfullyonline()
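
The same release/acquire publication idiom appears here: rcutree_report_cpu_starting() sets ->beenonline with smp_store_release() (line 5164 below), so a true return from this smp_load_acquire() guarantees that all of the incoming CPU's RCU initialization is visible to the caller.
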
5089 struct rcu_data *rdp; in rcutree_online_cpu() local
5092 rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_online_cpu()
5093 rnp = rdp->mynode; in rcutree_online_cpu()
5095 rnp->ffmask |= rdp->grpmask; in rcutree_online_cpu()
5124 struct rcu_data *rdp; in rcutree_report_cpu_starting() local
5129 rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_report_cpu_starting()
5130 if (rdp->cpu_started) in rcutree_report_cpu_starting()
5132 rdp->cpu_started = true; in rcutree_report_cpu_starting()
5134 rnp = rdp->mynode; in rcutree_report_cpu_starting()
5135 mask = rdp->grpmask; in rcutree_report_cpu_starting()
5147 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */ in rcutree_report_cpu_starting()
5148 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq); in rcutree_report_cpu_starting()
5149 rdp->rcu_onl_gp_state = READ_ONCE(rcu_state.gp_state); in rcutree_report_cpu_starting()
5157 rcu_disable_urgency_upon_qs(rdp); in rcutree_report_cpu_starting()
5164 smp_store_release(&rdp->beenonline, true); in rcutree_report_cpu_starting()
5182 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcutree_report_cpu_dead() local
5183 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcutree_report_cpu_dead()
5191 do_nocb_deferred_wakeup(rdp); in rcutree_report_cpu_dead()
5196 mask = rdp->grpmask; in rcutree_report_cpu_dead()
5199 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq); in rcutree_report_cpu_dead()
5200 rdp->rcu_ofl_gp_state = READ_ONCE(rcu_state.gp_state); in rcutree_report_cpu_dead()
5203 rcu_disable_urgency_upon_qs(rdp); in rcutree_report_cpu_dead()
5210 rdp->cpu_started = false; in rcutree_report_cpu_dead()
5224 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_migrate_callbacks() local
5227 if (rcu_rdp_is_offloaded(rdp)) in rcutree_migrate_callbacks()
5231 if (rcu_segcblist_empty(&rdp->cblist)) { in rcutree_migrate_callbacks()
5236 WARN_ON_ONCE(rcu_rdp_cpu_online(rdp)); in rcutree_migrate_callbacks()
5237 rcu_barrier_entrain(rdp); in rcutree_migrate_callbacks()
5244 needwake = rcu_advance_cbs(my_rnp, rdp) || in rcutree_migrate_callbacks()
5246 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist); in rcutree_migrate_callbacks()
5249 rcu_segcblist_disable(&rdp->cblist); in rcutree_migrate_callbacks()
5263 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 || in rcutree_migrate_callbacks()
5264 !rcu_segcblist_empty(&rdp->cblist), in rcutree_migrate_callbacks()
5266 cpu, rcu_segcblist_n_cbs(&rdp->cblist), in rcutree_migrate_callbacks()
5267 rcu_segcblist_first_cb(&rdp->cblist)); in rcutree_migrate_callbacks()
5292 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_dying_cpu() local
5293 struct rcu_node *rnp = rdp->mynode; in rcutree_dying_cpu()
5295 blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask); in rcutree_dying_cpu()
5308 struct rcu_data *rdp; in rcutree_offline_cpu() local
5311 rdp = per_cpu_ptr(&rcu_data, cpu); in rcutree_offline_cpu()
5312 rnp = rdp->mynode; in rcutree_offline_cpu()
5314 rnp->ffmask &= ~rdp->grpmask; in rcutree_offline_cpu()
5358 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); in rcu_spawn_gp_kthread() local
5383 rcu_spawn_rnp_kthreads(rdp->mynode); in rcu_spawn_gp_kthread()