Lines matching refs: rnp

145 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
149 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
153 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
219 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp) in rcu_rnp_online_cpus() argument
221 return READ_ONCE(rnp->qsmaskinitnext); in rcu_rnp_online_cpus()
1163 struct rcu_node *rnp; in rcu_lockdep_current_cpu_online() local
1170 rnp = rdp->mynode; in rcu_lockdep_current_cpu_online()
1171 if (rdp->grpmask & rcu_rnp_online_cpus(rnp) || READ_ONCE(rnp->ofl_seq) & 0x1) in rcu_lockdep_current_cpu_online()
1187 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_gpnum_ovf() argument
1189 raw_lockdep_assert_held_rcu_node(rnp); in rcu_gpnum_ovf()
1191 rnp->gp_seq)) in rcu_gpnum_ovf()
1193 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq)) in rcu_gpnum_ovf()
1194 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4; in rcu_gpnum_ovf()
1222 struct rcu_node *rnp = rdp->mynode; in rcu_implicit_dynticks_qs() local
1234 rcu_gpnum_ovf(rnp, rdp); in rcu_implicit_dynticks_qs()
1256 if (WARN_ON_ONCE(!(rdp->grpmask & rcu_rnp_online_cpus(rnp)))) { in rcu_implicit_dynticks_qs()
1261 __func__, rnp->grplo, rnp->grphi, rnp->level, in rcu_implicit_dynticks_qs()
1262 (long)rnp->gp_seq, (long)rnp->completedqs); in rcu_implicit_dynticks_qs()
1263 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent) in rcu_implicit_dynticks_qs()
1266 onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp)); in rcu_implicit_dynticks_qs()
1327 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && in rcu_implicit_dynticks_qs()
1328 (rnp->ffmask & rdp->grpmask)) { in rcu_implicit_dynticks_qs()
1330 rdp->rcu_iw_gp_seq = rnp->gp_seq; in rcu_implicit_dynticks_qs()
1339 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp, in trace_rcu_this_gp() argument
1342 trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq), in trace_rcu_this_gp()
1343 gp_seq_req, rnp->level, in trace_rcu_this_gp()
1344 rnp->grplo, rnp->grphi, s); in trace_rcu_this_gp()
1367 struct rcu_node *rnp; in rcu_start_this_gp() local
1380 for (rnp = rnp_start; 1; rnp = rnp->parent) { in rcu_start_this_gp()
1381 if (rnp != rnp_start) in rcu_start_this_gp()
1382 raw_spin_lock_rcu_node(rnp); in rcu_start_this_gp()
1383 if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) || in rcu_start_this_gp()
1384 rcu_seq_started(&rnp->gp_seq, gp_seq_req) || in rcu_start_this_gp()
1385 (rnp != rnp_start && in rcu_start_this_gp()
1386 rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) { in rcu_start_this_gp()
1387 trace_rcu_this_gp(rnp, rdp, gp_seq_req, in rcu_start_this_gp()
1391 WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req); in rcu_start_this_gp()
1392 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) { in rcu_start_this_gp()
1403 if (rnp != rnp_start && rnp->parent != NULL) in rcu_start_this_gp()
1404 raw_spin_unlock_rcu_node(rnp); in rcu_start_this_gp()
1405 if (!rnp->parent) in rcu_start_this_gp()
1411 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot")); in rcu_start_this_gp()
1414 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot")); in rcu_start_this_gp()
1418 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread")); in rcu_start_this_gp()
1425 if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) { in rcu_start_this_gp()
1426 WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed); in rcu_start_this_gp()
1427 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); in rcu_start_this_gp()
1429 if (rnp != rnp_start) in rcu_start_this_gp()
1430 raw_spin_unlock_rcu_node(rnp); in rcu_start_this_gp()
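
The rcu_start_this_gp() hits above show RCU's funnel-locking walk: the caller holds the leaf rcu_node's lock, each ancestor's lock is taken in turn, the previous level's lock is dropped before climbing, and the climb stops early once some node already records the requested grace period (the kernel additionally keeps the root's lock to decide whether to actually start one). A minimal sketch of the pattern, using a hypothetical funnel_node type rather than the real rcu_node, and a plain >= in place of the wrap-safe ULONG_CMP_GE():

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct funnel_node {
            spinlock_t lock;
            unsigned long seq_needed;       /* highest grace period requested here */
            struct funnel_node *parent;     /* NULL at the root */
    };

    /* Caller holds start->lock, as in rcu_start_this_gp(). */
    static bool funnel_record(struct funnel_node *start, unsigned long req)
    {
            struct funnel_node *np;

            for (np = start; np; np = np->parent) {
                    bool covered;

                    if (np != start)
                            spin_lock(&np->lock);   /* lock this level */
                    covered = np->seq_needed >= req;
                    if (!covered)
                            np->seq_needed = req;   /* record the request */
                    if (np != start)
                            spin_unlock(&np->lock); /* drop before climbing */
                    if (covered)
                            return false;           /* ancestor already covers it */
            }
            return true;    /* newly recorded all the way to the root */
    }
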
1438 static bool rcu_future_gp_cleanup(struct rcu_node *rnp) in rcu_future_gp_cleanup() argument
1443 needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed); in rcu_future_gp_cleanup()
1445 rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */ in rcu_future_gp_cleanup()
1446 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq, in rcu_future_gp_cleanup()
1490 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_accelerate_cbs() argument
1496 raw_lockdep_assert_held_rcu_node(rnp); in rcu_accelerate_cbs()
1516 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req); in rcu_accelerate_cbs()
1536 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp, in rcu_accelerate_cbs_unlocked() argument
1549 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ in rcu_accelerate_cbs_unlocked()
1550 needwake = rcu_accelerate_cbs(rnp, rdp); in rcu_accelerate_cbs_unlocked()
1551 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ in rcu_accelerate_cbs_unlocked()
1566 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp) in rcu_advance_cbs() argument
1569 raw_lockdep_assert_held_rcu_node(rnp); in rcu_advance_cbs()
1579 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq); in rcu_advance_cbs()
1582 return rcu_accelerate_cbs(rnp, rdp); in rcu_advance_cbs()
1589 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp, in rcu_advance_cbs_nowake() argument
1593 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || in rcu_advance_cbs_nowake()
1594 !raw_spin_trylock_rcu_node(rnp)) in rcu_advance_cbs_nowake()
1596 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp)); in rcu_advance_cbs_nowake()
1597 raw_spin_unlock_rcu_node(rnp); in rcu_advance_cbs_nowake()
1619 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) in __note_gp_changes() argument
1625 raw_lockdep_assert_held_rcu_node(rnp); in __note_gp_changes()
1627 if (rdp->gp_seq == rnp->gp_seq) in __note_gp_changes()
1631 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1634 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */ in __note_gp_changes()
1639 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */ in __note_gp_changes()
1641 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1645 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) || in __note_gp_changes()
1652 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart")); in __note_gp_changes()
1653 need_qs = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1658 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ in __note_gp_changes()
1659 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap) in __note_gp_changes()
1660 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); in __note_gp_changes()
1662 rcu_gpnum_ovf(rnp, rdp); in __note_gp_changes()
1670 struct rcu_node *rnp; in note_gp_changes() local
1673 rnp = rdp->mynode; in note_gp_changes()
1674 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) && in note_gp_changes()
1676 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */ in note_gp_changes()
1680 needwake = __note_gp_changes(rnp, rdp); in note_gp_changes()
1681 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in note_gp_changes()
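
Most of the comparisons in the __note_gp_changes() and note_gp_changes() hits above (rcu_seq_current(), rcu_seq_state(), rcu_seq_completed_gp(), ULONG_CMP_LT()) operate on ->gp_seq values, which pack a grace-period counter and an in-progress state into a single unsigned long. A sketch of that encoding, with the shift, mask, and comparison macro assumed to match kernel/rcu/rcu.h and include/linux/rcupdate.h:

    #include <limits.h>

    #define RCU_SEQ_CTR_SHIFT  2
    #define RCU_SEQ_STATE_MASK ((1UL << RCU_SEQ_CTR_SHIFT) - 1)

    /* How many grace periods this sequence value has numbered. */
    static inline unsigned long rcu_seq_ctr(unsigned long s)
    {
            return s >> RCU_SEQ_CTR_SHIFT;
    }

    /* Nonzero while the corresponding grace period is in progress. */
    static inline unsigned long rcu_seq_state(unsigned long s)
    {
            return s & RCU_SEQ_STATE_MASK;
    }

    /* Wraparound-safe "a < b", as used on ->gp_seq and ->gp_seq_needed. */
    #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))

Packing both fields into one word is what lets a CPU detect a newly started or newly completed grace period from a single rdp->gp_seq versus rnp->gp_seq comparison under the rcu_node lock.
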
1739 struct rcu_node *rnp = rcu_get_root(); in rcu_gp_init() local
1742 raw_spin_lock_irq_rcu_node(rnp); in rcu_gp_init()
1745 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_init()
1755 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_init()
1765 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_init()
1777 rcu_for_each_leaf_node(rnp) { in rcu_gp_init()
1781 firstseq = READ_ONCE(rnp->ofl_seq); in rcu_gp_init()
1783 while (firstseq == READ_ONCE(rnp->ofl_seq)) in rcu_gp_init()
1787 raw_spin_lock_irq_rcu_node(rnp); in rcu_gp_init()
1788 if (rnp->qsmaskinit == rnp->qsmaskinitnext && in rcu_gp_init()
1789 !rnp->wait_blkd_tasks) { in rcu_gp_init()
1791 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_init()
1797 oldmask = rnp->qsmaskinit; in rcu_gp_init()
1798 rnp->qsmaskinit = rnp->qsmaskinitnext; in rcu_gp_init()
1801 if (!oldmask != !rnp->qsmaskinit) { in rcu_gp_init()
1803 if (!rnp->wait_blkd_tasks) /* Ever offline? */ in rcu_gp_init()
1804 rcu_init_new_rnp(rnp); in rcu_gp_init()
1805 } else if (rcu_preempt_has_tasks(rnp)) { in rcu_gp_init()
1806 rnp->wait_blkd_tasks = true; /* blocked tasks */ in rcu_gp_init()
1808 rcu_cleanup_dead_rnp(rnp); in rcu_gp_init()
1820 if (rnp->wait_blkd_tasks && in rcu_gp_init()
1821 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) { in rcu_gp_init()
1822 rnp->wait_blkd_tasks = false; in rcu_gp_init()
1823 if (!rnp->qsmaskinit) in rcu_gp_init()
1824 rcu_cleanup_dead_rnp(rnp); in rcu_gp_init()
1827 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_init()
1845 rcu_for_each_node_breadth_first(rnp) { in rcu_gp_init()
1847 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_gp_init()
1849 rcu_preempt_check_blocked_tasks(rnp); in rcu_gp_init()
1850 rnp->qsmask = rnp->qsmaskinit; in rcu_gp_init()
1851 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq); in rcu_gp_init()
1852 if (rnp == rdp->mynode) in rcu_gp_init()
1853 (void)__note_gp_changes(rnp, rdp); in rcu_gp_init()
1854 rcu_preempt_boost_start_gp(rnp); in rcu_gp_init()
1855 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq, in rcu_gp_init()
1856 rnp->level, rnp->grplo, in rcu_gp_init()
1857 rnp->grphi, rnp->qsmask); in rcu_gp_init()
1859 mask = rnp->qsmask & ~rnp->qsmaskinitnext; in rcu_gp_init()
1860 rnp->rcu_gp_init_mask = mask; in rcu_gp_init()
1861 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp)) in rcu_gp_init()
1862 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in rcu_gp_init()
1864 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_init()
1882 struct rcu_node *rnp = rcu_get_root(); in rcu_gp_fqs_check_wake() local
1894 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp)) in rcu_gp_fqs_check_wake()
1905 struct rcu_node *rnp = rcu_get_root(); in rcu_gp_fqs() local
1918 raw_spin_lock_irq_rcu_node(rnp); in rcu_gp_fqs()
1921 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_fqs()
1934 struct rcu_node *rnp = rcu_get_root(); in rcu_gp_fqs_loop() local
1961 if (!READ_ONCE(rnp->qsmask) && in rcu_gp_fqs_loop()
1962 !rcu_preempt_blocked_readers_cgp(rnp)) in rcu_gp_fqs_loop()
2010 struct rcu_node *rnp = rcu_get_root(); in rcu_gp_cleanup() local
2014 raw_spin_lock_irq_rcu_node(rnp); in rcu_gp_cleanup()
2028 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_cleanup()
2041 rcu_for_each_node_breadth_first(rnp) { in rcu_gp_cleanup()
2042 raw_spin_lock_irq_rcu_node(rnp); in rcu_gp_cleanup()
2043 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp))) in rcu_gp_cleanup()
2044 dump_blkd_tasks(rnp, 10); in rcu_gp_cleanup()
2045 WARN_ON_ONCE(rnp->qsmask); in rcu_gp_cleanup()
2046 WRITE_ONCE(rnp->gp_seq, new_gp_seq); in rcu_gp_cleanup()
2048 if (rnp == rdp->mynode) in rcu_gp_cleanup()
2049 needgp = __note_gp_changes(rnp, rdp) || needgp; in rcu_gp_cleanup()
2051 needgp = rcu_future_gp_cleanup(rnp) || needgp; in rcu_gp_cleanup()
2053 if (rcu_is_leaf_node(rnp)) in rcu_gp_cleanup()
2054 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) { in rcu_gp_cleanup()
2056 check_cb_ovld_locked(rdp, rnp); in rcu_gp_cleanup()
2058 sq = rcu_nocb_gp_get(rnp); in rcu_gp_cleanup()
2059 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_cleanup()
2065 rnp = rcu_get_root(); in rcu_gp_cleanup()
2066 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */ in rcu_gp_cleanup()
2075 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) { in rcu_gp_cleanup()
2076 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed, in rcu_gp_cleanup()
2082 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) { in rcu_gp_cleanup()
2092 raw_spin_unlock_irq_rcu_node(rnp); in rcu_gp_cleanup()
2171 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, in rcu_report_qs_rnp() argument
2173 __releases(rnp->lock) in rcu_report_qs_rnp()
2178 raw_lockdep_assert_held_rcu_node(rnp); in rcu_report_qs_rnp()
2182 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) { in rcu_report_qs_rnp()
2188 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_qs_rnp()
2192 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) && in rcu_report_qs_rnp()
2193 rcu_preempt_blocked_readers_cgp(rnp)); in rcu_report_qs_rnp()
2194 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask); in rcu_report_qs_rnp()
2195 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq, in rcu_report_qs_rnp()
2196 mask, rnp->qsmask, rnp->level, in rcu_report_qs_rnp()
2197 rnp->grplo, rnp->grphi, in rcu_report_qs_rnp()
2198 !!rnp->gp_tasks); in rcu_report_qs_rnp()
2199 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { in rcu_report_qs_rnp()
2202 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_qs_rnp()
2205 rnp->completedqs = rnp->gp_seq; in rcu_report_qs_rnp()
2206 mask = rnp->grpmask; in rcu_report_qs_rnp()
2207 if (rnp->parent == NULL) { in rcu_report_qs_rnp()
2213 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_qs_rnp()
2214 rnp_c = rnp; in rcu_report_qs_rnp()
2215 rnp = rnp->parent; in rcu_report_qs_rnp()
2216 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_report_qs_rnp()
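
The rcu_report_qs_rnp() hits show quiescent states propagating up the tree: the reporting CPUs' bits are cleared from the leaf's ->qsmask, and only when a node's mask empties (and no readers remain blocked on it) does the walk carry that node's ->grpmask bit to its parent. A simplified sketch of the climb, with hypothetical types and without the blocked-reader, gp_seq, and tracing logic:

    #include <linux/spinlock.h>

    struct qs_node {
            spinlock_t lock;
            unsigned long qsmask;   /* children still owing a quiescent state */
            unsigned long grpmask;  /* this node's bit in its parent's qsmask */
            struct qs_node *parent; /* NULL at the root */
    };

    /* Caller holds rnp->lock; released before returning, as in the kernel. */
    static void report_qs(struct qs_node *rnp, unsigned long mask)
    {
            for (;;) {
                    rnp->qsmask &= ~mask;
                    if (rnp->qsmask) {              /* others still pending */
                            spin_unlock(&rnp->lock);
                            return;
                    }
                    if (!rnp->parent) {             /* root emptied: GP may end */
                            spin_unlock(&rnp->lock);
                            return;
                    }
                    mask = rnp->grpmask;            /* propagate one level up */
                    spin_unlock(&rnp->lock);
                    rnp = rnp->parent;
                    spin_lock(&rnp->lock);
            }
    }

The same leaf-to-root climb, applied to ->qsmaskinit rather than ->qsmask, appears in the rcu_cleanup_dead_rnp() and rcu_init_new_rnp() hits further down, clearing or setting a leaf's bit in its parent as the leaf's last CPU goes offline or its first CPU comes online.
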
2236 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) in rcu_report_unblock_qs_rnp() argument
2237 __releases(rnp->lock) in rcu_report_unblock_qs_rnp()
2243 raw_lockdep_assert_held_rcu_node(rnp); in rcu_report_unblock_qs_rnp()
2245 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) || in rcu_report_unblock_qs_rnp()
2246 rnp->qsmask != 0) { in rcu_report_unblock_qs_rnp()
2247 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_unblock_qs_rnp()
2251 rnp->completedqs = rnp->gp_seq; in rcu_report_unblock_qs_rnp()
2252 rnp_p = rnp->parent; in rcu_report_unblock_qs_rnp()
2263 gps = rnp->gp_seq; in rcu_report_unblock_qs_rnp()
2264 mask = rnp->grpmask; in rcu_report_unblock_qs_rnp()
2265 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ in rcu_report_unblock_qs_rnp()
2281 struct rcu_node *rnp; in rcu_report_qs_rdp() local
2284 rnp = rdp->mynode; in rcu_report_qs_rdp()
2285 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_report_qs_rdp()
2286 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq || in rcu_report_qs_rdp()
2296 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_qs_rdp()
2301 if ((rnp->qsmask & mask) == 0) { in rcu_report_qs_rdp()
2302 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_qs_rdp()
2309 needwake = rcu_accelerate_cbs(rnp, rdp); in rcu_report_qs_rdp()
2312 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in rcu_report_qs_rdp()
2360 struct rcu_node *rnp = rdp->mynode; in rcutree_dying_cpu() local
2365 blkd = !!(rnp->qsmask & rdp->grpmask); in rcutree_dying_cpu()
2366 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq), in rcutree_dying_cpu()
2391 struct rcu_node *rnp = rnp_leaf; in rcu_cleanup_dead_rnp() local
2399 mask = rnp->grpmask; in rcu_cleanup_dead_rnp()
2400 rnp = rnp->parent; in rcu_cleanup_dead_rnp()
2401 if (!rnp) in rcu_cleanup_dead_rnp()
2403 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ in rcu_cleanup_dead_rnp()
2404 rnp->qsmaskinit &= ~mask; in rcu_cleanup_dead_rnp()
2406 WARN_ON_ONCE(rnp->qsmask); in rcu_cleanup_dead_rnp()
2407 if (rnp->qsmaskinit) { in rcu_cleanup_dead_rnp()
2408 raw_spin_unlock_rcu_node(rnp); in rcu_cleanup_dead_rnp()
2412 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ in rcu_cleanup_dead_rnp()
2425 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcutree_dead_cpu() local
2432 rcu_boost_kthread_setaffinity(rnp, -1); in rcutree_dead_cpu()
2617 struct rcu_node *rnp; in force_qs_rnp() local
2621 rcu_for_each_leaf_node(rnp) { in force_qs_rnp()
2624 raw_spin_lock_irqsave_rcu_node(rnp, flags); in force_qs_rnp()
2625 rcu_state.cbovldnext |= !!rnp->cbovldmask; in force_qs_rnp()
2626 if (rnp->qsmask == 0) { in force_qs_rnp()
2627 if (rcu_preempt_blocked_readers_cgp(rnp)) { in force_qs_rnp()
2633 rcu_initiate_boost(rnp, flags); in force_qs_rnp()
2637 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in force_qs_rnp()
2640 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) { in force_qs_rnp()
2649 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in force_qs_rnp()
2652 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in force_qs_rnp()
2665 struct rcu_node *rnp; in rcu_force_quiescent_state() local
2669 rnp = __this_cpu_read(rcu_data.mynode); in rcu_force_quiescent_state()
2670 for (; rnp != NULL; rnp = rnp->parent) { in rcu_force_quiescent_state()
2672 !raw_spin_trylock(&rnp->fqslock); in rcu_force_quiescent_state()
2677 rnp_old = rnp; in rcu_force_quiescent_state()
2708 struct rcu_node *rnp = rdp->mynode; in rcu_core() local
2732 rcu_accelerate_cbs_unlocked(rnp, rdp); in rcu_core()
2736 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check()); in rcu_core()
2921 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp) in check_cb_ovld_locked() argument
2923 raw_lockdep_assert_held_rcu_node(rnp); in check_cb_ovld_locked()
2927 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask); in check_cb_ovld_locked()
2929 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask); in check_cb_ovld_locked()
2946 struct rcu_node *const rnp = rdp->mynode; in check_cb_ovld() local
2950 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask))) in check_cb_ovld()
2952 raw_spin_lock_rcu_node(rnp); in check_cb_ovld()
2953 check_cb_ovld_locked(rdp, rnp); in check_cb_ovld()
2954 raw_spin_unlock_rcu_node(rnp); in check_cb_ovld()
3786 struct rcu_node *rnp; in start_poll_synchronize_rcu() local
3791 rnp = rdp->mynode; in start_poll_synchronize_rcu()
3792 raw_spin_lock_rcu_node(rnp); // irqs already disabled. in start_poll_synchronize_rcu()
3793 needwake = rcu_start_this_gp(rnp, rdp, gp_seq); in start_poll_synchronize_rcu()
3794 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in start_poll_synchronize_rcu()
3873 struct rcu_node *rnp = rdp->mynode; in rcu_pending() local
3905 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq || in rcu_pending()
4071 struct rcu_node *rnp = rnp_leaf; in rcu_init_new_rnp() local
4074 WARN_ON_ONCE(rnp->wait_blkd_tasks); in rcu_init_new_rnp()
4076 mask = rnp->grpmask; in rcu_init_new_rnp()
4077 rnp = rnp->parent; in rcu_init_new_rnp()
4078 if (rnp == NULL) in rcu_init_new_rnp()
4080 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */ in rcu_init_new_rnp()
4081 oldmask = rnp->qsmaskinit; in rcu_init_new_rnp()
4082 rnp->qsmaskinit |= mask; in rcu_init_new_rnp()
4083 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */ in rcu_init_new_rnp()
4124 struct rcu_node *rnp = rcu_get_root(); in rcutree_prepare_cpu() local
4127 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcutree_prepare_cpu()
4132 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ in rcutree_prepare_cpu()
4146 rnp = rdp->mynode; in rcutree_prepare_cpu()
4147 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ in rcutree_prepare_cpu()
4149 rdp->gp_seq = READ_ONCE(rnp->gp_seq); in rcutree_prepare_cpu()
4157 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcutree_prepare_cpu()
4158 rcu_spawn_one_boost_kthread(rnp); in rcutree_prepare_cpu()
4183 struct rcu_node *rnp; in rcutree_online_cpu() local
4186 rnp = rdp->mynode; in rcutree_online_cpu()
4187 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcutree_online_cpu()
4188 rnp->ffmask |= rdp->grpmask; in rcutree_online_cpu()
4189 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcutree_online_cpu()
4208 struct rcu_node *rnp; in rcutree_offline_cpu() local
4211 rnp = rdp->mynode; in rcutree_offline_cpu()
4212 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcutree_offline_cpu()
4213 rnp->ffmask &= ~rdp->grpmask; in rcutree_offline_cpu()
4214 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcutree_offline_cpu()
4239 struct rcu_node *rnp; in rcu_cpu_starting() local
4247 rnp = rdp->mynode; in rcu_cpu_starting()
4249 WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1); in rcu_cpu_starting()
4250 WARN_ON_ONCE(!(rnp->ofl_seq & 0x1)); in rcu_cpu_starting()
4253 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_cpu_starting()
4254 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask); in rcu_cpu_starting()
4255 newcpu = !(rnp->expmaskinitnext & mask); in rcu_cpu_starting()
4256 rnp->expmaskinitnext |= mask; in rcu_cpu_starting()
4260 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */ in rcu_cpu_starting()
4265 if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */ in rcu_cpu_starting()
4268 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in rcu_cpu_starting()
4270 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_cpu_starting()
4273 WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1); in rcu_cpu_starting()
4274 WARN_ON_ONCE(rnp->ofl_seq & 0x1); in rcu_cpu_starting()
4291 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcu_report_dead() local
4302 WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1); in rcu_report_dead()
4303 WARN_ON_ONCE(!(rnp->ofl_seq & 0x1)); in rcu_report_dead()
4306 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */ in rcu_report_dead()
4309 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */ in rcu_report_dead()
4311 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); in rcu_report_dead()
4312 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_report_dead()
4314 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask); in rcu_report_dead()
4315 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_report_dead()
4318 WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1); in rcu_report_dead()
4319 WARN_ON_ONCE(rnp->ofl_seq & 0x1); in rcu_report_dead()
4403 struct rcu_node *rnp; in rcu_spawn_gp_kthread() local
4430 rnp = rcu_get_root(); in rcu_spawn_gp_kthread()
4431 raw_spin_lock_irqsave_rcu_node(rnp, flags); in rcu_spawn_gp_kthread()
4436 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); in rcu_spawn_gp_kthread()
4478 struct rcu_node *rnp; in rcu_init_one() local
4497 rnp = rcu_state.level[i]; in rcu_init_one()
4498 for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) { in rcu_init_one()
4499 raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock)); in rcu_init_one()
4500 lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock), in rcu_init_one()
4502 raw_spin_lock_init(&rnp->fqslock); in rcu_init_one()
4503 lockdep_set_class_and_name(&rnp->fqslock, in rcu_init_one()
4505 rnp->gp_seq = rcu_state.gp_seq; in rcu_init_one()
4506 rnp->gp_seq_needed = rcu_state.gp_seq; in rcu_init_one()
4507 rnp->completedqs = rcu_state.gp_seq; in rcu_init_one()
4508 rnp->qsmask = 0; in rcu_init_one()
4509 rnp->qsmaskinit = 0; in rcu_init_one()
4510 rnp->grplo = j * cpustride; in rcu_init_one()
4511 rnp->grphi = (j + 1) * cpustride - 1; in rcu_init_one()
4512 if (rnp->grphi >= nr_cpu_ids) in rcu_init_one()
4513 rnp->grphi = nr_cpu_ids - 1; in rcu_init_one()
4515 rnp->grpnum = 0; in rcu_init_one()
4516 rnp->grpmask = 0; in rcu_init_one()
4517 rnp->parent = NULL; in rcu_init_one()
4519 rnp->grpnum = j % levelspread[i - 1]; in rcu_init_one()
4520 rnp->grpmask = BIT(rnp->grpnum); in rcu_init_one()
4521 rnp->parent = rcu_state.level[i - 1] + in rcu_init_one()
4524 rnp->level = i; in rcu_init_one()
4525 INIT_LIST_HEAD(&rnp->blkd_tasks); in rcu_init_one()
4526 rcu_init_one_nocb(rnp); in rcu_init_one()
4527 init_waitqueue_head(&rnp->exp_wq[0]); in rcu_init_one()
4528 init_waitqueue_head(&rnp->exp_wq[1]); in rcu_init_one()
4529 init_waitqueue_head(&rnp->exp_wq[2]); in rcu_init_one()
4530 init_waitqueue_head(&rnp->exp_wq[3]); in rcu_init_one()
4531 spin_lock_init(&rnp->exp_lock); in rcu_init_one()
4537 rnp = rcu_first_leaf_node(); in rcu_init_one()
4539 while (i > rnp->grphi) in rcu_init_one()
4540 rnp++; in rcu_init_one()
4541 per_cpu_ptr(&rcu_data, i)->mynode = rnp; in rcu_init_one()
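
The tail of rcu_init_one() walks the leaves in CPU order: each leaf covers the inclusive range [grplo, grphi] (cpustride CPUs wide, clamped to nr_cpu_ids), and each CPU's rcu_data is pointed at the leaf whose range contains it. In miniature, with the per-CPU structure reduced to a plain array of hypothetical leaf pointers:

    /* Toy CPU-to-leaf assignment; leaf[] covers CPUs in ascending ranges. */
    struct leaf {
            int grplo, grphi;       /* inclusive CPU range for this leaf */
    };

    static void map_cpus(struct leaf *leaf, struct leaf **mynode, int nr_cpus)
    {
            struct leaf *lp = leaf;                 /* start at the first leaf */
            int cpu;

            for (cpu = 0; cpu < nr_cpus; cpu++) {
                    while (cpu > lp->grphi)         /* passed this leaf's range */
                            lp++;
                    mynode[cpu] = lp;               /* rdp->mynode = rnp */
            }
    }
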
4647 struct rcu_node *rnp; in rcu_dump_rcu_node_tree() local
4651 rcu_for_each_node_breadth_first(rnp) { in rcu_dump_rcu_node_tree()
4652 if (rnp->level != level) { in rcu_dump_rcu_node_tree()
4655 level = rnp->level; in rcu_dump_rcu_node_tree()
4657 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum); in rcu_dump_rcu_node_tree()