Lines matching refs: rcu_state

Each entry below gives the source line number in kernel/rcu/tree.c, the matching source line, and the enclosing function; the static rcu_state structure defined at line 84 is the global state for the RCU implementation.

84 static struct rcu_state rcu_state = {  variable
85 .level = { &rcu_state.node[0] },
88 .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
89 .barrier_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.barrier_lock),
92 .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
93 .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
226 return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq)); in rcu_gp_in_progress()
483 return READ_ONCE(rcu_state.gp_seq); in rcu_get_gp_seq()
495 return rcu_state.expedited_sequence; in rcu_exp_batches_completed()
504 return &rcu_state.node[0]; in rcu_get_root()
515 *flags = READ_ONCE(rcu_state.gp_flags); in rcutorture_get_gp_data()
516 *gp_seq = rcu_seq_current(&rcu_state.gp_seq); in rcutorture_get_gp_data()
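
The accessors above (lines 226-516) treat rcu_state.gp_seq as a single word that packs a grace-period counter in the upper bits and a phase field in the low bits (the listing's RCU_SEQ_CTR_SHIFT at line 3504 is that split). A minimal user-space sketch of the encoding, assuming a two-bit state field; the names only mirror the kernel's rcu_seq_*() helpers and are not the kernel implementation:

```c
/* Sketch of the gp_seq encoding behind rcu_seq_state()/rcu_seq_ctr(). */
#include <stdio.h>

#define SEQ_CTR_SHIFT  2                               /* low bits hold the phase */
#define SEQ_STATE_MASK ((1UL << SEQ_CTR_SHIFT) - 1)

static unsigned long seq_ctr(unsigned long s)   { return s >> SEQ_CTR_SHIFT; }
static unsigned long seq_state(unsigned long s) { return s & SEQ_STATE_MASK; }

/* Start a grace period: bump the low bits so seq_state() becomes nonzero. */
static void seq_start(unsigned long *sp) { *sp += 1; }

/* End a grace period: round up to the next counter value, state back to 0. */
static void seq_end(unsigned long *sp) { *sp = (*sp | SEQ_STATE_MASK) + 1; }

int main(void)
{
    unsigned long gp_seq = 0;

    seq_start(&gp_seq);
    printf("in progress: ctr=%lu state=%lu\n", seq_ctr(gp_seq), seq_state(gp_seq));
    seq_end(&gp_seq);
    printf("idle:        ctr=%lu state=%lu\n", seq_ctr(gp_seq), seq_state(gp_seq));
    return 0;
}
```

With that encoding, rcu_gp_in_progress() at line 226 reduces to "is the state field of the current gp_seq nonzero", and rcu_get_gp_seq() at line 483 hands the raw word to the rcutorture consumers at lines 515-516.
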
744 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); in dyntick_save_progress_counter()
771 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); in rcu_implicit_dynticks_qs()
823 (time_after(jiffies, rcu_state.gp_start + jtsq * 2) || in rcu_implicit_dynticks_qs()
824 time_after(jiffies, rcu_state.jiffies_resched) || in rcu_implicit_dynticks_qs()
825 rcu_state.cbovld)) { in rcu_implicit_dynticks_qs()
829 } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) { in rcu_implicit_dynticks_qs()
843 rcu_state.cbovld)) { in rcu_implicit_dynticks_qs()
856 if (time_after(jiffies, rcu_state.jiffies_resched)) { in rcu_implicit_dynticks_qs()
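
The urgency checks in rcu_implicit_dynticks_qs() (lines 823-856) compare jiffies against deadlines such as rcu_state.gp_start + jtsq and rcu_state.jiffies_resched using time_after(), which stays correct across counter wraparound because it reduces to a signed difference. A tiny sketch of that idea; the real macros live in include/linux/jiffies.h:

```c
/* Wraparound-safe "a is after b" check, in the spirit of time_after(). */
#include <stdio.h>

static int after(unsigned long a, unsigned long b)
{
    return (long)(b - a) < 0;       /* signed difference tolerates wrap */
}

int main(void)
{
    unsigned long deadline = (unsigned long)-10;  /* 10 ticks before wrap */
    unsigned long now      = 5;                   /* shortly after wrap */

    printf("%d\n", after(now, deadline));         /* 1: past the deadline */
    return 0;
}
```
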
896 trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq), in trace_rcu_this_gp()
969 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT); in rcu_start_this_gp()
970 WRITE_ONCE(rcu_state.gp_req_activity, jiffies); in rcu_start_this_gp()
971 if (!READ_ONCE(rcu_state.gp_kthread)) { in rcu_start_this_gp()
975 trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq")); in rcu_start_this_gp()
1022 struct task_struct *t = READ_ONCE(rcu_state.gp_kthread); in rcu_gp_kthread_wake()
1025 !READ_ONCE(rcu_state.gp_flags) || !t) in rcu_gp_kthread_wake()
1027 WRITE_ONCE(rcu_state.gp_wake_time, jiffies); in rcu_gp_kthread_wake()
1028 WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq)); in rcu_gp_kthread_wake()
1029 swake_up_one(&rcu_state.gp_wq); in rcu_gp_kthread_wake()
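
Lines 969-1029 are the request side of grace-period startup: rcu_start_this_gp() sets RCU_GP_FLAG_INIT in rcu_state.gp_flags and stamps gp_req_activity, and rcu_gp_kthread_wake() records the wake time/sequence and wakes the kthread through rcu_state.gp_wq. A runnable user-space sketch of that flag-plus-wakeup handshake, with a pthread condition variable standing in for the swait queue; this models the pattern, not the kernel's rcu_node funnel locking:

```c
/* Flag-and-wake handshake modeled loosely on rcu_start_this_gp() +
 * rcu_gp_kthread_wake(); a condvar stands in for rcu_state.gp_wq. */
#include <pthread.h>
#include <stdio.h>

#define GP_FLAG_INIT 0x1

static pthread_mutex_t lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  gp_wq = PTHREAD_COND_INITIALIZER;
static unsigned int gp_flags;

static void *gp_kthread(void *arg)
{
    pthread_mutex_lock(&lock);
    while (!(gp_flags & GP_FLAG_INIT))   /* the RCU_GP_WAIT_GPS sleep */
        pthread_cond_wait(&gp_wq, &lock);
    gp_flags = 0;                        /* "Clear all flags: New GP." */
    pthread_mutex_unlock(&lock);
    printf("grace period started\n");
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, gp_kthread, NULL);

    /* Requester: raise the flag, then wake the kthread. */
    pthread_mutex_lock(&lock);
    gp_flags |= GP_FLAG_INIT;
    pthread_mutex_unlock(&lock);
    pthread_cond_signal(&gp_wq);

    pthread_join(t, NULL);
    return 0;
}
```

The kernel version additionally skips the wakeup when the flags are already clear or the kthread has not been spawned (line 1025), and lets rcu_gp_init() clear gp_flags at the start of the new grace period (line 1400).
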
1068 gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq); in rcu_accelerate_cbs()
1074 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB")); in rcu_accelerate_cbs()
1076 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB")); in rcu_accelerate_cbs()
1097 c = rcu_seq_snap(&rcu_state.gp_seq); in rcu_accelerate_cbs_unlocked()
1191 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend")); in __note_gp_changes()
1207 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart")); in __note_gp_changes()
1274 !(rcu_seq_ctr(rcu_state.gp_seq) % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay))) in rcu_gp_slow()
1321 if (!rcu_seq_state(rcu_state.gp_seq_polled)) in rcu_poll_gp_seq_start()
1322 rcu_seq_start(&rcu_state.gp_seq_polled); in rcu_poll_gp_seq_start()
1325 *snap = rcu_state.gp_seq_polled; in rcu_poll_gp_seq_start()
1339 if (*snap && *snap == rcu_state.gp_seq_polled) { in rcu_poll_gp_seq_end()
1340 rcu_seq_end(&rcu_state.gp_seq_polled); in rcu_poll_gp_seq_end()
1341 rcu_state.gp_seq_polled_snap = 0; in rcu_poll_gp_seq_end()
1342 rcu_state.gp_seq_polled_exp_snap = 0; in rcu_poll_gp_seq_end()
1393 WRITE_ONCE(rcu_state.gp_activity, jiffies); in rcu_gp_init()
1395 if (!READ_ONCE(rcu_state.gp_flags)) { in rcu_gp_init()
1400 WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */ in rcu_gp_init()
1414 rcu_seq_start(&rcu_state.gp_seq); in rcu_gp_init()
1415 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq); in rcu_gp_init()
1416 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start")); in rcu_gp_init()
1417 rcu_poll_gp_seq_start(&rcu_state.gp_seq_polled_snap); in rcu_gp_init()
1429 WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF); in rcu_gp_init()
1433 arch_spin_lock(&rcu_state.ofl_lock); in rcu_gp_init()
1439 arch_spin_unlock(&rcu_state.ofl_lock); in rcu_gp_init()
1476 arch_spin_unlock(&rcu_state.ofl_lock); in rcu_gp_init()
1493 WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT); in rcu_gp_init()
1500 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq); in rcu_gp_init()
1504 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq, in rcu_gp_init()
1515 WRITE_ONCE(rcu_state.gp_activity, jiffies); in rcu_gp_init()
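
rcu_gp_init() (lines 1393-1515) starts a grace period by advancing rcu_state.gp_seq with rcu_seq_start() and then pushing the new value into every rcu_node's gp_seq during a breadth-first walk (line 1500); rcu_gp_cleanup() later does the mirror-image walk before rcu_seq_end() (lines 1701 and 1734). A compact sketch of that "bump, then propagate" step over a flat node array; the real code holds each node's lock, coordinates with CPU hotplug via ofl_lock, and applies rcu_gp_slow() delays:

```c
/* "Bump the global sequence, then copy it into every node" sketch,
 * after rcu_gp_init()/rcu_gp_cleanup(); an array stands in for the tree. */
#include <stdio.h>

#define SEQ_STATE_MASK 0x3UL
#define NR_NODES 4

struct node_sketch { unsigned long gp_seq; };

static unsigned long gp_seq;                 /* rcu_state.gp_seq stand-in */
static struct node_sketch nodes[NR_NODES];   /* rcu_node tree stand-in */

static void gp_init_sketch(void)
{
    gp_seq += 1;                             /* rcu_seq_start(): state -> 1 */

    /* Breadth-first propagation: each node learns the new gp_seq. */
    for (int i = 0; i < NR_NODES; i++)
        nodes[i].gp_seq = gp_seq;            /* WRITE_ONCE() in the kernel */
}

static void gp_cleanup_sketch(void)
{
    /* Nodes are marked done first, then the global counter is ended. */
    unsigned long new_gp_seq = (gp_seq | SEQ_STATE_MASK) + 1;

    for (int i = 0; i < NR_NODES; i++)
        nodes[i].gp_seq = new_gp_seq;
    gp_seq = new_gp_seq;                     /* rcu_seq_end() */
}

int main(void)
{
    gp_init_sketch();
    printf("after init:    gp_seq=%lu node0=%lu\n", gp_seq, nodes[0].gp_seq);
    gp_cleanup_sketch();
    printf("after cleanup: gp_seq=%lu node0=%lu\n", gp_seq, nodes[0].gp_seq);
    return 0;
}
```
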
1538 *gfp = READ_ONCE(rcu_state.gp_flags); in rcu_gp_fqs_check_wake()
1556 WRITE_ONCE(rcu_state.gp_activity, jiffies); in rcu_gp_fqs()
1557 WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1); in rcu_gp_fqs()
1566 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) { in rcu_gp_fqs()
1568 WRITE_ONCE(rcu_state.gp_flags, in rcu_gp_fqs()
1569 READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS); in rcu_gp_fqs()
1586 if (rcu_state.cbovld) in rcu_gp_fqs_loop()
1590 if (rcu_state.cbovld) { in rcu_gp_fqs_loop()
1595 if (!ret || time_before(jiffies + j, rcu_state.jiffies_force_qs)) { in rcu_gp_fqs_loop()
1596 WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j); in rcu_gp_fqs_loop()
1602 WRITE_ONCE(rcu_state.jiffies_kick_kthreads, in rcu_gp_fqs_loop()
1605 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, in rcu_gp_fqs_loop()
1607 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS); in rcu_gp_fqs_loop()
1608 (void)swait_event_idle_timeout_exclusive(rcu_state.gp_wq, in rcu_gp_fqs_loop()
1611 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS); in rcu_gp_fqs_loop()
1626 if (!time_after(rcu_state.jiffies_force_qs, jiffies) || in rcu_gp_fqs_loop()
1628 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, in rcu_gp_fqs_loop()
1634 gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0; in rcu_gp_fqs_loop()
1636 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, in rcu_gp_fqs_loop()
1639 WRITE_ONCE(rcu_state.gp_activity, jiffies); in rcu_gp_fqs_loop()
1645 WRITE_ONCE(rcu_state.gp_activity, jiffies); in rcu_gp_fqs_loop()
1647 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, in rcu_gp_fqs_loop()
1651 if (time_after(jiffies, rcu_state.jiffies_force_qs)) in rcu_gp_fqs_loop()
1654 j = rcu_state.jiffies_force_qs - j; in rcu_gp_fqs_loop()
1674 WRITE_ONCE(rcu_state.gp_activity, jiffies); in rcu_gp_cleanup()
1676 rcu_state.gp_end = jiffies; in rcu_gp_cleanup()
1677 gp_duration = rcu_state.gp_end - rcu_state.gp_start; in rcu_gp_cleanup()
1678 if (gp_duration > rcu_state.gp_max) in rcu_gp_cleanup()
1679 rcu_state.gp_max = gp_duration; in rcu_gp_cleanup()
1689 rcu_poll_gp_seq_end(&rcu_state.gp_seq_polled_snap); in rcu_gp_cleanup()
1701 new_gp_seq = rcu_state.gp_seq; in rcu_gp_cleanup()
1726 WRITE_ONCE(rcu_state.gp_activity, jiffies); in rcu_gp_cleanup()
1733 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end")); in rcu_gp_cleanup()
1734 rcu_seq_end(&rcu_state.gp_seq); in rcu_gp_cleanup()
1735 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq); in rcu_gp_cleanup()
1736 WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE); in rcu_gp_cleanup()
1758 WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT); in rcu_gp_cleanup()
1759 WRITE_ONCE(rcu_state.gp_req_activity, jiffies); in rcu_gp_cleanup()
1760 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("newreq")); in rcu_gp_cleanup()
1769 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & RCU_GP_FLAG_INIT); in rcu_gp_cleanup()
1788 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, in rcu_gp_kthread()
1790 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS); in rcu_gp_kthread()
1791 swait_event_idle_exclusive(rcu_state.gp_wq, in rcu_gp_kthread()
1792 READ_ONCE(rcu_state.gp_flags) & in rcu_gp_kthread()
1795 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS); in rcu_gp_kthread()
1800 WRITE_ONCE(rcu_state.gp_activity, jiffies); in rcu_gp_kthread()
1802 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, in rcu_gp_kthread()
1810 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP); in rcu_gp_kthread()
1812 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED); in rcu_gp_kthread()
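
Lines 1788-1812 show the top of rcu_gp_kthread(): sleep until gp_flags requests a grace period, initialize, run the force-quiescent-state loop, then clean up, updating rcu_state.gp_state at each step. A skeleton of that state machine with stubbed-out phases; the state names follow the listing (minus the RCU_ prefix), everything else is illustrative:

```c
/* GP kthread lifecycle skeleton: wait -> init -> FQS loop -> cleanup. */
#include <stdio.h>

enum gp_state { GP_IDLE, GP_WAIT_GPS, GP_INIT, GP_WAIT_FQS, GP_CLEANUP };

#define GP_FLAG_INIT 0x1

static enum gp_state gp_state;
static unsigned int gp_flags;

static void wait_for_request(void) { gp_flags |= GP_FLAG_INIT; }        /* stub */
static int  all_quiescent_yet(void) { static int n; return ++n >= 3; }  /* stub */

static void gp_kthread_once(void)
{
    gp_state = GP_WAIT_GPS;      /* swait_event_idle_exclusive(gp_wq, ...) */
    wait_for_request();

    gp_state = GP_INIT;          /* rcu_gp_init(): bump gp_seq, walk tree */
    gp_flags = 0;

    do {
        gp_state = GP_WAIT_FQS;  /* sleep up to jiffies_till_next_fqs,    */
                                 /* then rcu_gp_fqs(): scan for holdouts  */
    } while (!all_quiescent_yet());

    gp_state = GP_CLEANUP;       /* rcu_gp_cleanup(): mark nodes done */
    gp_state = GP_IDLE;          /* rcu_seq_end(), maybe start the next GP */
}

int main(void)
{
    gp_kthread_once();
    printf("final state: %d (GP_IDLE)\n", gp_state);
    return 0;
}
```
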
1830 WRITE_ONCE(rcu_state.gp_flags, in rcu_report_qs_rsp()
1831 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS); in rcu_report_qs_rsp()
1874 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq, in rcu_report_qs_rnp()
2062 trace_rcu_batch_start(rcu_state.name, in rcu_do_batch()
2064 trace_rcu_batch_end(rcu_state.name, 0, in rcu_do_batch()
2088 trace_rcu_batch_start(rcu_state.name, in rcu_do_batch()
2108 trace_rcu_invoke_callback(rcu_state.name, rhp); in rcu_do_batch()
2144 trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(), in rcu_do_batch()
2159 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); in rcu_do_batch()
2233 rcu_state.cbovld = rcu_state.cbovldnext; in force_qs_rnp()
2234 rcu_state.cbovldnext = false; in force_qs_rnp()
2239 rcu_state.cbovldnext |= !!rnp->cbovldmask; in force_qs_rnp()
2285 ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) || in rcu_force_quiescent_state()
2298 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) { in rcu_force_quiescent_state()
2302 WRITE_ONCE(rcu_state.gp_flags, in rcu_force_quiescent_state()
2303 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS); in rcu_force_quiescent_state()
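
rcu_force_quiescent_state() (lines 2285-2303) uses a check-then-lock pattern: an unlocked peek at gp_flags lets late arrivals bail out early, and only the winner of the funnel takes the root rcu_node lock, ORs in RCU_GP_FLAG_FQS, and wakes the GP kthread, which later clears the bit in rcu_gp_fqs() (lines 1566-1569). A small sketch of that double-checked flag set; a mutex stands in for the root node's lock:

```c
/* Double-checked "set the FQS flag once", after rcu_force_quiescent_state(). */
#include <pthread.h>
#include <stdio.h>

#define GP_FLAG_FQS 0x2

static pthread_mutex_t root_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile unsigned int gp_flags;

static void force_quiescent_state(void)
{
    if (gp_flags & GP_FLAG_FQS)          /* unlocked peek: someone beat us */
        return;

    pthread_mutex_lock(&root_lock);
    if (gp_flags & GP_FLAG_FQS) {        /* recheck under the lock */
        pthread_mutex_unlock(&root_lock);
        return;
    }
    gp_flags |= GP_FLAG_FQS;             /* request an FQS scan */
    pthread_mutex_unlock(&root_lock);
    /* ...wake the GP kthread here... */
}

int main(void)
{
    force_quiescent_state();
    force_quiescent_state();             /* second call exits on the early check */
    printf("gp_flags=%#x\n", gp_flags);
    return 0;
}
```
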
2537 if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap && in __call_rcu_core()
2540 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); in __call_rcu_core()
2644 trace_rcu_kvfree_callback(rcu_state.name, head, in __call_rcu_common()
2648 trace_rcu_callback(rcu_state.name, head, in __call_rcu_common()
2923 rcu_state.name, bnode->nr_records, in kvfree_rcu_bulk()
2930 rcu_state.name, bnode->records[i], 0); in kvfree_rcu_bulk()
2960 trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset); in kvfree_rcu_list()
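
Lines 2644-2960 are the internal side of callback posting and kvfree batching. For reference, the public entry points that feed these traced paths are call_rcu() and kfree_rcu()/kvfree_rcu(); a minimal kernel-context usage sketch, assuming a caller-defined struct foo that embeds an rcu_head (the struct and function names here are hypothetical):

```c
/* Typical call_rcu()/kfree_rcu() usage feeding the paths traced above. */
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct foo {                    /* hypothetical caller-side structure */
    int data;
    struct rcu_head rcu;
};

static void foo_reclaim(struct rcu_head *rcu)
{
    struct foo *fp = container_of(rcu, struct foo, rcu);

    kfree(fp);                  /* runs only after a full grace period */
}

static void foo_release(struct foo *fp)
{
    /* Unpublish fp first (e.g. rcu_assign_pointer()/list_del_rcu()). */
    call_rcu(&fp->rcu, foo_reclaim);    /* traced by trace_rcu_callback() */
}

static void foo_release_simple(struct foo *fp)
{
    kfree_rcu(fp, rcu);         /* queued on the kvfree path traced above */
}
```
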
3495 rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap); in synchronize_rcu()
3496 rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap); in synchronize_rcu()
3504 rcu_state.gp_seq += (1 << RCU_SEQ_CTR_SHIFT); in synchronize_rcu()
3506 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq; in synchronize_rcu()
3540 return rcu_seq_snap(&rcu_state.gp_seq_polled); in get_state_synchronize_rcu()
3570 rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence); in get_state_synchronize_rcu_full()
3596 needwake = rcu_start_this_gp(rnp, rdp, rcu_seq_snap(&rcu_state.gp_seq)); in start_poll_synchronize_rcu_common()
3681 rcu_seq_done_exact(&rcu_state.gp_seq_polled, oldstate)) { in poll_state_synchronize_rcu()
3730 rcu_seq_done_exact(&rcu_state.expedited_sequence, rgosp->rgos_exp)) { in poll_state_synchronize_rcu_full()
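
Lines 3495-3730 implement the polled grace-period machinery behind get_state_synchronize_rcu(), start_poll_synchronize_rcu(), poll_state_synchronize_rcu(), and cond_synchronize_rcu(): a cookie is snapshotted from gp_seq_polled with rcu_seq_snap() and later tested with rcu_seq_done_exact(). A usage sketch of the public side, kernel context assumed; the wrapper names are illustrative:

```c
/* Polled grace-period API usage backed by rcu_state.gp_seq_polled. */
#include <linux/rcupdate.h>

static unsigned long gp_cookie;     /* hypothetical caller-side cookie */

static void begin_update(void)
{
    /* Snapshot a cookie and kick off a grace period if needed. */
    gp_cookie = start_poll_synchronize_rcu();
}

static void finish_update_blocking(void)
{
    /* Block only if the snapshotted grace period has not yet completed. */
    cond_synchronize_rcu(gp_cookie);
}

static bool finish_update_nonblocking(void)
{
    /* True once a full grace period has elapsed since begin_update(). */
    return poll_state_synchronize_rcu(gp_cookie);
}
```

get_state_synchronize_rcu() is the non-starting variant of the same snapshot, which is why line 3540 returns rcu_seq_snap(&rcu_state.gp_seq_polled) directly.
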
3847 trace_rcu_barrier(rcu_state.name, s, cpu, in rcu_barrier_trace()
3848 atomic_read(&rcu_state.barrier_cpu_count), done); in rcu_barrier_trace()
3863 unsigned long __maybe_unused s = rcu_state.barrier_sequence; in rcu_barrier_callback()
3865 if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) { in rcu_barrier_callback()
3867 complete(&rcu_state.barrier_completion); in rcu_barrier_callback()
3878 unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence); in rcu_barrier_entrain()
3883 lockdep_assert_held(&rcu_state.barrier_lock); in rcu_barrier_entrain()
3886 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence); in rcu_barrier_entrain()
3899 atomic_inc(&rcu_state.barrier_cpu_count); in rcu_barrier_entrain()
3902 rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence); in rcu_barrier_entrain()
3921 raw_spin_lock(&rcu_state.barrier_lock); in rcu_barrier_handler()
3923 raw_spin_unlock(&rcu_state.barrier_lock); in rcu_barrier_handler()
3940 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence); in rcu_barrier()
3945 mutex_lock(&rcu_state.barrier_mutex); in rcu_barrier()
3948 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) { in rcu_barrier()
3949 rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence); in rcu_barrier()
3951 mutex_unlock(&rcu_state.barrier_mutex); in rcu_barrier()
3956 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags); in rcu_barrier()
3957 rcu_seq_start(&rcu_state.barrier_sequence); in rcu_barrier()
3958 gseq = rcu_state.barrier_sequence; in rcu_barrier()
3959 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence); in rcu_barrier()
3968 init_completion(&rcu_state.barrier_completion); in rcu_barrier()
3969 atomic_set(&rcu_state.barrier_cpu_count, 2); in rcu_barrier()
3970 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags); in rcu_barrier()
3982 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags); in rcu_barrier()
3985 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags); in rcu_barrier()
3986 rcu_barrier_trace(TPS("NQ"), cpu, rcu_state.barrier_sequence); in rcu_barrier()
3992 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags); in rcu_barrier()
3993 rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, rcu_state.barrier_sequence); in rcu_barrier()
3996 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags); in rcu_barrier()
4002 rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence); in rcu_barrier()
4009 if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count)) in rcu_barrier()
4010 complete(&rcu_state.barrier_completion); in rcu_barrier()
4013 wait_for_completion(&rcu_state.barrier_completion); in rcu_barrier()
4016 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence); in rcu_barrier()
4017 rcu_seq_end(&rcu_state.barrier_sequence); in rcu_barrier()
4018 gseq = rcu_state.barrier_sequence; in rcu_barrier()
4026 mutex_unlock(&rcu_state.barrier_mutex); in rcu_barrier()
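
The rcu_barrier() machinery (lines 3847-4026) counts outstanding barrier callbacks in rcu_state.barrier_cpu_count. The count starts at 2 rather than 1 (line 3969) so that callbacks completing while entraining is still in progress cannot drop it to zero early; the bias is removed with atomic_sub_and_test() once every CPU has been visited (line 4009). A user-space sketch of that counting scheme, sequential for clarity (the kernel's callbacks of course fire concurrently):

```c
/* rcu_barrier()-style completion counting: start at 2, entrain, subtract 2. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int barrier_cpu_count;
static bool barrier_done;

static void barrier_callback(void)               /* rcu_barrier_callback() */
{
    if (atomic_fetch_sub(&barrier_cpu_count, 1) == 1)
        barrier_done = true;                     /* complete(&barrier_completion) */
}

int main(void)
{
    int nr_cpus_with_cbs = 3;

    atomic_store(&barrier_cpu_count, 2);         /* bias: entraining in progress */

    for (int cpu = 0; cpu < nr_cpus_with_cbs; cpu++)
        atomic_fetch_add(&barrier_cpu_count, 1); /* rcu_barrier_entrain() */

    for (int cpu = 0; cpu < nr_cpus_with_cbs; cpu++)
        barrier_callback();                      /* callbacks fire per CPU */

    /* All CPUs visited: drop the bias; the last decrement completes. */
    if (atomic_fetch_sub(&barrier_cpu_count, 2) == 2)
        barrier_done = true;

    printf("barrier done: %d (count=%d)\n", barrier_done,
           atomic_load(&barrier_cpu_count));
    return 0;
}
```
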
4082 if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock)) in rcu_lockdep_current_cpu_online()
4095 return !!rcu_state.n_online_cpus; in rcu_init_invoked()
4112 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq), in rcutree_dying_cpu()
4173 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1); in rcutree_dead_cpu()
4221 rdp->barrier_seq_snap = rcu_state.barrier_sequence; in rcu_boot_init_percpu_data()
4222 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq; in rcu_boot_init_percpu_data()
4224 rdp->rcu_onl_gp_seq = rcu_state.gp_seq; in rcu_boot_init_percpu_data()
4251 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); in rcutree_prepare_cpu()
4278 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); in rcutree_prepare_cpu()
4282 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1); in rcutree_prepare_cpu()
4372 arch_spin_lock(&rcu_state.ofl_lock); in rcu_cpu_starting()
4374 raw_spin_lock(&rcu_state.barrier_lock); in rcu_cpu_starting()
4377 raw_spin_unlock(&rcu_state.barrier_lock); in rcu_cpu_starting()
4381 smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */ in rcu_cpu_starting()
4382 ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus); in rcu_cpu_starting()
4384 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq); in rcu_cpu_starting()
4385 rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags); in rcu_cpu_starting()
4399 arch_spin_unlock(&rcu_state.ofl_lock); in rcu_cpu_starting()
4427 arch_spin_lock(&rcu_state.ofl_lock); in rcu_report_dead()
4429 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq); in rcu_report_dead()
4430 rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags); in rcu_report_dead()
4439 arch_spin_unlock(&rcu_state.ofl_lock); in rcu_report_dead()
4463 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags); in rcutree_migrate_callbacks()
4475 raw_spin_unlock(&rcu_state.barrier_lock); /* irqs remain disabled. */ in rcutree_migrate_callbacks()
4579 t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name); in rcu_spawn_gp_kthread()
4588 WRITE_ONCE(rcu_state.gp_activity, jiffies); in rcu_spawn_gp_kthread()
4589 WRITE_ONCE(rcu_state.gp_req_activity, jiffies); in rcu_spawn_gp_kthread()
4591 smp_store_release(&rcu_state.gp_kthread, t); /* ^^^ */ in rcu_spawn_gp_kthread()
4631 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq; in rcu_scheduler_starting()
4664 rcu_state.level[i] = in rcu_init_one()
4665 rcu_state.level[i - 1] + num_rcu_lvl[i - 1]; in rcu_init_one()
4672 rnp = rcu_state.level[i]; in rcu_init_one()
4680 rnp->gp_seq = rcu_state.gp_seq; in rcu_init_one()
4681 rnp->gp_seq_needed = rcu_state.gp_seq; in rcu_init_one()
4682 rnp->completedqs = rcu_state.gp_seq; in rcu_init_one()
4696 rnp->parent = rcu_state.level[i - 1] + in rcu_init_one()
4714 init_swait_queue_head(&rcu_state.gp_wq); in rcu_init_one()
4715 init_swait_queue_head(&rcu_state.expedited_wq); in rcu_init_one()
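
rcu_init_one() (lines 4664-4715) lays the rcu_node combining tree out in one flat array: rcu_state.level[i] points at the first node of level i, each level starts right after the previous one, and a child's parent is found by dividing its index by the level's fanout. A standalone sketch of that layout arithmetic for a small two-level tree; the num_lvl and fanout values here are made up for illustration:

```c
/* Flat-array combining-tree layout in the spirit of rcu_init_one(). */
#include <stdio.h>

#define NR_LVLS 2
static const int num_lvl[NR_LVLS]     = { 1, 4 };  /* one root, four leaves */
static const int levelspread[NR_LVLS] = { 4, 4 };  /* fanout per level */

struct node_sketch {
    struct node_sketch *parent;
    int level;
};

static struct node_sketch nodes[1 + 4];         /* all levels, contiguously */
static struct node_sketch *level[NR_LVLS];      /* first node of each level */

int main(void)
{
    level[0] = &nodes[0];
    for (int i = 1; i < NR_LVLS; i++)           /* level[i] follows level[i-1] */
        level[i] = level[i - 1] + num_lvl[i - 1];

    for (int i = NR_LVLS - 1; i >= 0; i--) {
        for (int j = 0; j < num_lvl[i]; j++) {
            struct node_sketch *np = &level[i][j];

            np->level = i;
            np->parent = i ? level[i - 1] + j / levelspread[i - 1]
                           : NULL;              /* root has no parent */
        }
    }

    printf("leaf 3 parent is root: %d\n", level[1][3].parent == level[0]);
    return 0;
}
```

The same arithmetic explains lines 4664-4665 (level[i] = level[i - 1] + num_rcu_lvl[i - 1]) and the parent computation at line 4696.
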