1 /* SPDX-License-Identifier: GPL-2.0+ */
2 /*
3 * Task-based RCU implementations.
4 *
5 * Copyright (C) 2020 Paul E. McKenney
6 */
7
8 #ifdef CONFIG_TASKS_RCU_GENERIC
9 #include "rcu_segcblist.h"
10
11 ////////////////////////////////////////////////////////////////////////
12 //
13 // Generic data structures.
14
15 struct rcu_tasks;
16 typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
17 typedef void (*pregp_func_t)(struct list_head *hop);
18 typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
19 typedef void (*postscan_func_t)(struct list_head *hop);
20 typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
21 typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
22
23 /**
24 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
25 * @cblist: Callback list.
26 * @lock: Lock protecting per-CPU callback list.
27 * @rtp_jiffies: Jiffies counter value for statistics.
28 * @rtp_n_lock_retries: Rough lock-contention statistic.
29 * @rtp_work: Work queue for invoking callbacks.
30 * @rtp_irq_work: IRQ work queue for deferred wakeups.
31 * @barrier_q_head: RCU callback for barrier operation.
32 * @rtp_blkd_tasks: List of tasks blocked as readers.
33 * @cpu: CPU number corresponding to this entry.
34 * @rtpp: Pointer to the rcu_tasks structure.
35 */
36 struct rcu_tasks_percpu {
37 struct rcu_segcblist cblist;
38 raw_spinlock_t __private lock;
39 unsigned long rtp_jiffies;
40 unsigned long rtp_n_lock_retries;
41 struct work_struct rtp_work;
42 struct irq_work rtp_irq_work;
43 struct rcu_head barrier_q_head;
44 struct list_head rtp_blkd_tasks;
45 int cpu;
46 struct rcu_tasks *rtpp;
47 };
48
49 /**
50 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
51 * @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
52 * @cbs_gbl_lock: Lock protecting callback list.
53 * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone.
54 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
55 * @gp_func: This flavor's grace-period-wait function.
56 * @gp_state: Grace period's most recent state transition (debugging).
57 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
58 * @init_fract: Initial backoff sleep interval.
59 * @gp_jiffies: Time of last @gp_state transition.
60 * @gp_start: Most recent grace-period start in jiffies.
61 * @tasks_gp_seq: Number of grace periods completed since boot.
62 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
63 * @n_ipis_fails: Number of IPI-send failures.
64 * @pregp_func: This flavor's pre-grace-period function (optional).
65 * @pertask_func: This flavor's per-task scan function (optional).
66 * @postscan_func: This flavor's post-task scan function (optional).
67 * @holdouts_func: This flavor's holdout-list scan function (optional).
68 * @postgp_func: This flavor's post-grace-period function (optional).
69 * @call_func: This flavor's call_rcu()-equivalent function.
70 * @rtpcpu: This flavor's rcu_tasks_percpu structure.
71 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
72 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
73 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
74 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
75 * @barrier_q_mutex: Serialize barrier operations.
76 * @barrier_q_count: Number of queues being waited on.
77 * @barrier_q_completion: Barrier wait/wakeup mechanism.
78 * @barrier_q_seq: Sequence number for barrier operations.
79 * @name: This flavor's textual name.
80 * @kname: This flavor's kthread name.
81 */
82 struct rcu_tasks {
83 struct rcuwait cbs_wait;
84 raw_spinlock_t cbs_gbl_lock;
85 struct mutex tasks_gp_mutex;
86 int gp_state;
87 int gp_sleep;
88 int init_fract;
89 unsigned long gp_jiffies;
90 unsigned long gp_start;
91 unsigned long tasks_gp_seq;
92 unsigned long n_ipis;
93 unsigned long n_ipis_fails;
94 struct task_struct *kthread_ptr;
95 rcu_tasks_gp_func_t gp_func;
96 pregp_func_t pregp_func;
97 pertask_func_t pertask_func;
98 postscan_func_t postscan_func;
99 holdouts_func_t holdouts_func;
100 postgp_func_t postgp_func;
101 call_rcu_func_t call_func;
102 struct rcu_tasks_percpu __percpu *rtpcpu;
103 int percpu_enqueue_shift;
104 int percpu_enqueue_lim;
105 int percpu_dequeue_lim;
106 unsigned long percpu_dequeue_gpseq;
107 struct mutex barrier_q_mutex;
108 atomic_t barrier_q_count;
109 struct completion barrier_q_completion;
110 unsigned long barrier_q_seq;
111 char *name;
112 char *kname;
113 };
114
115 static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);
116
117 #define DEFINE_RCU_TASKS(rt_name, gp, call, n) \
118 static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = { \
119 .lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock), \
120 .rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup), \
121 }; \
122 static struct rcu_tasks rt_name = \
123 { \
124 .cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait), \
125 .cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock), \
126 .tasks_gp_mutex = __MUTEX_INITIALIZER(rt_name.tasks_gp_mutex), \
127 .gp_func = gp, \
128 .call_func = call, \
129 .rtpcpu = &rt_name ## __percpu, \
130 .name = n, \
131 .percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS), \
132 .percpu_enqueue_lim = 1, \
133 .percpu_dequeue_lim = 1, \
134 .barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex), \
135 .barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT, \
136 .kname = #rt_name, \
137 }
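/*
 * For illustration, a Tasks-RCU flavor is instantiated by pairing a
 * declaration of its call_rcu()-style function with this macro, as the
 * flavors later in this file do, for example:
 *
 *	void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
 *	DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
 *
 * This creates the per-CPU rcu_tasks__percpu variable along with the
 * rcu_tasks structure wired to rcu_tasks_wait_gp() and call_rcu_tasks().
 */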
138
139 /* Track exiting tasks in order to allow them to be waited for. */
140 DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
141
142 /* Avoid IPIing CPUs early in the grace period. */
143 #define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
144 static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
145 module_param(rcu_task_ipi_delay, int, 0644);
146
147 /* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
148 #define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30)
149 #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
150 static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
151 module_param(rcu_task_stall_timeout, int, 0644);
152 #define RCU_TASK_STALL_INFO (HZ * 10)
153 static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO;
154 module_param(rcu_task_stall_info, int, 0644);
155 static int rcu_task_stall_info_mult __read_mostly = 3;
156 module_param(rcu_task_stall_info_mult, int, 0444);
157
158 static int rcu_task_enqueue_lim __read_mostly = -1;
159 module_param(rcu_task_enqueue_lim, int, 0444);
160
161 static bool rcu_task_cb_adjust;
162 static int rcu_task_contend_lim __read_mostly = 100;
163 module_param(rcu_task_contend_lim, int, 0444);
164 static int rcu_task_collapse_lim __read_mostly = 10;
165 module_param(rcu_task_collapse_lim, int, 0444);
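/*
 * Because this file is included from kernel/rcu/update.c, the module
 * parameters above are typically set on the kernel boot command line
 * under the "rcupdate." prefix, for example
 * rcupdate.rcu_task_stall_timeout=3000 for a 30-second timeout at HZ=100.
 */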
166
167 /* RCU tasks grace-period state for debugging. */
168 #define RTGS_INIT 0
169 #define RTGS_WAIT_WAIT_CBS 1
170 #define RTGS_WAIT_GP 2
171 #define RTGS_PRE_WAIT_GP 3
172 #define RTGS_SCAN_TASKLIST 4
173 #define RTGS_POST_SCAN_TASKLIST 5
174 #define RTGS_WAIT_SCAN_HOLDOUTS 6
175 #define RTGS_SCAN_HOLDOUTS 7
176 #define RTGS_POST_GP 8
177 #define RTGS_WAIT_READERS 9
178 #define RTGS_INVOKE_CBS 10
179 #define RTGS_WAIT_CBS 11
180 #ifndef CONFIG_TINY_RCU
181 static const char * const rcu_tasks_gp_state_names[] = {
182 "RTGS_INIT",
183 "RTGS_WAIT_WAIT_CBS",
184 "RTGS_WAIT_GP",
185 "RTGS_PRE_WAIT_GP",
186 "RTGS_SCAN_TASKLIST",
187 "RTGS_POST_SCAN_TASKLIST",
188 "RTGS_WAIT_SCAN_HOLDOUTS",
189 "RTGS_SCAN_HOLDOUTS",
190 "RTGS_POST_GP",
191 "RTGS_WAIT_READERS",
192 "RTGS_INVOKE_CBS",
193 "RTGS_WAIT_CBS",
194 };
195 #endif /* #ifndef CONFIG_TINY_RCU */
196
197 ////////////////////////////////////////////////////////////////////////
198 //
199 // Generic code.
200
201 static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);
202
203 /* Record grace-period phase and time. */
204 static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
205 {
206 rtp->gp_state = newstate;
207 rtp->gp_jiffies = jiffies;
208 }
209
210 #ifndef CONFIG_TINY_RCU
211 /* Return state name. */
212 static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
213 {
214 int i = data_race(rtp->gp_state); // Let KCSAN detect update races
215 int j = READ_ONCE(i); // Prevent the compiler from reading twice
216
217 if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
218 return "???";
219 return rcu_tasks_gp_state_names[j];
220 }
221 #endif /* #ifndef CONFIG_TINY_RCU */
222
223 // Initialize per-CPU callback lists for the specified flavor of
224 // Tasks RCU.
225 static void cblist_init_generic(struct rcu_tasks *rtp)
226 {
227 int cpu;
228 unsigned long flags;
229 int lim;
230 int shift;
231
232 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
233 if (rcu_task_enqueue_lim < 0) {
234 rcu_task_enqueue_lim = 1;
235 rcu_task_cb_adjust = true;
236 pr_info("%s: Setting adjustable number of callback queues.\n", __func__);
237 } else if (rcu_task_enqueue_lim == 0) {
238 rcu_task_enqueue_lim = 1;
239 }
240 lim = rcu_task_enqueue_lim;
241
242 if (lim > nr_cpu_ids)
243 lim = nr_cpu_ids;
244 shift = ilog2(nr_cpu_ids / lim);
245 if (((nr_cpu_ids - 1) >> shift) >= lim)
246 shift++;
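// For example, with nr_cpu_ids == 8 and lim == 3, ilog2(8 / 3) == 1, but
// (7 >> 1) == 3 >= 3, so the shift is bumped to 2, after which CPU IDs
// 0-7 all map to queues 0 or 1, safely below lim.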
247 WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
248 WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
249 smp_store_release(&rtp->percpu_enqueue_lim, lim);
250 for_each_possible_cpu(cpu) {
251 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
252
253 WARN_ON_ONCE(!rtpcp);
254 if (cpu)
255 raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
256 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
257 if (rcu_segcblist_empty(&rtpcp->cblist))
258 rcu_segcblist_init(&rtpcp->cblist);
259 INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
260 rtpcp->cpu = cpu;
261 rtpcp->rtpp = rtp;
262 if (!rtpcp->rtp_blkd_tasks.next)
263 INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
264 raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
265 }
266 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
267 pr_info("%s: Setting shift to %d and lim to %d.\n", __func__, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim));
268 }
269
270 // IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
271 static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
272 {
273 struct rcu_tasks *rtp;
274 struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);
275
276 rtp = rtpcp->rtpp;
277 rcuwait_wake_up(&rtp->cbs_wait);
278 }
279
280 // Enqueue a callback for the specified flavor of Tasks RCU.
281 static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
282 struct rcu_tasks *rtp)
283 {
284 int chosen_cpu;
285 unsigned long flags;
286 int ideal_cpu;
287 unsigned long j;
288 bool needadjust = false;
289 bool needwake;
290 struct rcu_tasks_percpu *rtpcp;
291
292 rhp->next = NULL;
293 rhp->func = func;
294 local_irq_save(flags);
295 rcu_read_lock();
296 ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
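// The cpumask_next() call below returns ideal_cpu itself when that CPU
// is possible, and otherwise the next-higher possible CPU, which matters
// when cpu_possible_mask is sparse.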
297 chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
298 rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
299 if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
300 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
301 j = jiffies;
302 if (rtpcp->rtp_jiffies != j) {
303 rtpcp->rtp_jiffies = j;
304 rtpcp->rtp_n_lock_retries = 0;
305 }
306 if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
307 READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
308 needadjust = true; // Defer adjustment to avoid deadlock.
309 }
310 if (!rcu_segcblist_is_enabled(&rtpcp->cblist)) {
311 raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
312 cblist_init_generic(rtp);
313 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
314 }
315 needwake = rcu_segcblist_empty(&rtpcp->cblist);
316 rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
317 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
318 if (unlikely(needadjust)) {
319 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
320 if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
321 WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
322 WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
323 smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
324 pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
325 }
326 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
327 }
328 rcu_read_unlock();
329 /* We can't create the thread unless interrupts are enabled. */
330 if (needwake && READ_ONCE(rtp->kthread_ptr))
331 irq_work_queue(&rtpcp->rtp_irq_work);
332 }
333
334 // RCU callback function for rcu_barrier_tasks_generic().
335 static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
336 {
337 struct rcu_tasks *rtp;
338 struct rcu_tasks_percpu *rtpcp;
339
340 rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
341 rtp = rtpcp->rtpp;
342 if (atomic_dec_and_test(&rtp->barrier_q_count))
343 complete(&rtp->barrier_q_completion);
344 }
345
346 // Wait for all in-flight callbacks for the specified RCU Tasks flavor.
347 // Operates in a manner similar to rcu_barrier().
348 static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
349 {
350 int cpu;
351 unsigned long flags;
352 struct rcu_tasks_percpu *rtpcp;
353 unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);
354
355 mutex_lock(&rtp->barrier_q_mutex);
356 if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
357 smp_mb();
358 mutex_unlock(&rtp->barrier_q_mutex);
359 return;
360 }
361 rcu_seq_start(&rtp->barrier_q_seq);
362 init_completion(&rtp->barrier_q_completion);
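// Start the count at 2 so that the completion cannot trigger until the
// entrainment loop below has finished and the atomic_sub_and_test(2)
// following it has dropped both initial references. Each successfully
// entrained callback adds one reference, which is dropped when that
// callback (rcu_barrier_tasks_generic_cb) is invoked.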
363 atomic_set(&rtp->barrier_q_count, 2);
364 for_each_possible_cpu(cpu) {
365 if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
366 break;
367 rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
368 rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
369 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
370 if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
371 atomic_inc(&rtp->barrier_q_count);
372 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
373 }
374 if (atomic_sub_and_test(2, &rtp->barrier_q_count))
375 complete(&rtp->barrier_q_completion);
376 wait_for_completion(&rtp->barrier_q_completion);
377 rcu_seq_end(&rtp->barrier_q_seq);
378 mutex_unlock(&rtp->barrier_q_mutex);
379 }
380
381 // Advance callbacks and indicate whether either a grace period or
382 // callback invocation is needed.
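// The return value is a bit mask: 0x1 indicates that at least one in-use
// queue is non-empty (so callback invocation may be needed), and 0x2
// indicates that at least one queue holds callbacks still waiting for a
// grace period.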
383 static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
384 {
385 int cpu;
386 unsigned long flags;
387 bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq);
388 long n;
389 long ncbs = 0;
390 long ncbsnz = 0;
391 int needgpcb = 0;
392
393 for (cpu = 0; cpu < smp_load_acquire(&rtp->percpu_dequeue_lim); cpu++) {
394 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
395
396 /* Advance and accelerate any new callbacks. */
397 if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
398 continue;
399 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
400 // Should we shrink down to a single callback queue?
401 n = rcu_segcblist_n_cbs(&rtpcp->cblist);
402 if (n) {
403 ncbs += n;
404 if (cpu > 0)
405 ncbsnz += n;
406 }
407 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
408 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
409 if (rcu_segcblist_pend_cbs(&rtpcp->cblist))
410 needgpcb |= 0x3;
411 if (!rcu_segcblist_empty(&rtpcp->cblist))
412 needgpcb |= 0x1;
413 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
414 }
415
416 // Shrink down to a single callback queue if appropriate.
417 // This is done in two stages: (1) If there are no more than
418 // rcu_task_collapse_lim callbacks on CPU 0 and none on any other
419 // CPU, limit enqueueing to CPU 0. (2) After an RCU grace period,
420 // if there has not been an increase in callbacks, limit dequeuing
421 // to CPU 0. Note the matching RCU read-side critical section in
422 // call_rcu_tasks_generic().
423 if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
424 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
425 if (rtp->percpu_enqueue_lim > 1) {
426 WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
427 smp_store_release(&rtp->percpu_enqueue_lim, 1);
428 rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
429 gpdone = false;
430 pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
431 }
432 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
433 }
434 if (rcu_task_cb_adjust && !ncbsnz && gpdone) {
435 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
436 if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
437 WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
438 pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
439 }
440 if (rtp->percpu_dequeue_lim == 1) {
441 for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) {
442 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
443
444 WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
445 }
446 }
447 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
448 }
449
450 return needgpcb;
451 }
452
453 // Advance callbacks and invoke any that are ready.
454 static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
455 {
456 int cpu;
457 int cpunext;
458 unsigned long flags;
459 int len;
460 struct rcu_head *rhp;
461 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
462 struct rcu_tasks_percpu *rtpcp_next;
463
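// Invocation work fans out over an implicit binary tree rooted at CPU 0:
// the worker for CPU "cpu" kicks off workers for CPUs 2*cpu+1 and
// 2*cpu+2, stopping at the current dequeue limit.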
464 cpu = rtpcp->cpu;
465 cpunext = cpu * 2 + 1;
466 if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
467 rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
468 queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
469 cpunext++;
470 if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
471 rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
472 queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
473 }
474 }
475
476 if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu))
477 return;
478 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
479 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
480 rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
481 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
482 len = rcl.len;
483 for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
484 local_bh_disable();
485 rhp->func(rhp);
486 local_bh_enable();
487 cond_resched();
488 }
489 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
490 rcu_segcblist_add_len(&rtpcp->cblist, -len);
491 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
492 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
493 }
494
495 // Workqueue flood to advance callbacks and invoke any that are ready.
496 static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
497 {
498 struct rcu_tasks *rtp;
499 struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work);
500
501 rtp = rtpcp->rtpp;
502 rcu_tasks_invoke_cbs(rtp, rtpcp);
503 }
504
505 // Wait for one grace period.
506 static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot)
507 {
508 int needgpcb;
509
510 mutex_lock(&rtp->tasks_gp_mutex);
511
512 // If mid-boot, force a grace period below; otherwise, wait here until newly arrived callbacks need one.
513 if (unlikely(midboot)) {
514 needgpcb = 0x2;
515 } else {
516 set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
517 rcuwait_wait_event(&rtp->cbs_wait,
518 (needgpcb = rcu_tasks_need_gpcb(rtp)),
519 TASK_IDLE);
520 }
521
522 if (needgpcb & 0x2) {
523 // Wait for one grace period.
524 set_tasks_gp_state(rtp, RTGS_WAIT_GP);
525 rtp->gp_start = jiffies;
526 rcu_seq_start(&rtp->tasks_gp_seq);
527 rtp->gp_func(rtp);
528 rcu_seq_end(&rtp->tasks_gp_seq);
529 }
530
531 // Invoke callbacks.
532 set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
533 rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));
534 mutex_unlock(&rtp->tasks_gp_mutex);
535 }
536
537 // RCU-tasks kthread that detects grace periods and invokes callbacks.
538 static int __noreturn rcu_tasks_kthread(void *arg)
539 {
540 struct rcu_tasks *rtp = arg;
541
542 /* Run on housekeeping CPUs by default. Sysadm can move if desired. */
543 housekeeping_affine(current, HK_TYPE_RCU);
544 WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!
545
546 /*
547 * Each pass through the following loop makes one check for
548 * newly arrived callbacks, and, if there are some, waits for
549 * one RCU-tasks grace period and then invokes the callbacks.
550 * This loop is terminated by the system going down. ;-)
551 */
552 for (;;) {
553 // Wait for one grace period and invoke any callbacks
554 // that are ready.
555 rcu_tasks_one_gp(rtp, false);
556
557 // Paranoid sleep to keep this from entering a tight loop.
558 schedule_timeout_idle(rtp->gp_sleep);
559 }
560 }
561
562 // Wait for a grace period for the specified flavor of Tasks RCU.
563 static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
564 {
565 /* Complain if the scheduler has not started. */
566 if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
567 "synchronize_%s() called too soon", rtp->name))
568 return;
569
570 // If the grace-period kthread is running, use it.
571 if (READ_ONCE(rtp->kthread_ptr)) {
572 wait_rcu_gp(rtp->call_func);
573 return;
574 }
575 rcu_tasks_one_gp(rtp, true);
576 }
577
578 /* Spawn RCU-tasks grace-period kthread. */
579 static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
580 {
581 struct task_struct *t;
582
583 t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
584 if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
585 return;
586 smp_mb(); /* Ensure others see full kthread. */
587 }
588
589 #ifndef CONFIG_TINY_RCU
590
591 /*
592 * Print any non-default Tasks RCU settings.
593 */
594 static void __init rcu_tasks_bootup_oddness(void)
595 {
596 #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
597 int rtsimc;
598
599 if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
600 pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
601 rtsimc = clamp(rcu_task_stall_info_mult, 1, 10);
602 if (rtsimc != rcu_task_stall_info_mult) {
603 pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc);
604 rcu_task_stall_info_mult = rtsimc;
605 }
606 #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
607 #ifdef CONFIG_TASKS_RCU
608 pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
609 #endif /* #ifdef CONFIG_TASKS_RCU */
610 #ifdef CONFIG_TASKS_RUDE_RCU
611 pr_info("\tRude variant of Tasks RCU enabled.\n");
612 #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
613 #ifdef CONFIG_TASKS_TRACE_RCU
614 pr_info("\tTracing variant of Tasks RCU enabled.\n");
615 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
616 }
617
618 #endif /* #ifndef CONFIG_TINY_RCU */
619
620 #ifndef CONFIG_TINY_RCU
621 /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
622 static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
623 {
624 int cpu;
625 bool havecbs = false;
626
627 for_each_possible_cpu(cpu) {
628 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
629
630 if (!data_race(rcu_segcblist_empty(&rtpcp->cblist))) {
631 havecbs = true;
632 break;
633 }
634 }
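// In the message below, "k" indicates that the grace-period kthread has
// been spawned and "C" that at least one CPU still has queued callbacks;
// "." indicates the opposite in either position.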
635 pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
636 rtp->kname,
637 tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
638 jiffies - data_race(rtp->gp_jiffies),
639 data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
640 data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
641 ".k"[!!data_race(rtp->kthread_ptr)],
642 ".C"[havecbs],
643 s);
644 }
645 #endif // #ifndef CONFIG_TINY_RCU
646
647 static void exit_tasks_rcu_finish_trace(struct task_struct *t);
648
649 #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
650
651 ////////////////////////////////////////////////////////////////////////
652 //
653 // Shared code between task-list-scanning variants of Tasks RCU.
654
655 /* Wait for one RCU-tasks grace period. */
656 static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
657 {
658 struct task_struct *g;
659 int fract;
660 LIST_HEAD(holdouts);
661 unsigned long j;
662 unsigned long lastinfo;
663 unsigned long lastreport;
664 bool reported = false;
665 int rtsi;
666 struct task_struct *t;
667
668 set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
669 rtp->pregp_func(&holdouts);
670
671 /*
672 * There were callbacks, so we need to wait for an RCU-tasks
673 * grace period. Start off by scanning the task list for tasks
674 * that are not already voluntarily blocked. Mark these tasks
675 * and make a list of them in holdouts.
676 */
677 set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
678 if (rtp->pertask_func) {
679 rcu_read_lock();
680 for_each_process_thread(g, t)
681 rtp->pertask_func(t, &holdouts);
682 rcu_read_unlock();
683 }
684
685 set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
686 rtp->postscan_func(&holdouts);
687
688 /*
689 * Each pass through the following loop scans the list of holdout
690 * tasks, removing any that are no longer holdouts. When the list
691 * is empty, we are done.
692 */
693 lastreport = jiffies;
694 lastinfo = lastreport;
695 rtsi = READ_ONCE(rcu_task_stall_info);
696
697 // Start off with initial wait and slowly back off to 1 HZ wait.
698 fract = rtp->init_fract;
699
700 while (!list_empty(&holdouts)) {
701 ktime_t exp;
702 bool firstreport;
703 bool needreport;
704 int rtst;
705
706 // Slowly back off waiting for holdouts
707 set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
708 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
709 schedule_timeout_idle(fract);
710 } else {
711 exp = jiffies_to_nsecs(fract);
712 __set_current_state(TASK_IDLE);
713 schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD);
714 }
715
716 if (fract < HZ)
717 fract++;
718
719 rtst = READ_ONCE(rcu_task_stall_timeout);
720 needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
721 if (needreport) {
722 lastreport = jiffies;
723 reported = true;
724 }
725 firstreport = true;
726 WARN_ON(signal_pending(current));
727 set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
728 rtp->holdouts_func(&holdouts, needreport, &firstreport);
729
730 // Print pre-stall informational messages if needed.
731 j = jiffies;
732 if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) {
733 lastinfo = j;
734 rtsi = rtsi * rcu_task_stall_info_mult;
735 pr_info("%s: %s grace period number %lu (since boot) is %lu jiffies old.\n",
736 __func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start);
737 }
738 }
739
740 set_tasks_gp_state(rtp, RTGS_POST_GP);
741 rtp->postgp_func(rtp);
742 }
743
744 #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
745
746 #ifdef CONFIG_TASKS_RCU
747
748 ////////////////////////////////////////////////////////////////////////
749 //
750 // Simple variant of RCU whose quiescent states are voluntary context
751 // switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
752 // As such, grace periods can take one good long time. There are no
753 // read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
754 // because this implementation is intended to get the system into a safe
755 // state for some of the manipulations involved in tracing and the like.
756 // Finally, this implementation does not support high call_rcu_tasks()
757 // rates from multiple CPUs. If this is required, per-CPU callback lists
758 // will be needed.
759 //
760 // The implementation uses rcu_tasks_wait_gp(), which relies on function
761 // pointers in the rcu_tasks structure. The rcu_spawn_tasks_kthread()
762 // function sets these function pointers up so that rcu_tasks_wait_gp()
763 // invokes these functions in this order:
764 //
765 // rcu_tasks_pregp_step():
766 // Invokes synchronize_rcu() in order to wait for all in-flight
767 // t->on_rq and t->nvcsw transitions to complete. This works because
768 // all such transitions are carried out with interrupts disabled.
769 // rcu_tasks_pertask(), invoked on every non-idle task:
770 // For every runnable non-idle task other than the current one, use
771 // get_task_struct() to pin down that task, snapshot that task's
772 // number of voluntary context switches, and add that task to the
773 // holdout list.
774 // rcu_tasks_postscan():
775 // Invoke synchronize_srcu() to ensure that all tasks that were
776 // in the process of exiting (and which thus might not know to
777 // synchronize with this RCU Tasks grace period) have completed
778 // exiting.
779 // check_all_holdout_tasks(), repeatedly until holdout list is empty:
780 // Scans the holdout list, attempting to identify a quiescent state
781 // for each task on the list. If there is a quiescent state, the
782 // corresponding task is removed from the holdout list.
783 // rcu_tasks_postgp():
784 // Invokes synchronize_rcu() in order to ensure that all prior
785 // t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
786 // to have happened before the end of this RCU Tasks grace period.
787 // Again, this works because all such transitions are carried out
788 // with interrupts disabled.
789 //
790 // For each exiting task, the exit_tasks_rcu_start() and
791 // exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
792 // read-side critical sections waited for by rcu_tasks_postscan().
793 //
794 // Pre-grace-period update-side code is ordered before the grace period
795 // via the raw_spin_lock.*rcu_node(). Pre-grace-period read-side code
796 // is ordered before the grace period via synchronize_rcu() call in
797 // rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
798 // disabling.
799
800 /* Pre-grace-period preparation. */
801 static void rcu_tasks_pregp_step(struct list_head *hop)
802 {
803 /*
804 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
805 * to complete. Invoking synchronize_rcu() suffices because all
806 * these transitions occur with interrupts disabled. Without this
807 * synchronize_rcu(), a read-side critical section that started
808 * before the grace period might be incorrectly seen as having
809 * started after the grace period.
810 *
811 * This synchronize_rcu() also dispenses with the need for a
812 * memory barrier on the first store to t->rcu_tasks_holdout,
813 * as it forces the store to happen after the beginning of the
814 * grace period.
815 */
816 synchronize_rcu();
817 }
818
819 /* Per-task initial processing. */
820 static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
821 {
822 if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
823 get_task_struct(t);
824 t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
825 WRITE_ONCE(t->rcu_tasks_holdout, true);
826 list_add(&t->rcu_tasks_holdout_list, hop);
827 }
828 }
829
830 /* Processing between scanning the tasklist and draining the holdout list. */
831 static void rcu_tasks_postscan(struct list_head *hop)
832 {
833 /*
834 * Exiting tasks may escape the tasklist scan. Those are vulnerable
835 * until their final schedule() with TASK_DEAD state. To cope with
836 * this, divide the fragile part of the exit path into two intersecting
837 * read side critical sections:
838 *
839 * 1) An _SRCU_ read side starting before calling exit_notify(),
840 * which may remove the task from the tasklist, and ending after
841 * the final preempt_disable() call in do_exit().
842 *
843 * 2) An _RCU_ read side starting with the final preempt_disable()
844 * call in do_exit() and ending with the final call to schedule()
845 * with TASK_DEAD state.
846 *
847 * This handles the part 1). And postgp will handle part 2) with a
848 * call to synchronize_rcu().
849 */
850 synchronize_srcu(&tasks_rcu_exit_srcu);
851 }
852
853 /* See if tasks are still holding out, complain if so. */
854 static void check_holdout_task(struct task_struct *t,
855 bool needreport, bool *firstreport)
856 {
857 int cpu;
858
859 if (!READ_ONCE(t->rcu_tasks_holdout) ||
860 t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
861 !READ_ONCE(t->on_rq) ||
862 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
863 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
864 WRITE_ONCE(t->rcu_tasks_holdout, false);
865 list_del_init(&t->rcu_tasks_holdout_list);
866 put_task_struct(t);
867 return;
868 }
869 rcu_request_urgent_qs_task(t);
870 if (!needreport)
871 return;
872 if (*firstreport) {
873 pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
874 *firstreport = false;
875 }
876 cpu = task_cpu(t);
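// In the stall message below, "I" flags an idle task and "N" a task
// whose CPU is running in nohz_full mode; "." indicates otherwise.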
877 pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
878 t, ".I"[is_idle_task(t)],
879 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
880 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
881 t->rcu_tasks_idle_cpu, cpu);
882 sched_show_task(t);
883 }
884
885 /* Scan the holdout lists for tasks no longer holding out. */
886 static void check_all_holdout_tasks(struct list_head *hop,
887 bool needreport, bool *firstreport)
888 {
889 struct task_struct *t, *t1;
890
891 list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
892 check_holdout_task(t, needreport, firstreport);
893 cond_resched();
894 }
895 }
896
897 /* Finish off the Tasks-RCU grace period. */
898 static void rcu_tasks_postgp(struct rcu_tasks *rtp)
899 {
900 /*
901 * Because ->on_rq and ->nvcsw are not guaranteed to have full
902 * memory barriers prior to them in the schedule() path, memory
903 * reordering on other CPUs could cause their RCU-tasks read-side
904 * critical sections to extend past the end of the grace period.
905 * However, because these ->nvcsw updates are carried out with
906 * interrupts disabled, we can use synchronize_rcu() to force the
907 * needed ordering on all such CPUs.
908 *
909 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
910 * accesses to be within the grace period, avoiding the need for
911 * memory barriers for ->rcu_tasks_holdout accesses.
912 *
913 * In addition, this synchronize_rcu() waits for exiting tasks
914 * to complete their final preempt_disable() region of execution,
915 * cleaning up after synchronize_srcu(&tasks_rcu_exit_srcu),
916 * enforcing the whole region before tasklist removal until
917 * the final schedule() with TASK_DEAD state to be an RCU TASKS
918 * read side critical section.
919 */
920 synchronize_rcu();
921 }
922
923 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
924 DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
925
926 /**
927 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
928 * @rhp: structure to be used for queueing the RCU updates.
929 * @func: actual callback function to be invoked after the grace period
930 *
931 * The callback function will be invoked some time after a full grace
932 * period elapses, in other words after all currently executing RCU
933 * read-side critical sections have completed. call_rcu_tasks() assumes
934 * that the read-side critical sections end at a voluntary context
935 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
936 * or transition to usermode execution. As such, there are no read-side
937 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
938 * this primitive is intended to determine that all tasks have passed
939 * through a safe state, not so much for data-structure synchronization.
940 *
941 * See the description of call_rcu() for more detailed information on
942 * memory ordering guarantees.
943 */
944 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
945 {
946 call_rcu_tasks_generic(rhp, func, &rcu_tasks);
947 }
948 EXPORT_SYMBOL_GPL(call_rcu_tasks);
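/*
 * Illustrative sketch (hypothetical, not part of this file): a tracer
 * that must not free a trampoline while some task might still be
 * executing in it can unhook the trampoline from all call sites and
 * then defer the free via call_rcu_tasks(). The my_trampoline
 * structure, my_tramp_free() helper, and my_tramp_free_cb() below are
 * made-up names.
 *
 *	struct my_trampoline {
 *		struct rcu_head rh;
 *		void *text;
 *	};
 *
 *	static void my_tramp_free_cb(struct rcu_head *rhp)
 *	{
 *		struct my_trampoline *tp;
 *
 *		tp = container_of(rhp, struct my_trampoline, rh);
 *		my_tramp_free(tp->text);	// Hypothetical helper.
 *		kfree(tp);
 *	}
 *
 *	// After removing all callers' references to tp->text:
 *	call_rcu_tasks(&tp->rh, my_tramp_free_cb);
 */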
949
950 /**
951 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
952 *
953 * Control will return to the caller some time after a full rcu-tasks
954 * grace period has elapsed, in other words after all currently
955 * executing rcu-tasks read-side critical sections have completed. These
956 * read-side critical sections are delimited by calls to schedule(),
957 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
958 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
959 *
960 * This is a very specialized primitive, intended only for a few uses in
961 * tracing and other situations requiring manipulation of function
962 * preambles and profiling hooks. The synchronize_rcu_tasks() function
963 * is not (yet) intended for heavy use from multiple CPUs.
964 *
965 * See the description of synchronize_rcu() for more detailed information
966 * on memory ordering guarantees.
967 */
968 void synchronize_rcu_tasks(void)
969 {
970 synchronize_rcu_tasks_generic(&rcu_tasks);
971 }
972 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
973
974 /**
975 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
976 *
977 * Although the current implementation is guaranteed to wait, it is not
978 * obligated to, for example, if there are no pending callbacks.
979 */
980 void rcu_barrier_tasks(void)
981 {
982 rcu_barrier_tasks_generic(&rcu_tasks);
983 }
984 EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
985
986 static int __init rcu_spawn_tasks_kthread(void)
987 {
988 cblist_init_generic(&rcu_tasks);
989 rcu_tasks.gp_sleep = HZ / 10;
990 rcu_tasks.init_fract = HZ / 10;
991 rcu_tasks.pregp_func = rcu_tasks_pregp_step;
992 rcu_tasks.pertask_func = rcu_tasks_pertask;
993 rcu_tasks.postscan_func = rcu_tasks_postscan;
994 rcu_tasks.holdouts_func = check_all_holdout_tasks;
995 rcu_tasks.postgp_func = rcu_tasks_postgp;
996 rcu_spawn_tasks_kthread_generic(&rcu_tasks);
997 return 0;
998 }
999
1000 #if !defined(CONFIG_TINY_RCU)
1001 void show_rcu_tasks_classic_gp_kthread(void)
1002 {
1003 show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
1004 }
1005 EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
1006 #endif // !defined(CONFIG_TINY_RCU)
1007
1008 /*
1009 * Contribute to protect against tasklist scan blind spot while the
1010 * task is exiting and may be removed from the tasklist. See
1011 * corresponding synchronize_srcu() for further details.
1012 */
1013 void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
1014 {
1015 current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
1016 }
1017
1018 /*
1019 * Contribute to protect against tasklist scan blind spot while the
1020 * task is exiting and may be removed from the tasklist. See
1021 * corresponding synchronize_srcu() for further details.
1022 */
1023 void exit_tasks_rcu_stop(void) __releases(&tasks_rcu_exit_srcu)
1024 {
1025 struct task_struct *t = current;
1026
1027 __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
1028 }
1029
1030 /*
1031 * Contribute to protect against tasklist scan blind spot while the
1032 * task is exiting and may be removed from the tasklist. See
1033 * corresponding synchronize_srcu() for further details.
1034 */
1035 void exit_tasks_rcu_finish(void)
1036 {
1037 exit_tasks_rcu_stop();
1038 exit_tasks_rcu_finish_trace(current);
1039 }
1040
1041 #else /* #ifdef CONFIG_TASKS_RCU */
1042 void exit_tasks_rcu_start(void) { }
1043 void exit_tasks_rcu_stop(void) { }
1044 void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
1045 #endif /* #else #ifdef CONFIG_TASKS_RCU */
1046
1047 #ifdef CONFIG_TASKS_RUDE_RCU
1048
1049 ////////////////////////////////////////////////////////////////////////
1050 //
1051 // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
1052 // passing an empty function to schedule_on_each_cpu(). This approach
1053 // provides an asynchronous call_rcu_tasks_rude() API and batching of
1054 // concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
1055 // This invokes schedule_on_each_cpu() in order to send IPIs far and wide
1056 // and induces otherwise unnecessary context switches on all online CPUs,
1057 // whether idle or not.
1058 //
1059 // Callback handling is provided by the rcu_tasks_kthread() function.
1060 //
1061 // Ordering is provided by the scheduler's context-switch code.
1062
1063 // Empty function to allow workqueues to force a context switch.
1064 static void rcu_tasks_be_rude(struct work_struct *work)
1065 {
1066 }
1067
1068 // Wait for one rude RCU-tasks grace period.
1069 static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
1070 {
1071 rtp->n_ipis += cpumask_weight(cpu_online_mask);
1072 schedule_on_each_cpu(rcu_tasks_be_rude);
1073 }
1074
1075 void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
1076 DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
1077 "RCU Tasks Rude");
1078
1079 /**
1080 * call_rcu_tasks_rude() - Queue an RCU callback for invocation after a rude task-based grace period
1081 * @rhp: structure to be used for queueing the RCU updates.
1082 * @func: actual callback function to be invoked after the grace period
1083 *
1084 * The callback function will be invoked some time after a full grace
1085 * period elapses, in other words after all currently executing RCU
1086 * read-side critical sections have completed. call_rcu_tasks_rude()
1087 * assumes that the read-side critical sections end at context switch,
1088 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
1089 * usermode execution is schedulable). As such, there are no read-side
1090 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
1091 * this primitive is intended to determine that all tasks have passed
1092 * through a safe state, not so much for data-structure synchronization.
1093 *
1094 * See the description of call_rcu() for more detailed information on
1095 * memory ordering guarantees.
1096 */
1097 void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
1098 {
1099 call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
1100 }
1101 EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);
1102
1103 /**
1104 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
1105 *
1106 * Control will return to the caller some time after a rude rcu-tasks
1107 * grace period has elapsed, in other words after all currently
1108 * executing rcu-tasks read-side critical sections have completed. These
1109 * read-side critical sections are delimited by calls to schedule(),
1110 * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
1111 * context), and (in theory, anyway) cond_resched().
1112 *
1113 * This is a very specialized primitive, intended only for a few uses in
1114 * tracing and other situations requiring manipulation of function preambles
1115 * and profiling hooks. The synchronize_rcu_tasks_rude() function is not
1116 * (yet) intended for heavy use from multiple CPUs.
1117 *
1118 * See the description of synchronize_rcu() for more detailed information
1119 * on memory ordering guarantees.
1120 */
1121 void synchronize_rcu_tasks_rude(void)
1122 {
1123 synchronize_rcu_tasks_generic(&rcu_tasks_rude);
1124 }
1125 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
1126
1127 /**
1128 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
1129 *
1130 * Although the current implementation is guaranteed to wait, it is not
1131 * obligated to, for example, if there are no pending callbacks.
1132 */
1133 void rcu_barrier_tasks_rude(void)
1134 {
1135 rcu_barrier_tasks_generic(&rcu_tasks_rude);
1136 }
1137 EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
1138
1139 static int __init rcu_spawn_tasks_rude_kthread(void)
1140 {
1141 cblist_init_generic(&rcu_tasks_rude);
1142 rcu_tasks_rude.gp_sleep = HZ / 10;
1143 rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
1144 return 0;
1145 }
1146
1147 #if !defined(CONFIG_TINY_RCU)
1148 void show_rcu_tasks_rude_gp_kthread(void)
1149 {
1150 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
1151 }
1152 EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
1153 #endif // !defined(CONFIG_TINY_RCU)
1154 #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
1155
1156 ////////////////////////////////////////////////////////////////////////
1157 //
1158 // Tracing variant of Tasks RCU. This variant is designed to be used
1159 // to protect tracing hooks, including those of BPF. This variant
1160 // therefore:
1161 //
1162 // 1. Has explicit read-side markers to allow finite grace periods
1163 // in the face of in-kernel loops for PREEMPT=n builds.
1164 //
1165 // 2. Protects code in the idle loop, exception entry/exit, and
1166 // CPU-hotplug code paths, similar to the capabilities of SRCU.
1167 //
1168 // 3. Avoids expensive read-side instructions, having overhead similar
1169 // to that of Preemptible RCU.
1170 //
1171 // There are of course downsides. For example, the grace-period code
1172 // can send IPIs to CPUs, even when those CPUs are in the idle loop or
1173 // in nohz_full userspace. If needed, these downsides can be at least
1174 // partially remedied.
1175 //
1176 // Perhaps most important, this variant of RCU does not affect the vanilla
1177 // flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace
1178 // readers can operate from idle, offline, and exception entry/exit in no
1179 // way allows rcu_preempt and rcu_sched readers to also do so.
1180 //
1181 // The implementation uses rcu_tasks_wait_gp(), which relies on function
1182 // pointers in the rcu_tasks structure. The rcu_spawn_tasks_trace_kthread()
1183 // function sets these function pointers up so that rcu_tasks_wait_gp()
1184 // invokes these functions in this order:
1185 //
1186 // rcu_tasks_trace_pregp_step():
1187 // Disables CPU hotplug, adds all currently executing tasks to the
1188 // holdout list, then checks the state of all tasks that blocked
1189 // or were preempted within their current RCU Tasks Trace read-side
1190 // critical section, adding them to the holdout list if appropriate.
1191 // Finally, this function re-enables CPU hotplug.
1192 // The ->pertask_func() pointer is NULL, so there is no per-task processing.
1193 // rcu_tasks_trace_postscan():
1194 // Invokes synchronize_rcu() to wait for late-stage exiting tasks
1195 // to finish exiting.
1196 // check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
1197 // Scans the holdout list, attempting to identify a quiescent state
1198 // for each task on the list. If there is a quiescent state, the
1199 // corresponding task is removed from the holdout list. Once this
1200 // list is empty, the grace period has completed.
1201 // rcu_tasks_trace_postgp():
1202 // Provides the needed full memory barrier and does debug checks.
1203 //
1204 // The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
1205 //
1206 // Pre-grace-period update-side code is ordered before the grace period
1207 // via the ->cbs_lock and barriers in rcu_tasks_kthread(). Pre-grace-period
1208 // read-side code is ordered before the grace period by atomic operations
1209 // on .b.need_qs flag of each task involved in this process, or by scheduler
1210 // context-switch ordering (for locked-down non-running readers).
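/*
 * Illustrative read-side sketch (hypothetical, not part of this file):
 * a tracing hook protected by this flavor might be invoked as follows,
 * where my_hook is a made-up hook pointer. The read-side primitives
 * rcu_read_lock_trace() and rcu_read_unlock_trace() are declared in
 * include/linux/rcupdate_trace.h.
 *
 *	void (*fn)(void);
 *
 *	rcu_read_lock_trace();
 *	fn = READ_ONCE(my_hook);	// Hypothetical hook pointer.
 *	if (fn)
 *		fn();
 *	rcu_read_unlock_trace();
 *
 * The corresponding update side would switch my_hook away from the old
 * function and then invoke synchronize_rcu_tasks_trace() (or use
 * call_rcu_tasks_trace()) before freeing anything the old function
 * still depends on.
 */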
1211
1212 // The lockdep state must be outside of #ifdef to be useful.
1213 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1214 static struct lock_class_key rcu_lock_trace_key;
1215 struct lockdep_map rcu_trace_lock_map =
1216 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
1217 EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
1218 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
1219
1220 #ifdef CONFIG_TASKS_TRACE_RCU
1221
1222 // Record outstanding IPIs to each CPU. No point in sending two...
1223 static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
1224
1225 // The number of detections of task quiescent state relying on
1226 // heavyweight readers executing explicit memory barriers.
1227 static unsigned long n_heavy_reader_attempts;
1228 static unsigned long n_heavy_reader_updates;
1229 static unsigned long n_heavy_reader_ofl_updates;
1230 static unsigned long n_trc_holdouts;
1231
1232 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
1233 DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
1234 "RCU Tasks Trace");
1235
1236 /* Load from ->trc_reader_special.b.need_qs with proper ordering. */
1237 static u8 rcu_ld_need_qs(struct task_struct *t)
1238 {
1239 smp_mb(); // Enforce full grace-period ordering.
1240 return smp_load_acquire(&t->trc_reader_special.b.need_qs);
1241 }
1242
1243 /* Store to ->trc_reader_special.b.need_qs with proper ordering. */
1244 static void rcu_st_need_qs(struct task_struct *t, u8 v)
1245 {
1246 smp_store_release(&t->trc_reader_special.b.need_qs, v);
1247 smp_mb(); // Enforce full grace-period ordering.
1248 }
1249
1250 /*
1251 * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for
1252 * the four-byte operand-size restriction of some platforms.
1253 * Returns the old value, which is often ignored.
1254 */
1255 u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new)
1256 {
1257 union rcu_special ret;
1258 union rcu_special trs_old = READ_ONCE(t->trc_reader_special);
1259 union rcu_special trs_new = trs_old;
1260
1261 if (trs_old.b.need_qs != old)
1262 return trs_old.b.need_qs;
1263 trs_new.b.need_qs = new;
1264 ret.s = cmpxchg(&t->trc_reader_special.s, trs_old.s, trs_new.s);
1265 return ret.b.need_qs;
1266 }
1267 EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs);
1268
1269 /*
1270 * If we are the last reader, signal the grace-period kthread.
1271 * Also remove from the per-CPU list of blocked tasks.
1272 */
1273 void rcu_read_unlock_trace_special(struct task_struct *t)
1274 {
1275 unsigned long flags;
1276 struct rcu_tasks_percpu *rtpcp;
1277 union rcu_special trs;
1278
1279 // Open-coded full-word version of rcu_ld_need_qs().
1280 smp_mb(); // Enforce full grace-period ordering.
1281 trs = smp_load_acquire(&t->trc_reader_special);
1282
1283 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb)
1284 smp_mb(); // Pairs with update-side barriers.
1285 // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
1286 if (trs.b.need_qs == (TRC_NEED_QS_CHECKED | TRC_NEED_QS)) {
1287 u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS,
1288 TRC_NEED_QS_CHECKED);
1289
1290 WARN_ONCE(result != trs.b.need_qs, "%s: result = %d", __func__, result);
1291 }
1292 if (trs.b.blocked) {
1293 rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu);
1294 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1295 list_del_init(&t->trc_blkd_node);
1296 WRITE_ONCE(t->trc_reader_special.b.blocked, false);
1297 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1298 }
1299 WRITE_ONCE(t->trc_reader_nesting, 0);
1300 }
1301 EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
1302
1303 /* Add a newly blocked reader task to its CPU's list. */
1304 void rcu_tasks_trace_qs_blkd(struct task_struct *t)
1305 {
1306 unsigned long flags;
1307 struct rcu_tasks_percpu *rtpcp;
1308
1309 local_irq_save(flags);
1310 rtpcp = this_cpu_ptr(rcu_tasks_trace.rtpcpu);
1311 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled
1312 t->trc_blkd_cpu = smp_processor_id();
1313 if (!rtpcp->rtp_blkd_tasks.next)
1314 INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
1315 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1316 WRITE_ONCE(t->trc_reader_special.b.blocked, true);
1317 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1318 }
1319 EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd);
1320
1321 /* Add a task to the holdout list, if it is not already on the list. */
1322 static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
1323 {
1324 if (list_empty(&t->trc_holdout_list)) {
1325 get_task_struct(t);
1326 list_add(&t->trc_holdout_list, bhp);
1327 n_trc_holdouts++;
1328 }
1329 }
1330
1331 /* Remove a task from the holdout list, if it is in fact present. */
1332 static void trc_del_holdout(struct task_struct *t)
1333 {
1334 if (!list_empty(&t->trc_holdout_list)) {
1335 list_del_init(&t->trc_holdout_list);
1336 put_task_struct(t);
1337 n_trc_holdouts--;
1338 }
1339 }
1340
1341 /* IPI handler to check task state. */
1342 static void trc_read_check_handler(void *t_in)
1343 {
1344 int nesting;
1345 struct task_struct *t = current;
1346 struct task_struct *texp = t_in;
1347
1348 // If the task is no longer running on this CPU, leave.
1349 if (unlikely(texp != t))
1350 goto reset_ipi; // Already on holdout list, so will check later.
1351
1352 // If the task is not in a read-side critical section, and
1353 // if this is the last reader, awaken the grace-period kthread.
1354 nesting = READ_ONCE(t->trc_reader_nesting);
1355 if (likely(!nesting)) {
1356 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1357 goto reset_ipi;
1358 }
1359 // If we are racing with an rcu_read_unlock_trace(), try again later.
1360 if (unlikely(nesting < 0))
1361 goto reset_ipi;
1362
1363 // Get here if the task is in a read-side critical section.
1364 // Set its state so that it will update state for the grace-period
1365 // kthread upon exit from that critical section.
1366 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED);
1367
1368 reset_ipi:
1369 // Allow future IPIs to be sent on CPU and for task.
1370 // Also order this IPI handler against any later manipulations of
1371 // the intended task.
1372 smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
1373 smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
1374 }
1375
1376 /* Callback function for scheduler to check locked-down task. */
1377 static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
1378 {
1379 struct list_head *bhp = bhp_in;
1380 int cpu = task_cpu(t);
1381 int nesting;
1382 bool ofl = cpu_is_offline(cpu);
1383
1384 if (task_curr(t) && !ofl) {
1385 // If no chance of heavyweight readers, do it the hard way.
1386 if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
1387 return -EINVAL;
1388
1389 // If heavyweight readers are enabled on the remote task,
1390 // we can inspect its state despite it currently running.
1391 // However, we cannot safely change its state.
1392 n_heavy_reader_attempts++;
1393 // Check for "running" idle tasks on offline CPUs.
1394 if (!rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
1395 return -EINVAL; // No quiescent state, do it the hard way.
1396 n_heavy_reader_updates++;
1397 nesting = 0;
1398 } else {
1399 // The task is not running, so C-language access is safe.
1400 nesting = t->trc_reader_nesting;
1401 WARN_ON_ONCE(ofl && task_curr(t) && !is_idle_task(t));
1402 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl)
1403 n_heavy_reader_ofl_updates++;
1404 }
1405
1406 // If not exiting a read-side critical section, mark as checked
1407 // so that the grace-period kthread will remove it from the
1408 // holdout list.
1409 if (!nesting) {
1410 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1411 return 0; // In QS, so done.
1412 }
1413 if (nesting < 0)
1414 return -EINVAL; // Reader transitioning, try again later.
1415
1416 // The task is in a read-side critical section, so set up its
1417 // state so that it will update state upon exit from that critical
1418 // section.
1419 if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED))
1420 trc_add_holdout(t, bhp);
1421 return 0;
1422 }
1423
1424 /* Attempt to extract the state for the specified task. */
1425 static void trc_wait_for_one_reader(struct task_struct *t,
1426 struct list_head *bhp)
1427 {
1428 int cpu;
1429
1430 // If a previous IPI is still in flight, let it complete.
1431 if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
1432 return;
1433
1434 // The current task had better be in a quiescent state.
1435 if (t == current) {
1436 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1437 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1438 return;
1439 }
1440
1441 // Attempt to nail down the task for inspection.
1442 get_task_struct(t);
1443 if (!task_call_func(t, trc_inspect_reader, bhp)) {
1444 put_task_struct(t);
1445 return;
1446 }
1447 put_task_struct(t);
1448
1449 // If this task is not yet on the holdout list, then we are in
1450 // an RCU read-side critical section. Otherwise, the invocation of
1451 // trc_add_holdout() that added it to the list did the necessary
1452 // get_task_struct(). Either way, the task cannot be freed out
1453 // from under this code.
1454
1455 // If currently running, send an IPI; either way, add it to the list.
1456 trc_add_holdout(t, bhp);
1457 if (task_curr(t) &&
1458 time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
1459 // The task is currently running, so try IPIing it.
1460 cpu = task_cpu(t);
1461
1462 // If there is already an IPI outstanding, let it happen.
1463 if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
1464 return;
1465
1466 per_cpu(trc_ipi_to_cpu, cpu) = true;
1467 t->trc_ipi_to_cpu = cpu;
1468 rcu_tasks_trace.n_ipis++;
1469 if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
1470 // Just in case the failure has some cause other than
1471 // the target CPU being offline.
1472 WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n",
1473 __func__, cpu);
1474 rcu_tasks_trace.n_ipis_fails++;
1475 per_cpu(trc_ipi_to_cpu, cpu) = false;
1476 t->trc_ipi_to_cpu = -1;
1477 }
1478 }
1479 }
1480
1481 /*
1482 * Initialize for first-round processing for the specified task.
1483 * Return false if the task is NULL or already taken care of, true otherwise.
1484 */
1485 static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself)
1486 {
1487 // During early boot when there is only the one boot CPU, there
1488 // is no idle task for the other CPUs. Also, the grace-period
1489 // kthread is always in a quiescent state. In addition, just return
1490 // if this task is already on the list.
1491 if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list))
1492 return false;
1493
1494 rcu_st_need_qs(t, 0);
1495 t->trc_ipi_to_cpu = -1;
1496 return true;
1497 }
1498
1499 /* Do first-round processing for the specified task. */
1500 static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop)
1501 {
1502 if (rcu_tasks_trace_pertask_prep(t, true))
1503 trc_wait_for_one_reader(t, hop);
1504 }
1505
1506 /* Initialize for a new RCU-tasks-trace grace period. */
1507 static void rcu_tasks_trace_pregp_step(struct list_head *hop)
1508 {
1509 LIST_HEAD(blkd_tasks);
1510 int cpu;
1511 unsigned long flags;
1512 struct rcu_tasks_percpu *rtpcp;
1513 struct task_struct *t;
1514
1515 // There shouldn't be any old IPIs, but...
1516 for_each_possible_cpu(cpu)
1517 WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
1518
1519 // Disable CPU hotplug across the CPU scan for the benefit of
1520 // any IPIs that might be needed. This also waits for all readers
1521 // in CPU-hotplug code paths.
1522 cpus_read_lock();
1523
1524 // These rcu_tasks_trace_pertask_prep() calls are serialized to
1525 // allow safe access to the hop list.
1526 for_each_online_cpu(cpu) {
1527 rcu_read_lock();
1528 t = cpu_curr_snapshot(cpu);
1529 if (rcu_tasks_trace_pertask_prep(t, true))
1530 trc_add_holdout(t, hop);
1531 rcu_read_unlock();
1532 cond_resched_tasks_rcu_qs();
1533 }
1534
1535 // Only after all running tasks have been accounted for is it
1536 // safe to take care of the tasks that have blocked within their
1537 // current RCU tasks trace read-side critical section.
1538 for_each_possible_cpu(cpu) {
1539 rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, cpu);
1540 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1541 list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks);
1542 while (!list_empty(&blkd_tasks)) {
1543 rcu_read_lock();
1544 t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node);
1545 list_del_init(&t->trc_blkd_node);
1546 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1547 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1548 rcu_tasks_trace_pertask(t, hop);
1549 rcu_read_unlock();
1550 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1551 }
1552 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1553 cond_resched_tasks_rcu_qs();
1554 }
1555
1556 // Re-enable CPU hotplug now that the holdout list is populated.
1557 cpus_read_unlock();
1558 }
1559
1560 /*
1561 * Do intermediate processing between task and holdout scans.
1562 */
1563 static void rcu_tasks_trace_postscan(struct list_head *hop)
1564 {
1565 // Wait for late-stage exiting tasks to finish exiting.
1566 // These might have passed the call to exit_tasks_rcu_finish().
1567
1568 // If you remove the following line, update rcu_trace_implies_rcu_gp()!!!
1569 synchronize_rcu();
1570 // Any tasks that exit after this point will set
1571 // TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs.
1572 }
1573
1574 /* Communicate task state back to the RCU tasks trace stall warning request. */
1575 struct trc_stall_chk_rdr {
1576 int nesting;
1577 int ipi_to_cpu;
1578 u8 needqs;
1579 };
1580
1581 static int trc_check_slow_task(struct task_struct *t, void *arg)
1582 {
1583 struct trc_stall_chk_rdr *trc_rdrp = arg;
1584
1585 if (task_curr(t) && cpu_online(task_cpu(t)))
1586 return false; // It is running, so decline to inspect it.
1587 trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting);
1588 trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu);
1589 trc_rdrp->needqs = rcu_ld_need_qs(t);
1590 return true;
1591 }
1592
1593 /* Show the state of a task stalling the current RCU tasks trace GP. */
1594 static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
1595 {
1596 int cpu;
1597 struct trc_stall_chk_rdr trc_rdr;
1598 bool is_idle_tsk = is_idle_task(t);
1599
1600 if (*firstreport) {
1601 pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
1602 *firstreport = false;
1603 }
1604 cpu = task_cpu(t);
1605 if (!task_call_func(t, trc_check_slow_task, &trc_rdr))
1606 pr_alert("P%d: %c%c\n",
1607 t->pid,
1608 ".I"[t->trc_ipi_to_cpu >= 0],
1609 ".i"[is_idle_tsk]);
1610 else
1611 pr_alert("P%d: %c%c%c%c nesting: %d%c%c cpu: %d%s\n",
1612 t->pid,
1613 ".I"[trc_rdr.ipi_to_cpu >= 0],
1614 ".i"[is_idle_tsk],
1615 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
1616 ".B"[!!data_race(t->trc_reader_special.b.blocked)],
1617 trc_rdr.nesting,
1618 " !CN"[trc_rdr.needqs & 0x3],
1619 " ?"[trc_rdr.needqs > 0x3],
1620 cpu, cpu_online(cpu) ? "" : "(offline)");
1621 sched_show_task(t);
1622 }
1623
1624 /* List stalled IPIs for RCU tasks trace. */
1625 static void show_stalled_ipi_trace(void)
1626 {
1627 int cpu;
1628
1629 for_each_possible_cpu(cpu)
1630 if (per_cpu(trc_ipi_to_cpu, cpu))
1631 pr_alert("\tIPI outstanding to CPU %d\n", cpu);
1632 }
1633
1634 /* Do one scan of the holdout list. */
1635 static void check_all_holdout_tasks_trace(struct list_head *hop,
1636 bool needreport, bool *firstreport)
1637 {
1638 struct task_struct *g, *t;
1639
1640 // Disable CPU hotplug across the holdout list scan for IPIs.
1641 cpus_read_lock();
1642
1643 list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
1644 // If safe and needed, try to check the current task.
1645 if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
1646 !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED))
1647 trc_wait_for_one_reader(t, hop);
1648
1649 // If check succeeded, remove this task from the list.
1650 if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
1651 rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED)
1652 trc_del_holdout(t);
1653 else if (needreport)
1654 show_stalled_task_trace(t, firstreport);
1655 cond_resched_tasks_rcu_qs();
1656 }
1657
1658 // Re-enable CPU hotplug now that the holdout list scan has completed.
1659 cpus_read_unlock();
1660
1661 if (needreport) {
1662 if (*firstreport)
1663 pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
1664 show_stalled_ipi_trace();
1665 }
1666 }
1667
1668 static void rcu_tasks_trace_empty_fn(void *unused)
1669 {
1670 }
1671
1672 /* Wait for grace period to complete and provide ordering. */
1673 static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
1674 {
1675 int cpu;
1676
1677 // Wait for any lingering IPI handlers to complete. Note that
1678 // if a CPU has gone offline or transitioned to userspace in the
1679 // meantime, all IPI handlers should have been drained beforehand.
1680 // Yes, this assumes that CPUs process IPIs in order. If that ever
1681 // changes, there will need to be a recheck and/or timed wait.
1682 for_each_online_cpu(cpu)
1683 if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
1684 smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);
1685
1686 smp_mb(); // Caller's code must be ordered after wakeup.
1687 // Pairs with pretty much every ordering primitive.
1688 }
1689
1690 /* Report any needed quiescent state for this exiting task. */
1691 static void exit_tasks_rcu_finish_trace(struct task_struct *t)
1692 {
1693 union rcu_special trs = READ_ONCE(t->trc_reader_special);
1694
1695 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1696 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1697 if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked))
1698 rcu_read_unlock_trace_special(t);
1699 else
1700 WRITE_ONCE(t->trc_reader_nesting, 0);
1701 }
1702
1703 /**
1704 * call_rcu_tasks_trace() - Queue a callback for a trace task-based grace period
1705 * @rhp: structure to be used for queueing the RCU updates.
1706 * @func: actual callback function to be invoked after the grace period
1707 *
1708 * The callback function will be invoked some time after a trace rcu-tasks
1709 * grace period elapses, in other words after all currently executing
1710 * trace rcu-tasks read-side critical sections have completed. These
1711 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
1712 * and rcu_read_unlock_trace().
1713 *
1714 * See the description of call_rcu() for more detailed information on
1715 * memory ordering guarantees.
1716 */
1717 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
1718 {
1719 call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
1720 }
1721 EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
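/*
 * Illustrative usage sketch (not part of this file): a hypothetical caller
 * embeds an rcu_head in its own structure and queues a callback that frees
 * the enclosing object once a Tasks Trace grace period has elapsed.  The
 * struct and function names below are invented for the example.
 *
 *	struct my_obj {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void my_obj_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_obj, rh));
 *	}
 *
 *	static void my_obj_release(struct my_obj *p)
 *	{
 *		call_rcu_tasks_trace(&p->rh, my_obj_free_cb);
 *	}
 */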
1722
1723 /**
1724 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
1725 *
1726 * Control will return to the caller some time after a trace rcu-tasks
1727 * grace period has elapsed, in other words after all currently executing
1728 * trace rcu-tasks read-side critical sections have completed. These read-side
1729 * critical sections are delimited by calls to rcu_read_lock_trace()
1730 * and rcu_read_unlock_trace().
1731 *
1732 * This is a very specialized primitive, intended only for a few uses in
1733 * tracing and other situations requiring manipulation of function preambles
1734 * and profiling hooks. The synchronize_rcu_tasks_trace() function is not
1735 * (yet) intended for heavy use from multiple CPUs.
1736 *
1737 * See the description of synchronize_rcu() for more detailed information
1738 * on memory ordering guarantees.
1739 */
1740 void synchronize_rcu_tasks_trace(void)
1741 {
1742 RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
1743 synchronize_rcu_tasks_generic(&rcu_tasks_trace);
1744 }
1745 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
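/*
 * Illustrative usage sketch (not part of this file): a hypothetical updater
 * unpublishes an RCU-protected pointer and waits for all Tasks Trace readers,
 * which bracket their accesses with rcu_read_lock_trace() and
 * rcu_read_unlock_trace(), before freeing the old object.  The global_ptr,
 * newp, and do_something_with() names are invented for the example.
 *
 *	// Reader:
 *	rcu_read_lock_trace();
 *	p = rcu_dereference(global_ptr);
 *	if (p)
 *		do_something_with(p);
 *	rcu_read_unlock_trace();
 *
 *	// Updater:
 *	old = rcu_replace_pointer(global_ptr, newp, true);
 *	synchronize_rcu_tasks_trace();
 *	kfree(old);
 */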
1746
1747 /**
1748 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
1749 *
1750 * Although the current implementation is guaranteed to wait, it is not
1751 * obligated to do so, for example, if there are no pending callbacks.
1752 */
1753 void rcu_barrier_tasks_trace(void)
1754 {
1755 rcu_barrier_tasks_generic(&rcu_tasks_trace);
1756 }
1757 EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
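/*
 * Illustrative usage sketch (not part of this file): a hypothetical module
 * that queued callbacks via call_rcu_tasks_trace() waits for them all to be
 * invoked before its code and data disappear.  The my_module_exit() name is
 * invented for the example.
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		// Stop queueing new callbacks first, then wait for the
 *		// already-queued ones to be invoked.
 *		rcu_barrier_tasks_trace();
 *	}
 */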
1758
1759 static int __init rcu_spawn_tasks_trace_kthread(void)
1760 {
1761 cblist_init_generic(&rcu_tasks_trace);
1762 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
1763 rcu_tasks_trace.gp_sleep = HZ / 10;
1764 rcu_tasks_trace.init_fract = HZ / 10;
1765 } else {
1766 rcu_tasks_trace.gp_sleep = HZ / 200;
1767 if (rcu_tasks_trace.gp_sleep <= 0)
1768 rcu_tasks_trace.gp_sleep = 1;
1769 rcu_tasks_trace.init_fract = HZ / 200;
1770 if (rcu_tasks_trace.init_fract <= 0)
1771 rcu_tasks_trace.init_fract = 1;
1772 }
1773 rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
1774 rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
1775 rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
1776 rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
1777 rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
1778 return 0;
1779 }
1780
1781 #if !defined(CONFIG_TINY_RCU)
1782 void show_rcu_tasks_trace_gp_kthread(void)
1783 {
1784 char buf[64];
1785
1786 sprintf(buf, "N%lu h:%lu/%lu/%lu",
1787 data_race(n_trc_holdouts),
1788 data_race(n_heavy_reader_ofl_updates),
1789 data_race(n_heavy_reader_updates),
1790 data_race(n_heavy_reader_attempts));
1791 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
1792 }
1793 EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
1794 #endif // !defined(CONFIG_TINY_RCU)
1795
1796 #else /* #ifdef CONFIG_TASKS_TRACE_RCU */
1797 static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
1798 #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
1799
1800 #ifndef CONFIG_TINY_RCU
1801 void show_rcu_tasks_gp_kthreads(void)
1802 {
1803 show_rcu_tasks_classic_gp_kthread();
1804 show_rcu_tasks_rude_gp_kthread();
1805 show_rcu_tasks_trace_gp_kthread();
1806 }
1807 #endif /* #ifndef CONFIG_TINY_RCU */
1808
1809 #ifdef CONFIG_PROVE_RCU
1810 struct rcu_tasks_test_desc {
1811 struct rcu_head rh;
1812 const char *name;
1813 bool notrun;
1814 unsigned long runstart;
1815 };
1816
1817 static struct rcu_tasks_test_desc tests[] = {
1818 {
1819 .name = "call_rcu_tasks()",
1820 /* If not defined, the test is skipped. */
1821 .notrun = IS_ENABLED(CONFIG_TASKS_RCU),
1822 },
1823 {
1824 .name = "call_rcu_tasks_rude()",
1825 /* If not defined, the test is skipped. */
1826 .notrun = IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
1827 },
1828 {
1829 .name = "call_rcu_tasks_trace()",
1830 /* If not defined, the test is skipped. */
1831 .notrun = IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
1832 }
1833 };
1834
1835 static void test_rcu_tasks_callback(struct rcu_head *rhp)
1836 {
1837 struct rcu_tasks_test_desc *rttd =
1838 container_of(rhp, struct rcu_tasks_test_desc, rh);
1839
1840 pr_info("Callback from %s invoked.\n", rttd->name);
1841
1842 rttd->notrun = false;
1843 }
1844
1845 static void rcu_tasks_initiate_self_tests(void)
1846 {
1847 pr_info("Running RCU-tasks wait API self tests\n");
1848 #ifdef CONFIG_TASKS_RCU
1849 tests[0].runstart = jiffies;
1850 synchronize_rcu_tasks();
1851 call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
1852 #endif
1853
1854 #ifdef CONFIG_TASKS_RUDE_RCU
1855 tests[1].runstart = jiffies;
1856 synchronize_rcu_tasks_rude();
1857 call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
1858 #endif
1859
1860 #ifdef CONFIG_TASKS_TRACE_RCU
1861 tests[2].runstart = jiffies;
1862 synchronize_rcu_tasks_trace();
1863 call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
1864 #endif
1865 }
1866
1867 /*
1868 * Return: 0 - test passed
1869 * 1 - test failed, but have not timed out yet
1870 * -1 - test failed and timed out
1871 */
1872 static int rcu_tasks_verify_self_tests(void)
1873 {
1874 int ret = 0;
1875 int i;
1876 unsigned long bst = rcu_task_stall_timeout;
1877
1878 if (bst <= 0 || bst > RCU_TASK_BOOT_STALL_TIMEOUT)
1879 bst = RCU_TASK_BOOT_STALL_TIMEOUT;
1880 for (i = 0; i < ARRAY_SIZE(tests); i++) {
1881 while (tests[i].notrun) { // still hanging.
1882 if (time_after(jiffies, tests[i].runstart + bst)) {
1883 pr_err("%s has failed boot-time tests.\n", tests[i].name);
1884 ret = -1;
1885 break;
1886 }
1887 ret = 1;
1888 break;
1889 }
1890 }
1891 WARN_ON(ret < 0);
1892
1893 return ret;
1894 }
1895
1896 /*
1897 * Repeat the rcu_tasks_verify_self_tests() call once every second until the
1898 * test passes or has timed out.
1899 */
1900 static struct delayed_work rcu_tasks_verify_work;
1901 static void rcu_tasks_verify_work_fn(struct work_struct *work __maybe_unused)
1902 {
1903 int ret = rcu_tasks_verify_self_tests();
1904
1905 if (ret <= 0)
1906 return;
1907
1908 /* Test failed but has not yet timed out, so reschedule another check. */
1909 schedule_delayed_work(&rcu_tasks_verify_work, HZ);
1910 }
1911
1912 static int rcu_tasks_verify_schedule_work(void)
1913 {
1914 INIT_DELAYED_WORK(&rcu_tasks_verify_work, rcu_tasks_verify_work_fn);
1915 rcu_tasks_verify_work_fn(NULL);
1916 return 0;
1917 }
1918 late_initcall(rcu_tasks_verify_schedule_work);
1919 #else /* #ifdef CONFIG_PROVE_RCU */
1920 static void rcu_tasks_initiate_self_tests(void) { }
1921 #endif /* #else #ifdef CONFIG_PROVE_RCU */
1922
1923 void __init rcu_init_tasks_generic(void)
1924 {
1925 #ifdef CONFIG_TASKS_RCU
1926 rcu_spawn_tasks_kthread();
1927 #endif
1928
1929 #ifdef CONFIG_TASKS_RUDE_RCU
1930 rcu_spawn_tasks_rude_kthread();
1931 #endif
1932
1933 #ifdef CONFIG_TASKS_TRACE_RCU
1934 rcu_spawn_tasks_trace_kthread();
1935 #endif
1936
1937 // Run the self-tests.
1938 rcu_tasks_initiate_self_tests();
1939 }
1940
1941 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
1942 static inline void rcu_tasks_bootup_oddness(void) {}
1943 #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
1944