/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion,
 *	tiny variant.
 *
 * Copyright (C) IBM Corporation, 2017
 *
 * Author: Paul McKenney <paulmck@linux.ibm.com>
 */

#ifndef _LINUX_SRCU_TINY_H
#define _LINUX_SRCU_TINY_H

#include <linux/swait.h>

struct srcu_struct {
	short srcu_lock_nesting[2];	/* srcu_read_lock() nesting depth. */
	u8 srcu_gp_running;		/* GP workqueue running? */
	u8 srcu_gp_waiting;		/* GP waiting for readers? */
	unsigned long srcu_idx;		/* Current reader array element in bit 0x2. */
	unsigned long srcu_idx_max;	/* Furthest future srcu_idx request. */
	struct swait_queue_head srcu_wq;
					/* Last srcu_read_unlock() wakes GP. */
	struct rcu_head *srcu_cb_head;	/* Pending callbacks: Head. */
	struct rcu_head **srcu_cb_tail;	/* Pending callbacks: Tail. */
	struct work_struct srcu_work;	/* For driving grace periods. */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
};

void srcu_drive_gp(struct work_struct *wp);

#define __SRCU_STRUCT_INIT(name, __ignored)				\
{									\
	.srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq),	\
	.srcu_cb_tail = &name.srcu_cb_head,				\
	.srcu_work = __WORK_INITIALIZER(name.srcu_work, srcu_drive_gp),	\
	__SRCU_DEP_MAP_INIT(name)					\
}
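
/*
 * Note that __SRCU_STRUCT_INIT() only covers compile-time initialization:
 * dynamically allocated srcu_struct instances are instead set up with
 * init_srcu_struct() and torn down with cleanup_srcu_struct(), both
 * declared in include/linux/srcu.h.
 */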

/*
 * This odd _STATIC_ arrangement is needed for API compatibility with
 * Tree SRCU, which needs some per-CPU data.
 */
#define DEFINE_SRCU(name) \
	struct srcu_struct name = __SRCU_STRUCT_INIT(name, name)
#define DEFINE_STATIC_SRCU(name) \
	static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name)
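
/*
 * Example (illustrative sketch, not part of this header): defining a
 * file-local SRCU domain and entering a read-side critical section.
 * srcu_read_lock(), srcu_read_unlock(), and srcu_dereference() are the
 * wrappers declared in include/linux/srcu.h; "my_srcu", "my_data", and
 * "struct foo" are made-up names.
 *
 *	DEFINE_STATIC_SRCU(my_srcu);
 *
 *	static int my_reader(void)
 *	{
 *		int idx;
 *		struct foo *p;
 *
 *		idx = srcu_read_lock(&my_srcu);
 *		p = srcu_dereference(my_data, &my_srcu);
 *		... use p; SRCU readers, unlike plain RCU readers, may sleep ...
 *		srcu_read_unlock(&my_srcu, idx);
 *		return 0;
 *	}
 */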

void synchronize_srcu(struct srcu_struct *ssp);
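
/*
 * Example (illustrative sketch): the classic update-side pattern, using
 * the same made-up names as above.  Once synchronize_srcu() returns, no
 * pre-existing reader can still hold a reference to the old structure,
 * so it is safe to free.
 *
 *	rcu_assign_pointer(my_data, new_p);
 *	synchronize_srcu(&my_srcu);
 *	kfree(old_p);
 */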

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.  Can be invoked from irq/bh handlers, but the matching
 * __srcu_read_unlock() must be in the same handler instance.  Returns an
 * index that must be passed to the matching srcu_read_unlock().
 */
static inline int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	/* Derive the current reader index (0 or 1) from bit 0x2 of srcu_idx + 1. */
	idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
	/* A plain read-modify-write increment suffices on a Tiny (!SMP) kernel. */
	WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);
	return idx;
}

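/* Tiny SRCU has no expedited fast path; fall back on a normal grace period. */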
static inline void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
	synchronize_srcu(ssp);
}

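/*
 * In Tiny SRCU, callbacks are invoked in queue order and synchronize_srcu()
 * itself waits on a queued callback, so one grace period doubles as a
 * callback barrier.
 */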
static inline void srcu_barrier(struct srcu_struct *ssp)
{
	synchronize_srcu(ssp);
}

/* Defined here to avoid size increase for non-torture kernels. */
static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
					    char *tt, char *tf)
{
	int idx;

	idx = ((data_race(READ_ONCE(ssp->srcu_idx)) + 1) & 0x2) >> 1;
	pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd) gp: %lu->%lu\n",
		 tt, tf, idx,
		 data_race(READ_ONCE(ssp->srcu_lock_nesting[!idx])),
		 data_race(READ_ONCE(ssp->srcu_lock_nesting[idx])),
		 data_race(READ_ONCE(ssp->srcu_idx)),
		 data_race(READ_ONCE(ssp->srcu_idx_max)));
}

#endif