1 /*
2  * Read-Copy Update mechanism for mutual exclusion
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; If not, see <http://www.gnu.org/licenses/>.
16  *
17  * Copyright (C) IBM Corporation, 2001
18  *
19  * Author: Dipankar Sarma <dipankar@in.ibm.com>
20  *
21  * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
22  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
23  * Papers:
24  * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
25  * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
26  *
27  * For detailed explanation of Read-Copy Update mechanism see -
28  * http://lse.sourceforge.net/locking/rcupdate.html
29  */
30 
31 #ifndef __XEN_RCUPDATE_H
32 #define __XEN_RCUPDATE_H
33 
34 #include <xen/cache.h>
35 #include <xen/spinlock.h>
36 #include <xen/percpu.h>
37 #include <xen/cpumask.h>
38 #include <xen/preempt.h>
39 
40 #define __rcu
41 
/**
 * struct rcu_head - callback structure for use with RCU
 * @next: next update requests in a list
 * @func: actual update function to call after the grace period.
 *
 * Typically embedded inside the RCU-protected structure itself; @func
 * receives a pointer to this member and can recover the enclosing
 * object (e.g. via container_of()).  See call_rcu() below.
 */
struct rcu_head {
    struct rcu_head *next;
    void (*func)(struct rcu_head *head);
};
51 
/* Static initializer for a struct rcu_head: no callback queued. */
#define RCU_HEAD_INIT   { .next = NULL, .func = NULL }

/* Define and statically initialize a struct rcu_head named @head. */
#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT

/* Run-time (re)initialization of an rcu_head, e.g. before call_rcu(). */
#define INIT_RCU_HEAD(ptr) do { \
    (ptr)->next = NULL;         \
    (ptr)->func = NULL;         \
} while (0)
57 
58 
/* Whether RCU work (quiescent-state reporting or ready callbacks) is
 * pending on @cpu.  NOTE(review): semantics inferred from name; confirm
 * against the definition in rcupdate.c. */
int rcu_pending(int cpu);
/* Whether @cpu still has outstanding RCU state and therefore should not
 * be put into a deep/long sleep.  NOTE(review): inferred from name;
 * confirm against rcupdate.c. */
int rcu_needs_cpu(int cpu);
61 
/*
 * Dummy lock type for passing to rcu_read_{lock,unlock}. Currently exists
 * only to document the reason for rcu_read_lock() critical sections.
 *
 * The struct is empty (zero-sized under the GNU C extension that permits
 * empty structs), so a DEFINE_RCU_READ_LOCK() object is purely an
 * annotation aid and costs nothing at run time.
 */
struct _rcu_read_lock {};
typedef struct _rcu_read_lock rcu_read_lock_t;
#define DEFINE_RCU_READ_LOCK(x) rcu_read_lock_t x
69 
/**
 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
 *
 * When call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * It is illegal to block while in an RCU read-side critical section.
 *
 * The @x argument exists only to document which lock-like object
 * protects the section; it is evaluated and discarded.  The actual
 * implementation simply disables preemption.
 */
#define rcu_read_lock(x)       ({ ((void)(x)); preempt_disable(); })
97 
/**
 * rcu_read_unlock - marks the end of an RCU read-side critical section.
 *
 * Re-enables preemption; @x is evaluated and discarded, present only to
 * match the rcu_read_lock() that opened the section.
 *
 * See rcu_read_lock() for more information.
 */
#define rcu_read_unlock(x)     ({ ((void)(x)); preempt_enable(); })
104 
105 /*
106  * So where is rcu_write_lock()?  It does not exist, as there is no
107  * way for writers to lock out RCU readers.  This is a feature, not
108  * a bug -- this property is what provides RCU's performance benefits.
109  * Of course, writers must coordinate with each other.  The normal
110  * spinlock primitives work well for this, but any other technique may be
111  * used as well.  RCU does not care how the writers keep out of each
112  * others' way, as long as they do so.
113  */
114 
/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section.  This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 *
 * NOTE(review): this implementation expands to a plain access with no
 * barrier — presumably sufficient because every supported architecture
 * orders dependent loads (Alpha being the historical exception); confirm
 * none of the targets this header is built for needs a read barrier here.
 */
#define rcu_dereference(p)     (p)
125 
/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections.  Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment.  More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 *
 * The smp_wmb() orders all prior stores initializing *(v) before the
 * publishing store to (p); the statement expression yields the value
 * assigned, as documented above.
 */
#define rcu_assign_pointer(p, v) ({ smp_wmb(); (p) = (v); })
139 
/* One-time initialization of the RCU subsystem. */
void rcu_init(void);
/* Per-CPU periodic hook to advance RCU state and invoke callbacks whose
 * grace period has elapsed.  NOTE(review): inferred from name; confirm
 * call sites (timer/scheduler tick) in rcupdate.c. */
void rcu_check_callbacks(int cpu);

/* Exported interfaces */

/*
 * call_rcu - queue @head so that @func(head) is invoked after a grace
 * period, i.e. once all CPUs have left any read-side critical sections
 * that were in progress at the time of the call.  @head is typically
 * embedded in the structure being deferred-freed.
 */
void call_rcu(struct rcu_head *head,
              void (*func)(struct rcu_head *head));

/* Wait for all previously queued RCU callbacks to complete.
 * NOTE(review): return-value semantics are not visible in this header;
 * see rcupdate.c. */
int rcu_barrier(void);

/* Notify RCU that @cpu is entering/leaving the idle loop, so quiescent
 * states can be accounted for idle CPUs. */
void rcu_idle_enter(unsigned int cpu);
void rcu_idle_exit(unsigned int cpu);

/* Start/stop the timer used to drive pending RCU work on an idle CPU.
 * NOTE(review): inferred from names; confirm in rcupdate.c. */
void rcu_idle_timer_start(void);
void rcu_idle_timer_stop(void);
154 
155 #endif /* __XEN_RCUPDATE_H */
156