#ifndef __RWLOCK_H__
#define __RWLOCK_H__

#include <xen/spinlock.h>

#include <asm/atomic.h>
#include <asm/system.h>

typedef struct {
    atomic_t cnts;
    spinlock_t lock;
} rwlock_t;

#define    RW_LOCK_UNLOCKED {           \
    .cnts = ATOMIC_INIT(0),             \
    .lock = SPIN_LOCK_UNLOCKED          \
}

#define DEFINE_RWLOCK(l) rwlock_t l = RW_LOCK_UNLOCKED
#define rwlock_init(l) (*(l) = (rwlock_t)RW_LOCK_UNLOCKED)

/*
 * Writer states & reader shift and bias.
 *
 * The writer field is 8 bits wide to allow for potential optimisation; see
 * _write_unlock().
 */
#define    _QW_WAITING  1               /* A writer is waiting     */
#define    _QW_LOCKED   0xff            /* A writer holds the lock */
#define    _QW_WMASK    0xff            /* Writer mask             */
#define    _QR_SHIFT    8               /* Reader count shift      */
#define    _QR_BIAS     (1U << _QR_SHIFT)

void queue_read_lock_slowpath(rwlock_t *lock);
void queue_write_lock_slowpath(rwlock_t *lock);

/*
 * _read_trylock - try to acquire read lock of a queue rwlock.
 * @lock : Pointer to queue rwlock structure.
 * Return: 1 if lock acquired, 0 if failed.
 */
static inline int _read_trylock(rwlock_t *lock)
{
    u32 cnts;

    cnts = atomic_read(&lock->cnts);
    if ( likely(!(cnts & _QW_WMASK)) )
    {
        cnts = (u32)atomic_add_return(_QR_BIAS, &lock->cnts);
        if ( likely(!(cnts & _QW_WMASK)) )
            return 1;
        atomic_sub(_QR_BIAS, &lock->cnts);
    }
    return 0;
}

/*
 * _read_lock - acquire read lock of a queue rwlock.
 * @lock: Pointer to queue rwlock structure.
 */
static inline void _read_lock(rwlock_t *lock)
{
    u32 cnts;

    cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
    if ( likely(!(cnts & _QW_WMASK)) )
        return;

    /* The slowpath will decrement the reader count, if necessary. */
    queue_read_lock_slowpath(lock);
}

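/*
 * _read_lock_irq - acquire read lock of a queue rwlock, disabling interrupts.
 * @lock : Pointer to queue rwlock structure.
 */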
static inline void _read_lock_irq(rwlock_t *lock)
{
    ASSERT(local_irq_is_enabled());
    local_irq_disable();
    _read_lock(lock);
}

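/*
 * _read_lock_irqsave - acquire read lock of a queue rwlock, saving and
 * disabling interrupts.
 * @lock : Pointer to queue rwlock structure.
 * Return: The original interrupt flags, to be passed back to
 *         _read_unlock_irqrestore().
 */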
static inline unsigned long _read_lock_irqsave(rwlock_t *lock)
{
    unsigned long flags;

    local_irq_save(flags);
    _read_lock(lock);
    return flags;
}

/*
 * _read_unlock - release read lock of a queue rwlock.
 * @lock : Pointer to queue rwlock structure.
 */
static inline void _read_unlock(rwlock_t *lock)
{
    /*
     * Atomically decrement the reader count.
     */
    atomic_sub(_QR_BIAS, &lock->cnts);
}

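/*
 * _read_unlock_irq - release read lock of a queue rwlock and re-enable
 * interrupts.
 * @lock : Pointer to queue rwlock structure.
 */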
static inline void _read_unlock_irq(rwlock_t *lock)
{
    _read_unlock(lock);
    local_irq_enable();
}

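/*
 * _read_unlock_irqrestore - release read lock of a queue rwlock and restore
 * the saved interrupt flags.
 * @lock : Pointer to queue rwlock structure.
 * @flags: Interrupt flags returned by _read_lock_irqsave().
 */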
static inline void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
    _read_unlock(lock);
    local_irq_restore(flags);
}

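/*
 * _rw_is_locked - check whether a queue rwlock is held by anyone.
 * @lock : Pointer to queue rwlock structure.
 * Return: Non-zero if the lock is held (or a writer is waiting), 0 if free.
 */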
static inline int _rw_is_locked(rwlock_t *lock)
{
    return atomic_read(&lock->cnts);
}

/*
 * _write_lock - acquire write lock of a queue rwlock.
 * @lock : Pointer to queue rwlock structure.
 */
static inline void _write_lock(rwlock_t *lock)
{
    /* Optimize for the unfair lock case where the fair flag is 0. */
    if ( atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0 )
        return;

    queue_write_lock_slowpath(lock);
}

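/*
 * _write_lock_irq - acquire write lock of a queue rwlock, disabling
 * interrupts.
 * @lock : Pointer to queue rwlock structure.
 */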
static inline void _write_lock_irq(rwlock_t *lock)
{
    ASSERT(local_irq_is_enabled());
    local_irq_disable();
    _write_lock(lock);
}

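/*
 * _write_lock_irqsave - acquire write lock of a queue rwlock, saving and
 * disabling interrupts.
 * @lock : Pointer to queue rwlock structure.
 * Return: The original interrupt flags, to be passed back to
 *         _write_unlock_irqrestore().
 */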
static inline unsigned long _write_lock_irqsave(rwlock_t *lock)
{
    unsigned long flags;

    local_irq_save(flags);
    _write_lock(lock);
    return flags;
}

/*
 * _write_trylock - try to acquire write lock of a queue rwlock.
 * @lock : Pointer to queue rwlock structure.
 * Return: 1 if lock acquired, 0 if failed.
 */
static inline int _write_trylock(rwlock_t *lock)
{
    u32 cnts;

    cnts = atomic_read(&lock->cnts);
    if ( unlikely(cnts) )
        return 0;

    return likely(atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0);
}

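/*
 * _write_unlock - release write lock of a queue rwlock.
 * @lock : Pointer to queue rwlock structure.
 */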
static inline void _write_unlock(rwlock_t *lock)
{
    /*
     * If the writer field is atomic, it can be cleared directly.
     * Otherwise, an atomic subtraction will be used to clear it.
     */
    atomic_sub(_QW_LOCKED, &lock->cnts);
}

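/*
 * _write_unlock_irq - release write lock of a queue rwlock and re-enable
 * interrupts.
 * @lock : Pointer to queue rwlock structure.
 */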
static inline void _write_unlock_irq(rwlock_t *lock)
{
    _write_unlock(lock);
    local_irq_enable();
}

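/*
 * _write_unlock_irqrestore - release write lock of a queue rwlock and restore
 * the saved interrupt flags.
 * @lock : Pointer to queue rwlock structure.
 * @flags: Interrupt flags returned by _write_lock_irqsave().
 */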
static inline void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
    _write_unlock(lock);
    local_irq_restore(flags);
}

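/*
 * _rw_is_write_locked - check whether a queue rwlock is write locked.
 * @lock : Pointer to queue rwlock structure.
 * Return: 1 if a writer holds the lock, 0 otherwise.
 */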
static inline int _rw_is_write_locked(rwlock_t *lock)
{
    return (atomic_read(&lock->cnts) & _QW_WMASK) == _QW_LOCKED;
}

#define read_lock(l)                  _read_lock(l)
#define read_lock_irq(l)              _read_lock_irq(l)
#define read_lock_irqsave(l, f)                                 \
    ({                                                          \
        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));       \
        ((f) = _read_lock_irqsave(l));                          \
    })

#define read_unlock(l)                _read_unlock(l)
#define read_unlock_irq(l)            _read_unlock_irq(l)
#define read_unlock_irqrestore(l, f)  _read_unlock_irqrestore(l, f)
#define read_trylock(l)               _read_trylock(l)

#define write_lock(l)                 _write_lock(l)
#define write_lock_irq(l)             _write_lock_irq(l)
#define write_lock_irqsave(l, f)                                \
    ({                                                          \
        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));       \
        ((f) = _write_lock_irqsave(l));                         \
    })
#define write_trylock(l)              _write_trylock(l)

#define write_unlock(l)               _write_unlock(l)
#define write_unlock_irq(l)           _write_unlock_irq(l)
#define write_unlock_irqrestore(l, f) _write_unlock_irqrestore(l, f)

#define rw_is_locked(l)               _rw_is_locked(l)
#define rw_is_write_locked(l)         _rw_is_write_locked(l)
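
/*
 * Illustrative usage sketch (not part of the interface above): a reader and
 * a writer paired on a hypothetical lock protecting a hypothetical counter.
 * Each lock must be balanced by the matching unlock.
 *
 *     static DEFINE_RWLOCK(example_lock);   // hypothetical lock
 *     static unsigned int example_counter;  // hypothetical shared data
 *
 *     void example_reader(void)
 *     {
 *         unsigned int snapshot;
 *
 *         read_lock(&example_lock);
 *         snapshot = example_counter;       // multiple readers may run here
 *         read_unlock(&example_lock);
 *         (void)snapshot;
 *     }
 *
 *     void example_writer(void)
 *     {
 *         write_lock(&example_lock);
 *         example_counter++;                // exclusive access
 *         write_unlock(&example_lock);
 *     }
 */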

typedef struct percpu_rwlock percpu_rwlock_t;

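/*
 * A per-CPU rwlock is optimised for read-mostly workloads: an uncontended
 * reader merely publishes itself in a per-CPU owner pointer instead of
 * bouncing the shared reader count between caches.  A writer sets
 * writer_activating and takes the underlying rwlock for writing; readers
 * that observe writer_activating fall back to the ordinary read lock path
 * until the writer has finished.
 */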
struct percpu_rwlock {
    rwlock_t            rwlock;
    bool_t              writer_activating;
#ifndef NDEBUG
    percpu_rwlock_t     **percpu_owner;
#endif
};

#ifndef NDEBUG
#define PERCPU_RW_LOCK_UNLOCKED(owner) { RW_LOCK_UNLOCKED, 0, owner }
static inline void _percpu_rwlock_owner_check(percpu_rwlock_t **per_cpudata,
                                         percpu_rwlock_t *percpu_rwlock)
{
    ASSERT(per_cpudata == percpu_rwlock->percpu_owner);
}
#else
#define PERCPU_RW_LOCK_UNLOCKED(owner) { RW_LOCK_UNLOCKED, 0 }
#define _percpu_rwlock_owner_check(data, lock) ((void)0)
#endif

#define DEFINE_PERCPU_RWLOCK_RESOURCE(l, owner) \
    percpu_rwlock_t l = PERCPU_RW_LOCK_UNLOCKED(&get_per_cpu_var(owner))
#define percpu_rwlock_resource_init(l, owner) \
    (*(l) = (percpu_rwlock_t)PERCPU_RW_LOCK_UNLOCKED(&get_per_cpu_var(owner)))

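/*
 * _percpu_read_lock - acquire the read side of a per-CPU rwlock.
 * @per_cpudata : Address of the per-CPU owner variable used to track readers.
 * @percpu_rwlock : Pointer to the per-CPU rwlock structure.
 *
 * Normally invoked through the percpu_read_lock() wrapper below.
 */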
static inline void _percpu_read_lock(percpu_rwlock_t **per_cpudata,
                                         percpu_rwlock_t *percpu_rwlock)
{
    /* Validate the correct per_cpudata variable has been provided. */
    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);

    /* We cannot support recursion on the same lock. */
    ASSERT(this_cpu_ptr(per_cpudata) != percpu_rwlock);
    /*
     * Detect using a second percpu_rwlock_t simultaneously and fall back
     * to the standard read_lock.
     */
    if ( unlikely(this_cpu_ptr(per_cpudata) != NULL) )
    {
        read_lock(&percpu_rwlock->rwlock);
        return;
    }

    /* Indicate this cpu is reading. */
    this_cpu_ptr(per_cpudata) = percpu_rwlock;
    smp_mb();
    /* Check if a writer is waiting. */
    if ( unlikely(percpu_rwlock->writer_activating) )
    {
        /* Let the waiting writer know we aren't holding the lock. */
        this_cpu_ptr(per_cpudata) = NULL;
        /* Wait using the read lock to keep the lock fair. */
        read_lock(&percpu_rwlock->rwlock);
        /* Set the per CPU data again and continue. */
        this_cpu_ptr(per_cpudata) = percpu_rwlock;
        /* Drop the read lock because we don't need it anymore. */
        read_unlock(&percpu_rwlock->rwlock);
    }
}

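/*
 * _percpu_read_unlock - release the read side of a per-CPU rwlock.
 * @per_cpudata : Address of the per-CPU owner variable used to track readers.
 * @percpu_rwlock : Pointer to the per-CPU rwlock structure.
 *
 * Normally invoked through the percpu_read_unlock() wrapper below.
 */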
static inline void _percpu_read_unlock(percpu_rwlock_t **per_cpudata,
                percpu_rwlock_t *percpu_rwlock)
{
    /* Validate the correct per_cpudata variable has been provided. */
    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);

    /* Verify the read lock was taken for this lock. */
    ASSERT(this_cpu_ptr(per_cpudata) != NULL);
    /*
     * Detect using a second percpu_rwlock_t simultaneously and fall back
     * to the standard read_unlock.
     */
    if ( unlikely(this_cpu_ptr(per_cpudata) != percpu_rwlock) )
    {
        read_unlock(&percpu_rwlock->rwlock);
        return;
    }
    this_cpu_ptr(per_cpudata) = NULL;
    smp_wmb();
}

/* Don't inline percpu write lock as it's a complex function. */
void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
                        percpu_rwlock_t *percpu_rwlock);

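/*
 * _percpu_write_unlock - release the write side of a per-CPU rwlock.
 * @per_cpudata : Address of the per-CPU owner variable used to track readers.
 * @percpu_rwlock : Pointer to the per-CPU rwlock structure.
 */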
static inline void _percpu_write_unlock(percpu_rwlock_t **per_cpudata,
                percpu_rwlock_t *percpu_rwlock)
{
    /* Validate the correct per_cpudata variable has been provided. */
    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);

    ASSERT(percpu_rwlock->writer_activating);
    percpu_rwlock->writer_activating = 0;
    write_unlock(&percpu_rwlock->rwlock);
}

#define percpu_rw_is_write_locked(l)         _rw_is_write_locked(&((l)->rwlock))

#define percpu_read_lock(percpu, lock) \
    _percpu_read_lock(&get_per_cpu_var(percpu), lock)
#define percpu_read_unlock(percpu, lock) \
    _percpu_read_unlock(&get_per_cpu_var(percpu), lock)
#define percpu_write_lock(percpu, lock) \
    _percpu_write_lock(&get_per_cpu_var(percpu), lock)
#define percpu_write_unlock(percpu, lock) \
    _percpu_write_unlock(&get_per_cpu_var(percpu), lock)

#define DEFINE_PERCPU_RWLOCK_GLOBAL(name) DEFINE_PER_CPU(percpu_rwlock_t *, \
                                                         name)
#define DECLARE_PERCPU_RWLOCK_GLOBAL(name) DECLARE_PER_CPU(percpu_rwlock_t *, \
                                                           name)
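
/*
 * Illustrative usage sketch (not part of the interface above), using a
 * hypothetical per-CPU owner variable and a hypothetical resource lock:
 *
 *     static DEFINE_PERCPU_RWLOCK_GLOBAL(example_percpu_owner);
 *     static DEFINE_PERCPU_RWLOCK_RESOURCE(example_resource_lock,
 *                                          example_percpu_owner);
 *
 *     void example_percpu_reader(void)
 *     {
 *         percpu_read_lock(example_percpu_owner, &example_resource_lock);
 *         // ... read-side critical section ...
 *         percpu_read_unlock(example_percpu_owner, &example_resource_lock);
 *     }
 *
 *     void example_percpu_writer(void)
 *     {
 *         percpu_write_lock(example_percpu_owner, &example_resource_lock);
 *         // ... write-side critical section ...
 *         percpu_write_unlock(example_percpu_owner, &example_resource_lock);
 *     }
 */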

#endif /* __RWLOCK_H__ */