#ifndef __SPINLOCK_H__
#define __SPINLOCK_H__

#include <xen/nospec.h>
#include <xen/time.h>
#include <xen/types.h>

#include <asm/system.h>
#include <asm/spinlock.h>

#define SPINLOCK_CPU_BITS  16

#ifdef CONFIG_DEBUG_LOCKS
union lock_debug {
    uint32_t val;
#define LOCK_DEBUG_INITVAL 0xffffffffU
    struct {
        unsigned int cpu:SPINLOCK_CPU_BITS;
#define LOCK_DEBUG_PAD_BITS (30 - SPINLOCK_CPU_BITS)
        unsigned int :LOCK_DEBUG_PAD_BITS;
        bool irq_safe:1;
        bool unseen:1;
    };
};
#define LOCK_DEBUG_ { .val = LOCK_DEBUG_INITVAL }
void check_lock(union lock_debug *debug, bool try);
void lock_enter(const union lock_debug *debug);
void lock_exit(const union lock_debug *debug);
void spin_debug_enable(void);
void spin_debug_disable(void);
#else
union lock_debug { };
#define LOCK_DEBUG_ { }
#define check_lock(l, t) ((void)0)
#define lock_enter(l) ((void)0)
#define lock_exit(l) ((void)0)
#define spin_debug_enable() ((void)0)
#define spin_debug_disable() ((void)0)
#endif

#ifdef CONFIG_DEBUG_LOCK_PROFILE

#include <public/sysctl.h>

/*
    lock profiling on:

    Global locks which should be subject to profiling must be declared via
    DEFINE_[R]SPINLOCK.

    For locks in structures, further measures are necessary:
    - the structure definition must include a profile_head with exactly this
      name:

      struct lock_profile_qhead   profile_head;

    - the individual locks which are subject to profiling have to be
      initialized via

      [r]spin_lock_init_prof(ptr, lock);

      with ptr being the main structure pointer and lock the spinlock field

    - each structure has to be added to profiling with

      lock_profile_register_struct(type, ptr, idx);

      with:
        type:  something like LOCKPROF_TYPE_PERDOM
        ptr:   pointer to the structure
        idx:   index of that structure, e.g. domid

    - removal of a structure is done via

      lock_profile_deregister_struct(type, ptr);
*/
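
/*
 * An illustrative sketch of the steps above for a structure-embedded lock.
 * The structure and function names are hypothetical, not part of this
 * interface:
 *
 *   struct my_obj {
 *       spinlock_t lock;
 *       struct lock_profile_qhead profile_head;
 *   };
 *
 *   static void my_obj_init(struct my_obj *obj, int32_t idx)
 *   {
 *       spin_lock_init_prof(obj, lock);
 *       lock_profile_register_struct(LOCKPROF_TYPE_PERDOM, obj, idx);
 *   }
 *
 *   static void my_obj_teardown(struct my_obj *obj)
 *   {
 *       lock_profile_deregister_struct(LOCKPROF_TYPE_PERDOM, obj);
 *   }
 */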

struct spinlock;

struct lock_profile {
    struct lock_profile *next;       /* forward link */
    const char          *name;       /* lock name */
    union {
        struct spinlock *lock;       /* the lock itself */
        struct rspinlock *rlock;     /* the recursive lock itself */
    } ptr;
    uint64_t            lock_cnt;    /* # of complete locking ops */
    uint64_t            block_cnt:63; /* # of complete wait for lock */
    bool                is_rlock:1;  /* use rlock pointer */
    s_time_t            time_hold;   /* cumulated lock time */
    s_time_t            time_block;  /* cumulated wait time */
    s_time_t            time_locked; /* system time of last locking */
};

struct lock_profile_qhead {
    struct lock_profile_qhead *head_q; /* next head of this type */
    struct lock_profile       *elem_q; /* first element in q */
    int32_t                   idx;     /* index for printout */
};

#define LOCK_PROFILE_(lockname) { .name = #lockname, .ptr.lock = &(lockname), }
#define RLOCK_PROFILE_(lockname) { .name = #lockname,                         \
                                   .ptr.rlock = &(lockname),                  \
                                   .is_rlock = true, }
#define LOCK_PROFILE_PTR_(name)                                               \
    static struct lock_profile * const lock_profile__##name                   \
    __used_section(".lockprofile.data") =                                     \
    &lock_profile_data__##name
#define SPIN_LOCK_UNLOCKED_(x) {                                              \
    .debug = LOCK_DEBUG_,                                                     \
    .profile = x,                                                             \
}
#define RSPIN_LOCK_UNLOCKED_(x) {                                             \
    .recurse_cpu = SPINLOCK_NO_CPU,                                           \
    .debug = LOCK_DEBUG_,                                                     \
    .profile = x,                                                             \
}
#define SPIN_LOCK_UNLOCKED SPIN_LOCK_UNLOCKED_(NULL)
#define DEFINE_SPINLOCK(l)                                                    \
    spinlock_t l = SPIN_LOCK_UNLOCKED_(NULL);                                 \
    static struct lock_profile lock_profile_data__##l = LOCK_PROFILE_(l);     \
    LOCK_PROFILE_PTR_(l)
#define RSPIN_LOCK_UNLOCKED RSPIN_LOCK_UNLOCKED_(NULL)
#define DEFINE_RSPINLOCK(l)                                                   \
    rspinlock_t l = RSPIN_LOCK_UNLOCKED_(NULL);                               \
    static struct lock_profile lock_profile_data__##l = RLOCK_PROFILE_(l);    \
    LOCK_PROFILE_PTR_(l)

#define spin_lock_init_prof__(s, l, lockptr, locktype, isr)                   \
    do {                                                                      \
        struct lock_profile *prof;                                            \
        prof = xzalloc(struct lock_profile);                                  \
        (s)->l = (locktype)SPIN_LOCK_UNLOCKED_(prof);                         \
        if ( !prof )                                                          \
        {                                                                     \
            printk(XENLOG_WARNING                                             \
                   "lock profiling unavailable for %p(%d)'s %s\n",            \
                   s, (s)->profile_head.idx, #l);                             \
            break;                                                            \
        }                                                                     \
        prof->name = #l;                                                      \
        prof->ptr.lockptr = &(s)->l;                                          \
        prof->is_rlock = (isr);                                               \
        prof->next = (s)->profile_head.elem_q;                                \
        (s)->profile_head.elem_q = prof;                                      \
    } while( 0 )

#define spin_lock_init_prof(s, l)                                             \
    spin_lock_init_prof__(s, l, lock, spinlock_t, false)
#define rspin_lock_init_prof(s, l) do {                                       \
        spin_lock_init_prof__(s, l, rlock, rspinlock_t, true);                \
        (s)->l.recurse_cpu = SPINLOCK_NO_CPU;                                 \
        (s)->l.recurse_cnt = 0;                                               \
    } while (0)
156 
157 void _lock_profile_register_struct(
158     int32_t type, struct lock_profile_qhead *qhead, int32_t idx);
159 void _lock_profile_deregister_struct(int32_t type,
160     struct lock_profile_qhead *qhead);
161 
162 #define lock_profile_register_struct(type, ptr, idx)                          \
163     _lock_profile_register_struct(type, &((ptr)->profile_head), idx)
164 #define lock_profile_deregister_struct(type, ptr)                             \
165     _lock_profile_deregister_struct(type, &((ptr)->profile_head))
166 
167 extern int spinlock_profile_control(struct xen_sysctl_lockprof_op *pc);
168 extern void cf_check spinlock_profile_printall(unsigned char key);
169 extern void cf_check spinlock_profile_reset(unsigned char key);
170 
#else

struct lock_profile_qhead { };
struct lock_profile { };

#define SPIN_LOCK_UNLOCKED {                                                  \
    .debug = LOCK_DEBUG_,                                                     \
}
#define RSPIN_LOCK_UNLOCKED {                                                 \
    .recurse_cpu = SPINLOCK_NO_CPU,                                           \
    .debug = LOCK_DEBUG_,                                                     \
}
#define DEFINE_SPINLOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED
#define DEFINE_RSPINLOCK(l) rspinlock_t l = RSPIN_LOCK_UNLOCKED

#define spin_lock_init_prof(s, l) spin_lock_init(&((s)->l))
#define rspin_lock_init_prof(s, l) rspin_lock_init(&((s)->l))
#define lock_profile_register_struct(type, ptr, idx)
#define lock_profile_deregister_struct(type, ptr)
#define spinlock_profile_printall(key)

#endif

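/*
 * Ticket lock state (a brief note, based on the implementation in
 * spinlock.c): a CPU acquires the lock by atomically post-incrementing
 * .tail (SPINLOCK_TICKET_INC adds 1 to the high 16 bits) and spinning
 * until .head matches its ticket; unlocking increments .head, admitting
 * waiters in FIFO order.
 */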
typedef union {
    uint32_t head_tail;
    struct {
        uint16_t head;
        uint16_t tail;
    };
} spinlock_tickets_t;

#define SPINLOCK_TICKET_INC { .head_tail = 0x10000, }

typedef struct spinlock {
    spinlock_tickets_t tickets;
    union lock_debug debug;
#ifdef CONFIG_DEBUG_LOCK_PROFILE
    struct lock_profile *profile;
#endif
} spinlock_t;

typedef struct rspinlock {
    spinlock_tickets_t tickets;
    uint16_t recurse_cpu;
#define SPINLOCK_NO_CPU        ((1u << SPINLOCK_CPU_BITS) - 1)
#define SPINLOCK_RECURSE_BITS  8
    uint8_t recurse_cnt;
#define SPINLOCK_MAX_RECURSE   15
    union lock_debug debug;
#ifdef CONFIG_DEBUG_LOCK_PROFILE
    struct lock_profile *profile;
#endif
} rspinlock_t;

#define spin_lock_init(l) (*(l) = (spinlock_t)SPIN_LOCK_UNLOCKED)
#define rspin_lock_init(l) (*(l) = (rspinlock_t)RSPIN_LOCK_UNLOCKED)
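
/*
 * A minimal sketch of runtime initialization for a lock embedded in a
 * dynamically allocated structure ("struct foo" and "f" are hypothetical):
 *
 *   struct foo {
 *       spinlock_t lock;
 *   };
 *
 *   struct foo *f = xzalloc(struct foo);
 *
 *   if ( f )
 *       spin_lock_init(&f->lock);
 */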

void _spin_lock(spinlock_t *lock);
void _spin_lock_cb(spinlock_t *lock, void (*cb)(void *data), void *data);
void _spin_lock_irq(spinlock_t *lock);
unsigned long _spin_lock_irqsave(spinlock_t *lock);

void _spin_unlock(spinlock_t *lock);
void _spin_unlock_irq(spinlock_t *lock);
void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);

bool _spin_is_locked(const spinlock_t *lock);
bool _spin_trylock(spinlock_t *lock);
void _spin_barrier(spinlock_t *lock);

static always_inline void spin_lock(spinlock_t *l)
{
    _spin_lock(l);
    block_lock_speculation();
}

static always_inline void spin_lock_cb(spinlock_t *l, void (*c)(void *data),
                                       void *d)
{
    _spin_lock_cb(l, c, d);
    block_lock_speculation();
}

static always_inline void spin_lock_irq(spinlock_t *l)
{
    _spin_lock_irq(l);
    block_lock_speculation();
}

#define spin_lock_irqsave(l, f)                                 \
    ({                                                          \
        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));       \
        ((f) = _spin_lock_irqsave(l));                          \
        block_lock_speculation();                               \
    })
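
/*
 * A minimal usage sketch ("mylock" is a hypothetical spinlock_t); the flags
 * variable must be an unsigned long, as enforced by the BUILD_BUG_ON()
 * above:
 *
 *   unsigned long flags;
 *
 *   spin_lock_irqsave(&mylock, flags);
 *   ...critical region with IRQs disabled...
 *   spin_unlock_irqrestore(&mylock, flags);
 */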

/* Conditionally take a spinlock in a speculation safe way. */
static always_inline void spin_lock_if(bool condition, spinlock_t *l)
{
    if ( condition )
        _spin_lock(l);
    block_lock_speculation();
}

#define spin_unlock(l)                _spin_unlock(l)
#define spin_unlock_irq(l)            _spin_unlock_irq(l)
#define spin_unlock_irqrestore(l, f)  _spin_unlock_irqrestore(l, f)

#define spin_is_locked(l)             _spin_is_locked(l)
#define spin_trylock(l)               lock_evaluate_nospec(_spin_trylock(l))

#define spin_trylock_irqsave(lock, flags)       \
({                                              \
    local_irq_save(flags);                      \
    spin_trylock(lock) ?                        \
    1 : ({ local_irq_restore(flags); 0; });     \
})
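
/*
 * Usage sketch ("mylock" is hypothetical): the macro evaluates to 1 with
 * the lock held and IRQs disabled, or to 0 with the IRQ state restored:
 *
 *   unsigned long flags;
 *
 *   if ( spin_trylock_irqsave(&mylock, flags) )
 *   {
 *       ...critical region...
 *       spin_unlock_irqrestore(&mylock, flags);
 *   }
 */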

#define spin_lock_kick(l)             arch_lock_signal_wmb()

/* Ensure a lock is quiescent between two critical operations. */
#define spin_barrier(l)               _spin_barrier(l)

/*
 * rspin_[un]lock(): Use these forms when the lock can (safely!) be
 * reentered recursively on the same CPU. All critical regions that may form
 * part of a recursively-nested set must be protected by these forms. If there
 * are any critical regions that cannot form part of such a set, they can use
 * nrspin_[un]lock().
 * The nrspin_[un]lock() forms act the same way as normal spin_[un]lock()
 * calls, but operate on rspinlock_t locks. For a given lock, nrspin_lock()
 * and rspin_lock() callers block each other, even on the same CPU.
 */
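
/*
 * A minimal sketch of safe recursion (lock and function names are
 * hypothetical): the inner rspin_lock() succeeds on the CPU already
 * holding the lock instead of deadlocking.
 *
 *   static DEFINE_RSPINLOCK(my_rlock);
 *
 *   static void inner(void)
 *   {
 *       rspin_lock(&my_rlock);
 *       ...nested critical region...
 *       rspin_unlock(&my_rlock);
 *   }
 *
 *   static void outer(void)
 *   {
 *       rspin_lock(&my_rlock);
 *       inner();
 *       rspin_unlock(&my_rlock);
 *   }
 */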
bool _rspin_trylock(rspinlock_t *lock);
void _rspin_lock(rspinlock_t *lock);
unsigned long _rspin_lock_irqsave(rspinlock_t *lock);
void _rspin_unlock(rspinlock_t *lock);
void _rspin_unlock_irqrestore(rspinlock_t *lock, unsigned long flags);
bool _rspin_is_locked(const rspinlock_t *lock);
void _rspin_barrier(rspinlock_t *lock);

static always_inline void rspin_lock(rspinlock_t *lock)
{
    _rspin_lock(lock);
    block_lock_speculation();
}

static always_inline unsigned long rspin_lock_irqsave(rspinlock_t *lock)
{
    unsigned long flags = _rspin_lock_irqsave(lock);

    block_lock_speculation();
    return flags;
}

#define rspin_trylock(l)              lock_evaluate_nospec(_rspin_trylock(l))
#define rspin_unlock(l)               _rspin_unlock(l)
#define rspin_unlock_irqrestore(l, f) _rspin_unlock_irqrestore(l, f)
#define rspin_barrier(l)              _rspin_barrier(l)
#define rspin_is_locked(l)            _rspin_is_locked(l)

bool _nrspin_trylock(rspinlock_t *lock);
void _nrspin_lock(rspinlock_t *lock);
#define nrspin_lock_irqsave(l, f)                               \
    ({                                                          \
        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));       \
        (f) = _nrspin_lock_irqsave(l);                          \
        block_lock_speculation();                               \
    })
unsigned long _nrspin_lock_irqsave(rspinlock_t *lock);
void _nrspin_unlock(rspinlock_t *lock);
void _nrspin_lock_irq(rspinlock_t *lock);
void _nrspin_unlock_irq(rspinlock_t *lock);
void _nrspin_unlock_irqrestore(rspinlock_t *lock, unsigned long flags);

static always_inline void nrspin_lock(rspinlock_t *lock)
{
    _nrspin_lock(lock);
    block_lock_speculation();
}

static always_inline void nrspin_lock_irq(rspinlock_t *l)
{
    _nrspin_lock_irq(l);
    block_lock_speculation();
}

#define nrspin_trylock(l)              lock_evaluate_nospec(_nrspin_trylock(l))
#define nrspin_unlock(l)               _nrspin_unlock(l)
#define nrspin_unlock_irqrestore(l, f) _nrspin_unlock_irqrestore(l, f)
#define nrspin_unlock_irq(l)           _nrspin_unlock_irq(l)

#endif /* __SPINLOCK_H__ */