#ifndef __SPINLOCK_H__
#define __SPINLOCK_H__

#include <asm/system.h>
#include <asm/spinlock.h>
#include <asm/types.h>
#include <xen/percpu.h>

#ifndef NDEBUG
struct lock_debug {
    s16 irq_safe; /* +1: IRQ-safe; 0: not IRQ-safe; -1: don't know yet */
};
#define _LOCK_DEBUG { -1 }
void spin_debug_enable(void);
void spin_debug_disable(void);
#else
struct lock_debug { };
#define _LOCK_DEBUG { }
#define spin_debug_enable() ((void)0)
#define spin_debug_disable() ((void)0)
#endif

#ifdef CONFIG_LOCK_PROFILE

#include <public/sysctl.h>

/*
    Lock profiling:

    Global locks that should be subject to profiling must be declared via
    DEFINE_SPINLOCK.

    For locks embedded in structures, further measures are necessary:
    - the structure definition must include a profile_head with exactly this
      name:

      struct lock_profile_qhead   profile_head;

    - each individual lock that is subject to profiling must be initialized
      via

      spin_lock_init_prof(ptr, lock);

      with ptr being a pointer to the enclosing structure and lock the name
      of its spinlock field

    - each structure must be added to profiling with

      lock_profile_register_struct(type, ptr, idx, print);

      with:
        type:  something like LOCKPROF_TYPE_PERDOM
        ptr:   pointer to the structure
        idx:   index of that structure, e.g. domid
        print: descriptive string like "domain"

    - a structure is removed from profiling via

      lock_profile_deregister_struct(type, ptr);

    A usage sketch follows this comment.
*/
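
/*
    Illustrative sketch only; the structure, field and index values below
    are made up for the example:

    struct foo {
        spinlock_t lock;
        struct lock_profile_qhead profile_head;
    };

    struct foo *f = xzalloc(struct foo);

    if ( f )
    {
        spin_lock_init_prof(f, lock);
        lock_profile_register_struct(LOCKPROF_TYPE_PERDOM, f, 0, "foo");
    }

    ... and on teardown:

    lock_profile_deregister_struct(LOCKPROF_TYPE_PERDOM, f);
*/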

struct spinlock;

struct lock_profile {
    struct lock_profile *next;       /* forward link */
    char                *name;       /* lock name */
    struct spinlock     *lock;       /* the lock itself */
    u64                 lock_cnt;    /* # of completed locking ops */
    u64                 block_cnt;   /* # of completed waits for the lock */
    s64                 time_hold;   /* cumulated lock time */
    s64                 time_block;  /* cumulated wait time */
    s64                 time_locked; /* system time of last locking */
};

struct lock_profile_qhead {
    struct lock_profile_qhead *head_q; /* next head of this type */
    struct lock_profile       *elem_q; /* first element in q */
    int32_t                   idx;     /* index for printout */
};

#define _LOCK_PROFILE(name) { 0, #name, &name, 0, 0, 0, 0, 0 }
#define _LOCK_PROFILE_PTR(name)                                               \
    static struct lock_profile * const __lock_profile_##name                  \
    __used_section(".lockprofile.data") =                                     \
    &__lock_profile_data_##name
#define _SPIN_LOCK_UNLOCKED(x) { { 0 }, SPINLOCK_NO_CPU, 0, _LOCK_DEBUG, x }
#define SPIN_LOCK_UNLOCKED _SPIN_LOCK_UNLOCKED(NULL)
#define DEFINE_SPINLOCK(l)                                                    \
    spinlock_t l = _SPIN_LOCK_UNLOCKED(NULL);                                 \
    static struct lock_profile __lock_profile_data_##l = _LOCK_PROFILE(l);    \
    _LOCK_PROFILE_PTR(l)
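
/*
    For example (hypothetical name), a profiled global lock is declared as

    DEFINE_SPINLOCK(foo_lock);

    which, besides the lock itself, also emits the matching
    struct lock_profile and a pointer to it in the .lockprofile.data
    section.
*/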

#define spin_lock_init_prof(s, l)                                             \
    do {                                                                      \
        struct lock_profile *prof;                                            \
        prof = xzalloc(struct lock_profile);                                  \
        if ( !prof ) break;                                                   \
        prof->name = #l;                                                      \
        prof->lock = &(s)->l;                                                 \
        (s)->l = (spinlock_t)_SPIN_LOCK_UNLOCKED(prof);                       \
        prof->next = (s)->profile_head.elem_q;                                \
        (s)->profile_head.elem_q = prof;                                      \
    } while ( 0 )

void _lock_profile_register_struct(
    int32_t, struct lock_profile_qhead *, int32_t, char *);
void _lock_profile_deregister_struct(int32_t, struct lock_profile_qhead *);

#define lock_profile_register_struct(type, ptr, idx, print)                   \
    _lock_profile_register_struct(type, &((ptr)->profile_head), idx, print)
#define lock_profile_deregister_struct(type, ptr)                             \
    _lock_profile_deregister_struct(type, &((ptr)->profile_head))

extern int spinlock_profile_control(struct xen_sysctl_lockprof_op *pc);
extern void spinlock_profile_printall(unsigned char key);
extern void spinlock_profile_reset(unsigned char key);

#else

struct lock_profile_qhead { };

#define SPIN_LOCK_UNLOCKED { { 0 }, SPINLOCK_NO_CPU, 0, _LOCK_DEBUG }
#define DEFINE_SPINLOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED

#define spin_lock_init_prof(s, l) spin_lock_init(&((s)->l))
#define lock_profile_register_struct(type, ptr, idx, print)
#define lock_profile_deregister_struct(type, ptr)

#endif

typedef union {
    u32 head_tail;
    struct {
        u16 head;
        u16 tail;
    };
} spinlock_tickets_t;

#define SPINLOCK_TICKET_INC { .head_tail = 0x10000, }
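
/*
 * Sketch of the ticket scheme (the implementation lives in
 * common/spinlock.c): a locker atomically adds SPINLOCK_TICKET_INC to
 * head_tail, which on a little-endian layout increments tail and yields
 * the caller's ticket; the locker then spins until head reaches that
 * ticket. Unlocking increments head, passing the lock to the next ticket
 * holder.
 */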

typedef struct spinlock {
    spinlock_tickets_t tickets;
    u16 recurse_cpu:12;   /* CPU currently holding the lock recursively */
#define SPINLOCK_NO_CPU 0xfffu
    u16 recurse_cnt:4;    /* recursion depth on that CPU */
#define SPINLOCK_MAX_RECURSE 0xfu
    struct lock_debug debug;
#ifdef CONFIG_LOCK_PROFILE
    struct lock_profile *profile;
#endif
} spinlock_t;

#define spin_lock_init(l) (*(l) = (spinlock_t)SPIN_LOCK_UNLOCKED)

void _spin_lock(spinlock_t *lock);
void _spin_lock_cb(spinlock_t *lock, void (*cond)(void *), void *data);
void _spin_lock_irq(spinlock_t *lock);
unsigned long _spin_lock_irqsave(spinlock_t *lock);

void _spin_unlock(spinlock_t *lock);
void _spin_unlock_irq(spinlock_t *lock);
void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);

int _spin_is_locked(spinlock_t *lock);
int _spin_trylock(spinlock_t *lock);
void _spin_barrier(spinlock_t *lock);

int _spin_trylock_recursive(spinlock_t *lock);
void _spin_lock_recursive(spinlock_t *lock);
void _spin_unlock_recursive(spinlock_t *lock);

#define spin_lock(l)                  _spin_lock(l)
#define spin_lock_cb(l, c, d)         _spin_lock_cb(l, c, d)
#define spin_lock_irq(l)              _spin_lock_irq(l)
#define spin_lock_irqsave(l, f)                                 \
    ({                                                          \
        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));       \
        ((f) = _spin_lock_irqsave(l));                          \
    })
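
/*
 * Usage sketch (hypothetical lock name); f must be an unsigned long
 * lvalue, which the BUILD_BUG_ON above enforces at compile time:
 *
 *     unsigned long flags;
 *
 *     spin_lock_irqsave(&foo_lock, flags);
 *     ... critical section, local IRQs disabled ...
 *     spin_unlock_irqrestore(&foo_lock, flags);
 */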

#define spin_unlock(l)                _spin_unlock(l)
#define spin_unlock_irq(l)            _spin_unlock_irq(l)
#define spin_unlock_irqrestore(l, f)  _spin_unlock_irqrestore(l, f)

#define spin_is_locked(l)             _spin_is_locked(l)
#define spin_trylock(l)               _spin_trylock(l)

/*
 * Disable local IRQs and try the lock; on success IRQs stay disabled and
 * 1 is returned, on failure the saved IRQ state is restored and 0 is
 * returned.
 */
#define spin_trylock_irqsave(lock, flags)       \
({                                              \
    local_irq_save(flags);                      \
    spin_trylock(lock) ?                        \
    1 : ({ local_irq_restore(flags); 0; });     \
})

#define spin_lock_kick(l)             arch_lock_signal_wmb()

/* Ensure a lock is quiescent between two critical operations. */
#define spin_barrier(l)               _spin_barrier(l)


/*
 * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
 * reentered recursively on the same CPU. All critical regions that may form
 * part of a recursively-nested set must be protected by these forms. If there
 * are any critical regions that cannot form part of such a set, they can use
 * standard spin_[un]lock(). A usage sketch follows the macros below.
 */
#define spin_trylock_recursive(l)     _spin_trylock_recursive(l)
#define spin_lock_recursive(l)        _spin_lock_recursive(l)
#define spin_unlock_recursive(l)      _spin_unlock_recursive(l)
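
/*
 * Sketch (hypothetical names): the holding CPU may re-take the lock up to
 * SPINLOCK_MAX_RECURSE levels deep, tracked via recurse_cpu/recurse_cnt:
 *
 *     spin_lock_recursive(&d->lock);
 *     do_work(d);             -- may itself take d->lock recursively
 *     spin_unlock_recursive(&d->lock);
 */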

#endif /* __SPINLOCK_H__ */