// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2017, Linaro Limited
 */

#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/thread.h>
#include <trace.h>

#include "mutex_lockdep.h"

void mutex_init(struct mutex *m)
{
	*m = (struct mutex)MUTEX_INITIALIZER;
}

void mutex_init_recursive(struct recursive_mutex *m)
{
	*m = (struct recursive_mutex)RECURSIVE_MUTEX_INITIALIZER;
}

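/*
 * Grab the write lock on @m, sleeping on the mutex wait queue until it
 * becomes available. Must be called from a thread context that can
 * return to normal world, and never with a spinlock held.
 */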
static void __mutex_lock(struct mutex *m, const char *fname, int lineno)
{
	assert_have_no_spinlock();
	assert(thread_get_id_may_fail() != THREAD_ID_INVALID);
	assert(thread_is_in_normal_mode());

	mutex_lock_check(m);

	while (true) {
		uint32_t old_itr_status;
		bool can_lock;
		struct wait_queue_elem wqe;

		/*
		 * If the mutex is locked, we need to initialize the wqe
		 * before releasing the spinlock to guarantee that we
		 * don't miss the wakeup from mutex_unlock().
		 *
		 * If the mutex is unlocked, we don't need to use the wqe
		 * at all.
		 */

		old_itr_status = cpu_spin_lock_xsave(&m->spin_lock);

		can_lock = !m->state;
		if (!can_lock) {
			wq_wait_init(&m->wq, &wqe, false /* wait_read */);
		} else {
			m->state = -1; /* write locked */
		}

		cpu_spin_unlock_xrestore(&m->spin_lock, old_itr_status);

		if (!can_lock) {
			/*
			 * Someone else is holding the lock; wait in normal
			 * world for the lock to become available.
			 */
			wq_wait_final(&m->wq, &wqe, m, fname, lineno);
		} else
			return;
	}
}

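/*
 * Recursive variant of __mutex_lock(): if the calling thread already
 * owns @m, only the nesting count (lock_depth) is bumped. Each lock
 * must be balanced by a matching recursive unlock.
 */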
static void __mutex_lock_recursive(struct recursive_mutex *m, const char *fname,
				   int lineno)
{
	short int ct = thread_get_id();

	assert_have_no_spinlock();
	assert(thread_is_in_normal_mode());

	if (atomic_load_short(&m->owner) == ct) {
		if (!refcount_inc(&m->lock_depth))
			panic();
		return;
	}

	__mutex_lock(&m->m, fname, lineno);

	assert(m->owner == THREAD_ID_INVALID);
	atomic_store_short(&m->owner, ct);
	refcount_set(&m->lock_depth, 1);
}

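/* Release the write lock on @m and wake the next waiter, if any. */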
static void __mutex_unlock(struct mutex *m, const char *fname, int lineno)
{
	uint32_t old_itr_status;

	assert_have_no_spinlock();
	assert(thread_get_id_may_fail() != THREAD_ID_INVALID);

	mutex_unlock_check(m);

	old_itr_status = cpu_spin_lock_xsave(&m->spin_lock);

	if (!m->state)
		panic();

	m->state = 0;

	cpu_spin_unlock_xrestore(&m->spin_lock, old_itr_status);

	wq_wake_next(&m->wq, m, fname, lineno);
}

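/*
 * Drop one level of recursive locking; the underlying mutex is only
 * released once the last nested lock is dropped.
 */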
static void __mutex_unlock_recursive(struct recursive_mutex *m,
				     const char *fname, int lineno)
{
	assert_have_no_spinlock();
	assert(m->owner == thread_get_id());

	if (refcount_dec(&m->lock_depth)) {
		/*
		 * Do an atomic store to match the atomic load in
		 * __mutex_lock_recursive()
		 */
		atomic_store_short(&m->owner, THREAD_ID_INVALID);
		__mutex_unlock(&m->m, fname, lineno);
	}
}

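/*
 * Try to take the write lock on @m without blocking. Returns true if
 * the lock was acquired.
 */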
static bool __mutex_trylock(struct mutex *m, const char *fname __unused,
			int lineno __unused)
{
	uint32_t old_itr_status;
	bool can_lock_write;

	assert_have_no_spinlock();
	assert(thread_get_id_may_fail() != THREAD_ID_INVALID);

	old_itr_status = cpu_spin_lock_xsave(&m->spin_lock);

	can_lock_write = !m->state;
	if (can_lock_write)
		m->state = -1;

	cpu_spin_unlock_xrestore(&m->spin_lock, old_itr_status);

	if (can_lock_write)
		mutex_trylock_check(m);

	return can_lock_write;
}

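/*
 * Release one read lock on @m; the last reader to leave wakes the next
 * waiter, if any.
 */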
static void __mutex_read_unlock(struct mutex *m, const char *fname, int lineno)
{
	uint32_t old_itr_status;
	short new_state;

	assert_have_no_spinlock();
	assert(thread_get_id_may_fail() != THREAD_ID_INVALID);

	old_itr_status = cpu_spin_lock_xsave(&m->spin_lock);

	if (m->state <= 0)
		panic();
	m->state--;
	new_state = m->state;

	cpu_spin_unlock_xrestore(&m->spin_lock, old_itr_status);

	/* Wake any waiters if the mutex became unlocked */
	if (!new_state)
		wq_wake_next(&m->wq, m, fname, lineno);
}

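/*
 * Take a read lock on @m. Multiple readers may hold the lock
 * concurrently; the caller sleeps while the mutex is write locked.
 */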
static void __mutex_read_lock(struct mutex *m, const char *fname, int lineno)
{
	assert_have_no_spinlock();
	assert(thread_get_id_may_fail() != THREAD_ID_INVALID);
	assert(thread_is_in_normal_mode());

	while (true) {
		uint32_t old_itr_status;
		bool can_lock;
		struct wait_queue_elem wqe;

		/*
		 * If the mutex is locked, we need to initialize the wqe
		 * before releasing the spinlock to guarantee that we
		 * don't miss the wakeup from mutex_unlock().
		 *
		 * If the mutex is unlocked, we don't need to use the wqe
		 * at all.
		 */

		old_itr_status = cpu_spin_lock_xsave(&m->spin_lock);

		can_lock = m->state != -1;
		if (!can_lock) {
			wq_wait_init(&m->wq, &wqe, true /* wait_read */);
		} else {
			m->state++; /* read locked */
		}

		cpu_spin_unlock_xrestore(&m->spin_lock, old_itr_status);

		if (!can_lock) {
			/*
			 * Someone else is holding the lock; wait in normal
			 * world for the lock to become available.
			 */
			wq_wait_final(&m->wq, &wqe, m, fname, lineno);
		} else
			return;
	}
}

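/*
 * Try to take a read lock on @m without blocking. Returns true if the
 * lock was acquired, i.e. the mutex was not write locked.
 */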
static bool __mutex_read_trylock(struct mutex *m, const char *fname __unused,
				 int lineno __unused)
{
	uint32_t old_itr_status;
	bool can_lock;

	assert_have_no_spinlock();
	assert(thread_get_id_may_fail() != THREAD_ID_INVALID);
	assert(thread_is_in_normal_mode());

	old_itr_status = cpu_spin_lock_xsave(&m->spin_lock);

	can_lock = m->state != -1;
	if (can_lock)
		m->state++;

	cpu_spin_unlock_xrestore(&m->spin_lock, old_itr_status);

	return can_lock;
}

#ifdef CFG_MUTEX_DEBUG
void mutex_unlock_debug(struct mutex *m, const char *fname, int lineno)
{
	__mutex_unlock(m, fname, lineno);
}

void mutex_lock_debug(struct mutex *m, const char *fname, int lineno)
{
	__mutex_lock(m, fname, lineno);
}

bool mutex_trylock_debug(struct mutex *m, const char *fname, int lineno)
{
	return __mutex_trylock(m, fname, lineno);
}

void mutex_read_unlock_debug(struct mutex *m, const char *fname, int lineno)
{
	__mutex_read_unlock(m, fname, lineno);
}

void mutex_read_lock_debug(struct mutex *m, const char *fname, int lineno)
{
	__mutex_read_lock(m, fname, lineno);
}

bool mutex_read_trylock_debug(struct mutex *m, const char *fname, int lineno)
{
	return __mutex_read_trylock(m, fname, lineno);
}

void mutex_unlock_recursive_debug(struct recursive_mutex *m, const char *fname,
				  int lineno)
{
	__mutex_unlock_recursive(m, fname, lineno);
}

void mutex_lock_recursive_debug(struct recursive_mutex *m, const char *fname,
				int lineno)
{
	__mutex_lock_recursive(m, fname, lineno);
}
#else
void mutex_unlock(struct mutex *m)
{
	__mutex_unlock(m, NULL, -1);
}

void mutex_unlock_recursive(struct recursive_mutex *m)
{
	__mutex_unlock_recursive(m, NULL, -1);
}

void mutex_lock(struct mutex *m)
{
	__mutex_lock(m, NULL, -1);
}

void mutex_lock_recursive(struct recursive_mutex *m)
{
	__mutex_lock_recursive(m, NULL, -1);
}

bool mutex_trylock(struct mutex *m)
{
	return __mutex_trylock(m, NULL, -1);
}

void mutex_read_unlock(struct mutex *m)
{
	__mutex_read_unlock(m, NULL, -1);
}

void mutex_read_lock(struct mutex *m)
{
	__mutex_read_lock(m, NULL, -1);
}

bool mutex_read_trylock(struct mutex *m)
{
	return __mutex_read_trylock(m, NULL, -1);
}
#endif /* CFG_MUTEX_DEBUG */

void mutex_destroy(struct mutex *m)
{
	/*
	 * The caller guarantees that no one will try to take the mutex, so
	 * there's no need to take the spinlock before accessing it.
	 */
	if (m->state)
		panic();
	if (!wq_is_empty(&m->wq))
		panic("waitqueue not empty");
	mutex_destroy_check(m);
}

void mutex_destroy_recursive(struct recursive_mutex *m)
{
	mutex_destroy(&m->m);
}

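/*
 * Return the current nesting depth of @m; the caller must be the thread
 * that owns the mutex.
 */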
unsigned int mutex_get_recursive_lock_depth(struct recursive_mutex *m)
{
	assert_have_no_spinlock();
	assert(m->owner == thread_get_id());

	return refcount_val(&m->lock_depth);
}

void condvar_init(struct condvar *cv)
{
	*cv = (struct condvar)CONDVAR_INITIALIZER;
}

void condvar_destroy(struct condvar *cv)
{
	if (cv->m && wq_have_condvar(&cv->m->wq, cv))
		panic();

	condvar_init(cv);
}

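/*
 * Wake one (or all, depending on @only_one) threads waiting on @cv by
 * promoting their entries in the wait queue of the mutex the condvar is
 * linked to.
 */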
static void cv_signal(struct condvar *cv, bool only_one, const char *fname,
			int lineno)
{
	uint32_t old_itr_status;
	struct mutex *m;

	old_itr_status = cpu_spin_lock_xsave(&cv->spin_lock);
	m = cv->m;
	cpu_spin_unlock_xrestore(&cv->spin_lock, old_itr_status);

	if (m)
		wq_promote_condvar(&m->wq, cv, only_one, m, fname, lineno);
}

#ifdef CFG_MUTEX_DEBUG
void condvar_signal_debug(struct condvar *cv, const char *fname, int lineno)
{
	cv_signal(cv, true /* only one */, fname, lineno);
}

void condvar_broadcast_debug(struct condvar *cv, const char *fname, int lineno)
{
	cv_signal(cv, false /* all */, fname, lineno);
}
#else
void condvar_signal(struct condvar *cv)
{
	cv_signal(cv, true /* only one */, NULL, -1);
}

void condvar_broadcast(struct condvar *cv)
{
	cv_signal(cv, false /* all */, NULL, -1);
}
#endif /* CFG_MUTEX_DEBUG */

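/*
 * Atomically release @m (one read lock, or the write lock) and sleep
 * until @cv is signaled, then re-acquire the same kind of lock before
 * returning.
 */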
static void __condvar_wait(struct condvar *cv, struct mutex *m,
			const char *fname, int lineno)
{
	uint32_t old_itr_status;
	struct wait_queue_elem wqe;
	short old_state;
	short new_state;

	mutex_unlock_check(m);

	/* Link this condvar to this mutex until reinitialized */
	old_itr_status = cpu_spin_lock_xsave(&cv->spin_lock);
	if (cv->m && cv->m != m)
		panic("invalid mutex");

	cv->m = m;
	cpu_spin_unlock(&cv->spin_lock);

	cpu_spin_lock(&m->spin_lock);

	if (!m->state)
		panic();
	old_state = m->state;
	/* Add to mutex wait queue as a condvar waiter */
	wq_wait_init_condvar(&m->wq, &wqe, cv, m->state > 0);

	if (m->state > 1) {
		/* Multiple read locks, remove one */
		m->state--;
	} else {
		/* Only one lock (read or write), unlock the mutex */
		m->state = 0;
	}
	new_state = m->state;

	cpu_spin_unlock_xrestore(&m->spin_lock, old_itr_status);

	/* Wake any waiters if the mutex became unlocked */
	if (!new_state)
		wq_wake_next(&m->wq, m, fname, lineno);

	wq_wait_final(&m->wq, &wqe, m, fname, lineno);

	if (old_state > 0)
		mutex_read_lock(m);
	else
		mutex_lock(m);
}

#ifdef CFG_MUTEX_DEBUG
void condvar_wait_debug(struct condvar *cv, struct mutex *m,
			const char *fname, int lineno)
{
	__condvar_wait(cv, m, fname, lineno);
}
#else
void condvar_wait(struct condvar *cv, struct mutex *m)
{
	__condvar_wait(cv, m, NULL, -1);
}
#endif /* CFG_MUTEX_DEBUG */