// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2017, Linaro Limited
 */

#include <kernel/mutex.h>
#include <kernel/mutex_pm_aware.h>
#include <kernel/panic.h>
#include <kernel/refcount.h>
#include <kernel/spinlock.h>
#include <kernel/thread.h>
#include <trace.h>

#include "mutex_lockdep.h"

void mutex_init(struct mutex *m)
{
	*m = (struct mutex)MUTEX_INITIALIZER;
}

void mutex_init_recursive(struct recursive_mutex *m)
{
	*m = (struct recursive_mutex)RECURSIVE_MUTEX_INITIALIZER;
}

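/*
 * m->state encodes the lock state: 0 means unlocked, -1 means write
 * locked and a positive value is the number of read locks currently
 * held. m->spin_lock only protects this state; a contended locker does
 * not spin but sleeps on m->wq until it is woken by an unlock.
 */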
static void __mutex_lock(struct mutex *m, const char *fname, int lineno)
{
	assert_have_no_spinlock();
	assert(thread_get_id_may_fail() != THREAD_ID_INVALID);
	assert(thread_is_in_normal_mode());

	mutex_lock_check(m);

	while (true) {
		uint32_t old_itr_status;
		bool can_lock;
		struct wait_queue_elem wqe;

		/*
		 * If the mutex is locked we need to initialize the wqe
		 * before releasing the spinlock to guarantee that we don't
		 * miss the wakeup from mutex_unlock().
		 *
		 * If the mutex is unlocked we don't need to use the wqe at
		 * all.
		 */

		old_itr_status = cpu_spin_lock_xsave(&m->spin_lock);

		can_lock = !m->state;
		if (!can_lock) {
			wq_wait_init(&m->wq, &wqe, false /* wait_read */);
		} else {
			m->state = -1; /* write locked */
		}

		cpu_spin_unlock_xrestore(&m->spin_lock, old_itr_status);

		if (!can_lock) {
			/*
			 * Someone else is holding the lock, wait in normal
			 * world for the lock to become available.
			 */
			wq_wait_final(&m->wq, &wqe, 0, m, fname, lineno);
		} else
			return;
	}
}

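/*
 * Recursive variant: if the calling thread already owns the mutex only
 * the lock depth is increased, otherwise the underlying mutex is taken
 * and the caller is recorded as owner with an initial depth of 1.
 */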
static void __mutex_lock_recursive(struct recursive_mutex *m, const char *fname,
				   int lineno)
{
	short int ct = thread_get_id();

	assert_have_no_spinlock();
	assert(thread_is_in_normal_mode());

	if (atomic_load_short(&m->owner) == ct) {
		if (!refcount_inc(&m->lock_depth))
			panic();
		return;
	}

	__mutex_lock(&m->m, fname, lineno);

	assert(m->owner == THREAD_ID_INVALID);
	atomic_store_short(&m->owner, ct);
	refcount_set(&m->lock_depth, 1);
}

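/* Drop the write lock and wake the next waiter, if any */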
static void __mutex_unlock(struct mutex *m, const char *fname, int lineno)
{
	uint32_t old_itr_status;

	assert_have_no_spinlock();
	assert(thread_get_id_may_fail() != THREAD_ID_INVALID);

	mutex_unlock_check(m);

	old_itr_status = cpu_spin_lock_xsave(&m->spin_lock);

	if (!m->state)
		panic();

	m->state = 0;

	cpu_spin_unlock_xrestore(&m->spin_lock, old_itr_status);

	wq_wake_next(&m->wq, m, fname, lineno);
}

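/*
 * Decrease the lock depth; the underlying mutex is only released once
 * the outermost lock level is dropped and the depth reaches zero.
 */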
static void __mutex_unlock_recursive(struct recursive_mutex *m,
				     const char *fname, int lineno)
{
	assert_have_no_spinlock();
	assert(m->owner == thread_get_id());

	if (refcount_dec(&m->lock_depth)) {
		/*
		 * Do an atomic store to match the atomic load in
		 * __mutex_lock_recursive()
		 */
		atomic_store_short(&m->owner, THREAD_ID_INVALID);
		__mutex_unlock(&m->m, fname, lineno);
	}
}

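/*
 * Non-blocking attempt to take the write lock. Returns true on success,
 * false if the mutex is already read or write locked.
 */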
static bool __mutex_trylock(struct mutex *m, const char *fname __unused,
			int lineno __unused)
{
	uint32_t old_itr_status;
	bool can_lock_write;

	assert_have_no_spinlock();
	assert(thread_get_id_may_fail() != THREAD_ID_INVALID);

	old_itr_status = cpu_spin_lock_xsave(&m->spin_lock);

	can_lock_write = !m->state;
	if (can_lock_write)
		m->state = -1;

	cpu_spin_unlock_xrestore(&m->spin_lock, old_itr_status);

	if (can_lock_write)
		mutex_trylock_check(m);

	return can_lock_write;
}

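/* Drop one read lock and wake waiters when the last reader leaves */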
static void __mutex_read_unlock(struct mutex *m, const char *fname, int lineno)
{
	uint32_t old_itr_status;
	short new_state;

	assert_have_no_spinlock();
	assert(thread_get_id_may_fail() != THREAD_ID_INVALID);

	old_itr_status = cpu_spin_lock_xsave(&m->spin_lock);

	if (m->state <= 0)
		panic();
	m->state--;
	new_state = m->state;

	cpu_spin_unlock_xrestore(&m->spin_lock, old_itr_status);

	/* Wake eventual waiters if the mutex was unlocked */
	if (!new_state)
		wq_wake_next(&m->wq, m, fname, lineno);
}

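/*
 * Take a read lock: multiple readers may hold the mutex concurrently,
 * a waiter is only blocked while the mutex is write locked (state -1).
 */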
static void __mutex_read_lock(struct mutex *m, const char *fname, int lineno)
{
	assert_have_no_spinlock();
	assert(thread_get_id_may_fail() != THREAD_ID_INVALID);
	assert(thread_is_in_normal_mode());

	while (true) {
		uint32_t old_itr_status;
		bool can_lock;
		struct wait_queue_elem wqe;

		/*
		 * If the mutex is locked we need to initialize the wqe
		 * before releasing the spinlock to guarantee that we don't
		 * miss the wakeup from mutex_unlock().
		 *
		 * If the mutex is unlocked we don't need to use the wqe at
		 * all.
		 */

		old_itr_status = cpu_spin_lock_xsave(&m->spin_lock);

		can_lock = m->state != -1;
		if (!can_lock) {
			wq_wait_init(&m->wq, &wqe, true /* wait_read */);
		} else {
			m->state++; /* read_locked */
		}

		cpu_spin_unlock_xrestore(&m->spin_lock, old_itr_status);

		if (!can_lock) {
			/*
			 * Someone else is holding the lock, wait in normal
			 * world for the lock to become available.
			 */
			wq_wait_final(&m->wq, &wqe, 0, m, fname, lineno);
		} else
			return;
	}
}

static bool __mutex_read_trylock(struct mutex *m, const char *fname __unused,
				 int lineno __unused)
{
	uint32_t old_itr_status;
	bool can_lock;

	assert_have_no_spinlock();
	assert(thread_get_id_may_fail() != THREAD_ID_INVALID);
	assert(thread_is_in_normal_mode());

	old_itr_status = cpu_spin_lock_xsave(&m->spin_lock);

	can_lock = m->state != -1;
	if (can_lock)
		m->state++;

	cpu_spin_unlock_xrestore(&m->spin_lock, old_itr_status);

	return can_lock;
}

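/*
 * With CFG_MUTEX_DEBUG the public API is provided by *_debug() wrappers
 * that forward the caller's file name and line number to the wait queue
 * code; without it the plain wrappers pass NULL and -1 instead.
 */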
#ifdef CFG_MUTEX_DEBUG
void mutex_unlock_debug(struct mutex *m, const char *fname, int lineno)
{
	__mutex_unlock(m, fname, lineno);
}

void mutex_lock_debug(struct mutex *m, const char *fname, int lineno)
{
	__mutex_lock(m, fname, lineno);
}

bool mutex_trylock_debug(struct mutex *m, const char *fname, int lineno)
{
	return __mutex_trylock(m, fname, lineno);
}

void mutex_read_unlock_debug(struct mutex *m, const char *fname, int lineno)
{
	__mutex_read_unlock(m, fname, lineno);
}

void mutex_read_lock_debug(struct mutex *m, const char *fname, int lineno)
{
	__mutex_read_lock(m, fname, lineno);
}

bool mutex_read_trylock_debug(struct mutex *m, const char *fname, int lineno)
{
	return __mutex_read_trylock(m, fname, lineno);
}

void mutex_unlock_recursive_debug(struct recursive_mutex *m, const char *fname,
				  int lineno)
{
	__mutex_unlock_recursive(m, fname, lineno);
}

void mutex_lock_recursive_debug(struct recursive_mutex *m, const char *fname,
				int lineno)
{
	__mutex_lock_recursive(m, fname, lineno);
}
#else
void mutex_unlock(struct mutex *m)
{
	__mutex_unlock(m, NULL, -1);
}

void mutex_unlock_recursive(struct recursive_mutex *m)
{
	__mutex_unlock_recursive(m, NULL, -1);
}

void mutex_lock(struct mutex *m)
{
	__mutex_lock(m, NULL, -1);
}

void mutex_lock_recursive(struct recursive_mutex *m)
{
	__mutex_lock_recursive(m, NULL, -1);
}

bool mutex_trylock(struct mutex *m)
{
	return __mutex_trylock(m, NULL, -1);
}

void mutex_read_unlock(struct mutex *m)
{
	__mutex_read_unlock(m, NULL, -1);
}

void mutex_read_lock(struct mutex *m)
{
	__mutex_read_lock(m, NULL, -1);
}

bool mutex_read_trylock(struct mutex *m)
{
	return __mutex_read_trylock(m, NULL, -1);
}
#endif

void mutex_destroy(struct mutex *m)
{
	/*
	 * Caller guarantees that no one will try to take the mutex so
	 * there's no need to take the spinlock before accessing it.
	 */
	if (m->state)
		panic();
	if (!wq_is_empty(&m->wq))
		panic("waitqueue not empty");
	mutex_destroy_check(m);
}

void mutex_destroy_recursive(struct recursive_mutex *m)
{
	mutex_destroy(&m->m);
}

unsigned int mutex_get_recursive_lock_depth(struct recursive_mutex *m)
{
	assert_have_no_spinlock();
	assert(m->owner == thread_get_id());

	return refcount_val(&m->lock_depth);
}

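/*
 * PM-aware mutexes can be taken both from a normal thread context and
 * from contexts where no thread is available (such as power management
 * sequences) and sleeping on a mutex is not an option.
 */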
void mutex_pm_aware_init(struct mutex_pm_aware *m)
{
	*m = (struct mutex_pm_aware)MUTEX_PM_AWARE_INITIALIZER;
}

void mutex_pm_aware_destroy(struct mutex_pm_aware *m)
{
	mutex_destroy(&m->mutex);
}

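/*
 * Without a thread the spinlock must be acquired uncontended and the
 * mutex must be free, otherwise we panic since waiting is impossible.
 * In thread context the mutex is taken first and the spinlock, which
 * should then be uncontended, is grabbed with a trylock that panics on
 * failure.
 */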
void mutex_pm_aware_lock(struct mutex_pm_aware *m)
{
	if (thread_get_id_may_fail() == THREAD_ID_INVALID) {
		if (!cpu_spin_trylock(&m->lock) || m->mutex.state)
			panic();
	} else {
		mutex_lock(&m->mutex);
		if (!thread_spin_trylock(&m->lock))
			panic();
	}
}

void mutex_pm_aware_unlock(struct mutex_pm_aware *m)
{
	if (thread_get_id_may_fail() == THREAD_ID_INVALID) {
		assert(!m->mutex.state);
		cpu_spin_unlock(&m->lock);
	} else {
		thread_spin_unlock(&m->lock);
		mutex_unlock(&m->mutex);
	}
}

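/*
 * Condition variables pair with a mutex held by the waiter. An
 * illustrative usage sketch (not taken from this file; 'lock', 'cv' and
 * 'ready' are hypothetical):
 *
 *	mutex_lock(&lock);
 *	while (!ready)
 *		condvar_wait(&cv, &lock);
 *	... consume the protected state ...
 *	mutex_unlock(&lock);
 *
 * The predicate is rechecked in a loop since the waiter reacquires the
 * mutex after being woken and the state may have changed again.
 */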
void condvar_init(struct condvar *cv)
{
	*cv = (struct condvar)CONDVAR_INITIALIZER;
}

void condvar_destroy(struct condvar *cv)
{
	if (cv->m && wq_have_condvar(&cv->m->wq, cv))
		panic();

	condvar_init(cv);
}

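/*
 * Wake one waiter (only_one == true) or all waiters of the condvar by
 * promoting them in the wait queue of the mutex the condvar is bound
 * to. The condvar spinlock is only held while the mutex pointer is
 * sampled.
 */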
static void cv_signal(struct condvar *cv, bool only_one, const char *fname,
			int lineno)
{
	uint32_t old_itr_status;
	struct mutex *m;

	old_itr_status = cpu_spin_lock_xsave(&cv->spin_lock);
	m = cv->m;
	cpu_spin_unlock_xrestore(&cv->spin_lock, old_itr_status);

	if (m)
		wq_promote_condvar(&m->wq, cv, only_one, m, fname, lineno);
}

#ifdef CFG_MUTEX_DEBUG
void condvar_signal_debug(struct condvar *cv, const char *fname, int lineno)
{
	cv_signal(cv, true /* only one */, fname, lineno);
}

void condvar_broadcast_debug(struct condvar *cv, const char *fname, int lineno)
{
	cv_signal(cv, false /* all */, fname, lineno);
}

#else
void condvar_signal(struct condvar *cv)
{
	cv_signal(cv, true /* only one */, NULL, -1);
}

void condvar_broadcast(struct condvar *cv)
{
	cv_signal(cv, false /* all */, NULL, -1);
}
#endif /*CFG_MUTEX_DEBUG*/

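/*
 * Releases the caller's hold on the mutex (the write lock or one read
 * reference) while registering as a condvar waiter under the mutex
 * spinlock so no signal can be missed, waits via the mutex wait queue
 * and then reacquires the mutex in the same mode before returning. The
 * timeout is forwarded to wq_wait_final(); the condvar_wait() wrappers
 * without timeout pass 0.
 */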
static TEE_Result __condvar_wait_timeout(struct condvar *cv, struct mutex *m,
					 uint32_t timeout_ms, const char *fname,
					 int lineno)
{
	TEE_Result res = TEE_SUCCESS;
	uint32_t old_itr_status = 0;
	struct wait_queue_elem wqe = { };
	short old_state = 0;
	short new_state = 0;

	mutex_unlock_check(m);

	/* Link this condvar to this mutex until reinitialized */
	old_itr_status = cpu_spin_lock_xsave(&cv->spin_lock);
	if (cv->m && cv->m != m)
		panic("invalid mutex");

	cv->m = m;
	cpu_spin_unlock(&cv->spin_lock);

	cpu_spin_lock(&m->spin_lock);

	if (!m->state)
		panic();
	old_state = m->state;
	/* Add to mutex wait queue as a condvar waiter */
	wq_wait_init_condvar(&m->wq, &wqe, cv, m->state > 0);

	if (m->state > 1) {
		/* Multiple read locks, remove one */
		m->state--;
	} else {
		/* Only one lock (read or write), unlock the mutex */
		m->state = 0;
	}
	new_state = m->state;

	cpu_spin_unlock_xrestore(&m->spin_lock, old_itr_status);

	/* Wake eventual waiters if the mutex was unlocked */
	if (!new_state)
		wq_wake_next(&m->wq, m, fname, lineno);

	res = wq_wait_final(&m->wq, &wqe, timeout_ms, m, fname, lineno);

	if (old_state > 0)
		mutex_read_lock(m);
	else
		mutex_lock(m);

	return res;
}

#ifdef CFG_MUTEX_DEBUG
void condvar_wait_debug(struct condvar *cv, struct mutex *m,
			const char *fname, int lineno)
{
	__condvar_wait_timeout(cv, m, 0, fname, lineno);
}

TEE_Result condvar_wait_timeout_debug(struct condvar *cv, struct mutex *m,
				      uint32_t timeout_ms, const char *fname,
				      int lineno)
{
	return __condvar_wait_timeout(cv, m, timeout_ms, fname, lineno);
}
#else
void condvar_wait(struct condvar *cv, struct mutex *m)
{
	__condvar_wait_timeout(cv, m, 0, NULL, -1);
}

TEE_Result condvar_wait_timeout(struct condvar *cv, struct mutex *m,
				uint32_t timeout_ms)
{
	return __condvar_wait_timeout(cv, m, timeout_ms, NULL, -1);
}
#endif