1 /* SPDX-License-Identifier: BSD-2-Clause */
2 /*
3  * Copyright (c) 2024, STMicroelectronics
4  */
5 #ifndef __KERNEL_MUTEX_PM_AWARE_H
6 #define __KERNEL_MUTEX_PM_AWARE_H
7 
8 #include <kernel/mutex.h>
9 #include <kernel/spinlock.h>
10 
11 /*
12  * struct mutex_pm_aware - Mutex usable in PM atomic sequence
13  *
14  * Some resources need a mutex protection for runtime operations but are
15  * also accessed during specific system power transition (PM power off,
16  * suspend and resume) that operate in atomic execution environment where
17  * non-secure world is not operational, for example in fastcall SMC entries
 * of the PSCI services. In such cases we cannot take a mutex; instead we
 * expect the mutex to be unlocked and additionally try to acquire a
 * spinlock to check resource access consistency.
21  *
22  * Core intentionally panics in case of unexpected resource access contention:
23  * - When a thread requests a mutex held by a non-thread context;
24  * - When a non-thread context requests a mutex held by a thread;
25  * - When a non-thread context requests a mutex held by a non-thread context.
26  */
struct mutex_pm_aware {
	struct mutex mutex;	/* Serializes access between threaded (non-PM) contexts */
	unsigned int lock;	/* Spinlock asserting access consistency in PM (atomic) context */
};
31 
/*
 * Static initializer for a struct mutex_pm_aware: mutex and spinlock both
 * start in the unlocked state. Alternative to mutex_pm_aware_init().
 */
#define MUTEX_PM_AWARE_INITIALIZER { \
		.mutex = MUTEX_INITIALIZER, \
		.lock = SPINLOCK_UNLOCK, \
	}
36 
/* Initialize @m at runtime; equivalent to MUTEX_PM_AWARE_INITIALIZER */
void mutex_pm_aware_init(struct mutex_pm_aware *m);

/* Release resources of @m; @m is presumably expected unlocked — see mutex_destroy() */
void mutex_pm_aware_destroy(struct mutex_pm_aware *m);

/*
 * Acquire @m: takes the mutex in thread context, otherwise relies on the
 * spinlock. Per the contention rules above, unexpected cross-context
 * contention panics rather than blocks (behavior implemented elsewhere).
 */
void mutex_pm_aware_lock(struct mutex_pm_aware *m);

/* Release @m previously acquired with mutex_pm_aware_lock() */
void mutex_pm_aware_unlock(struct mutex_pm_aware *m);
41 
42 #endif /*__KERNEL_MUTEX_PM_AWARE_H*/
43 
44