/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2018, Linaro Limited
 */

#ifndef __KERNEL_LOCKDEP_H
#define __KERNEL_LOCKDEP_H

#include <compiler.h>
#include <kernel/panic.h>
#include <sys/queue.h>
#include <tee_api_types.h>
#include <trace.h>
#include <types_ext.h>

/*
 * Lock graph. If node A has an edge to node B, then A was locked before B in
 * the same thread of execution.
 */

struct lockdep_edge {
	struct lockdep_node *to;
	uintptr_t thread_id;
	vaddr_t *call_stack_from;
	vaddr_t *call_stack_to;
	STAILQ_ENTRY(lockdep_edge) link;
};

STAILQ_HEAD(lockdep_edge_head, lockdep_edge);

struct lockdep_node {
	uintptr_t lock_id; /* For instance, address of actual lock object */
	struct lockdep_edge_head edges;
	TAILQ_ENTRY(lockdep_node) link;
	uint8_t flags; /* Used temporarily when walking the graph */
};

TAILQ_HEAD(lockdep_node_head, lockdep_node);

/* Per-thread queue of currently owned locks (point to nodes in the graph) */

struct lockdep_lock {
	struct lockdep_node *node;
	vaddr_t *call_stack;
	TAILQ_ENTRY(lockdep_lock) link;
};

TAILQ_HEAD(lockdep_lock_head, lockdep_lock);

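/*
 * Illustrative note (not part of the API): a cycle in this graph denotes a
 * potential deadlock. If thread 1 does lock(A) then lock(B) while thread 2
 * does lock(B) then lock(A), the edges A -> B and B -> A are both recorded,
 * forming a cycle that is reported even if no deadlock happens in that
 * particular run.
 */
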
#ifdef CFG_LOCKDEP

/*
 * Functions used internally and for testing the algorithm. Actual locking code
 * should use the wrappers below (which panic in case of error).
 */
TEE_Result __lockdep_lock_acquire(struct lockdep_node_head *graph,
				  struct lockdep_lock_head *owned,
				  uintptr_t id);
TEE_Result __lockdep_lock_tryacquire(struct lockdep_node_head *graph,
				     struct lockdep_lock_head *owned,
				     uintptr_t id);
TEE_Result __lockdep_lock_release(struct lockdep_lock_head *owned,
				  uintptr_t id);

/* Delete all elements in @graph */
void lockdep_graph_delete(struct lockdep_node_head *graph);

/* Delete all elements in @queue */
void lockdep_queue_delete(struct lockdep_lock_head *queue);

/*
 * Acquire lock @id, while already holding the locks in @owned.
 * @owned represents the caller; there should be one instance per thread of
 * execution. @graph is the directed acyclic graph (DAG) to be used for
 * potential deadlock detection; use the same @graph for all the locks of the
 * same type as lock @id.
 *
 * This function will panic() if the acquire operation would result in a lock
 * hierarchy violation (potential deadlock).
 */
static inline void lockdep_lock_acquire(struct lockdep_node_head *graph,
					struct lockdep_lock_head *owned,
					uintptr_t id)
{
	TEE_Result res = __lockdep_lock_acquire(graph, owned, id);

	if (res) {
		EMSG("lockdep: error %#" PRIx32, res);
		panic();
	}
}

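/*
 * Typical usage (a sketch; my_graph, my_lock and thread_owned_locks() are
 * hypothetical names, not part of this API). The check runs before blocking
 * on the real lock, so a potential deadlock is reported rather than hit:
 *
 *	static struct lockdep_node_head my_graph =
 *		TAILQ_HEAD_INITIALIZER(my_graph);
 *
 *	void my_lock_acquire(struct my_lock *l)
 *	{
 *		lockdep_lock_acquire(&my_graph, thread_owned_locks(),
 *				     (uintptr_t)l);
 *		my_lock_take(l);
 *	}
 */
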
/*
 * Non-blocking version of lockdep_lock_acquire(): acquire lock @id, while
 * already holding the locks in @owned.
 * @owned represents the caller; there should be one instance per thread of
 * execution. @graph is the directed acyclic graph (DAG) to be used for
 * potential deadlock detection; use the same @graph for all the locks of the
 * same type as lock @id.
 *
 * This function will panic() in case of error.
 */
static inline void lockdep_lock_tryacquire(struct lockdep_node_head *graph,
					   struct lockdep_lock_head *owned,
					   uintptr_t id)
{
	TEE_Result res = __lockdep_lock_tryacquire(graph, owned, id);

	if (res) {
		EMSG("lockdep: error %#" PRIx32, res);
		panic();
	}
}

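/*
 * Plausible pattern (same hypothetical names as above): since a trylock
 * cannot block, the lock is recorded only once the underlying trylock has
 * succeeded:
 *
 *	bool my_lock_try(struct my_lock *l)
 *	{
 *		if (!my_lock_take_nonblocking(l))
 *			return false;
 *		lockdep_lock_tryacquire(&my_graph, thread_owned_locks(),
 *					(uintptr_t)l);
 *		return true;
 *	}
 */
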
/*
 * Release lock @id. The lock is removed from @owned.
 *
 * This function will panic() if the lock is not held by the caller.
 */
static inline void lockdep_lock_release(struct lockdep_lock_head *owned,
					uintptr_t id)
{
	TEE_Result res = __lockdep_lock_release(owned, id);

	if (res) {
		EMSG("lockdep: error %#" PRIx32, res);
		panic();
	}
}

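/*
 * Counterpart to the acquire example above (same hypothetical names):
 *
 *	void my_lock_release(struct my_lock *l)
 *	{
 *		my_lock_give(l);
 *		lockdep_lock_release(thread_owned_locks(), (uintptr_t)l);
 *	}
 */
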
/*
 * Destroy lock @id in @graph. The node associated with the lock is freed.
 */
void lockdep_lock_destroy(struct lockdep_node_head *graph, uintptr_t id);

/* Initialize lockdep for mutex objects (kernel/mutex.h) */
void mutex_lockdep_init(void);

#else /* CFG_LOCKDEP */

static inline void lockdep_lock_acquire(struct lockdep_node_head *g __unused,
					struct lockdep_lock_head *o __unused,
					uintptr_t id __unused)
{}
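
/* Stub so lockdep_lock_tryacquire() callers build with CFG_LOCKDEP disabled */
static inline void
lockdep_lock_tryacquire(struct lockdep_node_head *g __unused,
			struct lockdep_lock_head *o __unused,
			uintptr_t id __unused)
{}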

static inline void lockdep_lock_release(struct lockdep_lock_head *o __unused,
					uintptr_t id __unused)
{}

static inline void
lockdep_lock_destroy(struct lockdep_node_head *graph __unused,
		     uintptr_t id __unused)
{}

static inline void mutex_lockdep_init(void)
{}

#endif /* !CFG_LOCKDEP */

#endif /* !__KERNEL_LOCKDEP_H */