// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2018, Linaro Limited
 */

#include <assert.h>
#include <config.h>
#include <kernel/lockdep.h>
#include <kernel/unwind.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <util.h>

/* lockdep_node::flags values */
/* Flags used for depth-first topological sorting */
#define LOCKDEP_NODE_TEMP_MARK		BIT(0)
#define LOCKDEP_NODE_PERM_MARK		BIT(1)
/* Flag used during breadth-first search (print shortest cycle) */
#define LOCKDEP_NODE_BFS_VISITED	BIT(2)

/* Find node in graph or add it */
static struct lockdep_node *lockdep_add_to_graph(
		struct lockdep_node_head *graph,
		uintptr_t lock_id)
{
	struct lockdep_node *node = NULL;

	assert(graph);
	TAILQ_FOREACH(node, graph, link)
		if (node->lock_id == lock_id)
			return node;

	node = calloc(1, sizeof(*node));
	if (!node)
		return NULL;

	node->lock_id = lock_id;
	STAILQ_INIT(&node->edges);
	TAILQ_INSERT_TAIL(graph, node, link);

	return node;
}

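/*
 * Duplicate a zero-terminated array of return addresses (a recorded call
 * stack). Returns NULL if @stack is NULL or on allocation failure.
 */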
static vaddr_t *dup_call_stack(vaddr_t *stack)
{
	vaddr_t *nstack = NULL;
	int n = 0;

	if (!stack)
		return NULL;

	while (stack[n])
		n++;

	nstack = malloc((n + 1) * sizeof(vaddr_t));
	if (!nstack)
		return NULL;

	memcpy(nstack, stack, (n + 1) * sizeof(vaddr_t));

	return nstack;
}

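/* Print a recorded call stack, but only if stack recording is enabled */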
static void lockdep_print_call_stack(vaddr_t *stack)
{
	vaddr_t *p = NULL;

	if (!IS_ENABLED(CFG_LOCKDEP_RECORD_STACK))
		return;

	EMSG_RAW("Call stack:");
	for (p = stack; p && *p; p++)
		EMSG_RAW(" %#" PRIxPTR, *p);
}

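/*
 * Add an edge @from -> @to to the graph, meaning "lock @to was acquired
 * while holding lock @from". Does nothing if the edge already exists.
 */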
static TEE_Result lockdep_add_edge(struct lockdep_node *from,
				   struct lockdep_node *to,
				   vaddr_t *call_stack_from,
				   vaddr_t *call_stack_to,
				   uintptr_t thread_id)
{
	struct lockdep_edge *edge = NULL;

	STAILQ_FOREACH(edge, &from->edges, link)
		if (edge->to == to)
			return TEE_SUCCESS;

	edge = calloc(1, sizeof(*edge));
	if (!edge)
		return TEE_ERROR_OUT_OF_MEMORY;
	edge->to = to;
	edge->call_stack_from = dup_call_stack(call_stack_from);
	edge->call_stack_to = dup_call_stack(call_stack_to);
	edge->thread_id = thread_id;
	STAILQ_INSERT_TAIL(&from->edges, edge, link);

	return TEE_SUCCESS;
}

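/*
 * Queue element for the breadth-first search implemented in
 * lockdep_graph_get_shortest_cycle()
 */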
struct lockdep_bfs {
	struct lockdep_node *node;
	uintptr_t *path;
	int pathlen;
	TAILQ_ENTRY(lockdep_bfs) link;
};

TAILQ_HEAD(lockdep_bfs_head, lockdep_bfs);

static void lockdep_bfs_queue_delete(struct lockdep_bfs_head *queue)
{
	struct lockdep_bfs *cur = NULL;
	struct lockdep_bfs *next = NULL;

	TAILQ_FOREACH_SAFE(cur, queue, link, next) {
		TAILQ_REMOVE(queue, cur, link);
		free(cur->path);
		free(cur);
	}
}

/*
 * Print the shortest cycle in @graph that contains @node.
 * This function performs an iterative breadth-first search starting from
 * @node, and stops when it reaches @node again. Each queued element tracks
 * the path from the start node.
 */
static uintptr_t *lockdep_graph_get_shortest_cycle(struct lockdep_node *node)
{
	struct lockdep_bfs_head queue;
	struct lockdep_bfs *qe = NULL;
	uintptr_t *ret = NULL;

	TAILQ_INIT(&queue);
	node->flags |= LOCKDEP_NODE_BFS_VISITED;

	qe = calloc(1, sizeof(*qe));
	if (!qe)
		goto out;
	qe->node = node;
	qe->path = malloc(sizeof(uintptr_t));
	if (!qe->path)
		goto out;
	qe->path[0] = node->lock_id;
	qe->pathlen = 1;
	TAILQ_INSERT_TAIL(&queue, qe, link);

	while (!TAILQ_EMPTY(&queue)) {
		struct lockdep_node *n = NULL;
		struct lockdep_edge *e = NULL;

		qe = TAILQ_FIRST(&queue);
		n = qe->node;
		TAILQ_REMOVE(&queue, qe, link);

		STAILQ_FOREACH(e, &n->edges, link) {
			if (e->to->lock_id == node->lock_id) {
				uintptr_t *tmp = NULL;
				size_t nlen = qe->pathlen + 1;

				/*
				 * Cycle found. Terminate cycle path with NULL
				 * and return it.
				 */
				tmp = realloc(qe->path,
					      nlen * sizeof(uintptr_t));
				if (!tmp) {
					EMSG("Out of memory");
					free(qe->path);
					ret = NULL;
					goto out;
				}
				qe->path = tmp;
				qe->path[nlen - 1] = 0;
				ret = qe->path;
				goto out;
			}

			if (!(e->to->flags & LOCKDEP_NODE_BFS_VISITED)) {
				size_t nlen = 0;
				struct lockdep_bfs *nqe = NULL;

				e->to->flags |= LOCKDEP_NODE_BFS_VISITED;

				nlen = qe->pathlen + 1;
				nqe = calloc(1, sizeof(*nqe));
				if (!nqe)
					goto out;
				nqe->node = e->to;
				nqe->path = malloc(nlen * sizeof(uintptr_t));
				if (!nqe->path)
					goto out;
				nqe->pathlen = nlen;
				memcpy(nqe->path, qe->path,
				       qe->pathlen * sizeof(uintptr_t));
				nqe->path[nlen - 1] = e->to->lock_id;
				TAILQ_INSERT_TAIL(&queue, nqe, link);
			}
		}
		free(qe->path);
		free(qe);
		qe = NULL;
	}

out:
	free(qe);
	lockdep_bfs_queue_delete(&queue);
	return ret;
}

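/*
 * Depth-first traversal step used by lockdep_graph_sort(). Returns
 * TEE_ERROR_BAD_STATE if a node is reached twice along the current path,
 * that is, if the graph contains a cycle.
 */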
static TEE_Result lockdep_visit(struct lockdep_node *node)
{
	struct lockdep_edge *e = NULL;

	if (node->flags & LOCKDEP_NODE_PERM_MARK)
		return TEE_SUCCESS;

	if (node->flags & LOCKDEP_NODE_TEMP_MARK)
		return TEE_ERROR_BAD_STATE; /* Not a DAG! */

	node->flags |= LOCKDEP_NODE_TEMP_MARK;

	STAILQ_FOREACH(e, &node->edges, link) {
		TEE_Result res = lockdep_visit(e->to);

		if (res)
			return res;
	}

	node->flags |= LOCKDEP_NODE_PERM_MARK;
	return TEE_SUCCESS;
}

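/*
 * Depth-first topological sort of @graph. Only the status is used here: an
 * error means the graph is not a DAG, i.e., there is a potential deadlock.
 */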
static TEE_Result lockdep_graph_sort(struct lockdep_node_head *graph)
{
	struct lockdep_node *node = NULL;

	TAILQ_FOREACH(node, graph, link) {
		if (!node->flags) {
			/* Unmarked node */
			TEE_Result res = lockdep_visit(node);

			if (res)
				return res;
		}
	}

	TAILQ_FOREACH(node, graph, link)
		node->flags = 0;

	return TEE_SUCCESS;
}

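/* Return the edge from lock @from to lock @to in @graph, or NULL if none */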
static struct lockdep_edge *lockdep_find_edge(struct lockdep_node_head *graph,
					      uintptr_t from, uintptr_t to)
{
	struct lockdep_node *node = NULL;
	struct lockdep_edge *edge = NULL;

	TAILQ_FOREACH(node, graph, link)
		if (node->lock_id == from)
			STAILQ_FOREACH(edge, &node->edges, link)
				if (edge->to->lock_id == to)
					return edge;
	return NULL;
}

static void lockdep_print_edge_info(uintptr_t from __maybe_unused,
				    struct lockdep_edge *edge)
{
	uintptr_t __maybe_unused to = edge->to->lock_id;
	const char __maybe_unused *at_msg = "";
	const char __maybe_unused *acq_msg = "";

	if (IS_ENABLED(CFG_LOCKDEP_RECORD_STACK)) {
		at_msg = " at:";
		acq_msg = " acquired at:";
	}

	EMSG_RAW("-> Thread %#" PRIxPTR " acquired lock %#" PRIxPTR "%s",
		 edge->thread_id, to, at_msg);
	lockdep_print_call_stack(edge->call_stack_to);
	EMSG_RAW("...while holding lock %#" PRIxPTR "%s",
		 from, acq_msg);
	lockdep_print_call_stack(edge->call_stack_from);
}

/*
 * Find a cycle containing @node in the lock graph, then print full debug
 * information about each edge in the cycle (the thread that acquired the
 * locks and the recorded call stacks).
 */
static void lockdep_print_cycle_info(struct lockdep_node_head *graph,
				     struct lockdep_node *node)
{
	struct lockdep_edge *edge = NULL;
	uintptr_t *cycle = NULL;
	uintptr_t *p = NULL;
	uintptr_t from = 0;
	uintptr_t to = 0;

	cycle = lockdep_graph_get_shortest_cycle(node);
	assert(cycle && cycle[0]);
	EMSG_RAW("-> Shortest cycle:");
	for (p = cycle; *p; p++)
		EMSG_RAW(" Lock %#" PRIxPTR, *p);
	for (p = cycle; ; p++) {
		if (!*p) {
			assert(p != cycle);
			from = to;
			to = cycle[0];
			edge = lockdep_find_edge(graph, from, to);
			lockdep_print_edge_info(from, edge);
			break;
		}
		if (p != cycle)
			from = to;
		to = *p;
		if (p != cycle) {
			edge = lockdep_find_edge(graph, from, to);
			lockdep_print_edge_info(from, edge);
		}
	}
	free(cycle);
}

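/* Record the current call stack, but only if stack recording is enabled */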
static vaddr_t *lockdep_get_kernel_stack(void)
{
	if (IS_ENABLED(CFG_LOCKDEP_RECORD_STACK))
		return unw_get_kernel_stack();

	return NULL;
}

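/*
 * Call this before blocking on lock @id while holding the locks in @owned.
 * Adds the resulting dependency edges to @graph, then checks that the graph
 * is still acyclic; if not, a potential deadlock is reported and an error
 * status is returned.
 */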
TEE_Result __lockdep_lock_acquire(struct lockdep_node_head *graph,
				  struct lockdep_lock_head *owned,
				  uintptr_t id)
{
	struct lockdep_node *node = lockdep_add_to_graph(graph, id);
	struct lockdep_lock *lock = NULL;
	TEE_Result res = TEE_SUCCESS;
	vaddr_t *acq_stack = NULL;

	if (!node)
		return TEE_ERROR_OUT_OF_MEMORY;

	acq_stack = lockdep_get_kernel_stack();

	TAILQ_FOREACH(lock, owned, link) {
		res = lockdep_add_edge(lock->node, node, lock->call_stack,
				       acq_stack, (uintptr_t)owned);
		if (res)
			return res;
	}

	res = lockdep_graph_sort(graph);
	if (res) {
		EMSG_RAW("Potential deadlock detected!");
		EMSG_RAW("When trying to acquire lock %#" PRIxPTR, id);
		lockdep_print_cycle_info(graph, node);
		return res;
	}

	lock = calloc(1, sizeof(*lock));
	if (!lock)
		return TEE_ERROR_OUT_OF_MEMORY;

	lock->node = node;
	lock->call_stack = acq_stack;
	TAILQ_INSERT_TAIL(owned, lock, link);

	return TEE_SUCCESS;
}

/*
 * Call this when it is known that the thread has been able to acquire the
 * lock. Similar to __lockdep_lock_acquire(), but since the operation is
 * non-blocking, no dependencies on currently owned locks are created.
 */
TEE_Result __lockdep_lock_tryacquire(struct lockdep_node_head *graph,
				     struct lockdep_lock_head *owned,
				     uintptr_t id)
{
	struct lockdep_node *node = lockdep_add_to_graph(graph, id);
	struct lockdep_lock *lock = NULL;
	vaddr_t *acq_stack = NULL;

	if (!node)
		return TEE_ERROR_OUT_OF_MEMORY;

	acq_stack = lockdep_get_kernel_stack();

	lock = calloc(1, sizeof(*lock));
	if (!lock)
		return TEE_ERROR_OUT_OF_MEMORY;

	lock->node = node;
	lock->call_stack = acq_stack;
	TAILQ_INSERT_TAIL(owned, lock, link);

	return TEE_SUCCESS;
}

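/*
 * Call this when lock @id is released. Removes the most recently acquired
 * matching entry from @owned; reports an error if the lock is not held.
 */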
TEE_Result __lockdep_lock_release(struct lockdep_lock_head *owned, uintptr_t id)
{
	struct lockdep_lock *lock = NULL;

	TAILQ_FOREACH_REVERSE(lock, owned, lockdep_lock_head, link) {
		if (lock->node->lock_id == id) {
			TAILQ_REMOVE(owned, lock, link);
			free(lock->call_stack);
			free(lock);
			return TEE_SUCCESS;
		}
	}

	EMSG_RAW("Thread %p does not own lock %#" PRIxPTR, (void *)owned, id);
	return TEE_ERROR_ITEM_NOT_FOUND;
}
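
/*
 * Usage sketch (illustrative only, not part of this file): a
 * lockdep-instrumented lock implementation is expected to call the hooks
 * above around the real lock operations, with one graph shared by all locks
 * and one owned-locks queue per thread. The names graph,
 * thread_owned_locks(), really_lock()/really_unlock() and the mutex 'm' are
 * hypothetical:
 *
 *	res = __lockdep_lock_acquire(&graph, thread_owned_locks(),
 *				     (uintptr_t)m);
 *	if (!res)
 *		really_lock(m);
 *	...
 *	really_unlock(m);
 *	__lockdep_lock_release(thread_owned_locks(), (uintptr_t)m);
 */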

static void lockdep_free_edge(struct lockdep_edge *edge)
{
	free(edge->call_stack_from);
	free(edge->call_stack_to);
	free(edge);
}

static void lockdep_node_delete(struct lockdep_node *node)
{
	struct lockdep_edge *edge = NULL;
	struct lockdep_edge *next = NULL;

	STAILQ_FOREACH_SAFE(edge, &node->edges, link, next)
		lockdep_free_edge(edge);

	free(node);
}

void lockdep_graph_delete(struct lockdep_node_head *graph)
{
	struct lockdep_node *node = NULL;
	struct lockdep_node *next = NULL;

	TAILQ_FOREACH_SAFE(node, graph, link, next) {
		TAILQ_REMOVE(graph, node, link);
		lockdep_node_delete(node);
	}
}

void lockdep_queue_delete(struct lockdep_lock_head *owned)
{
	struct lockdep_lock *lock = NULL;
	struct lockdep_lock *next = NULL;

	TAILQ_FOREACH_SAFE(lock, owned, link, next) {
		TAILQ_REMOVE(owned, lock, link);
		/* The recorded call stack (if any) is owned by the entry */
		free(lock->call_stack);
		free(lock);
	}
}

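/*
 * Remove @node from @graph along with all of its incoming and outgoing
 * edges
 */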
static void lockdep_node_destroy(struct lockdep_node_head *graph,
				 struct lockdep_node *node)
{
	struct lockdep_edge *edge = NULL;
	struct lockdep_edge *next = NULL;
	struct lockdep_node *from = NULL;

	TAILQ_REMOVE(graph, node, link);

	/*
	 * Loop over all nodes in the graph and remove every edge whose
	 * "to" field points to the node being removed.
	 */
	TAILQ_FOREACH(from, graph, link) {
		edge = STAILQ_FIRST(&from->edges);
		while (edge && edge->to == node) {
			STAILQ_REMOVE_HEAD(&from->edges, link);
			lockdep_free_edge(edge);
			edge = STAILQ_FIRST(&from->edges);
		}

		if (!edge)
			continue;

		next = STAILQ_NEXT(edge, link);
		while (next) {
			if (next->to == node) {
				STAILQ_REMOVE_AFTER(&from->edges, edge, link);
				lockdep_free_edge(next);
			} else {
				edge = next;
			}
			next = STAILQ_NEXT(edge, link);
		}
	}

	STAILQ_FOREACH_SAFE(edge, &node->edges, link, next)
		lockdep_free_edge(edge);

	free(node);
}

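/* Remove the node for @lock_id from @graph, e.g. when a lock is destroyed */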
void lockdep_lock_destroy(struct lockdep_node_head *graph, uintptr_t lock_id)
{
	struct lockdep_node *node = NULL;

	assert(graph);
	TAILQ_FOREACH(node, graph, link) {
		if (node->lock_id == lock_id) {
			lockdep_node_destroy(graph, node);
			break;
		}
	}
}