// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2018, Linaro Limited
 */

#include <assert.h>
#include <config.h>
#include <kernel/lockdep.h>
#include <kernel/unwind.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <util.h>

/* lockdep_node::flags values */
/* Flags used for depth-first topological sorting */
#define LOCKDEP_NODE_TEMP_MARK		BIT(0)
#define LOCKDEP_NODE_PERM_MARK		BIT(1)
/* Flag used during breadth-first search (print shortest cycle) */
#define LOCKDEP_NODE_BFS_VISITED	BIT(2)

/* Find node in graph or add it */
static struct lockdep_node *lockdep_add_to_graph(
				struct lockdep_node_head *graph,
				uintptr_t lock_id)
{
	struct lockdep_node *node = NULL;

	assert(graph);
	TAILQ_FOREACH(node, graph, link)
		if (node->lock_id == lock_id)
			return node;

	node = calloc(1, sizeof(*node));
	if (!node)
		return NULL;

	node->lock_id = lock_id;
	STAILQ_INIT(&node->edges);
	TAILQ_INSERT_TAIL(graph, node, link);

	return node;
}

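/*
 * Duplicate a zero-terminated array of return addresses. Returns NULL if
 * @stack is NULL or on allocation failure.
 */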
static vaddr_t *dup_call_stack(vaddr_t *stack)
{
	vaddr_t *nstack = NULL;
	int n = 0;

	if (!stack)
		return NULL;

	while (stack[n])
		n++;

	nstack = malloc((n + 1) * sizeof(vaddr_t));
	if (!nstack)
		return NULL;

	memcpy(nstack, stack, (n + 1) * sizeof(vaddr_t));

	return nstack;
}

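/* Print a recorded call stack (no-op unless CFG_LOCKDEP_RECORD_STACK=y) */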
static void lockdep_print_call_stack(vaddr_t *stack)
{
	vaddr_t *p = NULL;

	if (!IS_ENABLED(CFG_LOCKDEP_RECORD_STACK))
		return;

	EMSG_RAW("Call stack:");
	for (p = stack; p && *p; p++)
		EMSG_RAW(" %#" PRIxPTR, *p);
}

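/*
 * Add edge @from -> @to to the graph, meaning "lock @to was acquired while
 * holding lock @from". Duplicate edges are silently ignored. The call stacks
 * and thread ID are recorded for later diagnostics.
 */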
static TEE_Result lockdep_add_edge(struct lockdep_node *from,
				   struct lockdep_node *to,
				   vaddr_t *call_stack_from,
				   vaddr_t *call_stack_to,
				   uintptr_t thread_id)
{
	struct lockdep_edge *edge = NULL;

	STAILQ_FOREACH(edge, &from->edges, link)
		if (edge->to == to)
			return TEE_SUCCESS;

	edge = calloc(1, sizeof(*edge));
	if (!edge)
		return TEE_ERROR_OUT_OF_MEMORY;
	edge->to = to;
	edge->call_stack_from = dup_call_stack(call_stack_from);
	edge->call_stack_to = dup_call_stack(call_stack_to);
	edge->thread_id = thread_id;
	STAILQ_INSERT_TAIL(&from->edges, edge, link);

	return TEE_SUCCESS;
}

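/*
 * Queue element for the breadth-first search below. @path is the sequence of
 * lock IDs from the start node to @node, @pathlen its number of entries.
 */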
struct lockdep_bfs {
	struct lockdep_node *node;
	uintptr_t *path;
	int pathlen;
	TAILQ_ENTRY(lockdep_bfs) link;
};

TAILQ_HEAD(lockdep_bfs_head, lockdep_bfs);

static void lockdep_bfs_queue_delete(struct lockdep_bfs_head *queue)
{
	struct lockdep_bfs *cur = NULL;
	struct lockdep_bfs *next = NULL;

	TAILQ_FOREACH_SAFE(cur, queue, link, next) {
		TAILQ_REMOVE(queue, cur, link);
		free(cur->path);
		free(cur);
	}
}

/*
 * Return the shortest cycle in @graph going through @node, as a
 * zero-terminated array of lock IDs to be freed by the caller (NULL on
 * allocation failure). This function performs an iterative breadth-first
 * search starting from @node, and stops when it reaches @node again. Each
 * queue element tracks the path from the start node.
 */
static uintptr_t *lockdep_graph_get_shortest_cycle(struct lockdep_node *node)
{
	struct lockdep_bfs_head queue;
	struct lockdep_bfs *qe = NULL;
	uintptr_t *ret = NULL;

	TAILQ_INIT(&queue);
	node->flags |= LOCKDEP_NODE_BFS_VISITED;

	qe = calloc(1, sizeof(*qe));
	if (!qe)
		goto out;
	qe->node = node;
	qe->path = malloc(sizeof(uintptr_t));
	if (!qe->path)
		goto out;
	qe->path[0] = node->lock_id;
	qe->pathlen = 1;
	TAILQ_INSERT_TAIL(&queue, qe, link);

	while (!TAILQ_EMPTY(&queue)) {
		qe = TAILQ_FIRST(&queue);

		struct lockdep_node *n = qe->node;

		TAILQ_REMOVE(&queue, qe, link);

		struct lockdep_edge *e = NULL;

		STAILQ_FOREACH(e, &n->edges, link) {
			if (e->to->lock_id == node->lock_id) {
				uintptr_t *tmp = NULL;
				size_t nlen = qe->pathlen + 1;

				/*
				 * Cycle found. Terminate cycle path with a
				 * zero entry and return it.
				 */
				tmp = realloc(qe->path,
					      nlen * sizeof(uintptr_t));
				if (!tmp) {
					EMSG("Out of memory");
					free(qe->path);
					qe->path = NULL;
					ret = NULL;
					goto out;
				}
				qe->path = tmp;
				qe->path[nlen - 1] = 0;
				ret = qe->path;
				/* Ownership passes to the caller */
				qe->path = NULL;
				goto out;
			}

			if (!(e->to->flags & LOCKDEP_NODE_BFS_VISITED)) {
				e->to->flags |= LOCKDEP_NODE_BFS_VISITED;

				size_t nlen = qe->pathlen + 1;
				struct lockdep_bfs *nqe = calloc(1,
								 sizeof(*nqe));

				if (!nqe)
					goto out;
				nqe->node = e->to;
				nqe->path = malloc(nlen * sizeof(uintptr_t));
				if (!nqe->path) {
					free(nqe);
					goto out;
				}
				nqe->pathlen = nlen;
				memcpy(nqe->path, qe->path,
				       qe->pathlen * sizeof(uintptr_t));
				nqe->path[nlen - 1] = e->to->lock_id;
				TAILQ_INSERT_TAIL(&queue, nqe, link);
			}
		}
		free(qe->path);
		free(qe);
		qe = NULL;
	}

out:
	/* Free the current element (if any) unless its path was returned */
	if (qe) {
		free(qe->path);
		free(qe);
	}
	lockdep_bfs_queue_delete(&queue);
	return ret;
}

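/*
 * Depth-first traversal used by lockdep_graph_sort(). A node is marked
 * temporarily while its successors are visited and permanently once done;
 * reaching a temporarily-marked node means the graph has a cycle, reported
 * as TEE_ERROR_BAD_STATE.
 */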
static TEE_Result lockdep_visit(struct lockdep_node *node)
{
	if (node->flags & LOCKDEP_NODE_PERM_MARK)
		return TEE_SUCCESS;

	if (node->flags & LOCKDEP_NODE_TEMP_MARK)
		return TEE_ERROR_BAD_STATE;	/* Not a DAG! */

	node->flags |= LOCKDEP_NODE_TEMP_MARK;

	struct lockdep_edge *e = NULL;

	STAILQ_FOREACH(e, &node->edges, link) {
		TEE_Result res = lockdep_visit(e->to);

		if (res)
			return res;
	}

	node->flags |= LOCKDEP_NODE_PERM_MARK;
	return TEE_SUCCESS;
}

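/*
 * Check that the lock graph is a DAG by attempting a depth-first topological
 * sort. Returns TEE_ERROR_BAD_STATE if a cycle (potential deadlock) exists.
 * All node flags are cleared before returning.
 */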
static TEE_Result lockdep_graph_sort(struct lockdep_node_head *graph)
{
	struct lockdep_node *node = NULL;

	TAILQ_FOREACH(node, graph, link) {
		if (!node->flags) {
			/* Unmarked node */
			TEE_Result res = lockdep_visit(node);

			if (res)
				return res;
		}
	}

	TAILQ_FOREACH(node, graph, link)
		node->flags = 0;

	return TEE_SUCCESS;
}

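/* Return the edge from lock @from to lock @to in @graph, or NULL if none */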
static struct lockdep_edge *lockdep_find_edge(struct lockdep_node_head *graph,
					      uintptr_t from, uintptr_t to)
{
	struct lockdep_node *node = NULL;
	struct lockdep_edge *edge = NULL;

	TAILQ_FOREACH(node, graph, link)
		if (node->lock_id == from)
			STAILQ_FOREACH(edge, &node->edges, link)
				if (edge->to->lock_id == to)
					return edge;
	return NULL;
}

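/*
 * Print which thread created @edge and, when CFG_LOCKDEP_RECORD_STACK is
 * enabled, the call stacks of the two acquisitions.
 */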
static void lockdep_print_edge_info(uintptr_t from __maybe_unused,
				    struct lockdep_edge *edge)
{
	uintptr_t __maybe_unused to = edge->to->lock_id;
	const char __maybe_unused *at_msg = "";
	const char __maybe_unused *acq_msg = "";

	if (IS_ENABLED(CFG_LOCKDEP_RECORD_STACK)) {
		at_msg = " at:";
		acq_msg = " acquired at:";
	}

	EMSG_RAW("-> Thread %#" PRIxPTR " acquired lock %#" PRIxPTR "%s",
		 edge->thread_id, to, at_msg);
	lockdep_print_call_stack(edge->call_stack_to);
	EMSG_RAW("...while holding lock %#" PRIxPTR "%s",
		 from, acq_msg);
	lockdep_print_call_stack(edge->call_stack_from);
}

/*
 * Find a cycle containing @node in the lock graph, then print full debug
 * information about each edge (thread that acquired the locks and the call
 * stacks).
 */
static void lockdep_print_cycle_info(struct lockdep_node_head *graph,
				     struct lockdep_node *node)
{
	struct lockdep_edge *edge = NULL;
	uintptr_t *cycle = NULL;
	uintptr_t *p = NULL;
	uintptr_t from = 0;
	uintptr_t to = 0;

	cycle = lockdep_graph_get_shortest_cycle(node);
	assert(cycle && cycle[0]);
	EMSG_RAW("-> Shortest cycle:");
	for (p = cycle; *p; p++)
		EMSG_RAW(" Lock %#" PRIxPTR, *p);
	for (p = cycle; ; p++) {
		if (!*p) {
			assert(p != cycle);
			from = to;
			to = cycle[0];
			edge = lockdep_find_edge(graph, from, to);
			lockdep_print_edge_info(from, edge);
			break;
		}
		if (p != cycle)
			from = to;
		to = *p;
		if (p != cycle) {
			edge = lockdep_find_edge(graph, from, to);
			lockdep_print_edge_info(from, edge);
		}
	}
	free(cycle);
}

static vaddr_t *lockdep_get_kernel_stack(void)
{
	if (IS_ENABLED(CFG_LOCKDEP_RECORD_STACK))
		return unw_get_kernel_stack();

	return NULL;
}

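/*
 * Register the acquisition of lock @id by the thread owning list @owned:
 * add an edge from every lock currently held to @id, then check the whole
 * graph for cycles. Returns TEE_ERROR_BAD_STATE (after printing the
 * shortest cycle found) if a potential deadlock is detected,
 * TEE_ERROR_OUT_OF_MEMORY on allocation failure.
 *
 * Minimal usage sketch (hypothetical wrapper, not part of this file):
 *
 *	static struct lockdep_node_head graph =
 *		TAILQ_HEAD_INITIALIZER(graph);
 *
 *	void my_mutex_lock(struct my_mutex *m,
 *			   struct lockdep_lock_head *owned)
 *	{
 *		if (IS_ENABLED(CFG_LOCKDEP))
 *			__lockdep_lock_acquire(&graph, owned, (uintptr_t)m);
 *		// ...block until the underlying lock is held...
 *	}
 */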
TEE_Result __lockdep_lock_acquire(struct lockdep_node_head *graph,
				  struct lockdep_lock_head *owned,
				  uintptr_t id)
{
	struct lockdep_node *node = lockdep_add_to_graph(graph, id);

	if (!node)
		return TEE_ERROR_OUT_OF_MEMORY;

	struct lockdep_lock *lock = NULL;
	vaddr_t *acq_stack = lockdep_get_kernel_stack();

	TAILQ_FOREACH(lock, owned, link) {
		TEE_Result res = lockdep_add_edge(lock->node, node,
						  lock->call_stack,
						  acq_stack,
						  (uintptr_t)owned);

		if (res) {
			free(acq_stack);
			return res;
		}
	}

	TEE_Result res = lockdep_graph_sort(graph);

	if (res) {
		EMSG_RAW("Potential deadlock detected!");
		EMSG_RAW("When trying to acquire lock %#" PRIxPTR, id);
		lockdep_print_cycle_info(graph, node);
		free(acq_stack);
		return res;
	}

	lock = calloc(1, sizeof(*lock));
	if (!lock) {
		free(acq_stack);
		return TEE_ERROR_OUT_OF_MEMORY;
	}

	lock->node = node;
	lock->call_stack = acq_stack;
	TAILQ_INSERT_TAIL(owned, lock, link);

	return TEE_SUCCESS;
}

/*
 * Call this when it is known that the thread has been able to acquire the
 * lock. Similar to __lockdep_lock_acquire(), but since the acquisition is
 * non-blocking, no dependencies on the currently owned locks are created.
 */
TEE_Result __lockdep_lock_tryacquire(struct lockdep_node_head *graph,
				     struct lockdep_lock_head *owned,
				     uintptr_t id)
{
	struct lockdep_node *node = lockdep_add_to_graph(graph, id);

	if (!node)
		return TEE_ERROR_OUT_OF_MEMORY;

	struct lockdep_lock *lock = NULL;
	vaddr_t *acq_stack = lockdep_get_kernel_stack();

	lock = calloc(1, sizeof(*lock));
	if (!lock) {
		free(acq_stack);
		return TEE_ERROR_OUT_OF_MEMORY;
	}

	lock->node = node;
	lock->call_stack = acq_stack;
	TAILQ_INSERT_TAIL(owned, lock, link);

	return TEE_SUCCESS;
}

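/*
 * Remove lock @id from the list of locks held by the thread owning @owned.
 * The list is searched in reverse so the most recent acquisition of @id is
 * released first.
 */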
TEE_Result __lockdep_lock_release(struct lockdep_lock_head *owned, uintptr_t id)
{
	struct lockdep_lock *lock = NULL;

	TAILQ_FOREACH_REVERSE(lock, owned, lockdep_lock_head, link) {
		if (lock->node->lock_id == id) {
			TAILQ_REMOVE(owned, lock, link);
			free(lock->call_stack);
			free(lock);
			return TEE_SUCCESS;
		}
	}

	EMSG_RAW("Thread %p does not own lock %#" PRIxPTR, (void *)owned, id);
	return TEE_ERROR_ITEM_NOT_FOUND;
}

static void lockdep_free_edge(struct lockdep_edge *edge)
{
	free(edge->call_stack_from);
	free(edge->call_stack_to);
	free(edge);
}

static void lockdep_node_delete(struct lockdep_node *node)
{
	struct lockdep_edge *edge = NULL;
	struct lockdep_edge *next = NULL;

	STAILQ_FOREACH_SAFE(edge, &node->edges, link, next)
		lockdep_free_edge(edge);

	free(node);
}

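/* Delete all nodes of @graph along with their edges */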
void lockdep_graph_delete(struct lockdep_node_head *graph)
{
	struct lockdep_node *node = NULL;
	struct lockdep_node *next = NULL;

	TAILQ_FOREACH_SAFE(node, graph, link, next) {
		TAILQ_REMOVE(graph, node, link);
		lockdep_node_delete(node);
	}
}

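/* Delete a thread's list of currently owned locks */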
void lockdep_queue_delete(struct lockdep_lock_head *owned)
{
	struct lockdep_lock *lock = NULL;
	struct lockdep_lock *next = NULL;

	TAILQ_FOREACH_SAFE(lock, owned, link, next) {
		TAILQ_REMOVE(owned, lock, link);
		free(lock->call_stack);
		free(lock);
	}
}

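/*
 * Remove @node from @graph, free all edges leading to or from it, then free
 * the node itself.
 */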
static void lockdep_node_destroy(struct lockdep_node_head *graph,
				 struct lockdep_node *node)
{
	struct lockdep_edge *edge = NULL;
	struct lockdep_edge *next = NULL;
	struct lockdep_node *from = NULL;

	TAILQ_REMOVE(graph, node, link);

	/*
	 * Loop over all nodes in the graph to remove all edges with the
	 * node to remove in the "to" field.
	 */
	TAILQ_FOREACH(from, graph, link) {
		edge = STAILQ_FIRST(&from->edges);
		while (edge && edge->to == node) {
			STAILQ_REMOVE_HEAD(&from->edges, link);
			lockdep_free_edge(edge);
			edge = STAILQ_FIRST(&from->edges);
		}

		if (!edge)
			continue;

		next = STAILQ_NEXT(edge, link);
		while (next) {
			if (next->to == node) {
				STAILQ_REMOVE_AFTER(&from->edges, edge, link);
				lockdep_free_edge(next);
			} else {
				edge = next;
			}
			next = STAILQ_NEXT(edge, link);
		}
	}

	STAILQ_FOREACH_SAFE(edge, &node->edges, link, next)
		lockdep_free_edge(edge);

	free(node);
}

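/* Remove the node for @lock_id from @graph, if present, with all its edges */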
void lockdep_lock_destroy(struct lockdep_node_head *graph, uintptr_t lock_id)
{
	struct lockdep_node *node = NULL;

	assert(graph);
	TAILQ_FOREACH(node, graph, link) {
		if (node->lock_id == lock_id) {
			lockdep_node_destroy(graph, node);
			break;
		}
	}
}