// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2018, Linaro Limited
 */

#include <assert.h>
#include <kernel/lockdep.h>
#include <kernel/unwind.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <util.h>

/* lockdep_node::flags values */
/* Flags used for depth-first topological sorting */
#define LOCKDEP_NODE_TEMP_MARK		BIT(0)
#define LOCKDEP_NODE_PERM_MARK		BIT(1)
/* Flag used during breadth-first search (print shortest cycle) */
#define LOCKDEP_NODE_BFS_VISITED	BIT(2)

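/*
 * The graph data structures come from <kernel/lockdep.h>. A minimal sketch
 * of their assumed shape, inferred from how they are used below (the
 * authoritative definitions live in the header):
 *
 *	struct lockdep_edge {
 *		struct lockdep_node *to;
 *		uintptr_t thread_id;
 *		vaddr_t *call_stack_from;
 *		vaddr_t *call_stack_to;
 *		STAILQ_ENTRY(lockdep_edge) link;
 *	};
 *
 *	struct lockdep_node {
 *		uintptr_t lock_id;
 *		struct lockdep_edge_head edges;
 *		unsigned int flags;
 *		TAILQ_ENTRY(lockdep_node) link;
 *	};
 *
 * Nodes are locks (identified by an opaque lock_id) and a directed edge
 * "A -> B" records that some thread acquired B while holding A.
 */
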
/* Find node in graph or add it */
static struct lockdep_node *lockdep_add_to_graph(
				struct lockdep_node_head *graph,
				uintptr_t lock_id)
{
	struct lockdep_node *node = NULL;

	assert(graph);
	TAILQ_FOREACH(node, graph, link)
		if (node->lock_id == lock_id)
			return node;

	node = calloc(1, sizeof(*node));
	if (!node)
		return NULL;

	node->lock_id = lock_id;
	STAILQ_INIT(&node->edges);
	TAILQ_INSERT_TAIL(graph, node, link);

	return node;
}

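/* Duplicate a zero-terminated call stack (array of return addresses) */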
static vaddr_t *dup_call_stack(vaddr_t *stack)
{
	vaddr_t *nstack = NULL;
	int n = 0;

	if (!stack)
		return NULL;

	while (stack[n])
		n++;

	nstack = malloc((n + 1) * sizeof(vaddr_t));
	if (!nstack)
		return NULL;

	memcpy(nstack, stack, (n + 1) * sizeof(vaddr_t));

	return nstack;
}

static void lockdep_print_call_stack(vaddr_t *stack)
{
	vaddr_t *p = NULL;

	EMSG_RAW("Call stack:");
	for (p = stack; p && *p; p++)
		EMSG_RAW(" %#" PRIxPTR, *p);
}

static TEE_Result lockdep_add_edge(struct lockdep_node *from,
				   struct lockdep_node *to,
				   vaddr_t *call_stack_from,
				   vaddr_t *call_stack_to,
				   uintptr_t thread_id)
{
	struct lockdep_edge *edge = NULL;

	STAILQ_FOREACH(edge, &from->edges, link)
		if (edge->to == to)
			return TEE_SUCCESS;

	edge = calloc(1, sizeof(*edge));
	if (!edge)
		return TEE_ERROR_OUT_OF_MEMORY;
	edge->to = to;
	edge->call_stack_from = dup_call_stack(call_stack_from);
	edge->call_stack_to = dup_call_stack(call_stack_to);
	edge->thread_id = thread_id;
	STAILQ_INSERT_TAIL(&from->edges, edge, link);

	return TEE_SUCCESS;
}

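/*
 * Queue element for the breadth-first search below: a graph node plus the
 * path (sequence of lock IDs) that leads to it from the start node
 */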
struct lockdep_bfs {
	struct lockdep_node *node;
	uintptr_t *path;
	int pathlen;
	TAILQ_ENTRY(lockdep_bfs) link;
};

TAILQ_HEAD(lockdep_bfs_head, lockdep_bfs);

static void lockdep_bfs_queue_delete(struct lockdep_bfs_head *queue)
{
	struct lockdep_bfs *cur = NULL;
	struct lockdep_bfs *next = NULL;

	TAILQ_FOREACH_SAFE(cur, queue, link, next) {
		TAILQ_REMOVE(queue, cur, link);
		free(cur->path);
		free(cur);
	}
}

/*
 * Find the shortest cycle in the lock graph that contains @node.
 * This function performs an iterative breadth-first search starting from
 * @node, and stops when it reaches @node again. Each queue element tracks the
 * path from the start node. Returns a heap-allocated, zero-terminated array
 * of lock IDs (to be freed by the caller), or NULL on error.
 */
static uintptr_t *lockdep_graph_get_shortest_cycle(struct lockdep_node *node)
{
	struct lockdep_bfs_head queue;
	struct lockdep_bfs *qe = NULL;
	uintptr_t *ret = NULL;

	TAILQ_INIT(&queue);
	node->flags |= LOCKDEP_NODE_BFS_VISITED;

	qe = calloc(1, sizeof(*qe));
	if (!qe)
		goto out;
	qe->node = node;
	qe->path = malloc(sizeof(uintptr_t));
	if (!qe->path)
		goto out;
	qe->path[0] = node->lock_id;
	qe->pathlen = 1;
	TAILQ_INSERT_TAIL(&queue, qe, link);

	while (!TAILQ_EMPTY(&queue)) {
		qe = TAILQ_FIRST(&queue);

		struct lockdep_node *n = qe->node;

		TAILQ_REMOVE(&queue, qe, link);

		struct lockdep_edge *e = NULL;

		STAILQ_FOREACH(e, &n->edges, link) {
			if (e->to->lock_id == node->lock_id) {
				uintptr_t *tmp = NULL;
				size_t nlen = qe->pathlen + 1;

				/*
				 * Cycle found. Terminate cycle path with zero
				 * and return it.
				 */
				tmp = realloc(qe->path,
					      nlen * sizeof(uintptr_t));
				if (!tmp) {
					EMSG("Out of memory");
					free(qe->path);
					ret = NULL;
					goto out;
				}
				qe->path = tmp;
				qe->path[nlen - 1] = 0;
				ret = qe->path;
				goto out;
			}

			if (!(e->to->flags & LOCKDEP_NODE_BFS_VISITED)) {
				e->to->flags |= LOCKDEP_NODE_BFS_VISITED;

				size_t nlen = qe->pathlen + 1;
				struct lockdep_bfs *nqe = calloc(1,
								 sizeof(*nqe));

				if (!nqe) {
					free(qe->path);
					goto out;
				}
				nqe->node = e->to;
				nqe->path = malloc(nlen * sizeof(uintptr_t));
				if (!nqe->path) {
					free(nqe);
					free(qe->path);
					goto out;
				}
				nqe->pathlen = nlen;
				memcpy(nqe->path, qe->path,
				       qe->pathlen * sizeof(uintptr_t));
				nqe->path[nlen - 1] = e->to->lock_id;
				TAILQ_INSERT_TAIL(&queue, nqe, link);
			}
		}
		free(qe->path);
		free(qe);
		qe = NULL;
	}

out:
	free(qe);
	lockdep_bfs_queue_delete(&queue);
	return ret;
}
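
/*
 * Worked example (hypothetical lock IDs): with edges 1 -> 2, 2 -> 3 and
 * 3 -> 1 in the graph, calling lockdep_graph_get_shortest_cycle() on the
 * node for lock 1 returns the array { 1, 2, 3, 0 }: the BFS dequeues the
 * path { 1 }, then { 1, 2 }, and finally finds the edge 3 -> 1 closing the
 * cycle from the path { 1, 2, 3 }.
 */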

/*
 * Visit @node as part of a depth-first topological sort. A node carrying the
 * temporary mark is currently on the DFS stack, so reaching it again means
 * the graph has a cycle (it is not a DAG).
 */
static TEE_Result lockdep_visit(struct lockdep_node *node)
{
	if (node->flags & LOCKDEP_NODE_PERM_MARK)
		return TEE_SUCCESS;

	if (node->flags & LOCKDEP_NODE_TEMP_MARK)
		return TEE_ERROR_BAD_STATE;	/* Not a DAG! */

	node->flags |= LOCKDEP_NODE_TEMP_MARK;

	struct lockdep_edge *e = NULL;

	STAILQ_FOREACH(e, &node->edges, link) {
		TEE_Result res = lockdep_visit(e->to);

		if (res)
			return res;
	}

	node->flags |= LOCKDEP_NODE_PERM_MARK;
	return TEE_SUCCESS;
}
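
/*
 * Example (hypothetical IDs): with edges 1 -> 2 and 2 -> 1, visiting node 1
 * sets its temporary mark, recurses to node 2, and from there reaches node 1
 * again while it still carries the temporary mark, so TEE_ERROR_BAD_STATE
 * propagates back up and the sort fails.
 */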

/*
 * Topological sort of the lock graph: succeeds if and only if the graph is
 * acyclic, that is, if no lock order inversion has been recorded.
 */
static TEE_Result lockdep_graph_sort(struct lockdep_node_head *graph)
{
	struct lockdep_node *node = NULL;

	TAILQ_FOREACH(node, graph, link) {
		if (!node->flags) {
			/* Unmarked node */
			TEE_Result res = lockdep_visit(node);

			if (res)
				return res;
		}
	}

	TAILQ_FOREACH(node, graph, link)
		node->flags = 0;

	return TEE_SUCCESS;
}

static struct lockdep_edge *lockdep_find_edge(struct lockdep_node_head *graph,
					      uintptr_t from, uintptr_t to)
{
	struct lockdep_node *node = NULL;
	struct lockdep_edge *edge = NULL;

	TAILQ_FOREACH(node, graph, link)
		if (node->lock_id == from)
			STAILQ_FOREACH(edge, &node->edges, link)
				if (edge->to->lock_id == to)
					return edge;
	return NULL;
}

static void lockdep_print_edge_info(uintptr_t from __maybe_unused,
				    struct lockdep_edge *edge)
{
	uintptr_t __maybe_unused to = edge->to->lock_id;

	EMSG_RAW("-> Thread %#" PRIxPTR " acquired lock %#" PRIxPTR " at:",
		 edge->thread_id, to);
	lockdep_print_call_stack(edge->call_stack_to);
	EMSG_RAW("...while holding lock %#" PRIxPTR " acquired at:",
		 from);
	lockdep_print_call_stack(edge->call_stack_from);
}

/*
 * Find a cycle containing @node in the lock graph, then print full debug
 * information about each edge in the cycle (thread that acquired the locks
 * and call stacks).
 */
static void lockdep_print_cycle_info(struct lockdep_node_head *graph,
				     struct lockdep_node *node)
{
	struct lockdep_edge *edge = NULL;
	uintptr_t *cycle = NULL;
	uintptr_t *p = NULL;
	uintptr_t from = 0;
	uintptr_t to = 0;

	cycle = lockdep_graph_get_shortest_cycle(node);
	assert(cycle && cycle[0]);
	EMSG_RAW("-> Shortest cycle:");
	for (p = cycle; *p; p++)
		EMSG_RAW(" Lock %#" PRIxPTR, *p);
	for (p = cycle; ; p++) {
		if (!*p) {
			/* End of array: print the edge closing the cycle */
			assert(p != cycle);
			from = to;
			to = cycle[0];
			edge = lockdep_find_edge(graph, from, to);
			lockdep_print_edge_info(from, edge);
			break;
		}
		if (p != cycle)
			from = to;
		to = *p;
		if (p != cycle) {
			edge = lockdep_find_edge(graph, from, to);
			lockdep_print_edge_info(from, edge);
		}
	}
	free(cycle);
}

TEE_Result __lockdep_lock_acquire(struct lockdep_node_head *graph,
				  struct lockdep_lock_head *owned,
				  uintptr_t id)
{
	struct lockdep_node *node = lockdep_add_to_graph(graph, id);

	if (!node)
		return TEE_ERROR_OUT_OF_MEMORY;

	struct lockdep_lock *lock = NULL;
	vaddr_t *acq_stack = unw_get_kernel_stack();

	/*
	 * The thread is about to acquire @id while possibly holding other
	 * locks: record an edge to @id from each of them.
	 */
	TAILQ_FOREACH(lock, owned, link) {
		TEE_Result res = lockdep_add_edge(lock->node, node,
						  lock->call_stack,
						  acq_stack,
						  (uintptr_t)owned);

		if (res) {
			free(acq_stack);
			return res;
		}
	}

	TEE_Result res = lockdep_graph_sort(graph);

	if (res) {
		EMSG_RAW("Potential deadlock detected!");
		EMSG_RAW("When trying to acquire lock %#" PRIxPTR, id);
		lockdep_print_cycle_info(graph, node);
		free(acq_stack);
		return res;
	}

	lock = calloc(1, sizeof(*lock));
	if (!lock) {
		free(acq_stack);
		return TEE_ERROR_OUT_OF_MEMORY;
	}

	lock->node = node;
	lock->call_stack = acq_stack;
	TAILQ_INSERT_TAIL(owned, lock, link);

	return TEE_SUCCESS;
}
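
/*
 * Usage sketch (hypothetical caller, not part of this file): a lock
 * implementation would typically keep one global lock graph plus a
 * per-thread list of owned locks, identify each lock by its address, and
 * wrap the real lock operations like this, treating any reported error as
 * fatal:
 *
 *	if (__lockdep_lock_acquire(&graph, &thread_owned, (uintptr_t)lock))
 *		panic();
 *	real_lock(lock);
 *	...
 *	real_unlock(lock);
 *	if (__lockdep_lock_release(&thread_owned, (uintptr_t)lock))
 *		panic();
 */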

/*
 * Call this when it is known that the thread has been able to acquire the
 * lock. Similar to __lockdep_lock_acquire(), but since the operation is
 * non-blocking, no dependencies on currently owned locks are created.
 */
TEE_Result __lockdep_lock_tryacquire(struct lockdep_node_head *graph,
				     struct lockdep_lock_head *owned,
				     uintptr_t id)
{
	struct lockdep_node *node = lockdep_add_to_graph(graph, id);

	if (!node)
		return TEE_ERROR_OUT_OF_MEMORY;

	struct lockdep_lock *lock = NULL;
	vaddr_t *acq_stack = unw_get_kernel_stack();

	lock = calloc(1, sizeof(*lock));
	if (!lock) {
		free(acq_stack);
		return TEE_ERROR_OUT_OF_MEMORY;
	}

	lock->node = node;
	lock->call_stack = acq_stack;
	TAILQ_INSERT_TAIL(owned, lock, link);

	return TEE_SUCCESS;
}
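
/*
 * Sketch of a hypothetical trylock wrapper: the dependency check is skipped
 * because a failed non-blocking attempt cannot deadlock.
 *
 *	if (real_trylock(lock)) {
 *		if (__lockdep_lock_tryacquire(&graph, &thread_owned,
 *					      (uintptr_t)lock))
 *			panic();
 *	}
 */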

TEE_Result __lockdep_lock_release(struct lockdep_lock_head *owned, uintptr_t id)
{
	struct lockdep_lock *lock = NULL;

	TAILQ_FOREACH_REVERSE(lock, owned, lockdep_lock_head, link) {
		if (lock->node->lock_id == id) {
			TAILQ_REMOVE(owned, lock, link);
			free(lock->call_stack);
			free(lock);
			return TEE_SUCCESS;
		}
	}

	EMSG_RAW("Thread %p does not own lock %#" PRIxPTR, (void *)owned, id);
	return TEE_ERROR_ITEM_NOT_FOUND;
}

static void lockdep_free_edge(struct lockdep_edge *edge)
{
	free(edge->call_stack_from);
	free(edge->call_stack_to);
	free(edge);
}

static void lockdep_node_delete(struct lockdep_node *node)
{
	struct lockdep_edge *edge = NULL;
	struct lockdep_edge *next = NULL;

	STAILQ_FOREACH_SAFE(edge, &node->edges, link, next)
		lockdep_free_edge(edge);

	free(node);
}

void lockdep_graph_delete(struct lockdep_node_head *graph)
{
	struct lockdep_node *node = NULL;
	struct lockdep_node *next = NULL;

	TAILQ_FOREACH_SAFE(node, graph, link, next) {
		TAILQ_REMOVE(graph, node, link);
		lockdep_node_delete(node);
	}
}

void lockdep_queue_delete(struct lockdep_lock_head *owned)
{
	struct lockdep_lock *lock = NULL;
	struct lockdep_lock *next = NULL;

	TAILQ_FOREACH_SAFE(lock, owned, link, next) {
		TAILQ_REMOVE(owned, lock, link);
		free(lock->call_stack);
		free(lock);
	}
}

static void lockdep_node_destroy(struct lockdep_node_head *graph,
				 struct lockdep_node *node)
{
	struct lockdep_edge *edge = NULL;
	struct lockdep_edge *next = NULL;
	struct lockdep_node *from = NULL;

	TAILQ_REMOVE(graph, node, link);

	/*
	 * Loop over all nodes in the graph to remove all edges with the
	 * node to remove in the "to" field.
	 */
	TAILQ_FOREACH(from, graph, link) {
		/* Remove matching edges at the head of the list... */
		edge = STAILQ_FIRST(&from->edges);
		while (edge && edge->to == node) {
			STAILQ_REMOVE_HEAD(&from->edges, link);
			lockdep_free_edge(edge);
			edge = STAILQ_FIRST(&from->edges);
		}

		if (!edge)
			continue;

		/* ...then anywhere else in the list */
		next = STAILQ_NEXT(edge, link);
		while (next) {
			if (next->to == node) {
				STAILQ_REMOVE_AFTER(&from->edges, edge, link);
				lockdep_free_edge(next);
			} else {
				edge = next;
			}
			next = STAILQ_NEXT(edge, link);
		}
	}

	STAILQ_FOREACH_SAFE(edge, &node->edges, link, next)
		lockdep_free_edge(edge);

	free(node);
}

void lockdep_lock_destroy(struct lockdep_node_head *graph, uintptr_t lock_id)
{
	struct lockdep_node *node = NULL;

	assert(graph);
	TAILQ_FOREACH(node, graph, link) {
		if (node->lock_id == lock_id) {
			lockdep_node_destroy(graph, node);
			break;
		}
	}
}