xref: /optee_os/core/kernel/lockdep.c (revision ed4b5e39c96bd035788ff4f07c87232d934c1264)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2018, Linaro Limited
 */

#include <assert.h>
#include <kernel/lockdep.h>
#include <kernel/unwind.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <util.h>

/* lockdep_node::flags values */
/* Flags used for depth-first topological sorting */
#define LOCKDEP_NODE_TEMP_MARK		BIT(0)
#define LOCKDEP_NODE_PERM_MARK		BIT(1)
/* Flag used during breadth-first search (print shortest cycle) */
#define LOCKDEP_NODE_BFS_VISITED	BIT(2)

/* Find node in graph or add it */
static struct lockdep_node *lockdep_add_to_graph(
				struct lockdep_node_head *graph,
				uintptr_t lock_id)
{
	struct lockdep_node *node = NULL;

	assert(graph);
	TAILQ_FOREACH(node, graph, link)
		if (node->lock_id == lock_id)
			return node;

	node = calloc(1, sizeof(*node));
	if (!node)
		return NULL;

	node->lock_id = lock_id;
	STAILQ_INIT(&node->edges);
	TAILQ_INSERT_TAIL(graph, node, link);

	return node;
}

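/*
 * Call stacks are zero-terminated arrays of return addresses, as returned
 * by unw_get_kernel_stack(). Duplicate one, terminator included, so that
 * each edge can keep its own copy.
 */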
static vaddr_t *dup_call_stack(vaddr_t *stack)
{
	if (!stack)
		return NULL;

	int n = 0;

	while (stack[n])
		n++;

	/* Copy the n entries plus the terminating zero */
	vaddr_t *nstack = malloc((n + 1) * sizeof(vaddr_t));

	if (!nstack)
		return NULL;
	memcpy(nstack, stack, (n + 1) * sizeof(vaddr_t));
	return nstack;
}

static void lockdep_print_call_stack(vaddr_t *stack)
{
	vaddr_t *p = NULL;

	EMSG_RAW("Call stack:");
	for (p = stack; p && *p; p++)
		EMSG_RAW(" %#" PRIxPTR, *p);
}

static TEE_Result lockdep_add_edge(struct lockdep_node *from,
				   struct lockdep_node *to,
				   vaddr_t *call_stack_from,
				   vaddr_t *call_stack_to,
				   uintptr_t thread_id)
{
	struct lockdep_edge *edge = NULL;

	STAILQ_FOREACH(edge, &from->edges, link)
		if (edge->to == to)
			return TEE_SUCCESS;

	edge = calloc(1, sizeof(*edge));
	if (!edge)
		return TEE_ERROR_OUT_OF_MEMORY;
	edge->to = to;
	edge->call_stack_from = dup_call_stack(call_stack_from);
	edge->call_stack_to = dup_call_stack(call_stack_to);
	edge->thread_id = thread_id;
	STAILQ_INSERT_TAIL(&from->edges, edge, link);

	return TEE_SUCCESS;
}

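/*
 * Element of the queue used by the breadth-first search in
 * lockdep_graph_get_shortest_cycle(). @path is the sequence of lock IDs
 * from the BFS start node up to and including @node, @pathlen its number
 * of elements.
 */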
struct lockdep_bfs {
	struct lockdep_node *node;
	uintptr_t *path;
	int pathlen;
	TAILQ_ENTRY(lockdep_bfs) link;
};

TAILQ_HEAD(lockdep_bfs_head, lockdep_bfs);

static void lockdep_bfs_queue_delete(struct lockdep_bfs_head *queue)
{
	struct lockdep_bfs *cur = NULL;
	struct lockdep_bfs *next = NULL;

	TAILQ_FOREACH_SAFE(cur, queue, link, next) {
		TAILQ_REMOVE(queue, cur, link);
		free(cur->path);
		free(cur);
	}
}

/*
 * Return the shortest cycle in the lock graph that contains @node.
 * This function performs an iterative breadth-first search starting from
 * @node and stops when it reaches @node again. Each queue element keeps
 * track of the path from the start node. The result is a zero-terminated,
 * heap-allocated array of lock IDs, e.g. { A, B, 0 } for the cycle
 * A -> B -> A, or NULL on allocation failure.
 */
static uintptr_t *lockdep_graph_get_shortest_cycle(struct lockdep_node *node)
{
	struct lockdep_bfs_head queue;
	struct lockdep_bfs *qe = NULL;
	uintptr_t *ret = NULL;

	TAILQ_INIT(&queue);
	node->flags |= LOCKDEP_NODE_BFS_VISITED;

	qe = calloc(1, sizeof(*qe));
	if (!qe)
		goto out;
	qe->node = node;
	qe->path = malloc(sizeof(uintptr_t));
	if (!qe->path)
		goto out;
	qe->path[0] = node->lock_id;
	qe->pathlen = 1;
	TAILQ_INSERT_TAIL(&queue, qe, link);

	while (!TAILQ_EMPTY(&queue)) {
		qe = TAILQ_FIRST(&queue);

		struct lockdep_node *n = qe->node;

		TAILQ_REMOVE(&queue, qe, link);

		struct lockdep_edge *e = NULL;

		STAILQ_FOREACH(e, &n->edges, link) {
			if (e->to->lock_id == node->lock_id) {
				uintptr_t *tmp = NULL;
				size_t nlen = qe->pathlen + 1;

				/*
				 * Cycle found. Terminate the path with a
				 * zero entry and return it.
				 */
				tmp = realloc(qe->path,
					      nlen * sizeof(uintptr_t));
				if (!tmp) {
					EMSG("Out of memory");
					free(qe->path);
					qe->path = NULL;
					ret = NULL;
					goto out;
				}
				qe->path = tmp;
				qe->path[nlen - 1] = 0;
				ret = qe->path;
				goto out;
			}

			if (!(e->to->flags & LOCKDEP_NODE_BFS_VISITED)) {
				e->to->flags |= LOCKDEP_NODE_BFS_VISITED;

				size_t nlen = qe->pathlen + 1;
				struct lockdep_bfs *nqe = calloc(1,
								 sizeof(*nqe));

				if (!nqe)
					goto out;
				nqe->node = e->to;
				nqe->path = malloc(nlen * sizeof(uintptr_t));
				if (!nqe->path) {
					free(nqe);
					goto out;
				}
				nqe->pathlen = nlen;
				memcpy(nqe->path, qe->path,
				       qe->pathlen * sizeof(uintptr_t));
				nqe->path[nlen - 1] = e->to->lock_id;
				TAILQ_INSERT_TAIL(&queue, nqe, link);
			}
		}
		free(qe->path);
		free(qe);
		qe = NULL;
	}

out:
	/* On success @ret aliases qe->path, which must stay allocated */
	if (qe && qe->path != ret)
		free(qe->path);
	free(qe);
	lockdep_bfs_queue_delete(&queue);
	return ret;
}

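/*
 * Recursive step of the depth-first topological sort. A node with
 * LOCKDEP_NODE_TEMP_MARK is on the DFS path currently being explored, one
 * with LOCKDEP_NODE_PERM_MARK has been fully explored. Reaching a
 * temporarily marked node therefore means the path has looped back into
 * itself: with edges A -> B and B -> A, visiting A marks it, reaches B,
 * then reaches A again while its temporary mark is still set, so
 * TEE_ERROR_BAD_STATE is returned.
 */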
static TEE_Result lockdep_visit(struct lockdep_node *node)
{
	if (node->flags & LOCKDEP_NODE_PERM_MARK)
		return TEE_SUCCESS;

	if (node->flags & LOCKDEP_NODE_TEMP_MARK)
		return TEE_ERROR_BAD_STATE;	/* Not a DAG! */

	node->flags |= LOCKDEP_NODE_TEMP_MARK;

	struct lockdep_edge *e = NULL;

	STAILQ_FOREACH(e, &node->edges, link) {
		TEE_Result res = lockdep_visit(e->to);

		if (res)
			return res;
	}

	node->flags |= LOCKDEP_NODE_PERM_MARK;
	return TEE_SUCCESS;
}

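/*
 * Attempt a topological sort of @graph. The sort order itself is
 * discarded; what matters is whether it exists at all, since a graph can
 * be topologically sorted if and only if it is acyclic. All node flags
 * are cleared on success so that the next call starts from a clean state.
 */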
static TEE_Result lockdep_graph_sort(struct lockdep_node_head *graph)
{
	struct lockdep_node *node = NULL;

	TAILQ_FOREACH(node, graph, link) {
		if (!node->flags) {
			/* Unmarked node */
			TEE_Result res = lockdep_visit(node);

			if (res)
				return res;
		}
	}

	TAILQ_FOREACH(node, graph, link)
		node->flags = 0;

	return TEE_SUCCESS;
}

static struct lockdep_edge *lockdep_find_edge(struct lockdep_node_head *graph,
					      uintptr_t from, uintptr_t to)
{
	struct lockdep_node *node = NULL;
	struct lockdep_edge *edge = NULL;

	TAILQ_FOREACH(node, graph, link)
		if (node->lock_id == from)
			STAILQ_FOREACH(edge, &node->edges, link)
				if (edge->to->lock_id == to)
					return edge;
	return NULL;
}

static void lockdep_print_edge_info(uintptr_t from __maybe_unused,
				    struct lockdep_edge *edge)
{
	uintptr_t __maybe_unused to = edge->to->lock_id;

	EMSG_RAW("-> Thread %#" PRIxPTR " acquired lock %#" PRIxPTR " at:",
		 edge->thread_id, to);
	lockdep_print_call_stack(edge->call_stack_to);
	EMSG_RAW("...while holding lock %#" PRIxPTR " acquired at:",
		 from);
	lockdep_print_call_stack(edge->call_stack_from);
}

/*
 * Find a cycle containing @node in the lock graph, then print full debug
 * information about each edge (thread that acquired the locks and call
 * stacks). Edges are printed for each consecutive pair of locks in the
 * cycle, including the final edge back to the first lock.
 */
static void lockdep_print_cycle_info(struct lockdep_node_head *graph,
				     struct lockdep_node *node)
{
	struct lockdep_edge *edge = NULL;
	uintptr_t *cycle = NULL;
	uintptr_t *p = NULL;
	uintptr_t from = 0;
	uintptr_t to = 0;

	cycle = lockdep_graph_get_shortest_cycle(node);
	assert(cycle && cycle[0]);
	EMSG_RAW("-> Shortest cycle:");
	for (p = cycle; *p; p++)
		EMSG_RAW(" Lock %#" PRIxPTR, *p);
	for (p = cycle; ; p++) {
		if (!*p) {
			/*
			 * End of the path: close the cycle with the edge
			 * back to the first lock.
			 */
			assert(p != cycle);
			from = to;
			to = cycle[0];
			edge = lockdep_find_edge(graph, from, to);
			lockdep_print_edge_info(from, edge);
			break;
		}
		if (p != cycle)
			from = to;
		to = *p;
		if (p != cycle) {
			edge = lockdep_find_edge(graph, from, to);
			lockdep_print_edge_info(from, edge);
		}
	}
	free(cycle);
}

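/*
 * Minimal usage sketch (hypothetical caller, for illustration only): a
 * global lock-order graph is shared by all threads while each thread keeps
 * a list of the locks it currently owns, and a lock's address serves as
 * its ID:
 *
 *	static struct lockdep_node_head graph =
 *		TAILQ_HEAD_INITIALIZER(graph);
 *
 *	void my_lock_write(struct my_lock *l,
 *			   struct lockdep_lock_head *owned)
 *	{
 *		if (__lockdep_lock_acquire(&graph, owned, (uintptr_t)l))
 *			panic();
 *		...actually take the lock...
 *	}
 *
 * 'my_lock_write' and 'struct my_lock' are made-up names; real callers go
 * through the declarations in <kernel/lockdep.h>.
 */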
TEE_Result __lockdep_lock_acquire(struct lockdep_node_head *graph,
				  struct lockdep_lock_head *owned,
				  uintptr_t id)
{
	struct lockdep_node *node = lockdep_add_to_graph(graph, id);

	if (!node)
		return TEE_ERROR_OUT_OF_MEMORY;

	struct lockdep_lock *lock = NULL;
	vaddr_t *acq_stack = unw_get_kernel_stack();

	TAILQ_FOREACH(lock, owned, link) {
		TEE_Result res = lockdep_add_edge(lock->node, node,
						  lock->call_stack,
						  acq_stack,
						  (uintptr_t)owned);

		if (res) {
			free(acq_stack);
			return res;
		}
	}

	TEE_Result res = lockdep_graph_sort(graph);

	if (res) {
		EMSG_RAW("Potential deadlock detected!");
		EMSG_RAW("When trying to acquire lock %#" PRIxPTR, id);
		lockdep_print_cycle_info(graph, node);
		free(acq_stack);
		return res;
	}

	lock = calloc(1, sizeof(*lock));
	if (!lock) {
		free(acq_stack);
		return TEE_ERROR_OUT_OF_MEMORY;
	}

	lock->node = node;
	lock->call_stack = acq_stack;
	TAILQ_INSERT_TAIL(owned, lock, link);

	return TEE_SUCCESS;
}

TEE_Result __lockdep_lock_release(struct lockdep_lock_head *owned, uintptr_t id)
{
	struct lockdep_lock *lock = NULL;

	TAILQ_FOREACH_REVERSE(lock, owned, lockdep_lock_head, link) {
		if (lock->node->lock_id == id) {
			TAILQ_REMOVE(owned, lock, link);
			free(lock->call_stack);
			free(lock);
			return TEE_SUCCESS;
		}
	}

	EMSG_RAW("Thread %p does not own lock %#" PRIxPTR, (void *)owned, id);
	return TEE_ERROR_ITEM_NOT_FOUND;
}

static void lockdep_node_delete(struct lockdep_node *node)
{
	struct lockdep_edge *edge = NULL;
	struct lockdep_edge *next = NULL;

	STAILQ_FOREACH_SAFE(edge, &node->edges, link, next) {
		free(edge->call_stack_from);
		free(edge->call_stack_to);
		free(edge);
	}
	free(node);
}

void lockdep_graph_delete(struct lockdep_node_head *graph)
{
	struct lockdep_node *node = NULL;
	struct lockdep_node *next = NULL;

	TAILQ_FOREACH_SAFE(node, graph, link, next) {
		TAILQ_REMOVE(graph, node, link);
		lockdep_node_delete(node);
	}
}

void lockdep_queue_delete(struct lockdep_lock_head *owned)
{
	struct lockdep_lock *lock = NULL;
	struct lockdep_lock *next = NULL;

	TAILQ_FOREACH_SAFE(lock, owned, link, next) {
		TAILQ_REMOVE(owned, lock, link);
		/* The call stack was allocated in __lockdep_lock_acquire() */
		free(lock->call_stack);
		free(lock);
	}
}
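
/*
 * Teardown sketch (hypothetical caller, for illustration only): when a
 * thread exits it discards its owned-lock list, and the global graph is
 * freed when lockdep state as a whole is torn down:
 *
 *	lockdep_queue_delete(&thread->owned_locks);
 *	lockdep_graph_delete(&graph);
 *
 * 'thread->owned_locks' and 'graph' are illustrative names, not
 * identifiers defined in this file.
 */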
406