xref: /optee_os/core/kernel/lockdep.c (revision 2288b0710bbb11dd45a4bdcd87eacae5d1851c2e)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2018, Linaro Limited
 */

#include <assert.h>
#include <config.h>
#include <kernel/lockdep.h>
#include <kernel/unwind.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <util.h>

/* lockdep_node::flags values */
/* Flags used for depth-first topological sorting */
#define LOCKDEP_NODE_TEMP_MARK		BIT(0)
#define LOCKDEP_NODE_PERM_MARK		BIT(1)
/* Flag used during breadth-first search (print shortest cycle) */
#define LOCKDEP_NODE_BFS_VISITED	BIT(2)

/* Find node in graph or add it */
static struct lockdep_node *lockdep_add_to_graph(
				struct lockdep_node_head *graph,
				uintptr_t lock_id)
{
	struct lockdep_node *node = NULL;

	assert(graph);
	TAILQ_FOREACH(node, graph, link)
		if (node->lock_id == lock_id)
			return node;

	node = calloc(1, sizeof(*node));
	if (!node)
		return NULL;

	node->lock_id = lock_id;
	STAILQ_INIT(&node->edges);
	TAILQ_INSERT_TAIL(graph, node, link);

	return node;
}

static vaddr_t *dup_call_stack(vaddr_t *stack)
{
	vaddr_t *nstack = NULL;
	int n = 0;

	if (!stack)
		return NULL;

	while (stack[n])
		n++;

	nstack = malloc((n + 1) * sizeof(vaddr_t));
	if (!nstack)
		return NULL;

	memcpy(nstack, stack, (n + 1) * sizeof(vaddr_t));

	return nstack;
}
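
/*
 * Note: call stacks are handled as zero-terminated arrays of return
 * addresses (vaddr_t), as produced by unw_get_kernel_stack() when
 * CFG_LOCKDEP_RECORD_STACK is enabled. A hypothetical stack
 * { 0x1000, 0x2000, 0x0 } would therefore be duplicated by dup_call_stack()
 * into a three-element allocation, including the terminating zero.
 */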

static void lockdep_print_call_stack(vaddr_t *stack)
{
	vaddr_t *p = NULL;

	if (!IS_ENABLED(CFG_LOCKDEP_RECORD_STACK))
		return;

	EMSG_RAW("Call stack:");
	for (p = stack; p && *p; p++)
		EMSG_RAW(" %#" PRIxPTR, *p);
}

static TEE_Result lockdep_add_edge(struct lockdep_node *from,
				   struct lockdep_node *to,
				   vaddr_t *call_stack_from,
				   vaddr_t *call_stack_to,
				   uintptr_t thread_id)
{
	struct lockdep_edge *edge = NULL;

	STAILQ_FOREACH(edge, &from->edges, link)
		if (edge->to == to)
			return TEE_SUCCESS;

	edge = calloc(1, sizeof(*edge));
	if (!edge)
		return TEE_ERROR_OUT_OF_MEMORY;
	edge->to = to;
	edge->call_stack_from = dup_call_stack(call_stack_from);
	edge->call_stack_to = dup_call_stack(call_stack_to);
	edge->thread_id = thread_id;
	STAILQ_INSERT_TAIL(&from->edges, edge, link);

	return TEE_SUCCESS;
}
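
/*
 * An edge from lock A to lock B records that some thread acquired B while
 * already holding A (the edge also stores the owning thread's lock queue
 * pointer as thread_id and, optionally, the two call stacks). For example,
 * with two hypothetical locks a and b, the sequence
 *   lock(a); lock(b);
 * on one thread creates the edge a -> b; if another thread later runs
 *   lock(b); lock(a);
 * the edge b -> a is added and the graph is no longer acyclic, which is the
 * lock-order inversion this checker reports.
 */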

struct lockdep_bfs {
	struct lockdep_node *node;
	uintptr_t *path;
	int pathlen;
	TAILQ_ENTRY(lockdep_bfs) link;
};

TAILQ_HEAD(lockdep_bfs_head, lockdep_bfs);

static void lockdep_bfs_queue_delete(struct lockdep_bfs_head *queue)
{
	struct lockdep_bfs *cur = NULL;
	struct lockdep_bfs *next = NULL;

	TAILQ_FOREACH_SAFE(cur, queue, link, next) {
		TAILQ_REMOVE(queue, cur, link);
		free(cur->path);
		free(cur);
	}
}

/*
 * Find the shortest cycle containing @node and return it as a
 * zero-terminated array of lock IDs (owned by the caller), or NULL on
 * failure. The function performs an iterative breadth-first search starting
 * from @node and stops when it reaches @node again. Each queue element
 * tracks the path from the start node.
 */
static uintptr_t *lockdep_graph_get_shortest_cycle(struct lockdep_node *node)
{
	struct lockdep_bfs_head queue;
	struct lockdep_bfs *qe = NULL;
	uintptr_t *ret = NULL;

	TAILQ_INIT(&queue);
	node->flags |= LOCKDEP_NODE_BFS_VISITED;

	qe = calloc(1, sizeof(*qe));
	if (!qe)
		goto out;
	qe->node = node;
	qe->path = malloc(sizeof(uintptr_t));
	if (!qe->path)
		goto out;
	qe->path[0] = node->lock_id;
	qe->pathlen = 1;
	TAILQ_INSERT_TAIL(&queue, qe, link);

	while (!TAILQ_EMPTY(&queue)) {
		qe = TAILQ_FIRST(&queue);

		struct lockdep_node *n = qe->node;

		TAILQ_REMOVE(&queue, qe, link);

		struct lockdep_edge *e = NULL;

		STAILQ_FOREACH(e, &n->edges, link) {
			if (e->to->lock_id == node->lock_id) {
				uintptr_t *tmp = NULL;
				size_t nlen = qe->pathlen + 1;

				/*
				 * Cycle found. Terminate the cycle path with
				 * a zero entry and return it.
				 */
				tmp = realloc(qe->path,
					      nlen * sizeof(uintptr_t));
				if (!tmp) {
					EMSG("Out of memory");
					free(qe->path);
					ret = NULL;
					goto out;
				}
				qe->path = tmp;
				qe->path[nlen - 1] = 0;
				ret = qe->path;
				goto out;
			}

			if (!(e->to->flags & LOCKDEP_NODE_BFS_VISITED)) {
				e->to->flags |= LOCKDEP_NODE_BFS_VISITED;

				size_t nlen = qe->pathlen + 1;
				struct lockdep_bfs *nqe = calloc(1,
								 sizeof(*nqe));

				if (!nqe)
					goto out;
				nqe->node = e->to;
				nqe->path = malloc(nlen * sizeof(uintptr_t));
				if (!nqe->path)
					goto out;
				nqe->pathlen = nlen;
				memcpy(nqe->path, qe->path,
				       qe->pathlen * sizeof(uintptr_t));
				nqe->path[nlen - 1] = e->to->lock_id;
				TAILQ_INSERT_TAIL(&queue, nqe, link);
			}
		}
		free(qe->path);
		free(qe);
		qe = NULL;
	}

out:
	free(qe);
	lockdep_bfs_queue_delete(&queue);
	return ret;
}
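
/*
 * Illustrative note (hypothetical lock IDs): for a dependency cycle
 * A -> B -> C -> A, lockdep_graph_get_shortest_cycle(A) returns a
 * heap-allocated array { A, B, C, 0 }, i.e. the BFS path from A up to the
 * node whose edge closes the cycle, terminated by a zero entry.
 * lockdep_print_cycle_info() below relies on this layout when it walks the
 * array and prints one edge per consecutive pair, plus the closing edge
 * back to the first entry.
 */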

static TEE_Result lockdep_visit(struct lockdep_node *node)
{
	if (node->flags & LOCKDEP_NODE_PERM_MARK)
		return TEE_SUCCESS;

	if (node->flags & LOCKDEP_NODE_TEMP_MARK)
		return TEE_ERROR_BAD_STATE;	/* Not a DAG! */

	node->flags |= LOCKDEP_NODE_TEMP_MARK;

	struct lockdep_edge *e = NULL;

	STAILQ_FOREACH(e, &node->edges, link) {
		TEE_Result res = lockdep_visit(e->to);

		if (res)
			return res;
	}

	node->flags |= LOCKDEP_NODE_PERM_MARK;
	return TEE_SUCCESS;
}

static TEE_Result lockdep_graph_sort(struct lockdep_node_head *graph)
{
	struct lockdep_node *node = NULL;

	TAILQ_FOREACH(node, graph, link) {
		if (!node->flags) {
			/* Unmarked node */
			TEE_Result res = lockdep_visit(node);

			if (res)
				return res;
		}
	}

	TAILQ_FOREACH(node, graph, link)
		node->flags = 0;

	return TEE_SUCCESS;
}
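
/*
 * lockdep_visit()/lockdep_graph_sort() implement the classic depth-first
 * topological sort: LOCKDEP_NODE_TEMP_MARK flags a node on the current DFS
 * path, LOCKDEP_NODE_PERM_MARK flags a node whose descendants are fully
 * processed. Reaching a TEMP_MARK node again means the graph has a cycle,
 * so it is not a DAG and TEE_ERROR_BAD_STATE is returned. Sketch with
 * hypothetical locks:
 *
 *   a -> b -> c	lockdep_graph_sort() == TEE_SUCCESS (a DAG)
 *   a -> b -> a	lockdep_visit(a) reaches a with TEMP_MARK set and
 *			returns TEE_ERROR_BAD_STATE
 *
 * On success the marks are cleared so the next call starts fresh.
 */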

static struct lockdep_edge *lockdep_find_edge(struct lockdep_node_head *graph,
					      uintptr_t from, uintptr_t to)
{
	struct lockdep_node *node = NULL;
	struct lockdep_edge *edge = NULL;

	TAILQ_FOREACH(node, graph, link)
		if (node->lock_id == from)
			STAILQ_FOREACH(edge, &node->edges, link)
				if (edge->to->lock_id == to)
					return edge;
	return NULL;
}

static void lockdep_print_edge_info(uintptr_t from __maybe_unused,
				    struct lockdep_edge *edge)
{
	uintptr_t __maybe_unused to = edge->to->lock_id;
	const char __maybe_unused *at_msg = "";
	const char __maybe_unused *acq_msg = "";

	if (IS_ENABLED(CFG_LOCKDEP_RECORD_STACK)) {
		at_msg = " at:";
		acq_msg = " acquired at:";
	}

	EMSG_RAW("-> Thread %#" PRIxPTR " acquired lock %#" PRIxPTR "%s",
		 edge->thread_id, to, at_msg);
	lockdep_print_call_stack(edge->call_stack_to);
	EMSG_RAW("...while holding lock %#" PRIxPTR "%s",
		 from, acq_msg);
	lockdep_print_call_stack(edge->call_stack_from);
}

/*
 * Find a cycle containing @node in the lock graph, then print full debug
 * information about each edge in the cycle (the thread that acquired the
 * locks and, if recorded, the call stacks).
 */
static void lockdep_print_cycle_info(struct lockdep_node_head *graph,
				     struct lockdep_node *node)
{
	struct lockdep_edge *edge = NULL;
	uintptr_t *cycle = NULL;
	uintptr_t *p = NULL;
	uintptr_t from = 0;
	uintptr_t to = 0;

	cycle = lockdep_graph_get_shortest_cycle(node);
	assert(cycle && cycle[0]);
	EMSG_RAW("-> Shortest cycle:");
	for (p = cycle; *p; p++)
		EMSG_RAW(" Lock %#" PRIxPTR, *p);
	for (p = cycle; ; p++) {
		if (!*p) {
			assert(p != cycle);
			from = to;
			to = cycle[0];
			edge = lockdep_find_edge(graph, from, to);
			lockdep_print_edge_info(from, edge);
			break;
		}
		if (p != cycle)
			from = to;
		to = *p;
		if (p != cycle) {
			edge = lockdep_find_edge(graph, from, to);
			lockdep_print_edge_info(from, edge);
		}
	}
	free(cycle);
}

static vaddr_t *lockdep_get_kernel_stack(void)
{
	if (IS_ENABLED(CFG_LOCKDEP_RECORD_STACK))
		return unw_get_kernel_stack();

	return NULL;
}

TEE_Result __lockdep_lock_acquire(struct lockdep_node_head *graph,
				  struct lockdep_lock_head *owned,
				  uintptr_t id)
{
	struct lockdep_node *node = lockdep_add_to_graph(graph, id);

	if (!node)
		return TEE_ERROR_OUT_OF_MEMORY;

	struct lockdep_lock *lock = NULL;
	vaddr_t *acq_stack = lockdep_get_kernel_stack();

	TAILQ_FOREACH(lock, owned, link) {
		TEE_Result res = lockdep_add_edge(lock->node, node,
						  lock->call_stack,
						  acq_stack,
						  (uintptr_t)owned);

		if (res)
			return res;
	}

	TEE_Result res = lockdep_graph_sort(graph);

	if (res) {
		EMSG_RAW("Potential deadlock detected!");
		EMSG_RAW("When trying to acquire lock %#" PRIxPTR, id);
		lockdep_print_cycle_info(graph, node);
		return res;
	}

	lock = calloc(1, sizeof(*lock));
	if (!lock)
		return TEE_ERROR_OUT_OF_MEMORY;

	lock->node = node;
	lock->call_stack = acq_stack;
	TAILQ_INSERT_TAIL(owned, lock, link);

	return TEE_SUCCESS;
}
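
/*
 * Usage sketch (illustrative, not a complete lock implementation): a lock
 * primitive instrumented with this checker keeps one lockdep_node_head
 * graph shared by all locks of a given kind, one lockdep_lock_head queue
 * per thread listing the locks it currently owns, and a per-lock identifier
 * such as the lock's address. Assuming hypothetical helpers get_graph()
 * and get_owned_queue():
 *
 *	static void my_lock(struct my_lock *l)
 *	{
 *		if (__lockdep_lock_acquire(get_graph(), get_owned_queue(),
 *					   (uintptr_t)l))
 *			panic();
 *		... actually take the lock ...
 *	}
 *
 *	static void my_unlock(struct my_lock *l)
 *	{
 *		... actually release the lock ...
 *		__lockdep_lock_release(get_owned_queue(), (uintptr_t)l);
 *	}
 *
 * Real callers go through the declarations in <kernel/lockdep.h>; the
 * helper names above are placeholders.
 */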

/*
 * Call this when the thread has successfully acquired the lock. Similar to
 * __lockdep_lock_acquire(), but since the operation is non-blocking, no
 * dependency on the currently owned locks is created.
 */
TEE_Result __lockdep_lock_tryacquire(struct lockdep_node_head *graph,
				     struct lockdep_lock_head *owned,
				     uintptr_t id)
{
	struct lockdep_node *node = lockdep_add_to_graph(graph, id);

	if (!node)
		return TEE_ERROR_OUT_OF_MEMORY;

	struct lockdep_lock *lock = NULL;
	vaddr_t *acq_stack = lockdep_get_kernel_stack();

	lock = calloc(1, sizeof(*lock));
	if (!lock)
		return TEE_ERROR_OUT_OF_MEMORY;

	lock->node = node;
	lock->call_stack = acq_stack;
	TAILQ_INSERT_TAIL(owned, lock, link);

	return TEE_SUCCESS;
}

TEE_Result __lockdep_lock_release(struct lockdep_lock_head *owned, uintptr_t id)
{
	struct lockdep_lock *lock = NULL;

	TAILQ_FOREACH_REVERSE(lock, owned, lockdep_lock_head, link) {
		if (lock->node->lock_id == id) {
			TAILQ_REMOVE(owned, lock, link);
			free(lock->call_stack);
			free(lock);
			return TEE_SUCCESS;
		}
	}

	EMSG_RAW("Thread %p does not own lock %#" PRIxPTR, (void *)owned, id);
	return TEE_ERROR_ITEM_NOT_FOUND;
}

static void lockdep_free_edge(struct lockdep_edge *edge)
{
	free(edge->call_stack_from);
	free(edge->call_stack_to);
	free(edge);
}

static void lockdep_node_delete(struct lockdep_node *node)
{
	struct lockdep_edge *edge = NULL;
	struct lockdep_edge *next = NULL;

	STAILQ_FOREACH_SAFE(edge, &node->edges, link, next)
		lockdep_free_edge(edge);

	free(node);
}

void lockdep_graph_delete(struct lockdep_node_head *graph)
{
	struct lockdep_node *node = NULL;
	struct lockdep_node *next = NULL;

	TAILQ_FOREACH_SAFE(node, graph, link, next) {
		TAILQ_REMOVE(graph, node, link);
		lockdep_node_delete(node);
	}
}

void lockdep_queue_delete(struct lockdep_lock_head *owned)
{
	struct lockdep_lock *lock = NULL;
	struct lockdep_lock *next = NULL;

	TAILQ_FOREACH_SAFE(lock, owned, link, next) {
		TAILQ_REMOVE(owned, lock, link);
		free(lock->call_stack);
		free(lock);
	}
}

static void lockdep_node_destroy(struct lockdep_node_head *graph,
				 struct lockdep_node *node)
{
	struct lockdep_edge *edge = NULL;
	struct lockdep_edge *next = NULL;
	struct lockdep_node *from = NULL;

	TAILQ_REMOVE(graph, node, link);

	/*
	 * Loop over all nodes in the graph and remove every edge whose "to"
	 * field points at the node being removed.
	 */
	TAILQ_FOREACH(from, graph, link) {
		edge = STAILQ_FIRST(&from->edges);
		while (edge && edge->to == node) {
			STAILQ_REMOVE_HEAD(&from->edges, link);
			lockdep_free_edge(edge);
			edge = STAILQ_FIRST(&from->edges);
		}

		if (!edge)
			continue;

		next = STAILQ_NEXT(edge, link);
		while (next) {
			if (next->to == node) {
				STAILQ_REMOVE_AFTER(&from->edges, edge, link);
				lockdep_free_edge(next);
			} else {
				edge = next;
			}
			next = STAILQ_NEXT(edge, link);
		}
	}

	STAILQ_FOREACH_SAFE(edge, &node->edges, link, next)
		lockdep_free_edge(edge);

	free(node);
}

void lockdep_lock_destroy(struct lockdep_node_head *graph, uintptr_t lock_id)
{
	struct lockdep_node *node = NULL;

	assert(graph);
	TAILQ_FOREACH(node, graph, link) {
		if (node->lock_id == lock_id) {
			lockdep_node_destroy(graph, node);
			break;
		}
	}
}