xref: /optee_os/core/include/kernel/lockdep.h (revision ccbc05e1edcc7d1eae3b230e7a18b1e0e756e429)
1b3fd78c4SJerome Forissier /* SPDX-License-Identifier: BSD-2-Clause */
2b3fd78c4SJerome Forissier /*
3b3fd78c4SJerome Forissier  * Copyright (c) 2018, Linaro Limited
4b3fd78c4SJerome Forissier  */
5b3fd78c4SJerome Forissier 
6b3fd78c4SJerome Forissier #ifndef __KERNEL_LOCKDEP_H
7b3fd78c4SJerome Forissier #define __KERNEL_LOCKDEP_H
8b3fd78c4SJerome Forissier 
9b3fd78c4SJerome Forissier #include <compiler.h>
10b3fd78c4SJerome Forissier #include <kernel/panic.h>
11b3fd78c4SJerome Forissier #include <sys/queue.h>
12b3fd78c4SJerome Forissier #include <tee_api_types.h>
13b3fd78c4SJerome Forissier #include <trace.h>
14b3fd78c4SJerome Forissier #include <types_ext.h>
15b3fd78c4SJerome Forissier 
16b3fd78c4SJerome Forissier /*
17b3fd78c4SJerome Forissier  * Lock graph. If node A has an edge to node B, then A was locked before B in
18b3fd78c4SJerome Forissier  * the same thread of execution.
19b3fd78c4SJerome Forissier  */
20b3fd78c4SJerome Forissier 
/*
 * Directed edge of the lock graph: per the graph definition above, an edge
 * means the source lock was acquired before @to in the same thread of
 * execution.
 */
struct lockdep_edge {
	struct lockdep_node *to;	/* Lock acquired after the source lock */
	uintptr_t thread_id;		/* Thread that established this ordering */
	/*
	 * NOTE(review): presumably the call stacks recorded when the source
	 * lock and @to were acquired, for deadlock diagnostics — confirm
	 * against the implementation.
	 */
	vaddr_t *call_stack_from;
	vaddr_t *call_stack_to;
	STAILQ_ENTRY(lockdep_edge) link;	/* Linkage in the owning node's edge list */
};

/* Head of a singly-linked list of edges (one list per node) */
STAILQ_HEAD(lockdep_edge_head, lockdep_edge);
30b3fd78c4SJerome Forissier 
/* Node of the lock graph: one node per distinct lock (identified by @lock_id) */
struct lockdep_node {
	uintptr_t lock_id; /* For instance, address of actual lock object */
	struct lockdep_edge_head edges;	/* Outgoing edges: locks taken after this one */
	TAILQ_ENTRY(lockdep_node) link;	/* Linkage in the graph's node list */
	uint8_t flags; /* Used temporarily when walking the graph */
};

/* Head of the node list; this is the whole lock graph */
TAILQ_HEAD(lockdep_node_head, lockdep_node);
39b3fd78c4SJerome Forissier 
40b3fd78c4SJerome Forissier /* Per-thread queue of currently owned locks (point to nodes in the graph) */
41b3fd78c4SJerome Forissier 
/* One currently-held lock; element of a thread's owned-locks queue */
struct lockdep_lock {
	struct lockdep_node *node;	/* Node in the shared lock graph */
	/*
	 * NOTE(review): presumably the call stack at acquire time, used for
	 * diagnostics — confirm against the implementation.
	 */
	vaddr_t *call_stack;
	TAILQ_ENTRY(lockdep_lock) link;	/* Linkage in the per-thread queue */
};

/* Head of a per-thread queue of currently owned locks */
TAILQ_HEAD(lockdep_lock_head, lockdep_lock);
49b3fd78c4SJerome Forissier 
50b3fd78c4SJerome Forissier #ifdef CFG_LOCKDEP
51b3fd78c4SJerome Forissier 
52b3fd78c4SJerome Forissier /*
53b3fd78c4SJerome Forissier  * Functions used internally and for testing the algorithm. Actual locking code
54b3fd78c4SJerome Forissier  * should use the wrappers below (which panic in case of error).
55b3fd78c4SJerome Forissier  */
/*
 * Record acquisition of lock @id by the thread owning @owned, updating
 * @graph. Returns an error code (rather than panicking) so the detection
 * algorithm can be exercised by tests; regular code uses the wrappers below.
 */
TEE_Result __lockdep_lock_acquire(struct lockdep_node_head *graph,
				  struct lockdep_lock_head *owned,
				  uintptr_t id);
/*
 * Record release of lock @id: removes it from @owned. Returns an error code
 * if something goes wrong (e.g. the lock is not in @owned).
 */
TEE_Result __lockdep_lock_release(struct lockdep_lock_head *owned,
				  uintptr_t id);

/* Delete all elements in @graph */
void lockdep_graph_delete(struct lockdep_node_head *graph);

/* Delete all elements in @queue */
void lockdep_queue_delete(struct lockdep_lock_head *queue);
67b3fd78c4SJerome Forissier 
68b3fd78c4SJerome Forissier /*
69b3fd78c4SJerome Forissier  * Acquire lock @id, while already holding the locks in @owned.
70b3fd78c4SJerome Forissier  * @owned represent the caller; there should be one instance per thread of
71b3fd78c4SJerome Forissier  * execution. @graph is the directed acyclic graph (DAG) to be used for
72b3fd78c4SJerome Forissier  * potential deadlock detection; use the same @graph for all the locks of the
73b3fd78c4SJerome Forissier  * same type as lock @id.
74b3fd78c4SJerome Forissier  *
75b3fd78c4SJerome Forissier  * This function will panic() if the acquire operation would result in a lock
76b3fd78c4SJerome Forissier  * hierarchy violation (potential deadlock).
77b3fd78c4SJerome Forissier  */
78b3fd78c4SJerome Forissier static inline void lockdep_lock_acquire(struct lockdep_node_head *graph,
79b3fd78c4SJerome Forissier 					struct lockdep_lock_head *owned,
80b3fd78c4SJerome Forissier 					uintptr_t id)
81b3fd78c4SJerome Forissier {
82b3fd78c4SJerome Forissier 	TEE_Result res = __lockdep_lock_acquire(graph, owned, id);
83b3fd78c4SJerome Forissier 
84b3fd78c4SJerome Forissier 	if (res) {
85b3fd78c4SJerome Forissier 		EMSG("lockdep: error %#" PRIx32, res);
86b3fd78c4SJerome Forissier 		panic();
87b3fd78c4SJerome Forissier 	}
88b3fd78c4SJerome Forissier }
89b3fd78c4SJerome Forissier 
90b3fd78c4SJerome Forissier /*
91b3fd78c4SJerome Forissier  * Release lock @id. The lock is removed from @owned.
92b3fd78c4SJerome Forissier  *
93b3fd78c4SJerome Forissier  * This function will panic() if the lock is not held by the caller.
94b3fd78c4SJerome Forissier  */
95b3fd78c4SJerome Forissier static inline void lockdep_lock_release(struct lockdep_lock_head *owned,
96b3fd78c4SJerome Forissier 					uintptr_t id)
97b3fd78c4SJerome Forissier {
98b3fd78c4SJerome Forissier 	TEE_Result res = __lockdep_lock_release(owned, id);
99b3fd78c4SJerome Forissier 
100b3fd78c4SJerome Forissier 	if (res) {
101b3fd78c4SJerome Forissier 		EMSG("lockdep: error %#" PRIx32, res);
102b3fd78c4SJerome Forissier 		panic();
103b3fd78c4SJerome Forissier 	}
104b3fd78c4SJerome Forissier }
105b3fd78c4SJerome Forissier 
/*
 * Destroy lock @id in @graph. The lock is freed.
 * NOTE(review): presumably meant to be called when the lock object itself is
 * destroyed, so a later lock allocated at the same address is not confused
 * with the old one — confirm against the implementation.
 */
void lockdep_lock_destroy(struct lockdep_node_head *graph, uintptr_t id);

/* Initialize lockdep for mutex objects (kernel/mutex.h) */
void mutex_lockdep_init(void);
113ab0df69eSJerome Forissier 
114b3fd78c4SJerome Forissier #else /* CFG_LOCKDEP */
115b3fd78c4SJerome Forissier 
/* No-op stub: lock dependency checking compiled out (CFG_LOCKDEP unset) */
static inline void lockdep_lock_acquire(struct lockdep_node_head *g __unused,
					struct lockdep_lock_head *o __unused,
					uintptr_t id __unused)
{}
120b3fd78c4SJerome Forissier 
/* No-op stub: lock dependency checking compiled out (CFG_LOCKDEP unset) */
static inline void lockdep_lock_release(struct lockdep_lock_head *o __unused,
					uintptr_t id __unused)
{}
124b3fd78c4SJerome Forissier 
/* No-op stub: lock dependency checking compiled out (CFG_LOCKDEP unset) */
static inline void
lockdep_lock_destroy(struct lockdep_node_head *graph __unused,
		     uintptr_t id __unused)
{}
129*ccbc05e1SJens Wiklander 
/* No-op stub: lock dependency checking compiled out (CFG_LOCKDEP unset) */
static inline void mutex_lockdep_init(void)
{}
132ab0df69eSJerome Forissier 
133b3fd78c4SJerome Forissier #endif /* !CFG_LOCKDEP */
134b3fd78c4SJerome Forissier 
135b3fd78c4SJerome Forissier #endif /* !__KERNEL_LOCKDEP_H */
136