xref: /optee_os/core/include/kernel/lockdep.h (revision 77327d7a47019cf9f66972403d0de1c32fe4cdee)
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2018, Linaro Limited
 */

#ifndef __KERNEL_LOCKDEP_H
#define __KERNEL_LOCKDEP_H

#include <compiler.h>
#include <kernel/panic.h>
#include <sys/queue.h>
#include <tee_api_types.h>
#include <trace.h>
#include <types_ext.h>

/*
 * Lock graph. If node A has an edge to node B, then A was locked before B in
 * the same thread of execution.
 */

struct lockdep_edge {
	struct lockdep_node *to;
	uintptr_t thread_id;
	vaddr_t *call_stack_from;
	vaddr_t *call_stack_to;
	STAILQ_ENTRY(lockdep_edge) link;
};

STAILQ_HEAD(lockdep_edge_head, lockdep_edge);

struct lockdep_node {
	uintptr_t lock_id; /* For instance, address of actual lock object */
	struct lockdep_edge_head edges;
	TAILQ_ENTRY(lockdep_node) link;
	uint8_t flags; /* Used temporarily when walking the graph */
};

TAILQ_HEAD(lockdep_node_head, lockdep_node);
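
/*
 * Illustration (a sketch of the idea, not taken from the implementation): if
 * one thread takes lock A and then lock B while still holding A, an edge
 *
 *   node(A) --> node(B)
 *
 * is recorded in the graph. If a thread later takes B and then A while still
 * holding B, the edge node(B) --> node(A) would close a cycle (the classic
 * AB/BA inversion), which is reported as a potential deadlock.
 */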

/*
 * Per-thread queue of currently owned locks (each entry points to a node in
 * the graph)
 */

struct lockdep_lock {
	struct lockdep_node *node;
	vaddr_t *call_stack;
	TAILQ_ENTRY(lockdep_lock) link;
};

TAILQ_HEAD(lockdep_lock_head, lockdep_lock);
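
/*
 * Declaration sketch (hypothetical names, not part of this API): one graph
 * per lock class, plus one owned-locks queue per thread of execution.
 *
 *   static struct lockdep_node_head my_class_graph =
 *		TAILQ_HEAD_INITIALIZER(my_class_graph);
 *   static struct lockdep_lock_head my_owned[MY_NUM_THREADS];
 *
 * Each entry of my_owned would be initialized with TAILQ_INIT() before use.
 */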

#ifdef CFG_LOCKDEP

/*
 * Functions used internally and for testing the algorithm. Actual locking code
 * should use the wrappers below (which panic in case of error).
 */
TEE_Result __lockdep_lock_acquire(struct lockdep_node_head *graph,
				  struct lockdep_lock_head *owned,
				  uintptr_t id);
TEE_Result __lockdep_lock_release(struct lockdep_lock_head *owned,
				  uintptr_t id);
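
/*
 * Detection sketch (illustrative only, not taken from the implementation or
 * the test suite; lock IDs 1 and 2 stand for two lock objects, t1/t2 for two
 * threads of execution):
 *
 *   struct lockdep_node_head graph = TAILQ_HEAD_INITIALIZER(graph);
 *   struct lockdep_lock_head t1 = TAILQ_HEAD_INITIALIZER(t1);
 *   struct lockdep_lock_head t2 = TAILQ_HEAD_INITIALIZER(t2);
 *
 *   __lockdep_lock_acquire(&graph, &t1, 1);
 *   __lockdep_lock_acquire(&graph, &t1, 2);	(1 held before 2: edge 1->2)
 *   __lockdep_lock_release(&t1, 2);
 *   __lockdep_lock_release(&t1, 1);
 *   __lockdep_lock_acquire(&graph, &t2, 2);
 *   TEE_Result res = __lockdep_lock_acquire(&graph, &t2, 1);
 *
 * The last call would add edge 2->1 and close a cycle, so it is expected to
 * return an error rather than TEE_SUCCESS.
 */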

/* Delete all elements in @graph */
void lockdep_graph_delete(struct lockdep_node_head *graph);

/* Delete all elements in @queue */
void lockdep_queue_delete(struct lockdep_lock_head *queue);

/*
 * Acquire lock @id, while already holding the locks in @owned.
 * @owned represents the caller; there should be one instance per thread of
 * execution. @graph is the directed acyclic graph (DAG) to be used for
 * potential deadlock detection; use the same @graph for all the locks of the
 * same type as lock @id.
 *
 * This function will panic() if the acquire operation would result in a lock
 * hierarchy violation (potential deadlock).
 */
static inline void lockdep_lock_acquire(struct lockdep_node_head *graph,
					struct lockdep_lock_head *owned,
					uintptr_t id)
{
	TEE_Result res = __lockdep_lock_acquire(graph, owned, id);

	if (res) {
		EMSG("lockdep: error %#" PRIx32, res);
		panic();
	}
}
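
/*
 * Usage sketch (hypothetical wrapper, not part of this API): record the
 * acquisition for deadlock detection before taking the real lock, using the
 * lock object's address as @id. my_lock_graph, struct my_lock and
 * really_take_the_lock() are assumed names.
 *
 *   static void my_lock(struct my_lock *l, struct lockdep_lock_head *owned)
 *   {
 *	lockdep_lock_acquire(&my_lock_graph, owned, (uintptr_t)l);
 *	really_take_the_lock(l);
 *   }
 */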

/*
 * Release lock @id. The lock is removed from @owned.
 *
 * This function will panic() if the lock is not held by the caller.
 */
static inline void lockdep_lock_release(struct lockdep_lock_head *owned,
					uintptr_t id)
{
	TEE_Result res = __lockdep_lock_release(owned, id);

	if (res) {
		EMSG("lockdep: error %#" PRIx32, res);
		panic();
	}
}
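
/*
 * Matching release sketch for the hypothetical wrapper above (ordering of the
 * two calls is illustrative):
 *
 *   static void my_unlock(struct my_lock *l, struct lockdep_lock_head *owned)
 *   {
 *	lockdep_lock_release(owned, (uintptr_t)l);
 *	really_release_the_lock(l);
 *   }
 */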

/* Initialize lockdep for mutex objects (kernel/mutex.h) */
void mutex_lockdep_init(void);

#else /* CFG_LOCKDEP */

static inline void lockdep_lock_acquire(struct lockdep_node_head *g __unused,
					struct lockdep_lock_head *o __unused,
					uintptr_t id __unused)
{}

static inline void lockdep_lock_release(struct lockdep_lock_head *o __unused,
					uintptr_t id __unused)
{}

static inline void mutex_lockdep_init(void)
{}

#endif /* !CFG_LOCKDEP */

#endif /* !__KERNEL_LOCKDEP_H */