xref: /optee_os/core/include/kernel/lockdep.h (revision b3fd78c4010d71a51bd7d0ca5e99247d9ebf67f7)
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2018, Linaro Limited
 */
5*b3fd78c4SJerome Forissier 
6*b3fd78c4SJerome Forissier #ifndef __KERNEL_LOCKDEP_H
7*b3fd78c4SJerome Forissier #define __KERNEL_LOCKDEP_H
8*b3fd78c4SJerome Forissier 
9*b3fd78c4SJerome Forissier #include <compiler.h>
10*b3fd78c4SJerome Forissier #include <kernel/panic.h>
11*b3fd78c4SJerome Forissier #include <sys/queue.h>
12*b3fd78c4SJerome Forissier #include <tee_api_types.h>
13*b3fd78c4SJerome Forissier #include <trace.h>
14*b3fd78c4SJerome Forissier #include <types_ext.h>
15*b3fd78c4SJerome Forissier 
/*
 * Lock graph. If node A has an edge to node B, then A was locked before B in
 * the same thread of execution.
 */

/*
 * Directed edge in the lock graph: the lock represented by the node owning
 * this edge was acquired before @to in the same thread of execution.
 */
struct lockdep_edge {
	struct lockdep_node *to; /* Lock acquired after the owning node's lock */
	uintptr_t thread_id; /* Thread that established this ordering */
	vaddr_t *call_stack_from; /* Call stack at acquire of the 'from' lock */
	vaddr_t *call_stack_to; /* Call stack at acquire of the @to lock */
	STAILQ_ENTRY(lockdep_edge) link; /* Next edge out of the same node */
};
28*b3fd78c4SJerome Forissier 
29*b3fd78c4SJerome Forissier STAILQ_HEAD(lockdep_edge_head, lockdep_edge);
30*b3fd78c4SJerome Forissier 
/* One node per lock object tracked in the dependency graph */
struct lockdep_node {
	uintptr_t lock_id; /* For instance, address of actual lock object */
	struct lockdep_edge_head edges; /* Outgoing edges: locks taken after this one */
	TAILQ_ENTRY(lockdep_node) link; /* Linkage in the global graph list */
	uint8_t flags; /* Used temporarily when walking the graph */
};
37*b3fd78c4SJerome Forissier 
38*b3fd78c4SJerome Forissier TAILQ_HEAD(lockdep_node_head, lockdep_node);
39*b3fd78c4SJerome Forissier 
40*b3fd78c4SJerome Forissier /* Per-thread queue of currently owned locks (point to nodes in the graph) */
41*b3fd78c4SJerome Forissier 
/* One entry in a thread's queue of currently held locks */
struct lockdep_lock {
	struct lockdep_node *node; /* Graph node for the held lock */
	vaddr_t *call_stack; /* Call stack recorded when the lock was taken */
	TAILQ_ENTRY(lockdep_lock) link; /* Linkage in the per-thread queue */
};
47*b3fd78c4SJerome Forissier 
48*b3fd78c4SJerome Forissier TAILQ_HEAD(lockdep_lock_head, lockdep_lock);
49*b3fd78c4SJerome Forissier 
50*b3fd78c4SJerome Forissier #ifdef CFG_LOCKDEP
51*b3fd78c4SJerome Forissier 
/*
 * Functions used internally and for testing the algorithm. Actual locking code
 * should use the wrappers below (which panic in case of error).
 */

/*
 * Record the acquisition of lock @id into @graph, given the locks already
 * held in @owned. Returns TEE_SUCCESS or an error code on failure (for
 * instance, when the acquisition would create a lock hierarchy violation —
 * a potential deadlock).
 */
TEE_Result __lockdep_lock_acquire(struct lockdep_node_head *graph,
				  struct lockdep_lock_head *owned,
				  uintptr_t id);
/*
 * Record the release of lock @id, removing it from @owned. Returns
 * TEE_SUCCESS or an error code on failure (for instance, when @id is not
 * present in @owned).
 */
TEE_Result __lockdep_lock_release(struct lockdep_lock_head *owned,
				  uintptr_t id);
61*b3fd78c4SJerome Forissier 
62*b3fd78c4SJerome Forissier /* Delete all elements in @graph */
63*b3fd78c4SJerome Forissier void lockdep_graph_delete(struct lockdep_node_head *graph);
64*b3fd78c4SJerome Forissier 
65*b3fd78c4SJerome Forissier /* Delete all elements in @queue */
66*b3fd78c4SJerome Forissier void lockdep_queue_delete(struct lockdep_lock_head *queue);
67*b3fd78c4SJerome Forissier 
68*b3fd78c4SJerome Forissier /*
69*b3fd78c4SJerome Forissier  * Acquire lock @id, while already holding the locks in @owned.
70*b3fd78c4SJerome Forissier  * @owned represent the caller; there should be one instance per thread of
71*b3fd78c4SJerome Forissier  * execution. @graph is the directed acyclic graph (DAG) to be used for
72*b3fd78c4SJerome Forissier  * potential deadlock detection; use the same @graph for all the locks of the
73*b3fd78c4SJerome Forissier  * same type as lock @id.
74*b3fd78c4SJerome Forissier  *
75*b3fd78c4SJerome Forissier  * This function will panic() if the acquire operation would result in a lock
76*b3fd78c4SJerome Forissier  * hierarchy violation (potential deadlock).
77*b3fd78c4SJerome Forissier  */
78*b3fd78c4SJerome Forissier static inline void lockdep_lock_acquire(struct lockdep_node_head *graph,
79*b3fd78c4SJerome Forissier 					struct lockdep_lock_head *owned,
80*b3fd78c4SJerome Forissier 					uintptr_t id)
81*b3fd78c4SJerome Forissier {
82*b3fd78c4SJerome Forissier 	TEE_Result res = __lockdep_lock_acquire(graph, owned, id);
83*b3fd78c4SJerome Forissier 
84*b3fd78c4SJerome Forissier 	if (res) {
85*b3fd78c4SJerome Forissier 		EMSG("lockdep: error %#" PRIx32, res);
86*b3fd78c4SJerome Forissier 		panic();
87*b3fd78c4SJerome Forissier 	}
88*b3fd78c4SJerome Forissier }
89*b3fd78c4SJerome Forissier 
90*b3fd78c4SJerome Forissier /*
91*b3fd78c4SJerome Forissier  * Release lock @id. The lock is removed from @owned.
92*b3fd78c4SJerome Forissier  *
93*b3fd78c4SJerome Forissier  * This function will panic() if the lock is not held by the caller.
94*b3fd78c4SJerome Forissier  */
95*b3fd78c4SJerome Forissier static inline void lockdep_lock_release(struct lockdep_lock_head *owned,
96*b3fd78c4SJerome Forissier 					uintptr_t id)
97*b3fd78c4SJerome Forissier {
98*b3fd78c4SJerome Forissier 	TEE_Result res = __lockdep_lock_release(owned, id);
99*b3fd78c4SJerome Forissier 
100*b3fd78c4SJerome Forissier 	if (res) {
101*b3fd78c4SJerome Forissier 		EMSG("lockdep: error %#" PRIx32, res);
102*b3fd78c4SJerome Forissier 		panic();
103*b3fd78c4SJerome Forissier 	}
104*b3fd78c4SJerome Forissier }
105*b3fd78c4SJerome Forissier 
106*b3fd78c4SJerome Forissier #else /* CFG_LOCKDEP */
107*b3fd78c4SJerome Forissier 
/* No-op stub: deadlock detection is compiled out without CFG_LOCKDEP */
static inline void lockdep_lock_acquire(struct lockdep_node_head *g __unused,
					struct lockdep_lock_head *o __unused,
					uintptr_t id __unused)
{}
112*b3fd78c4SJerome Forissier 
/* No-op stub: deadlock detection is compiled out without CFG_LOCKDEP */
static inline void lockdep_lock_release(struct lockdep_lock_head *o __unused,
					uintptr_t id __unused)
{}
116*b3fd78c4SJerome Forissier 
117*b3fd78c4SJerome Forissier #endif /* !CFG_LOCKDEP */
118*b3fd78c4SJerome Forissier 
119*b3fd78c4SJerome Forissier #endif /* !__KERNEL_LOCKDEP_H */
120