1b3fd78c4SJerome Forissier /* SPDX-License-Identifier: BSD-2-Clause */
2b3fd78c4SJerome Forissier /*
3b3fd78c4SJerome Forissier * Copyright (c) 2018, Linaro Limited
4b3fd78c4SJerome Forissier */
5b3fd78c4SJerome Forissier
6b3fd78c4SJerome Forissier #ifndef __KERNEL_LOCKDEP_H
7b3fd78c4SJerome Forissier #define __KERNEL_LOCKDEP_H
8b3fd78c4SJerome Forissier
9b3fd78c4SJerome Forissier #include <compiler.h>
10b3fd78c4SJerome Forissier #include <kernel/panic.h>
11b3fd78c4SJerome Forissier #include <sys/queue.h>
12b3fd78c4SJerome Forissier #include <tee_api_types.h>
13b3fd78c4SJerome Forissier #include <trace.h>
14b3fd78c4SJerome Forissier #include <types_ext.h>
15b3fd78c4SJerome Forissier
16b3fd78c4SJerome Forissier /*
17b3fd78c4SJerome Forissier * Lock graph. If node A has an edge to node B, then A was locked before B in
18b3fd78c4SJerome Forissier * the same thread of execution.
19b3fd78c4SJerome Forissier */
20b3fd78c4SJerome Forissier
/*
 * Directed edge in the lock graph: the owning node's lock was held when the
 * lock represented by @to was acquired.
 */
struct lockdep_edge {
	struct lockdep_node *to;	/* Node locked after the owning node */
	uintptr_t thread_id;		/* Thread that created this ordering */
	vaddr_t *call_stack_from;	/* Presumably call stack when the origin
					 * lock was taken — confirm in .c file */
	vaddr_t *call_stack_to;		/* Presumably call stack when @to was
					 * taken — confirm in .c file */
	STAILQ_ENTRY(lockdep_edge) link; /* Linkage in the node's edge list */
};

/* Head type for a node's list of outgoing edges */
STAILQ_HEAD(lockdep_edge_head, lockdep_edge);
30b3fd78c4SJerome Forissier
/* One node per lock object tracked in the graph */
struct lockdep_node {
	uintptr_t lock_id; /* For instance, address of actual lock object */
	struct lockdep_edge_head edges;	/* Locks acquired while holding this one */
	TAILQ_ENTRY(lockdep_node) link;	/* Linkage in the graph's node list */
	uint8_t flags; /* Used temporarily when walking the graph */
};

/* Head type for the whole lock graph (list of all known nodes) */
TAILQ_HEAD(lockdep_node_head, lockdep_node);
39b3fd78c4SJerome Forissier
40b3fd78c4SJerome Forissier /* Per-thread queue of currently owned locks (point to nodes in the graph) */
41b3fd78c4SJerome Forissier
/* One entry in a thread's queue of currently held locks */
struct lockdep_lock {
	struct lockdep_node *node;	/* Graph node for the held lock */
	vaddr_t *call_stack;		/* Presumably call stack at acquisition
					 * time — confirm in .c file */
	TAILQ_ENTRY(lockdep_lock) link;	/* Linkage in the per-thread queue */
};

/* Head type for a per-thread queue of held locks */
TAILQ_HEAD(lockdep_lock_head, lockdep_lock);
49b3fd78c4SJerome Forissier
50b3fd78c4SJerome Forissier #ifdef CFG_LOCKDEP
51b3fd78c4SJerome Forissier
52b3fd78c4SJerome Forissier /*
53b3fd78c4SJerome Forissier * Functions used internally and for testing the algorithm. Actual locking code
54b3fd78c4SJerome Forissier * should use the wrappers below (which panic in case of error).
55b3fd78c4SJerome Forissier */
/*
 * Record acquisition of lock @id in @graph, given the locks already held in
 * @owned. Returns an error (instead of panicking) if the acquisition would
 * create a lock hierarchy violation.
 */
TEE_Result __lockdep_lock_acquire(struct lockdep_node_head *graph,
				  struct lockdep_lock_head *owned,
				  uintptr_t id);
/* Same as __lockdep_lock_acquire() but for a non-blocking acquire operation */
TEE_Result __lockdep_lock_tryacquire(struct lockdep_node_head *graph,
				     struct lockdep_lock_head *owned,
				     uintptr_t id);
/*
 * Record release of lock @id. Returns an error (instead of panicking) if the
 * lock is not in @owned.
 */
TEE_Result __lockdep_lock_release(struct lockdep_lock_head *owned,
				  uintptr_t id);

/* Delete all elements in @graph */
void lockdep_graph_delete(struct lockdep_node_head *graph);

/* Delete all elements in @queue */
void lockdep_queue_delete(struct lockdep_lock_head *queue);
70b3fd78c4SJerome Forissier
71b3fd78c4SJerome Forissier /*
72b3fd78c4SJerome Forissier * Acquire lock @id, while already holding the locks in @owned.
 * @owned represents the caller; there should be one instance per thread of
74b3fd78c4SJerome Forissier * execution. @graph is the directed acyclic graph (DAG) to be used for
75b3fd78c4SJerome Forissier * potential deadlock detection; use the same @graph for all the locks of the
76b3fd78c4SJerome Forissier * same type as lock @id.
77b3fd78c4SJerome Forissier *
78b3fd78c4SJerome Forissier * This function will panic() if the acquire operation would result in a lock
79b3fd78c4SJerome Forissier * hierarchy violation (potential deadlock).
80b3fd78c4SJerome Forissier */
static inline void lockdep_lock_acquire(struct lockdep_node_head *graph,
					struct lockdep_lock_head *owned,
					uintptr_t id)
{
	TEE_Result rc = __lockdep_lock_acquire(graph, owned, id);

	if (!rc)
		return;

	/* Lock hierarchy violation: report and stop */
	EMSG("lockdep: error %#" PRIx32, rc);
	panic();
}
92b3fd78c4SJerome Forissier
93b3fd78c4SJerome Forissier /*
94*02fbb41aSJerome Forissier * Non-blocking acquire lock @id, while already holding the locks in @owned.
 * @owned represents the caller; there should be one instance per thread of
96*02fbb41aSJerome Forissier * execution. @graph is the directed acyclic graph (DAG) to be used for
97*02fbb41aSJerome Forissier * potential deadlock detection; use the same @graph for all the locks of the
98*02fbb41aSJerome Forissier * same type as lock @id.
99*02fbb41aSJerome Forissier */
static inline void lockdep_lock_tryacquire(struct lockdep_node_head *graph,
					   struct lockdep_lock_head *owned,
					   uintptr_t id)
{
	TEE_Result rc = __lockdep_lock_tryacquire(graph, owned, id);

	if (!rc)
		return;

	/* Lock hierarchy violation: report and stop */
	EMSG("lockdep: error %#" PRIx32, rc);
	panic();
}
111*02fbb41aSJerome Forissier
112*02fbb41aSJerome Forissier /*
113b3fd78c4SJerome Forissier * Release lock @id. The lock is removed from @owned.
114b3fd78c4SJerome Forissier *
115b3fd78c4SJerome Forissier * This function will panic() if the lock is not held by the caller.
116b3fd78c4SJerome Forissier */
static inline void lockdep_lock_release(struct lockdep_lock_head *owned,
					uintptr_t id)
{
	TEE_Result rc = __lockdep_lock_release(owned, id);

	if (!rc)
		return;

	/* Releasing a lock we do not hold: report and stop */
	EMSG("lockdep: error %#" PRIx32, rc);
	panic();
}
127b3fd78c4SJerome Forissier
/*
 * Destroy lock @id in @graph. The lock is freed.
 */
void lockdep_lock_destroy(struct lockdep_node_head *graph, uintptr_t id);

/* Initialize lockdep for mutex objects (kernel/mutex.h) */
void mutex_lockdep_init(void);
135ab0df69eSJerome Forissier
136b3fd78c4SJerome Forissier #else /* CFG_LOCKDEP */
137b3fd78c4SJerome Forissier
/* No-op stubs: deadlock detection is compiled out without CFG_LOCKDEP */
static inline void lockdep_lock_acquire(struct lockdep_node_head *g __unused,
					struct lockdep_lock_head *o __unused,
					uintptr_t id __unused)
{}

/*
 * Missing stub added: lockdep_lock_tryacquire() was only defined under
 * CFG_LOCKDEP, so any caller would fail to build with lockdep disabled.
 */
static inline void
lockdep_lock_tryacquire(struct lockdep_node_head *g __unused,
			struct lockdep_lock_head *o __unused,
			uintptr_t id __unused)
{}
142b3fd78c4SJerome Forissier
/* No-op stub: deadlock detection is compiled out without CFG_LOCKDEP */
static inline void lockdep_lock_release(struct lockdep_lock_head *o __unused,
					uintptr_t id __unused)
{}

/* No-op stub: nothing to free when lockdep is disabled */
static inline void
lockdep_lock_destroy(struct lockdep_node_head *graph __unused,
		     uintptr_t id __unused)
{}

/* No-op stub: no mutex lockdep state to initialize */
static inline void mutex_lockdep_init(void)
{}
154ab0df69eSJerome Forissier
155b3fd78c4SJerome Forissier #endif /* !CFG_LOCKDEP */
156b3fd78c4SJerome Forissier
157b3fd78c4SJerome Forissier #endif /* !__KERNEL_LOCKDEP_H */
158