Matching lines: lock dependency checker (lockdep)
1 // SPDX-License-Identifier: BSD-2-Clause
16 /* Flags used for depth-first topological sorting */
19 /* Flag used during breadth-first search (print shortest cycle) */
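The two comment hits above name per-node flag bits that the traversals matched further down rely on. A minimal sketch of those bits is given here for orientation; the macro names (LOCKDEP_NODE_TEMP_MARK, LOCKDEP_NODE_PERM_MARK, LOCKDEP_NODE_BFS_VISITED) come from the matched lines, but the exact bit values are an assumption for illustration.

/* Depth-first topological sort / cycle detection (lockdep_visit()) */
#define LOCKDEP_NODE_TEMP_MARK   0x1 /* node is on the current DFS path */
#define LOCKDEP_NODE_PERM_MARK   0x2 /* node fully explored, no cycle below it */

/* Breadth-first shortest-cycle search (lockdep_graph_get_shortest_cycle()) */
#define LOCKDEP_NODE_BFS_VISITED 0x4 /* node has already been enqueued */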
31 if (node->lock_id == lock_id) in lockdep_add_to_graph()
38 node->lock_id = lock_id; in lockdep_add_to_graph()
39 STAILQ_INIT(&node->edges); in lockdep_add_to_graph()
85 STAILQ_FOREACH(edge, &from->edges, link) in lockdep_add_edge()
86 if (edge->to == to) in lockdep_add_edge()
92 edge->to = to; in lockdep_add_edge()
93 edge->call_stack_from = dup_call_stack(call_stack_from); in lockdep_add_edge()
94 edge->call_stack_to = dup_call_stack(call_stack_to); in lockdep_add_edge()
95 edge->thread_id = thread_id; in lockdep_add_edge()
96 STAILQ_INSERT_TAIL(&from->edges, edge, link); in lockdep_add_edge()
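The lockdep_add_to_graph() and lockdep_add_edge() hits operate on a directed "acquired-before" graph: one node per lock, and an edge A -> B whenever some thread took B while already holding A. The sketch below shows the node and edge layout those dereferences assume (lock_id, edges, to, call_stack_from/to, thread_id, flags); the field types and the list linking nodes into the graph are illustrative guesses, the authoritative definitions live in the lockdep header.

#include <stdint.h>
#include <sys/queue.h>  /* BSD STAILQ_* macros, as used by the matched lines */

/* One vertex per lock object, keyed by an opaque lock ID */
struct lockdep_node {
        uintptr_t lock_id;                 /* typically the lock's address */
        STAILQ_HEAD(, lockdep_edge) edges; /* outgoing "then acquired" edges */
        unsigned int flags;                /* DFS/BFS traversal marks above */
        STAILQ_ENTRY(lockdep_node) link;   /* linkage into the global graph */
};

/* One directed edge: @to was acquired while this node's lock was held */
struct lockdep_edge {
        struct lockdep_node *to;
        uintptr_t *call_stack_from;        /* where the first lock was taken */
        uintptr_t *call_stack_to;          /* where the second lock was taken */
        uintptr_t thread_id;               /* thread that created the ordering */
        STAILQ_ENTRY(lockdep_edge) link;
};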
117 free(cur->path); in lockdep_bfs_queue_delete()
124 * This function performs an iterative breadth-first search starting from @node,
135 node->flags |= LOCKDEP_NODE_BFS_VISITED; in lockdep_graph_get_shortest_cycle()
140 qe->node = node; in lockdep_graph_get_shortest_cycle()
141 qe->path = malloc(sizeof(uintptr_t)); in lockdep_graph_get_shortest_cycle()
142 if (!qe->path) in lockdep_graph_get_shortest_cycle()
144 qe->path[0] = node->lock_id; in lockdep_graph_get_shortest_cycle()
145 qe->pathlen = 1; in lockdep_graph_get_shortest_cycle()
153 n = qe->node; in lockdep_graph_get_shortest_cycle()
156 STAILQ_FOREACH(e, &n->edges, link) { in lockdep_graph_get_shortest_cycle()
157 if (e->to->lock_id == node->lock_id) { in lockdep_graph_get_shortest_cycle()
159 size_t nlen = qe->pathlen + 1; in lockdep_graph_get_shortest_cycle()
165 tmp = realloc(qe->path, in lockdep_graph_get_shortest_cycle()
169 free(qe->path); in lockdep_graph_get_shortest_cycle()
173 qe->path = tmp; in lockdep_graph_get_shortest_cycle()
174 qe->path[nlen - 1] = 0; in lockdep_graph_get_shortest_cycle()
175 ret = qe->path; in lockdep_graph_get_shortest_cycle()
179 if (!(e->to->flags & LOCKDEP_NODE_BFS_VISITED)) { in lockdep_graph_get_shortest_cycle()
183 e->to->flags |= LOCKDEP_NODE_BFS_VISITED; in lockdep_graph_get_shortest_cycle()
185 nlen = qe->pathlen + 1; in lockdep_graph_get_shortest_cycle()
189 nqe->node = e->to; in lockdep_graph_get_shortest_cycle()
190 nqe->path = malloc(nlen * sizeof(uintptr_t)); in lockdep_graph_get_shortest_cycle()
191 if (!nqe->path) in lockdep_graph_get_shortest_cycle()
193 nqe->pathlen = nlen; in lockdep_graph_get_shortest_cycle()
194 memcpy(nqe->path, qe->path, in lockdep_graph_get_shortest_cycle()
195 qe->pathlen * sizeof(uintptr_t)); in lockdep_graph_get_shortest_cycle()
196 nqe->path[nlen - 1] = e->to->lock_id; in lockdep_graph_get_shortest_cycle()
200 free(qe->path); in lockdep_graph_get_shortest_cycle()
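The lockdep_graph_get_shortest_cycle() hits above implement an iterative breadth-first search from @node: each queue entry carries the path walked so far, and the first edge that leads back to the start node yields a zero-terminated shortest cycle. The self-contained sketch below reproduces that idea over a plain adjacency list; the type and function names (graph_node, graph_edge, bfs_shortest_cycle()) are invented for illustration and are not the lockdep ones.

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct graph_edge;

struct graph_node {
        uintptr_t id;
        struct graph_edge *edges;
        bool visited;
};

struct graph_edge {
        struct graph_node *to;
        struct graph_edge *next;
};

struct bfs_entry {
        struct graph_node *node;
        uintptr_t *path;        /* node IDs from @start to @node */
        size_t pathlen;
        struct bfs_entry *next;
};

/*
 * Return a zero-terminated array of node IDs forming a shortest cycle through
 * @start, or NULL if @start is not on any cycle.  The caller frees the result
 * and must reset the visited flags before reusing the graph.
 */
static uintptr_t *bfs_shortest_cycle(struct graph_node *start)
{
        struct bfs_entry *head = NULL, *tail = NULL;
        struct bfs_entry *qe = calloc(1, sizeof(*qe));
        uintptr_t *ret = NULL;

        if (!qe)
                return NULL;
        qe->node = start;
        qe->path = malloc(sizeof(uintptr_t));
        if (!qe->path) {
                free(qe);
                return NULL;
        }
        qe->path[0] = start->id;
        qe->pathlen = 1;
        head = tail = qe;
        start->visited = true;

        while (head) {
                qe = head;      /* dequeue the oldest entry */
                head = head->next;
                if (!head)
                        tail = NULL;
                /* Once ret is set, the loop only drains and frees the queue */
                for (struct graph_edge *e = ret ? NULL : qe->node->edges;
                     e; e = e->next) {
                        size_t nlen = qe->pathlen + 1;

                        if (e->to == start) {
                                /* Cycle closed: zero-terminate and keep path */
                                uintptr_t *tmp = realloc(qe->path,
                                                         nlen * sizeof(*tmp));
                                if (!tmp)
                                        break;
                                tmp[nlen - 1] = 0;
                                qe->path = NULL; /* ownership moves to ret */
                                ret = tmp;
                                break;
                        }
                        if (e->to->visited)
                                continue;
                        struct bfs_entry *nqe = calloc(1, sizeof(*nqe));

                        if (!nqe)
                                continue;
                        nqe->node = e->to;
                        nqe->path = malloc(nlen * sizeof(uintptr_t));
                        if (!nqe->path) {
                                free(nqe);
                                continue;
                        }
                        memcpy(nqe->path, qe->path,
                               qe->pathlen * sizeof(uintptr_t));
                        nqe->path[nlen - 1] = e->to->id;
                        nqe->pathlen = nlen;
                        e->to->visited = true;
                        if (tail)
                                tail->next = nqe;
                        else
                                head = nqe;
                        tail = nqe;
                }
                free(qe->path);
                free(qe);
        }
        return ret;
}

The matched lines keep the same per-path information in per-entry allocations and mark already-enqueued nodes with the LOCKDEP_NODE_BFS_VISITED flag instead of a boolean.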
215 if (node->flags & LOCKDEP_NODE_PERM_MARK) in lockdep_visit()
218 if (node->flags & LOCKDEP_NODE_TEMP_MARK) in lockdep_visit()
221 node->flags |= LOCKDEP_NODE_TEMP_MARK; in lockdep_visit()
223 STAILQ_FOREACH(e, &node->edges, link) { in lockdep_visit()
224 TEE_Result res = lockdep_visit(e->to); in lockdep_visit()
230 node->flags |= LOCKDEP_NODE_PERM_MARK; in lockdep_visit()
239 if (!node->flags) { in lockdep_graph_sort()
249 node->flags = 0; in lockdep_graph_sort()
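The lockdep_visit() and lockdep_graph_sort() hits are the standard depth-first topological sort with temporary and permanent marks: reaching a temporarily marked node again means a back edge, i.e. a cycle in the acquired-before graph and therefore a potential deadlock. A minimal recursive sketch of that marking scheme follows; node_t, visit(), graph_is_acyclic() and the flag values are illustrative names, not the lockdep ones.

#include <stdbool.h>
#include <stddef.h>

#define TEMP_MARK 0x1   /* node is on the current DFS path */
#define PERM_MARK 0x2   /* node and all of its successors are cycle-free */

struct node_t {
        unsigned int flags;
        struct node_t **succ;   /* NULL-terminated array of successors */
};

/* Return false if a cycle is reachable from @n */
static bool visit(struct node_t *n)
{
        size_t i = 0;

        if (n->flags & PERM_MARK)
                return true;    /* already fully explored, nothing to redo */
        if (n->flags & TEMP_MARK)
                return false;   /* back edge: cycle detected */

        n->flags |= TEMP_MARK;
        for (i = 0; n->succ && n->succ[i]; i++)
                if (!visit(n->succ[i]))
                        return false;
        n->flags |= PERM_MARK;  /* leaves the DFS path for good */
        return true;
}

/* Run visit() from every unmarked node, then clear the marks for reuse */
static bool graph_is_acyclic(struct node_t **nodes, size_t count)
{
        bool ok = true;
        size_t i = 0;

        for (i = 0; i < count && ok; i++)
                if (!nodes[i]->flags)
                        ok = visit(nodes[i]);
        for (i = 0; i < count; i++)
                nodes[i]->flags = 0;
        return ok;
}

The two matched lines from lockdep_graph_sort() correspond to the last two steps here: start from every unmarked node, and clear node->flags once the sort is done.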
261 if (node->lock_id == from) in lockdep_find_edge()
262 STAILQ_FOREACH(edge, &node->edges, link) in lockdep_find_edge()
263 if (edge->to->lock_id == to) in lockdep_find_edge()
271 uintptr_t __maybe_unused to = edge->to->lock_id; in lockdep_print_edge_info()
280 EMSG_RAW("-> Thread %#" PRIxPTR " acquired lock %#" PRIxPTR "%s", in lockdep_print_edge_info()
281 edge->thread_id, to, at_msg); in lockdep_print_edge_info()
282 lockdep_print_call_stack(edge->call_stack_to); in lockdep_print_edge_info()
285 lockdep_print_call_stack(edge->call_stack_from); in lockdep_print_edge_info()
303 EMSG_RAW("-> Shortest cycle:"); in lockdep_print_cycle_info()
349 res = lockdep_add_edge(lock->node, node, lock->call_stack, in __lockdep_lock_acquire()
367 lock->node = node; in __lockdep_lock_acquire()
368 lock->call_stack = acq_stack; in __lockdep_lock_acquire()
376 * Similar to __lockdep_lock_acquire(), but since the operation is non-blocking,
396 lock->node = node; in __lockdep_lock_tryacquire()
397 lock->call_stack = acq_stack; in __lockdep_lock_tryacquire()
408 if (lock->node->lock_id == id) { in __lockdep_lock_release()
410 free(lock->call_stack); in __lockdep_lock_release()
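Together, the __lockdep_lock_acquire(), __lockdep_lock_tryacquire() and __lockdep_lock_release() hits show the three hook points a lock implementation drives: before blocking on a lock, add an edge from every lock the thread already owns to the requested one and record the new lock as owned; after a successful trylock, only record ownership, since a non-blocking attempt cannot deadlock by waiting; on unlock, drop the lock from the owned list. The sketch below shows that calling pattern with simplified per-thread bookkeeping; all names here (held_locks, graph_add_edge(), deadlock_check_acquire(), ...) are hypothetical stand-ins, not the real lockdep API.

#include <stdbool.h>
#include <stdint.h>

#define MAX_HELD 16

/*
 * Hypothetical per-thread bookkeeping: IDs of the locks the current thread
 * holds.  A real implementation keeps this per thread and also records the
 * acquisition call stacks shown in the matched lines.
 */
struct held_locks {
        uintptr_t id[MAX_HELD];
        unsigned int count;
};

/*
 * Stand-in for the real graph update: record "from is acquired before to"
 * and re-run the cycle check.  Always succeeds here; see the DFS sketch
 * above for what the check looks like.
 */
static bool graph_add_edge(uintptr_t from, uintptr_t to)
{
        (void)from;
        (void)to;
        return true;
}

/* Hook before blocking on the lock identified by @id */
static bool deadlock_check_acquire(struct held_locks *held, uintptr_t id)
{
        unsigned int i = 0;

        /* Every already-held lock must order before the new one */
        for (i = 0; i < held->count; i++)
                if (!graph_add_edge(held->id[i], id))
                        return false;   /* ordering violation: report it */
        if (held->count < MAX_HELD)
                held->id[held->count++] = id;
        return true;
}

/* Hook after a successful trylock: it cannot block, so no new edges needed */
static void record_tryacquire(struct held_locks *held, uintptr_t id)
{
        if (held->count < MAX_HELD)
                held->id[held->count++] = id;
}

/* Hook on unlock: forget that @id is held */
static void record_release(struct held_locks *held, uintptr_t id)
{
        unsigned int i = 0;

        for (i = 0; i < held->count; i++) {
                if (held->id[i] == id) {
                        held->id[i] = held->id[--held->count];
                        return;
                }
        }
}

In the matched lines the owned-lock entries also keep the node pointer and the acquisition call stack (lock->node, lock->call_stack), which is what lets the cycle report print where each ordering was established.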
422 free(edge->call_stack_from); in lockdep_free_edge()
423 free(edge->call_stack_to); in lockdep_free_edge()
432 STAILQ_FOREACH_SAFE(edge, &node->edges, link, next) in lockdep_node_delete()
474 edge = STAILQ_FIRST(&from->edges); in lockdep_node_destroy()
475 while (edge && edge->to == node) { in lockdep_node_destroy()
476 STAILQ_REMOVE_HEAD(&from->edges, link); in lockdep_node_destroy()
478 edge = STAILQ_FIRST(&from->edges); in lockdep_node_destroy()
486 if (next->to == node) { in lockdep_node_destroy()
487 STAILQ_REMOVE_AFTER(&from->edges, edge, link); in lockdep_node_destroy()
496 STAILQ_FOREACH_SAFE(edge, &node->edges, link, next) in lockdep_node_destroy()
508 if (node->lock_id == lock_id) { in lockdep_lock_destroy()
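The lockdep_node_destroy() and lockdep_lock_destroy() hits illustrate the pattern for pruning a singly linked STAILQ in place: matching entries at the head are popped with STAILQ_REMOVE_HEAD(), and later matches are unlinked through their predecessor with STAILQ_REMOVE_AFTER(), since the queue has no back links. The self-contained example below applies the same two-phase pattern to a trivial list of integers; struct item and remove_matching() are made up for the illustration.

#include <stdlib.h>
#include <sys/queue.h>  /* needs a BSD queue.h that provides STAILQ_REMOVE_AFTER */

struct item {
        int value;
        STAILQ_ENTRY(item) link;
};

STAILQ_HEAD(item_list, item);

/* Remove and free every element whose value equals @v */
static void remove_matching(struct item_list *list, int v)
{
        struct item *it = STAILQ_FIRST(list);

        /* Phase 1: matching entries at the head of the queue */
        while (it && it->value == v) {
                STAILQ_REMOVE_HEAD(list, link);
                free(it);
                it = STAILQ_FIRST(list);
        }

        /* Phase 2: matching entries after a non-matching predecessor */
        while (it) {
                struct item *next = STAILQ_NEXT(it, link);

                if (next && next->value == v) {
                        STAILQ_REMOVE_AFTER(list, it, link);
                        free(next);
                        /* Stay on @it: its new successor must be checked too */
                } else {
                        it = next;
                }
        }
}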