// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2018, Linaro Limited
 */

#include <assert.h>
#include <kernel/lockdep.h>
#include <kernel/unwind.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <util.h>

/* lockdep_node::flags values */
/* Flags used for depth-first topological sorting */
#define LOCKDEP_NODE_TEMP_MARK		BIT(0)
#define LOCKDEP_NODE_PERM_MARK		BIT(1)
/* Flag used during breadth-first search (print shortest cycle) */
#define LOCKDEP_NODE_BFS_VISITED	BIT(2)

/* Find node in graph or add it */
static struct lockdep_node *lockdep_add_to_graph(
				struct lockdep_node_head *graph,
				uintptr_t lock_id)
{
	struct lockdep_node *node = NULL;

	assert(graph);
	TAILQ_FOREACH(node, graph, link)
		if (node->lock_id == lock_id)
			return node;

	node = calloc(1, sizeof(*node));
	if (!node)
		return NULL;

	node->lock_id = lock_id;
	STAILQ_INIT(&node->edges);
	TAILQ_INSERT_TAIL(graph, node, link);

	return node;
}

/* Duplicate a zero-terminated call stack, including the terminating entry */
static vaddr_t *dup_call_stack(vaddr_t *stack)
{
	if (!stack)
		return NULL;

	int n = 0;

	while (stack[n])
		n++;

	vaddr_t *nstack = malloc((n + 1) * sizeof(vaddr_t));

	if (!nstack)
		return NULL;

	memcpy(nstack, stack, (n + 1) * sizeof(vaddr_t));
	return nstack;
}

static void lockdep_print_call_stack(vaddr_t *stack)
{
	vaddr_t *p = NULL;

	EMSG_RAW("Call stack:");
	for (p = stack; p && *p; p++)
		EMSG_RAW(" %#" PRIxPTR, *p);
}
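
/*
 * Add a directed edge @from -> @to to the graph, meaning that the lock
 * represented by @to was acquired by thread @thread_id while the lock
 * represented by @from was already held. Duplicate edges are ignored. The
 * call stacks of both acquisitions are copied so they can be reported if a
 * cycle is later detected.
 */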
static TEE_Result lockdep_add_edge(struct lockdep_node *from,
				   struct lockdep_node *to,
				   vaddr_t *call_stack_from,
				   vaddr_t *call_stack_to,
				   uintptr_t thread_id)
{
	struct lockdep_edge *edge = NULL;

	STAILQ_FOREACH(edge, &from->edges, link)
		if (edge->to == to)
			return TEE_SUCCESS;

	edge = calloc(1, sizeof(*edge));
	if (!edge)
		return TEE_ERROR_OUT_OF_MEMORY;
	edge->to = to;
	edge->call_stack_from = dup_call_stack(call_stack_from);
	edge->call_stack_to = dup_call_stack(call_stack_to);
	edge->thread_id = thread_id;
	STAILQ_INSERT_TAIL(&from->edges, edge, link);

	return TEE_SUCCESS;
}
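
/*
 * Element of the work list used by the breadth-first search in
 * lockdep_graph_get_shortest_cycle(). @path is the sequence of lock IDs from
 * the start node to @node and @pathlen its number of elements.
 */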
struct lockdep_bfs {
	struct lockdep_node *node;
	uintptr_t *path;
	size_t pathlen;
	TAILQ_ENTRY(lockdep_bfs) link;
};

TAILQ_HEAD(lockdep_bfs_head, lockdep_bfs);

static void lockdep_bfs_queue_delete(struct lockdep_bfs_head *queue)
{
	struct lockdep_bfs *cur = NULL;
	struct lockdep_bfs *next = NULL;

	TAILQ_FOREACH_SAFE(cur, queue, link, next) {
		TAILQ_REMOVE(queue, cur, link);
		free(cur->path);
		free(cur);
	}
}

/*
 * Return the shortest cycle in the lock graph that contains @node, as a
 * zero-terminated array of lock IDs to be freed by the caller (NULL on
 * failure). This function performs an iterative breadth-first search
 * starting from @node and stops when it reaches @node again. Each queue
 * element keeps track of the path from the start node.
 */
static uintptr_t *lockdep_graph_get_shortest_cycle(struct lockdep_node *node)
{
	struct lockdep_bfs_head queue;
	struct lockdep_bfs *qe = NULL;
	uintptr_t *ret = NULL;

	TAILQ_INIT(&queue);
	node->flags |= LOCKDEP_NODE_BFS_VISITED;

	qe = calloc(1, sizeof(*qe));
	if (!qe)
		goto out;
	qe->node = node;
	qe->path = malloc(sizeof(uintptr_t));
	if (!qe->path)
		goto out;
	qe->path[0] = node->lock_id;
	qe->pathlen = 1;
	TAILQ_INSERT_TAIL(&queue, qe, link);

	while (!TAILQ_EMPTY(&queue)) {
		qe = TAILQ_FIRST(&queue);

		struct lockdep_node *n = qe->node;

		TAILQ_REMOVE(&queue, qe, link);

		struct lockdep_edge *e = NULL;

		STAILQ_FOREACH(e, &n->edges, link) {
			if (e->to->lock_id == node->lock_id) {
				uintptr_t *tmp = NULL;
				size_t nlen = qe->pathlen + 1;

				/*
				 * Cycle found. Terminate the path with a zero
				 * entry and return it.
				 */
				tmp = realloc(qe->path,
					      nlen * sizeof(uintptr_t));
				if (!tmp) {
					EMSG("Out of memory");
					free(qe->path);
					ret = NULL;
					goto out;
				}
				qe->path = tmp;
				qe->path[nlen - 1] = 0;
				ret = qe->path;
				goto out;
			}

			if (!(e->to->flags & LOCKDEP_NODE_BFS_VISITED)) {
				e->to->flags |= LOCKDEP_NODE_BFS_VISITED;

				size_t nlen = qe->pathlen + 1;
				struct lockdep_bfs *nqe = calloc(1,
								 sizeof(*nqe));

				if (!nqe)
					goto out;
				nqe->node = e->to;
				nqe->path = malloc(nlen * sizeof(uintptr_t));
				if (!nqe->path)
					goto out;
				nqe->pathlen = nlen;
				memcpy(nqe->path, qe->path,
				       qe->pathlen * sizeof(uintptr_t));
				nqe->path[nlen - 1] = e->to->lock_id;
				TAILQ_INSERT_TAIL(&queue, nqe, link);
			}
		}
		free(qe->path);
		free(qe);
		qe = NULL;
	}

out:
	free(qe);
	lockdep_bfs_queue_delete(&queue);
	return ret;
}
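
/*
 * Depth-first traversal used for cycle detection: a node carries a temporary
 * mark while it is on the current DFS path and a permanent mark once all of
 * its descendants have been visited. Reaching a temporarily-marked node again
 * means the graph is not a DAG, i.e. a lock cycle exists.
 */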
static TEE_Result lockdep_visit(struct lockdep_node *node)
{
	if (node->flags & LOCKDEP_NODE_PERM_MARK)
		return TEE_SUCCESS;

	if (node->flags & LOCKDEP_NODE_TEMP_MARK)
		return TEE_ERROR_BAD_STATE; /* Not a DAG! */

	node->flags |= LOCKDEP_NODE_TEMP_MARK;

	struct lockdep_edge *e = NULL;

	STAILQ_FOREACH(e, &node->edges, link) {
		TEE_Result res = lockdep_visit(e->to);

		if (res)
			return res;
	}

	node->flags |= LOCKDEP_NODE_PERM_MARK;
	return TEE_SUCCESS;
}

static TEE_Result lockdep_graph_sort(struct lockdep_node_head *graph)
{
	struct lockdep_node *node = NULL;

	TAILQ_FOREACH(node, graph, link) {
		if (!node->flags) {
			/* Unmarked node */
			TEE_Result res = lockdep_visit(node);

			if (res)
				return res;
		}
	}

	TAILQ_FOREACH(node, graph, link)
		node->flags = 0;

	return TEE_SUCCESS;
}
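
/* Return the edge @from -> @to of @graph, or NULL if there is no such edge */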
static struct lockdep_edge *lockdep_find_edge(struct lockdep_node_head *graph,
					      uintptr_t from, uintptr_t to)
{
	struct lockdep_node *node = NULL;
	struct lockdep_edge *edge = NULL;

	TAILQ_FOREACH(node, graph, link)
		if (node->lock_id == from)
			STAILQ_FOREACH(edge, &node->edges, link)
				if (edge->to->lock_id == to)
					return edge;
	return NULL;
}

static void lockdep_print_edge_info(uintptr_t from __maybe_unused,
				    struct lockdep_edge *edge)
{
	uintptr_t __maybe_unused to = edge->to->lock_id;

	EMSG_RAW("-> Thread %#" PRIxPTR " acquired lock %#" PRIxPTR " at:",
		 edge->thread_id, to);
	lockdep_print_call_stack(edge->call_stack_to);
	EMSG_RAW("...while holding lock %#" PRIxPTR " acquired at:",
		 from);
	lockdep_print_call_stack(edge->call_stack_from);
}

/*
 * Find cycle containing @node in the lock graph, then print full debug
 * information about each edge (thread that acquired the locks and call stacks)
 */
static void lockdep_print_cycle_info(struct lockdep_node_head *graph,
				     struct lockdep_node *node)
{
	struct lockdep_edge *edge = NULL;
	uintptr_t *cycle = NULL;
	uintptr_t *p = NULL;
	uintptr_t from = 0;
	uintptr_t to = 0;

	cycle = lockdep_graph_get_shortest_cycle(node);
	assert(cycle && cycle[0]);
	EMSG_RAW("-> Shortest cycle:");
	for (p = cycle; *p; p++)
		EMSG_RAW(" Lock %#" PRIxPTR, *p);
	for (p = cycle; ; p++) {
		if (!*p) {
			assert(p != cycle);
			from = to;
			to = cycle[0];
			edge = lockdep_find_edge(graph, from, to);
			lockdep_print_edge_info(from, edge);
			break;
		}
		if (p != cycle)
			from = to;
		to = *p;
		if (p != cycle) {
			edge = lockdep_find_edge(graph, from, to);
			lockdep_print_edge_info(from, edge);
		}
	}
	free(cycle);
}
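
/*
 * Register the pending acquisition of lock @id by the thread whose list of
 * held locks is @owned. An edge "held lock -> @id" is added to @graph for
 * each lock in @owned, then the graph is checked for cycles. On success the
 * lock is appended to @owned; if a cycle is found, diagnostics are printed
 * and TEE_ERROR_BAD_STATE is returned.
 *
 * Sketch of the expected calling pattern (the caller's variable names are
 * illustrative only), typically run just before actually taking the lock,
 * with the lock address used as @id:
 *
 *	res = __lockdep_lock_acquire(&graph, &owned_locks, (uintptr_t)lock);
 *	if (res)
 *		return res;
 */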
TEE_Result __lockdep_lock_acquire(struct lockdep_node_head *graph,
				  struct lockdep_lock_head *owned,
				  uintptr_t id)
{
	struct lockdep_node *node = lockdep_add_to_graph(graph, id);

	if (!node)
		return TEE_ERROR_OUT_OF_MEMORY;

	struct lockdep_lock *lock = NULL;
	vaddr_t *acq_stack = unw_get_kernel_stack();

	TAILQ_FOREACH(lock, owned, link) {
		TEE_Result res = lockdep_add_edge(lock->node, node,
						  lock->call_stack,
						  acq_stack,
						  (uintptr_t)owned);

		if (res)
			return res;
	}

	TEE_Result res = lockdep_graph_sort(graph);

	if (res) {
		EMSG_RAW("Potential deadlock detected!");
		EMSG_RAW("When trying to acquire lock %#" PRIxPTR, id);
		lockdep_print_cycle_info(graph, node);
		return res;
	}

	lock = calloc(1, sizeof(*lock));
	if (!lock)
		return TEE_ERROR_OUT_OF_MEMORY;

	lock->node = node;
	lock->call_stack = acq_stack;
	TAILQ_INSERT_TAIL(owned, lock, link);

	return TEE_SUCCESS;
}
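
/*
 * Remove lock @id from the list @owned of locks held by the current thread.
 * The list is searched starting from the most recently acquired lock.
 */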
TEE_Result __lockdep_lock_release(struct lockdep_lock_head *owned, uintptr_t id)
{
	struct lockdep_lock *lock = NULL;

	TAILQ_FOREACH_REVERSE(lock, owned, lockdep_lock_head, link) {
		if (lock->node->lock_id == id) {
			TAILQ_REMOVE(owned, lock, link);
			free(lock->call_stack);
			free(lock);
			return TEE_SUCCESS;
		}
	}

	EMSG_RAW("Thread %p does not own lock %#" PRIxPTR, (void *)owned, id);
	return TEE_ERROR_ITEM_NOT_FOUND;
}

static void lockdep_node_delete(struct lockdep_node *node)
{
	struct lockdep_edge *edge = NULL;
	struct lockdep_edge *next = NULL;

	STAILQ_FOREACH_SAFE(edge, &node->edges, link, next) {
		free(edge->call_stack_from);
		free(edge->call_stack_to);
		free(edge);
	}
	free(node);
}

void lockdep_graph_delete(struct lockdep_node_head *graph)
{
	struct lockdep_node *node = NULL;
	struct lockdep_node *next = NULL;

	TAILQ_FOREACH_SAFE(node, graph, link, next) {
		TAILQ_REMOVE(graph, node, link);
		lockdep_node_delete(node);
	}
}

void lockdep_queue_delete(struct lockdep_lock_head *owned)
{
	struct lockdep_lock *lock = NULL;
	struct lockdep_lock *next = NULL;

	TAILQ_FOREACH_SAFE(lock, owned, link, next) {
		TAILQ_REMOVE(owned, lock, link);
		free(lock);
	}
}