// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2021, Linaro Limited
 */

#include <compiler.h>
#include <kernel/mutex.h>
#include <kernel/notif.h>
#include <kernel/spinlock.h>
#include <kernel/thread.h>
#include <kernel/wait_queue.h>
#include <tee_api_defines.h>
#include <trace.h>
#include <types_ext.h>

/* A single lock protecting every wait queue in the system */
static unsigned int wq_spin_lock;

void wq_init(struct wait_queue *wq)
{
	*wq = (struct wait_queue)WAIT_QUEUE_INITIALIZER;
}

/*
 * Sleep or wake a thread by waiting for, or sending, the synchronous
 * notification value derived from its thread id.
 */
static TEE_Result do_notif(bool wait, int id, uint32_t timeout_ms,
			   const char *cmd_str __maybe_unused,
			   const void *sync_obj __maybe_unused,
			   const char *fname, int lineno __maybe_unused)
{
	TEE_Result res = TEE_SUCCESS;

	if (fname)
		DMSG("%s thread %d %p %s:%d", cmd_str, id, sync_obj, fname,
		     lineno);
	else
		DMSG("%s thread %d %p", cmd_str, id, sync_obj);

	if (wait)
		res = notif_wait_timeout(id + NOTIF_SYNC_VALUE_BASE,
					 timeout_ms);
	else
		res = notif_send_sync(id + NOTIF_SYNC_VALUE_BASE);
	if (res)
		DMSG("%s thread %d res %#"PRIx32, cmd_str, id, res);

	return res;
}

static void slist_add_tail(struct wait_queue *wq, struct wait_queue_elem *wqe)
{
	struct wait_queue_elem *wqe_iter = NULL;

	/* Add elem to end of wait queue */
	wqe_iter = SLIST_FIRST(wq);
	if (wqe_iter) {
		while (SLIST_NEXT(wqe_iter, link))
			wqe_iter = SLIST_NEXT(wqe_iter, link);
		SLIST_INSERT_AFTER(wqe_iter, wqe, link);
	} else
		SLIST_INSERT_HEAD(wq, wqe, link);
}

/*
 * Enqueue the calling thread on the wait queue. The thread does not
 * block here; it blocks later in wq_wait_final(). Enqueueing first and
 * sleeping in a second step makes it possible to release the lock
 * protecting the shared resource in between without missing a wakeup.
 */
void wq_wait_init_condvar(struct wait_queue *wq, struct wait_queue_elem *wqe,
			  struct condvar *cv, bool wait_read)
{
	uint32_t old_itr_status = 0;

	wqe->handle = thread_get_id();
	wqe->done = false;
	wqe->wait_read = wait_read;
	wqe->cv = cv;

	old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);

	slist_add_tail(wq, wqe);

	cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);
}

/*
 * Block until the queue element is marked done by wq_wake_next() or
 * the timeout expires. The done flag is sampled while the spinlock is
 * held, before the element is unlinked, so a concurrent wakeup cannot
 * be misreported as a timeout.
 */
TEE_Result wq_wait_final(struct wait_queue *wq, struct wait_queue_elem *wqe,
			 uint32_t timeout_ms, const void *sync_obj,
			 const char *fname, int lineno)
{
	uint32_t old_itr_status = 0;
	bool done = false;

	do_notif(true, wqe->handle, timeout_ms, "sleep", sync_obj, fname,
		 lineno);

	old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);
	done = wqe->done;
	SLIST_REMOVE(wq, wqe, wait_queue_elem, link);
	cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);

	if (done)
		return TEE_SUCCESS;

	return TEE_ERROR_TIMEOUT;
}
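/*
 * Illustrative sketch, not part of the wait queue API: a minimal
 * sleeping lock layered on the functions above, assuming a hypothetical
 * "struct demo_lock" initialized with spin_lock = SPINLOCK_UNLOCK,
 * locked = false and wq = WAIT_QUEUE_INITIALIZER. The real in-tree
 * user of this API is the mutex/condvar implementation. The point is
 * the ordering of the protocol: enqueue while still holding the
 * spinlock that protects the shared state, release that spinlock, and
 * only then block in wq_wait_final(). A wakeup arriving between those
 * two steps is recorded in wqe->done and is therefore not lost.
 */
struct demo_lock {
	unsigned int spin_lock;	/* protects @locked and @wq */
	bool locked;
	struct wait_queue wq;
};

static TEE_Result __maybe_unused demo_lock_acquire(struct demo_lock *dl,
						   uint32_t timeout_ms)
{
	while (true) {
		struct wait_queue_elem wqe = { };
		uint32_t old_itr_status = 0;
		TEE_Result res = TEE_SUCCESS;

		old_itr_status = cpu_spin_lock_xsave(&dl->spin_lock);
		if (!dl->locked) {
			dl->locked = true;
			cpu_spin_unlock_xrestore(&dl->spin_lock,
						 old_itr_status);
			return TEE_SUCCESS;
		}
		/* Enqueue as a plain (non-condvar) writer-style waiter ... */
		wq_wait_init_condvar(&dl->wq, &wqe, NULL,
				     false /* !wait_read */);
		cpu_spin_unlock_xrestore(&dl->spin_lock, old_itr_status);
		/*
		 * ... and block only after the spinlock is released.
		 * Timeout semantics are those of notif_wait_timeout().
		 * On a successful wakeup, loop and compete for the lock
		 * again.
		 */
		res = wq_wait_final(&dl->wq, &wqe, timeout_ms, dl, NULL, 0);
		if (res)
			return res;
	}
}

static void __maybe_unused demo_lock_release(struct demo_lock *dl)
{
	uint32_t old_itr_status = cpu_spin_lock_xsave(&dl->spin_lock);

	dl->locked = false;
	cpu_spin_unlock_xrestore(&dl->spin_lock, old_itr_status);
	/* Wake the first pending waiter, if any */
	wq_wake_next(&dl->wq, dl, NULL, 0);
}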
void wq_wake_next(struct wait_queue *wq, const void *sync_obj,
		  const char *fname, int lineno)
{
	uint32_t old_itr_status = 0;
	struct wait_queue_elem *wqe = NULL;
	int handle = -1;
	bool do_wakeup = false;
	bool wake_type_assigned = false;
	bool wake_read = false;	/* avoid gcc warning */

	/*
	 * If the first pending wqe is waiting to read, wake up every
	 * pending wqe that is also waiting to read. If it is waiting
	 * to write, wake up only that first pending (not done) wqe.
	 */

	while (true) {
		old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);

		SLIST_FOREACH(wqe, wq, link) {
			/* Condvar waiters sleep until they're promoted */
			if (wqe->cv)
				continue;
			if (wqe->done)
				continue;
			/* The first eligible wqe decides the wake type */
			if (!wake_type_assigned) {
				wake_read = wqe->wait_read;
				wake_type_assigned = true;
			}

			if (wqe->wait_read != wake_read)
				continue;

			wqe->done = true;
			handle = wqe->handle;
			do_wakeup = true;
			break;
		}

		cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);

		if (do_wakeup)
			do_notif(false, handle, 0, "wake ", sync_obj, fname,
				 lineno);

		/* Writers are woken one at a time, readers all at once */
		if (!do_wakeup || !wake_read)
			break;
		do_wakeup = false;
	}
}

void wq_promote_condvar(struct wait_queue *wq, struct condvar *cv,
			bool only_one, const void *sync_obj __unused,
			const char *fname, int lineno __maybe_unused)
{
	uint32_t old_itr_status = 0;
	struct wait_queue_elem *wqe = NULL;

	if (!cv)
		return;

	old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);

	/*
	 * Find condvar waiter(s) and promote each to an active waiter.
	 * This is a bit unfair to other active waiters, since a condvar
	 * waiter was enqueued already when it started waiting on the
	 * condvar and may therefore be ahead of them in the queue.
	 */
	SLIST_FOREACH(wqe, wq, link) {
		if (wqe->cv == cv) {
			if (fname)
				FMSG("promote thread %u %p %s:%d",
				     wqe->handle, (void *)cv->m, fname,
				     lineno);
			else
				FMSG("promote thread %u %p",
				     wqe->handle, (void *)cv->m);

			wqe->cv = NULL;
			if (only_one)
				break;
		}
	}

	cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);
}

bool wq_have_condvar(struct wait_queue *wq, struct condvar *cv)
{
	uint32_t old_itr_status = 0;
	struct wait_queue_elem *wqe = NULL;
	bool rc = false;

	old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);

	SLIST_FOREACH(wqe, wq, link) {
		if (wqe->cv == cv) {
			rc = true;
			break;
		}
	}

	cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);

	return rc;
}

bool wq_is_empty(struct wait_queue *wq)
{
	uint32_t old_itr_status = 0;
	bool ret = false;

	old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);

	ret = SLIST_EMPTY(wq);

	cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);

	return ret;
}
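/*
 * Illustrative sketch, hypothetical and not part of this file's API:
 * one way a condition-variable signal could drive the primitives
 * above. A condvar waiter enqueues itself with wq_wait_init_condvar()
 * and a non-NULL cv, so wq_wake_next() skips it while it waits.
 * Signalling promotes the waiter(s) with wq_promote_condvar(), which
 * clears wqe->cv, and then runs the ordinary wakeup path so the
 * promoted waiter(s) compete like any other waiter. (A real
 * implementation could equally defer the wakeup until the associated
 * mutex is unlocked.)
 */
static void __maybe_unused demo_cv_signal(struct wait_queue *wq,
					  struct condvar *cv, bool wake_all)
{
	/* Turn matching condvar waiters into active waiters */
	wq_promote_condvar(wq, cv, !wake_all, NULL, NULL, 0);
	/* Wake the next eligible waiter(s) */
	wq_wake_next(wq, cv, NULL, 0);
}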