xref: /optee_os/core/kernel/wait_queue.c (revision 49286073c91e225524563320e42ee35f1fee9167)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2021, Linaro Limited
 */

#include <compiler.h>
#include <kernel/notif.h>
#include <kernel/spinlock.h>
#include <kernel/thread.h>
#include <kernel/wait_queue.h>
#include <tee_api_defines.h>
#include <trace.h>
#include <types_ext.h>

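/*
 * A single global spinlock serializes access to every wait queue and
 * to the done and cv fields of the queued elements.
 */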
static unsigned wq_spin_lock;

void wq_init(struct wait_queue *wq)
{
	*wq = (struct wait_queue)WAIT_QUEUE_INITIALIZER;
}

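/*
 * Illustrative note: a wait queue can equally be initialized statically
 * with the same macro, avoiding the wq_init() call:
 *
 *	static struct wait_queue my_wq = WAIT_QUEUE_INITIALIZER;
 */

/*
 * Run the supplied notification function (notif_wait() or
 * notif_send_sync()) for the thread identified by @id, offsetting the
 * thread ID by NOTIF_SYNC_VALUE_BASE to form the synchronous
 * notification value. When @fname is non-NULL the trace line includes
 * the caller's source location.
 */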
static void do_notif(TEE_Result (*fn)(uint32_t), int id,
		     const char *cmd_str __maybe_unused,
		     const void *sync_obj __maybe_unused,
		     const char *fname, int lineno __maybe_unused)
{
	TEE_Result res = TEE_SUCCESS;

	if (fname)
		DMSG("%s thread %d %p %s:%d", cmd_str, id,
		     sync_obj, fname, lineno);
	else
		DMSG("%s thread %d %p", cmd_str, id, sync_obj);

	res = fn(id + NOTIF_SYNC_VALUE_BASE);
	if (res)
		DMSG("%s thread %d res %#"PRIx32, cmd_str, id, res);
}

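/*
 * SLIST lacks a tail pointer, so appending requires walking to the end
 * of the list. The walk keeps waiters in FIFO order, which is what
 * makes wq_wake_next() wake the longest waiting thread first.
 */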
static void slist_add_tail(struct wait_queue *wq, struct wait_queue_elem *wqe)
{
	struct wait_queue_elem *wqe_iter;

	/* Add elem to end of wait queue */
	wqe_iter = SLIST_FIRST(wq);
	if (wqe_iter) {
		while (SLIST_NEXT(wqe_iter, link))
			wqe_iter = SLIST_NEXT(wqe_iter, link);
		SLIST_INSERT_AFTER(wqe_iter, wqe, link);
	} else
		SLIST_INSERT_HEAD(wq, wqe, link);
}

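/*
 * Queue the calling thread on @wq without blocking. A non-NULL @cv
 * marks the element as a condvar waiter, which wq_wake_next() skips
 * until it has been promoted by wq_promote_condvar(). Must be paired
 * with wq_wait_final(), which performs the actual sleep.
 *
 * A minimal sketch of the calling pattern, assuming the wq_wait_init()
 * helper from <kernel/wait_queue.h> (which passes cv == NULL), with
 * hypothetical resource_is_busy()/lock_resource()/unlock_resource()
 * standing in for the caller's own synchronization:
 *
 *	struct wait_queue_elem wqe = { };
 *
 *	while (resource_is_busy()) {
 *		wq_wait_init(&wq, &wqe, false);
 *		unlock_resource();
 *		wq_wait_final(&wq, &wqe, &wq, __func__, __LINE__);
 *		lock_resource();
 *	}
 */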
void wq_wait_init_condvar(struct wait_queue *wq, struct wait_queue_elem *wqe,
		struct condvar *cv, bool wait_read)
{
	uint32_t old_itr_status;

	wqe->handle = thread_get_id();
	wqe->done = false;
	wqe->wait_read = wait_read;
	wqe->cv = cv;

	old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);

	slist_add_tail(wq, wqe);

	cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);
}

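/*
 * Block the calling thread until a waker has set wqe->done, then unlink
 * the element from the queue. The sleep itself is notif_wait(); since a
 * notification may arrive without done being set, the loop re-checks
 * done under the spinlock and goes back to sleep on a spurious wakeup.
 */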
void wq_wait_final(struct wait_queue *wq, struct wait_queue_elem *wqe,
		   const void *sync_obj, const char *fname, int lineno)
{
	uint32_t old_itr_status;
	unsigned done;

	do {
		do_notif(notif_wait, wqe->handle,
			 "sleep", sync_obj, fname, lineno);

		old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);

		done = wqe->done;
		if (done)
			SLIST_REMOVE(wq, wqe, wait_queue_elem, link);

		cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);
	} while (!done);
}

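/*
 * Wake the thread(s) at the head of @wq: either a batch of readers or a
 * single writer. Typically called when the resource guarded by the
 * queue is released, e.g. from the mutex implementation.
 */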
void wq_wake_next(struct wait_queue *wq, const void *sync_obj,
			const char *fname, int lineno)
{
	uint32_t old_itr_status;
	struct wait_queue_elem *wqe;
	int handle = -1;
	bool do_wakeup = false;
	bool wake_type_assigned = false;
	bool wake_read = false; /* avoid gcc warning */

	/*
	 * If the first pending waiter is a reader (wait_read), wake all
	 * pending readers, one per iteration of the outer loop. If it
	 * is a writer, wake only that first pending waiter.
	 */

	while (true) {
		old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);

		SLIST_FOREACH(wqe, wq, link) {
			if (wqe->cv)
				continue;
			if (wqe->done)
				continue;
			if (!wake_type_assigned) {
				wake_read = wqe->wait_read;
				wake_type_assigned = true;
			}

			if (wqe->wait_read != wake_read)
				continue;

			wqe->done = true;
			handle = wqe->handle;
			do_wakeup = true;
			break;
		}

		cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);

		if (do_wakeup)
			do_notif(notif_send_sync, handle,
				 "wake ", sync_obj, fname, lineno);

		if (!do_wakeup || !wake_read)
			break;
		do_wakeup = false;
	}
}

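/*
 * Promote the waiter(s) queued on @cv to regular wait queue waiters by
 * clearing wqe->cv, making them eligible for wq_wake_next(). With
 * @only_one set only the first matching waiter is promoted (signal
 * semantics); otherwise all of them are (broadcast semantics).
 */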
void wq_promote_condvar(struct wait_queue *wq, struct condvar *cv,
			bool only_one, const void *sync_obj __unused,
			const char *fname, int lineno __maybe_unused)
{
	uint32_t old_itr_status;
	struct wait_queue_elem *wqe;

	if (!cv)
		return;

	old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);

	/*
	 * Find the condvar waiter(s) and promote each to an active
	 * waiter. This is somewhat unfair to other active waiters,
	 * since a condvar waiter was added to the queue already when it
	 * started waiting on the condvar and is therefore queued ahead
	 * of them.
	 */
	SLIST_FOREACH(wqe, wq, link) {
		if (wqe->cv == cv) {
			if (fname)
				FMSG("promote thread %u %p %s:%d",
				     wqe->handle, (void *)cv->m, fname, lineno);
			else
				FMSG("promote thread %u %p",
				     wqe->handle, (void *)cv->m);

			wqe->cv = NULL;
			if (only_one)
				break;
		}
	}

	cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);
}

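/*
 * Return true if any element on @wq is still waiting on @cv, i.e. has
 * not yet been promoted by wq_promote_condvar().
 */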
bool wq_have_condvar(struct wait_queue *wq, struct condvar *cv)
{
	uint32_t old_itr_status;
	struct wait_queue_elem *wqe;
	bool rc = false;

	old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);

	SLIST_FOREACH(wqe, wq, link) {
		if (wqe->cv == cv) {
			rc = true;
			break;
		}
	}

	cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);

	return rc;
}

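/*
 * Return true if no element is queued on @wq, for instance to check
 * that a synchronization object about to be destroyed has no waiters
 * left.
 */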
bool wq_is_empty(struct wait_queue *wq)
{
	uint32_t old_itr_status;
	bool ret;

	old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);

	ret = SLIST_EMPTY(wq);

	cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);

	return ret;
}
211