xref: /optee_os/core/kernel/wait_queue.c (revision c282ebd61200b0cb0830399c1c33514dbd129dfd)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2016, Linaro Limited
 */
#include <compiler.h>
#include <kernel/spinlock.h>
#include <kernel/thread.h>
#include <kernel/wait_queue.h>
#include <optee_rpc_cmd.h>
#include <string.h>
#include <tee_api_defines.h>
#include <trace.h>
#include <types_ext.h>

static unsigned wq_spin_lock;

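/*
 * Reset @wq to an empty wait queue.
 */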
void wq_init(struct wait_queue *wq)
{
	*wq = (struct wait_queue)WAIT_QUEUE_INITIALIZER;
}

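/*
 * Ask normal world to block (OPTEE_RPC_WAIT_QUEUE_SLEEP) or unblock
 * (OPTEE_RPC_WAIT_QUEUE_WAKEUP) the thread identified by @id. The
 * sync_obj, fname and lineno arguments are used for debug tracing only.
 */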
static void __wq_rpc(uint32_t func, int id, const void *sync_obj __maybe_unused,
		     const char *fname, int lineno __maybe_unused)
{
	uint32_t ret;
	const char *cmd_str __maybe_unused =
	     func == OPTEE_RPC_WAIT_QUEUE_SLEEP ? "sleep" : "wake ";

	if (fname)
		DMSG("%s thread %u %p %s:%d", cmd_str, id,
		     sync_obj, fname, lineno);
	else
		DMSG("%s thread %u %p", cmd_str, id, sync_obj);

	struct thread_param params = THREAD_PARAM_VALUE(IN, func, id, 0);

	ret = thread_rpc_cmd(OPTEE_RPC_CMD_WAIT_QUEUE, 1, &params);
	if (ret != TEE_SUCCESS)
		DMSG("%s thread %u ret 0x%x", cmd_str, id, ret);
}

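/*
 * SLIST has no tail pointer, so appending a waiter means walking the
 * whole list: O(n) in the number of queued waiters.
 */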
static void slist_add_tail(struct wait_queue *wq, struct wait_queue_elem *wqe)
{
	struct wait_queue_elem *wqe_iter;

	/* Add elem to end of wait queue */
	wqe_iter = SLIST_FIRST(wq);
	if (wqe_iter) {
		while (SLIST_NEXT(wqe_iter, link))
			wqe_iter = SLIST_NEXT(wqe_iter, link);
		SLIST_INSERT_AFTER(wqe_iter, wqe, link);
	} else
		SLIST_INSERT_HEAD(wq, wqe, link);
}

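/*
 * Register the calling thread as a waiter on @wq, optionally tied to
 * condvar @cv. This only queues the element; the actual sleeping is
 * done afterwards in wq_wait_final(). Call this before releasing the
 * lock that protects the contended state, otherwise a wakeup sent in
 * between could be lost.
 */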
void wq_wait_init_condvar(struct wait_queue *wq, struct wait_queue_elem *wqe,
		struct condvar *cv, bool wait_read)
{
	uint32_t old_itr_status;

	wqe->handle = thread_get_id();
	wqe->done = false;
	wqe->wait_read = wait_read;
	wqe->cv = cv;

	old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);

	slist_add_tail(wq, wqe);

	cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);
}

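/*
 * Sleep via RPC to normal world until this waiter has been marked done
 * by wq_wake_next(). The done flag is tracked in secure world, so a
 * spurious return from the RPC just loops and sleeps again.
 *
 * A minimal sketch of the intended call pattern (hypothetical caller
 * with error handling omitted; the real users are the mutex and
 * condvar implementations):
 *
 *	struct wait_queue_elem wqe;
 *
 *	// Queue ourselves while the caller's state lock is still held
 *	wq_wait_init_condvar(wq, &wqe, NULL, false);
 *	release_state_lock();	// hypothetical caller-owned lock
 *	wq_wait_final(wq, &wqe, sync_obj, __func__, __LINE__);
 */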
void wq_wait_final(struct wait_queue *wq, struct wait_queue_elem *wqe,
		   const void *sync_obj, const char *fname, int lineno)
{
	uint32_t old_itr_status;
	unsigned done;

	do {
		__wq_rpc(OPTEE_RPC_WAIT_QUEUE_SLEEP, wqe->handle,
			 sync_obj, fname, lineno);

		old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);

		done = wqe->done;
		if (done)
			SLIST_REMOVE(wq, wqe, wait_queue_elem, link);

		cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);
	} while (!done);
}

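/*
 * Wake pending waiters in FIFO order: if the first pending waiter is a
 * reader (wait_read), wake every pending reader in the queue; if it is
 * a writer, wake only that one waiter. Condvar waiters (wqe->cv set)
 * are skipped until promoted by wq_promote_condvar().
 */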
void wq_wake_next(struct wait_queue *wq, const void *sync_obj,
			const char *fname, int lineno)
{
	uint32_t old_itr_status;
	struct wait_queue_elem *wqe;
	int handle = -1;
	bool do_wakeup = false;
	bool wake_type_assigned = false;
	bool wake_read = false; /* avoid gcc warning */

	/*
	 * If the next pending waiter has wait_read set, wake up all
	 * waiters with wait_read true. Otherwise wake up only the
	 * first waiter which isn't done.
	 */

	while (true) {
		old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);

		SLIST_FOREACH(wqe, wq, link) {
			if (wqe->cv)
				continue;
			if (wqe->done)
				continue;
			if (!wake_type_assigned) {
				wake_read = wqe->wait_read;
				wake_type_assigned = true;
			}

			if (wqe->wait_read != wake_read)
				continue;

			wqe->done = true;
			handle = wqe->handle;
			do_wakeup = true;
			break;
		}

		cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);

		if (do_wakeup)
			__wq_rpc(OPTEE_RPC_WAIT_QUEUE_WAKEUP, handle,
				 sync_obj, fname, lineno);

		if (!do_wakeup || !wake_read)
			break;
		do_wakeup = false;
	}
}

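/*
 * Turn the waiters blocked on @cv (just the first one if @only_one)
 * into active waiters by clearing their cv pointer. This wakes nothing
 * by itself; a later wq_wake_next(), e.g. from the mutex unlock path,
 * picks the promoted waiters up.
 */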
void wq_promote_condvar(struct wait_queue *wq, struct condvar *cv,
			bool only_one, const void *sync_obj __unused,
			const char *fname, int lineno __maybe_unused)
{
	uint32_t old_itr_status;
	struct wait_queue_elem *wqe;

	if (!cv)
		return;

	old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);

	/*
	 * Find condvar waiter(s) and promote each to an active waiter.
	 * This is a bit unfair to any other active waiters, as a
	 * condvar waiter was queued already when it started to wait
	 * for the condvar.
	 */
	SLIST_FOREACH(wqe, wq, link) {
		if (wqe->cv == cv) {
			if (fname)
				FMSG("promote thread %u %p %s:%d",
				     wqe->handle, (void *)cv->m, fname, lineno);
			else
				FMSG("promote thread %u %p",
				     wqe->handle, (void *)cv->m);

			wqe->cv = NULL;
			if (only_one)
				break;
		}
	}

	cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);
}

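/*
 * Return true if at least one waiter in @wq is blocked on @cv.
 */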
bool wq_have_condvar(struct wait_queue *wq, struct condvar *cv)
{
	uint32_t old_itr_status;
	struct wait_queue_elem *wqe;
	bool rc = false;

	old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);

	SLIST_FOREACH(wqe, wq, link) {
		if (wqe->cv == cv) {
			rc = true;
			break;
		}
	}

	cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);

	return rc;
}

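/*
 * Return true if @wq has no waiters at all, active or condvar. The
 * result is a snapshot taken under the wait queue spinlock and may be
 * stale unless the caller serializes against new waiters itself.
 */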
bool wq_is_empty(struct wait_queue *wq)
{
	uint32_t old_itr_status;
	bool ret;

	old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);

	ret = SLIST_EMPTY(wq);

	cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);

	return ret;
}