// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2016, Linaro Limited
 */
#include <compiler.h>
#include <kernel/mutex.h>
#include <kernel/spinlock.h>
#include <kernel/thread.h>
#include <kernel/wait_queue.h>
#include <optee_rpc_cmd.h>
#include <string.h>
#include <tee_api_defines.h>
#include <trace.h>
#include <types_ext.h>

static unsigned wq_spin_lock;

void wq_init(struct wait_queue *wq)
{
	*wq = (struct wait_queue)WAIT_QUEUE_INITIALIZER;
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
void __weak __wq_rpc(uint32_t func, int id, const void *sync_obj __maybe_unused,
		     const char *fname, int lineno __maybe_unused)
{
	uint32_t ret;
	struct thread_param params = THREAD_PARAM_VALUE(IN, func, id, 0);
	const char *cmd_str __maybe_unused =
	     func == OPTEE_RPC_WAIT_QUEUE_SLEEP ? "sleep" : "wake ";

	if (fname)
		DMSG("%s thread %u %p %s:%d", cmd_str, id,
		     sync_obj, fname, lineno);
	else
		DMSG("%s thread %u %p", cmd_str, id, sync_obj);

	ret = thread_rpc_cmd(OPTEE_RPC_CMD_WAIT_QUEUE, 1, &params);
	if (ret != TEE_SUCCESS)
		DMSG("%s thread %u ret 0x%x", cmd_str, id, ret);
}

static void slist_add_tail(struct wait_queue *wq, struct wait_queue_elem *wqe)
{
	struct wait_queue_elem *wqe_iter;

	/* Add elem to end of wait queue */
	wqe_iter = SLIST_FIRST(wq);
	if (wqe_iter) {
		while (SLIST_NEXT(wqe_iter, link))
			wqe_iter = SLIST_NEXT(wqe_iter, link);
		SLIST_INSERT_AFTER(wqe_iter, wqe, link);
	} else
		SLIST_INSERT_HEAD(wq, wqe, link);
}

void wq_wait_init_condvar(struct wait_queue *wq, struct wait_queue_elem *wqe,
		struct condvar *cv, bool wait_read)
{
	uint32_t old_itr_status;

	wqe->handle = thread_get_id();
	wqe->done = false;
	wqe->wait_read = wait_read;
	wqe->cv = cv;

	old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);

	slist_add_tail(wq, wqe);

	cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);
}

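/*
 * Illustrative sketch, not part of the original file: how a condvar-style
 * caller might register on a wait queue. The function name and parameters
 * below are hypothetical; real callers live in the mutex/condvar
 * implementation.
 */
static void __maybe_unused example_cv_wait(struct wait_queue *wq,
					   struct condvar *cv)
{
	struct wait_queue_elem wqe;

	/*
	 * Queue as a condvar waiter while still holding the caller's
	 * lock. wq_promote_condvar() must later clear wqe.cv before a
	 * wakeup can reach this element.
	 */
	wq_wait_init_condvar(wq, &wqe, cv, false /* wait_read */);

	/* The caller releases its mutex here, then blocks until woken */
	wq_wait_final(wq, &wqe, cv, __func__, __LINE__);
}
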
void wq_wait_final(struct wait_queue *wq, struct wait_queue_elem *wqe,
		   const void *sync_obj, const char *fname, int lineno)
{
	uint32_t old_itr_status;
	unsigned done;

	do {
		__wq_rpc(OPTEE_RPC_WAIT_QUEUE_SLEEP, wqe->handle,
			 sync_obj, fname, lineno);

		old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);

		done = wqe->done;
		if (done)
			SLIST_REMOVE(wq, wqe, wait_queue_elem, link);

		cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);
	} while (!done);
}

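/*
 * Illustrative sketch, not part of the original file: the typical
 * non-condvar sleep pattern, modeled on how a mutex-like caller would
 * use this API. Names below are hypothetical.
 */
static void __maybe_unused example_sleep(struct wait_queue *wq,
					 const void *sync_obj)
{
	struct wait_queue_elem wqe;

	/* Register as an active (non-condvar) waiter, wqe.cv == NULL */
	wq_wait_init_condvar(wq, &wqe, NULL, false /* wait_read */);

	/*
	 * The caller drops the lock protecting sync_obj here, then
	 * sleeps via RPC until wq_wake_next() marks the element done.
	 */
	wq_wait_final(wq, &wqe, sync_obj, __func__, __LINE__);
}
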
void wq_wake_next(struct wait_queue *wq, const void *sync_obj,
			const char *fname, int lineno)
{
	uint32_t old_itr_status;
	struct wait_queue_elem *wqe;
	int handle = -1;
	bool do_wakeup = false;
	bool wake_type_assigned = false;
	bool wake_read = false; /* avoid gcc warning */

	/*
	 * If the next pending waiter is a reader (wait_read), wake up
	 * every pending reader. If it isn't, wake up only the first
	 * waiter which isn't done.
	 */

	while (true) {
		old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);

		SLIST_FOREACH(wqe, wq, link) {
			if (wqe->cv)
				continue;
			if (wqe->done)
				continue;
			if (!wake_type_assigned) {
				wake_read = wqe->wait_read;
				wake_type_assigned = true;
			}

			if (wqe->wait_read != wake_read)
				continue;

			wqe->done = true;
			handle = wqe->handle;
			do_wakeup = true;
			break;
		}

		cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);

		if (do_wakeup)
			__wq_rpc(OPTEE_RPC_WAIT_QUEUE_WAKEUP, handle,
				 sync_obj, fname, lineno);

		if (!do_wakeup || !wake_read)
			break;
		do_wakeup = false;
	}
}

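/*
 * Illustrative sketch, not part of the original file: the matching wake
 * side of example_sleep() above; a mutex-like unlock path would call
 * this after releasing ownership. Names are hypothetical.
 */
static void __maybe_unused example_wake(struct wait_queue *wq,
					const void *sync_obj)
{
	/*
	 * Wakes a single waiter, or every reader-style waiter if the
	 * first pending element has wait_read set.
	 */
	wq_wake_next(wq, sync_obj, __func__, __LINE__);
}
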
void wq_promote_condvar(struct wait_queue *wq, struct condvar *cv,
			bool only_one, const void *sync_obj __unused,
			const char *fname, int lineno __maybe_unused)
{
	uint32_t old_itr_status;
	struct wait_queue_elem *wqe;

	if (!cv)
		return;

	old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);

	/*
	 * Find condvar waiter(s) and promote each to an active waiter.
	 * This is a bit unfair to any other active waiters as a
	 * condvar waiter is added to the queue when waiting for the
	 * condvar.
	 */
	SLIST_FOREACH(wqe, wq, link) {
		if (wqe->cv == cv) {
			if (fname)
				FMSG("promote thread %u %p %s:%d",
				     wqe->handle, (void *)cv->m, fname, lineno);
			else
				FMSG("promote thread %u %p",
				     wqe->handle, (void *)cv->m);

			wqe->cv = NULL;
			if (only_one)
				break;
		}
	}

	cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);
}

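/*
 * Illustrative sketch, not part of the original file: a condvar
 * signal/broadcast path. Promoting clears wqe->cv so the waiter becomes
 * visible to wq_wake_next(); in the real condvar path the wakeup is
 * deferred to the mutex unlock, here it is delivered directly for
 * brevity. Names are hypothetical.
 */
static void __maybe_unused example_cv_signal(struct wait_queue *wq,
					     struct condvar *cv,
					     bool broadcast)
{
	wq_promote_condvar(wq, cv, !broadcast /* only_one */, NULL,
			   __func__, __LINE__);

	/* Deliver the wakeup(s) to the promoted waiter(s) */
	wq_wake_next(wq, cv, __func__, __LINE__);
}
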
bool wq_have_condvar(struct wait_queue *wq, struct condvar *cv)
{
	uint32_t old_itr_status;
	struct wait_queue_elem *wqe;
	bool rc = false;

	old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);

	SLIST_FOREACH(wqe, wq, link) {
		if (wqe->cv == cv) {
			rc = true;
			break;
		}
	}

	cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);

	return rc;
}

bool wq_is_empty(struct wait_queue *wq)
{
	uint32_t old_itr_status;
	bool ret;

	old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);

	ret = SLIST_EMPTY(wq);

	cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);

	return ret;
}
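
/*
 * Illustrative sketch, not part of the original file: a destroy path
 * would typically use the two queries above to check that no waiters
 * remain before tearing down the synchronization object. The function
 * name is hypothetical.
 */
static bool __maybe_unused example_can_destroy(struct wait_queue *wq,
					       struct condvar *cv)
{
	return wq_is_empty(wq) && !wq_have_condvar(wq, cv);
}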