/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2020-2021, Arm Limited
 */

#ifndef KERNEL_THREAD_H
#define KERNEL_THREAD_H

#ifndef __ASSEMBLER__
#include <types_ext.h>
#include <compiler.h>
#include <mm/pgt_cache.h>
#endif
#include <kernel/thread_arch.h>

#define THREAD_ID_0		0
#define THREAD_ID_INVALID	-1

#define THREAD_RPC_MAX_NUM_PARAMS	U(4)

#ifndef __ASSEMBLER__

struct thread_specific_data {
	TAILQ_HEAD(, ts_session) sess_stack;
	struct ts_ctx *ctx;
	struct pgt_cache pgt_cache;
#ifdef CFG_CORE_FFA
	uint32_t rpc_target_info;
#endif
	uint32_t abort_type;
	uint32_t abort_descr;
	vaddr_t abort_va;
	unsigned int abort_core;
	struct thread_abort_regs abort_regs;
#ifdef CFG_CORE_DEBUG_CHECK_STACKS
	bool stackcheck_recursion;
#endif
	unsigned int syscall_recursion;
};

void thread_init_canaries(void);
void thread_init_primary(void);
void thread_init_per_cpu(void);

struct thread_core_local *thread_get_core_local(void);

/*
 * Sets the stacks to be used by the different threads. Use THREAD_ID_0 for
 * the first stack, THREAD_ID_0 + 1 for the next, and so on.
 *
 * Returns true on success and false on errors.
 */
bool thread_init_stack(uint32_t stack_id, vaddr_t sp);

/*
 * Initializes thread contexts. Called in thread_init_boot_thread() if
 * virtualization is disabled. Virtualization subsystem calls it for
 * every new guest otherwise.
 */
void thread_init_threads(void);

/*
 * Called by the init CPU. Sets temporary stack mode for all CPUs
 * (curr_thread = -1 and THREAD_CLF_TMP) and sets the temporary stack limit for
 * the init CPU.
 */
void thread_init_thread_core_local(void);
void thread_init_core_local_stacks(void);

/*
 * Initializes a thread to be used during boot
 */
void thread_init_boot_thread(void);

/*
 * Clears the current thread id
 * Only supposed to be used during initialization.
 */
void thread_clr_boot_thread(void);

/*
 * Returns current thread id.
 */
short int thread_get_id(void);

/*
 * Returns the current thread id, or -1 on failure.
 */
short int thread_get_id_may_fail(void);

/* Returns Thread Specific Data (TSD) pointer. */
struct thread_specific_data *thread_get_tsd(void);

/*
 * Sets the foreign interrupt status for the current thread. Must only be
 * called from an active thread context.
 *
 * enable == true  -> enable foreign interrupts
 * enable == false -> disable foreign interrupts
 */
void thread_set_foreign_intr(bool enable);

/*
 * Restores the foreign interrupt status (in CPSR) for the current thread.
 * Must only be called from an active thread context.
 */
void thread_restore_foreign_intr(void);
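
/*
 * Example: temporarily allowing foreign interrupts around a long-running
 * operation (an illustrative sketch, not part of the original header):
 *
 *	thread_set_foreign_intr(true);
 *	... lengthy work that may now be preempted by the normal world ...
 *	thread_set_foreign_intr(false);
 */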

/*
 * thread_get_exceptions() - return current exception mask
 */
uint32_t thread_get_exceptions(void);

/*
 * thread_set_exceptions() - set exception mask
 * @exceptions: exception mask to set
 *
 * Any previous exception mask is replaced by this exception mask, that is,
 * old bits are cleared and replaced by these.
 */
void thread_set_exceptions(uint32_t exceptions);

/*
 * thread_mask_exceptions() - Masks (disables) specified asynchronous exceptions
 * @exceptions	exceptions to mask
 * @returns old exception state
 */
uint32_t thread_mask_exceptions(uint32_t exceptions);

/*
 * thread_unmask_exceptions() - Unmasks asynchronous exceptions
 * @state	Old asynchronous exception state to restore (returned by
 *		thread_mask_exceptions())
 */
void thread_unmask_exceptions(uint32_t state);
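
/*
 * Example: the typical save/mask/restore pattern around a critical section
 * (an illustrative sketch, not part of the original header):
 *
 *	uint32_t exceptions = 0;
 *
 *	exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
 *	... critical section that must not be preempted by a foreign
 *	    interrupt ...
 *	thread_unmask_exceptions(exceptions);
 */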

static inline bool __nostackcheck thread_foreign_intr_disabled(void)
{
	return !!(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
}

/*
 * thread_enter_user_mode() - Enters user mode
 * @a0:		Passed in r/x0 for @entry_func
 * @a1:		Passed in r/x1 for @entry_func
 * @a2:		Passed in r/x2 for @entry_func
 * @a3:		Passed in r/x3 for @entry_func
 * @user_sp:	Assigned sp value in user mode
 * @entry_func:	Function to execute in user mode
 * @is_32bit:	True if TA should execute in AArch32, false if AArch64
 * @exit_status0: Pointer to opaque exit status 0
 * @exit_status1: Pointer to opaque exit status 1
 *
 * This function enters user mode with the arguments described above,
 * @exit_status0 and @exit_status1 are filled in by thread_unwind_user_mode()
 * when returning back to the caller of this function through an exception
 * handler.
 *
 * @returns what's passed in "ret" to thread_unwind_user_mode()
 */
uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
		unsigned long a2, unsigned long a3, unsigned long user_sp,
		unsigned long entry_func, bool is_32bit,
		uint32_t *exit_status0, uint32_t *exit_status1);
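
/*
 * Example: a sketch of a call site (illustrative only; arg, usr_sp,
 * usr_entry and is_32bit are hypothetical values prepared by the caller
 * when setting up the user mode context):
 *
 *	uint32_t exit_status0 = 0;
 *	uint32_t exit_status1 = 0;
 *	uint32_t ret = 0;
 *
 *	ret = thread_enter_user_mode(arg, 0, 0, 0, usr_sp, usr_entry,
 *				     is_32bit, &exit_status0, &exit_status1);
 *	... interpret ret together with exit_status0/exit_status1 ...
 */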

/*
 * thread_unwind_user_mode() - Unwinds kernel stack from user entry
 * @ret:	Value to return from thread_enter_user_mode()
 * @exit_status0: Exit status 0
 * @exit_status1: Exit status 1
 *
 * This is the function that exception handlers can return into
 * to resume execution in kernel mode instead of user mode.
 *
 * This function is closely coupled with thread_enter_user_mode() since it
 * needs to restore registers saved by thread_enter_user_mode(), and when it
 * returns it makes it look like thread_enter_user_mode() just returned. It is
 * expected that the stack pointer is where thread_enter_user_mode() left
 * it. The stack will be unwound and the function will return to where
 * thread_enter_user_mode() was called from. @exit_status0 and @exit_status1
 * are stored in the corresponding pointers supplied to
 * thread_enter_user_mode().
 */
void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
		uint32_t exit_status1);

/*
 * Returns the start address (bottom) of the stack for the current thread,
 * zero if there is no current thread.
 */
vaddr_t thread_stack_start(void);

/* Returns the stack size for the current thread */
size_t thread_stack_size(void);

/*
 * Returns the start (top, lowest address) and end (bottom, highest address) of
 * the current stack (thread, temporary or abort stack).
 * When CFG_CORE_DEBUG_CHECK_STACKS=y, the @hard parameter tells whether the
 * hard or the soft limits are queried. The difference is that for the hard
 * limits the stack start includes some additional space, letting a function
 * overflow the soft limit while still leaving room to print a stack dump.
 */
bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard);

static inline bool __nostackcheck get_stack_soft_limits(vaddr_t *start,
							vaddr_t *end)
{
	return get_stack_limits(start, end, false);
}

static inline bool __nostackcheck get_stack_hard_limits(vaddr_t *start,
							vaddr_t *end)
{
	return get_stack_limits(start, end, true);
}
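
/*
 * Example: checking whether an address lies on the current stack using the
 * soft limits (an illustrative sketch; va_is_on_current_stack() is a
 * hypothetical helper, not part of the original header):
 *
 *	static bool va_is_on_current_stack(vaddr_t va)
 *	{
 *		vaddr_t start = 0;
 *		vaddr_t end = 0;
 *
 *		if (!get_stack_soft_limits(&start, &end))
 *			return false;
 *
 *		return va >= start && va < end;
 *	}
 */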

bool thread_is_in_normal_mode(void);

/*
 * Returns true if the previous exception also was in abort mode.
 *
 * Note: it's only valid to call this function from an abort exception
 * handler before interrupts have been re-enabled.
 */
bool thread_is_from_abort_mode(void);

/**
 * Allocates data for payload buffers.
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_payload(size_t size);

/**
 * Free physical memory previously allocated with thread_rpc_alloc_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_payload(struct mobj *mobj);
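
/*
 * Example: typical lifetime of an RPC payload buffer (an illustrative
 * sketch; size and the use of the buffer are hypothetical):
 *
 *	struct mobj *mobj = thread_rpc_alloc_payload(size);
 *
 *	if (!mobj)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *	... share the buffer with normal world, e.g. as a
 *	    THREAD_PARAM_MEMREF() parameter to thread_rpc_cmd() ...
 *	thread_rpc_free_payload(mobj);
 */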

/**
 * Allocate data for payload buffers only shared with the non-secure kernel
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_kernel_payload(size_t size);

/**
 * Free physical memory previously allocated with
 * thread_rpc_alloc_kernel_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_kernel_payload(struct mobj *mobj);

struct thread_param_memref {
	size_t offs;
	size_t size;
	struct mobj *mobj;
};

struct thread_param_value {
	uint64_t a;
	uint64_t b;
	uint64_t c;
};

/*
 * Note that some arithmetic is done on the values, so it's important to
 * keep the IN, OUT, INOUT order.
 */
enum thread_param_attr {
	THREAD_PARAM_ATTR_NONE = 0,
	THREAD_PARAM_ATTR_VALUE_IN,
	THREAD_PARAM_ATTR_VALUE_OUT,
	THREAD_PARAM_ATTR_VALUE_INOUT,
	THREAD_PARAM_ATTR_MEMREF_IN,
	THREAD_PARAM_ATTR_MEMREF_OUT,
	THREAD_PARAM_ATTR_MEMREF_INOUT,
};

struct thread_param {
	enum thread_param_attr attr;
	union {
		struct thread_param_memref memref;
		struct thread_param_value value;
	} u;
};

#define THREAD_PARAM_MEMREF(_direction, _mobj, _offs, _size) \
	(struct thread_param){ \
		.attr = THREAD_PARAM_ATTR_MEMREF_ ## _direction, .u.memref = { \
		.mobj = (_mobj), .offs = (_offs), .size = (_size) } \
	}

#define THREAD_PARAM_VALUE(_direction, _a, _b, _c) \
	(struct thread_param){ \
		.attr = THREAD_PARAM_ATTR_VALUE_ ## _direction, .u.value = { \
		.a = (_a), .b = (_b), .c = (_c) } \
	}

/**
 * Does an RPC using a preallocated argument buffer
 * @cmd: RPC cmd
 * @num_params: number of parameters
 * @params: RPC parameters
 * @returns RPC return value
 */
uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
		struct thread_param *params);
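
/*
 * Example: issuing an RPC with one value and one memref parameter (an
 * illustrative sketch; OPTEE_RPC_CMD_XYZ is a hypothetical command ID and
 * op, mobj and size are assumed to be set up by the caller):
 *
 *	struct thread_param params[2] = {
 *		THREAD_PARAM_VALUE(IN, op, 0, 0),
 *		THREAD_PARAM_MEMREF(INOUT, mobj, 0, size),
 *	};
 *	uint32_t res = thread_rpc_cmd(OPTEE_RPC_CMD_XYZ, 2, params);
 */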

/**
 * Allocate data for payload buffers.
 * Buffer is exported to user mode applications.
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_global_payload(size_t size);

/**
 * Free physical memory previously allocated with
 * thread_rpc_alloc_global_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_global_payload(struct mobj *mobj);

/*
 * enum thread_shm_type - type of non-secure shared memory
 * @THREAD_SHM_TYPE_APPLICATION - user space application shared memory
 * @THREAD_SHM_TYPE_KERNEL_PRIVATE - kernel private shared memory
 * @THREAD_SHM_TYPE_GLOBAL - user space and kernel shared memory
 */
enum thread_shm_type {
	THREAD_SHM_TYPE_APPLICATION,
	THREAD_SHM_TYPE_KERNEL_PRIVATE,
	THREAD_SHM_TYPE_GLOBAL,
};

/*
 * enum thread_shm_cache_user - user of a cache allocation
 * @THREAD_SHM_CACHE_USER_SOCKET - socket communication
 * @THREAD_SHM_CACHE_USER_FS - filesystem access
 * @THREAD_SHM_CACHE_USER_I2C - I2C communication
 *
 * To ensure that the users of the shared memory cache don't interfere with
 * each other, a unique ID per user is used.
 */
enum thread_shm_cache_user {
	THREAD_SHM_CACHE_USER_SOCKET,
	THREAD_SHM_CACHE_USER_FS,
	THREAD_SHM_CACHE_USER_I2C,
};

/*
 * Returns a pointer to the cached RPC memory. Each (thread, @user) tuple
 * has a unique cache. The pointer is guaranteed to point to a large enough
 * area or to be NULL.
 */
void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user,
				 enum thread_shm_type shm_type,
				 size_t size, struct mobj **mobj);
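
/*
 * Example: fetching a cached buffer for filesystem RPC (an illustrative
 * sketch; size is a hypothetical argument chosen by the caller):
 *
 *	struct mobj *mobj = NULL;
 *	void *va = NULL;
 *
 *	va = thread_rpc_shm_cache_alloc(THREAD_SHM_CACHE_USER_FS,
 *					THREAD_SHM_TYPE_APPLICATION,
 *					size, &mobj);
 *	if (!va)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 */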

#endif /*__ASSEMBLER__*/

#endif /*KERNEL_THREAD_H*/