/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2020-2021, Arm Limited
 */

#ifndef __KERNEL_THREAD_H
#define __KERNEL_THREAD_H

#ifndef __ASSEMBLER__
#include <types_ext.h>
#include <compiler.h>
#include <mm/pgt_cache.h>
#endif
#include <util.h>
#include <kernel/thread_arch.h>

#define THREAD_FLAGS_COPY_ARGS_ON_RETURN	BIT(0)
#define THREAD_FLAGS_FOREIGN_INTR_ENABLE	BIT(1)
#define THREAD_FLAGS_EXIT_ON_FOREIGN_INTR	BIT(2)
#define THREAD_FLAGS_FFA_ONLY			BIT(3)

#define THREAD_ID_0		0
#define THREAD_ID_INVALID	-1

#define THREAD_RPC_MAX_NUM_PARAMS	U(4)

#ifndef __ASSEMBLER__

struct thread_specific_data {
	TAILQ_HEAD(, ts_session) sess_stack;
	struct ts_ctx *ctx;
#ifdef CFG_CORE_FFA
	uint32_t rpc_target_info;
#endif
	uint32_t abort_type;
	uint32_t abort_descr;
	vaddr_t abort_va;
	unsigned int abort_core;
	struct thread_abort_regs abort_regs;
#ifdef CFG_CORE_DEBUG_CHECK_STACKS
	bool stackcheck_recursion;
#endif
	unsigned int syscall_recursion;
#ifdef CFG_FAULT_MITIGATION
	struct ftmn_func_arg *ftmn_arg;
#endif
};

void thread_init_canaries(void);
void thread_init_primary(void);
void thread_init_per_cpu(void);

#if defined(CFG_WITH_STACK_CANARIES)
void thread_update_canaries(void);
#else
static inline void thread_update_canaries(void) { }
#endif

struct thread_core_local *thread_get_core_local(void);

/*
 * Sets the stacks to be used by the different threads. Use THREAD_ID_0 for
 * the first stack, THREAD_ID_0 + 1 for the next, and so on.
 *
 * Returns true on success and false on error.
 */
bool thread_init_stack(uint32_t stack_id, vaddr_t sp);

/*
 * Initializes thread contexts. Called in thread_init_boot_thread() if
 * virtualization is disabled. Virtualization subsystem calls it for
 * every new guest otherwise.
 */
void thread_init_threads(void);

/*
 * Called by the init CPU. Sets temporary stack mode for all CPUs
 * (curr_thread = -1 and THREAD_CLF_TMP) and sets the temporary stack limit for
 * the init CPU.
 */
void thread_init_thread_core_local(void);
void thread_init_core_local_stacks(void);

#if defined(CFG_CORE_PAUTH)
void thread_init_thread_pauth_keys(void);
void thread_init_core_local_pauth_keys(void);
#else
static inline void thread_init_thread_pauth_keys(void) { }
static inline void thread_init_core_local_pauth_keys(void) { }
#endif

/*
 * Initializes a thread to be used during boot
 */
void thread_init_boot_thread(void);

/*
 * Clears the current thread id
 * Only supposed to be used during initialization.
 */
void thread_clr_boot_thread(void);

/*
 * Returns current thread id.
 */
short int thread_get_id(void);

/*
 * Returns the current thread id, or -1 on failure.
 */
short int thread_get_id_may_fail(void);

/* Returns Thread Specific Data (TSD) pointer. */
struct thread_specific_data *thread_get_tsd(void);

/*
 * Sets the foreign interrupt status for the current thread, must only be
 * called from an active thread context.
 *
 * enable == true  -> enable foreign interrupts
 * enable == false -> disable foreign interrupts
 */
void thread_set_foreign_intr(bool enable);

/*
 * Restores the foreign interrupt status (in CPSR) for the current thread,
 * must only be called from an active thread context.
 */
void thread_restore_foreign_intr(void);

/*
 * thread_get_exceptions() - return current exception mask
 */
uint32_t thread_get_exceptions(void);

/*
 * thread_set_exceptions() - set exception mask
 * @exceptions: exception mask to set
 *
 * Any previous exception mask is replaced by this exception mask, that is,
 * old bits are cleared and replaced by these.
 */
void thread_set_exceptions(uint32_t exceptions);

/*
 * thread_mask_exceptions() - Masks (disables) specified asynchronous exceptions
 * @exceptions:	exceptions to mask
 * @returns old exception state
 */
uint32_t thread_mask_exceptions(uint32_t exceptions);

/*
 * thread_unmask_exceptions() - Unmasks asynchronous exceptions
 * @state:	Old asynchronous exception state to restore (returned by
 *		thread_mask_exceptions())
 */
void thread_unmask_exceptions(uint32_t state);
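
/*
 * Typical usage (illustrative sketch): mask foreign interrupts around a
 * critical section and restore the previous state afterwards.
 * do_critical_work() is a hypothetical helper.
 *
 *	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
 *
 *	do_critical_work();
 *	thread_unmask_exceptions(exceptions);
 */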

static inline bool __nostackcheck thread_foreign_intr_disabled(void)
{
	return !!(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
}

/*
 * thread_enter_user_mode() - Enters user mode
 * @a0:		Passed in r/x0 for @entry_func
 * @a1:		Passed in r/x1 for @entry_func
 * @a2:		Passed in r/x2 for @entry_func
 * @a3:		Passed in r/x3 for @entry_func
 * @user_sp:	Assigned sp value in user mode
 * @entry_func:	Function to execute in user mode
 * @is_32bit:	True if the TA should execute in AArch32, false for AArch64
 * @exit_status0: Pointer to opaque exit status 0
 * @exit_status1: Pointer to opaque exit status 1
 *
 * This function enters user mode with the arguments described above.
 * @exit_status0 and @exit_status1 are filled in by thread_unwind_user_mode()
 * when returning to the caller of this function through an exception
 * handler.
 *
 * @Returns what's passed in "ret" to thread_unwind_user_mode()
 */
uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
		unsigned long a2, unsigned long a3, unsigned long user_sp,
		unsigned long entry_func, bool is_32bit,
		uint32_t *exit_status0, uint32_t *exit_status1);
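
/*
 * Illustrative sketch (not the actual TA dispatch code): the caller supplies
 * the user-mode stack pointer and entry point and receives the value later
 * passed as "ret" to thread_unwind_user_mode().
 *
 *	uint32_t exit_status0 = 0;
 *	uint32_t exit_status1 = 0;
 *	uint32_t ret = thread_enter_user_mode(a0, a1, a2, a3, user_sp,
 *					      entry_func, is_32bit,
 *					      &exit_status0, &exit_status1);
 */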

/*
 * thread_unwind_user_mode() - Unwinds kernel stack from user entry
 * @ret:	Value to return from thread_enter_user_mode()
 * @exit_status0: Exit status 0
 * @exit_status1: Exit status 1
 *
 * This is the function that exception handlers can return into
 * to resume execution in kernel mode instead of user mode.
 *
 * This function is closely coupled with thread_enter_user_mode() since it
 * needs to restore the registers saved by thread_enter_user_mode() and, when
 * it returns, make it look like thread_enter_user_mode() just returned. It is
 * expected that the stack pointer is where thread_enter_user_mode() left
 * it. The stack will be unwound and the function will return to where
 * thread_enter_user_mode() was called from. @exit_status0 and @exit_status1
 * are written to the corresponding pointers supplied to
 * thread_enter_user_mode().
 */
void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
		uint32_t exit_status1);

/*
 * Returns the start address (bottom) of the stack for the current thread,
 * zero if there is no current thread.
 */
vaddr_t thread_stack_start(void);

/* Returns the stack size for the current thread */
size_t thread_stack_size(void);

/*
 * Returns the start (top, lowest address) and end (bottom, highest address) of
 * the current stack (thread, temporary or abort stack).
 * When CFG_CORE_DEBUG_CHECK_STACKS=y, the @hard parameter selects whether the
 * hard or soft limits are queried. The difference is that the hard limit's
 * stack start includes some additional space, so that a function which
 * overflows the soft limit can still print a stack dump.
 */
bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard);

static inline bool __nostackcheck get_stack_soft_limits(vaddr_t *start,
							vaddr_t *end)
{
	return get_stack_limits(start, end, false);
}

static inline bool __nostackcheck get_stack_hard_limits(vaddr_t *start,
							vaddr_t *end)
{
	return get_stack_limits(start, end, true);
}
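
/*
 * Example (illustrative sketch): check whether an address lies within the
 * soft limits of the current stack. @va is a hypothetical address to test.
 *
 *	vaddr_t start = 0;
 *	vaddr_t end = 0;
 *
 *	if (get_stack_soft_limits(&start, &end) && va >= start && va < end)
 *		...the address is on the current stack...
 */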

bool thread_is_in_normal_mode(void);

/*
 * Returns true if the previous exception was also taken in abort mode.
 *
 * Note: it's only valid to call this function from an abort exception
 * handler before interrupts have been re-enabled.
 */
bool thread_is_from_abort_mode(void);

/**
 * Allocates data for payload buffers.
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_payload(size_t size);

/**
 * Free physical memory previously allocated with thread_rpc_alloc_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_payload(struct mobj *mobj);
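
/*
 * Typical usage (illustrative sketch): allocate a shared buffer for RPC,
 * use it, then free it. The error handling shown is only an example.
 *
 *	struct mobj *mobj = thread_rpc_alloc_payload(size);
 *
 *	if (!mobj)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *	...fill or read the buffer, for instance via mobj_get_va()...
 *	thread_rpc_free_payload(mobj);
 */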

/**
 * Allocate data for payload buffers only shared with the non-secure kernel
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_kernel_payload(size_t size);

/**
 * Free physical memory previously allocated with
 * thread_rpc_alloc_kernel_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_kernel_payload(struct mobj *mobj);

struct thread_param_memref {
	size_t offs;
	size_t size;
	struct mobj *mobj;
};

struct thread_param_value {
	uint64_t a;
	uint64_t b;
	uint64_t c;
};

/*
 * Note that some arithmetic is done on these values, so it's important to
 * keep the IN, OUT, INOUT order.
 */
enum thread_param_attr {
	THREAD_PARAM_ATTR_NONE = 0,
	THREAD_PARAM_ATTR_VALUE_IN,
	THREAD_PARAM_ATTR_VALUE_OUT,
	THREAD_PARAM_ATTR_VALUE_INOUT,
	THREAD_PARAM_ATTR_MEMREF_IN,
	THREAD_PARAM_ATTR_MEMREF_OUT,
	THREAD_PARAM_ATTR_MEMREF_INOUT,
};
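
/*
 * Illustrative sketch (not the actual implementation) of the kind of
 * ordering-dependent arithmetic the comment above refers to:
 *
 *	static inline bool attr_is_value(enum thread_param_attr attr)
 *	{
 *		return attr >= THREAD_PARAM_ATTR_VALUE_IN &&
 *		       attr <= THREAD_PARAM_ATTR_VALUE_INOUT;
 *	}
 */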

struct thread_param {
	enum thread_param_attr attr;
	union {
		struct thread_param_memref memref;
		struct thread_param_value value;
	} u;
};

#define THREAD_PARAM_MEMREF(_direction, _mobj, _offs, _size) \
	(struct thread_param){ \
		.attr = THREAD_PARAM_ATTR_MEMREF_ ## _direction, .u.memref = { \
		.mobj = (_mobj), .offs = (_offs), .size = (_size) } \
	}

#define THREAD_PARAM_VALUE(_direction, _a, _b, _c) \
	(struct thread_param){ \
		.attr = THREAD_PARAM_ATTR_VALUE_ ## _direction, .u.value = { \
		.a = (_a), .b = (_b), .c = (_c) } \
	}

/**
 * Does an RPC using a preallocated argument buffer
 * @cmd: RPC cmd
 * @num_params: number of parameters
 * @params: RPC parameters
 * @returns RPC return value
 */
uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
		struct thread_param *params);
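
/*
 * Example (illustrative sketch): issue an RPC with one value parameter and
 * one memref parameter. OPTEE_RPC_CMD_FOO is a hypothetical command ID.
 *
 *	struct thread_param params[2] = {
 *		THREAD_PARAM_VALUE(IN, a, b, c),
 *		THREAD_PARAM_MEMREF(INOUT, mobj, 0, size),
 *	};
 *	uint32_t res = thread_rpc_cmd(OPTEE_RPC_CMD_FOO, 2, params);
 */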

/**
 * Allocate data for payload buffers.
 * Buffer is exported to user mode applications.
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_global_payload(size_t size);

/**
 * Free physical memory previously allocated with
 * thread_rpc_alloc_global_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_global_payload(struct mobj *mobj);

/*
 * enum thread_shm_type - type of non-secure shared memory
 * @THREAD_SHM_TYPE_APPLICATION - user space application shared memory
 * @THREAD_SHM_TYPE_KERNEL_PRIVATE - kernel private shared memory
 * @THREAD_SHM_TYPE_GLOBAL - user space and kernel shared memory
 */
enum thread_shm_type {
	THREAD_SHM_TYPE_APPLICATION,
	THREAD_SHM_TYPE_KERNEL_PRIVATE,
	THREAD_SHM_TYPE_GLOBAL,
};

/*
 * enum thread_shm_cache_user - user of a cache allocation
 * @THREAD_SHM_CACHE_USER_SOCKET - socket communication
 * @THREAD_SHM_CACHE_USER_FS - filesystem access
 * @THREAD_SHM_CACHE_USER_I2C - I2C communication
 *
 * To ensure that the users of the shared memory cache don't interfere with
 * each other, a unique ID per user is used.
 */
enum thread_shm_cache_user {
	THREAD_SHM_CACHE_USER_SOCKET,
	THREAD_SHM_CACHE_USER_FS,
	THREAD_SHM_CACHE_USER_I2C,
};

/*
 * Returns a pointer to the cached RPC memory. Each (thread, @user) tuple has
 * its own cache. The returned pointer is guaranteed to point to a large
 * enough area or to be NULL.
 */
void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user,
				 enum thread_shm_type shm_type,
				 size_t size, struct mobj **mobj);
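
/*
 * Example (illustrative sketch): get a cached kernel-private buffer for
 * filesystem RPC use. @size and the error handling are only examples.
 *
 *	struct mobj *mobj = NULL;
 *	void *va = thread_rpc_shm_cache_alloc(THREAD_SHM_CACHE_USER_FS,
 *					      THREAD_SHM_TYPE_KERNEL_PRIVATE,
 *					      size, &mobj);
 *
 *	if (!va)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 */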

#endif /*__ASSEMBLER__*/

#endif /*__KERNEL_THREAD_H*/