/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2020-2021, Arm Limited
 */

#ifndef __KERNEL_THREAD_H
#define __KERNEL_THREAD_H

#ifndef __ASSEMBLER__
#include <types_ext.h>
#include <compiler.h>
#include <mm/pgt_cache.h>
#endif
#include <util.h>
#include <kernel/thread_arch.h>

#define THREAD_FLAGS_COPY_ARGS_ON_RETURN	BIT(0)
#define THREAD_FLAGS_FOREIGN_INTR_ENABLE	BIT(1)
#define THREAD_FLAGS_EXIT_ON_FOREIGN_INTR	BIT(2)
#define THREAD_FLAGS_FFA_ONLY			BIT(3)

#define THREAD_ID_0		0
#define THREAD_ID_INVALID	-1

#define THREAD_RPC_MAX_NUM_PARAMS	U(4)

#ifndef __ASSEMBLER__

struct thread_specific_data {
	TAILQ_HEAD(, ts_session) sess_stack;
	struct ts_ctx *ctx;
#ifdef CFG_CORE_FFA
	uint32_t rpc_target_info;
#endif
	uint32_t abort_type;
	uint32_t abort_descr;
	vaddr_t abort_va;
	unsigned int abort_core;
	struct thread_abort_regs abort_regs;
#ifdef CFG_CORE_DEBUG_CHECK_STACKS
	bool stackcheck_recursion;
#endif
	unsigned int syscall_recursion;
#ifdef CFG_FAULT_MITIGATION
	struct ftmn_func_arg *ftmn_arg;
#endif
};

void thread_init_canaries(void);
void thread_init_primary(void);
void thread_init_per_cpu(void);

#if defined(CFG_WITH_STACK_CANARIES)
void thread_update_canaries(void);
#else
static inline void thread_update_canaries(void) { }
#endif

struct thread_core_local *thread_get_core_local(void);

/*
 * thread_init_threads() - Initialize threads
 * @thread_count: Number of threads to configure
 *
 * Initializes thread contexts. Called from thread_init_boot_thread() when
 * virtualization is disabled, otherwise the virtualization subsystem calls
 * it for every new guest. With CFG_DYN_STACK_CONFIG=y, @thread_count may be
 * any value <= CFG_NUM_THREADS and sets the number of initialized threads;
 * otherwise @thread_count must equal CFG_NUM_THREADS.
 */
void thread_init_threads(size_t thread_count);

vaddr_t thread_get_abt_stack(void);

/*
 * thread_init_thread_core_local() - Initialize thread_core_local
 * @core_count:	Number of cores in the system
 *
 * Called by the init CPU. Sets temporary stack mode for all CPUs
 * (curr_thread = -1 and THREAD_CLF_TMP) and sets the temporary stack limit
 * for the init CPU. With CFG_DYN_CONFIG=y, @core_count may be any value <=
 * CFG_TEE_CORE_NB_CORE and sets the number of supported cores; otherwise
 * @core_count must equal CFG_TEE_CORE_NB_CORE.
 */
void thread_init_thread_core_local(size_t core_count);
void thread_init_core_local_stacks(void);

#if defined(CFG_CORE_PAUTH)
void thread_init_thread_pauth_keys(void);
void thread_init_core_local_pauth_keys(void);
#else
static inline void thread_init_thread_pauth_keys(void) { }
static inline void thread_init_core_local_pauth_keys(void) { }
#endif

/*
 * Initializes a thread to be used during boot
 */
void thread_init_boot_thread(void);

/*
 * Clears the current thread ID.
 * Only supposed to be used during initialization.
 */
void thread_clr_boot_thread(void);

/*
 * Returns the current thread ID.
 */
short int thread_get_id(void);

/*
 * Returns the current thread ID, or -1 on failure.
 */
short int thread_get_id_may_fail(void);

/* Returns Thread Specific Data (TSD) pointer. */
struct thread_specific_data *thread_get_tsd(void);
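
/*
 * Example (a minimal sketch, assuming an active thread context): the
 * session on top of the TSD session stack is the currently active one.
 * TAILQ_FIRST() comes from <sys/queue.h> via <types_ext.h>.
 *
 *	struct thread_specific_data *tsd = thread_get_tsd();
 *	struct ts_session *s = TAILQ_FIRST(&tsd->sess_stack);
 *
 *	if (s)
 *		... s is the active trusted service session ...
 */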

/*
 * Sets the foreign interrupt status for the current thread. Must only be
 * called from an active thread context.
 *
 * enable == true  -> enable foreign interrupts
 * enable == false -> disable foreign interrupts
 */
void thread_set_foreign_intr(bool enable);

/*
 * Restores the foreign interrupt status (in CPSR) for the current thread.
 * Must only be called from an active thread context.
 */
void thread_restore_foreign_intr(void);
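
/*
 * A minimal usage sketch (assuming an active thread context) of masking
 * foreign interrupts around a critical section and then restoring the
 * previous status:
 *
 *	thread_set_foreign_intr(false);
 *	... critical section ...
 *	thread_restore_foreign_intr();
 */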

/*
 * thread_get_exceptions() - return current exception mask
 */
uint32_t thread_get_exceptions(void);

/*
 * thread_set_exceptions() - set exception mask
 * @exceptions: exception mask to set
 *
 * The previous exception mask is replaced entirely by @exceptions, that is,
 * old bits are cleared and replaced by the new ones.
 */
void thread_set_exceptions(uint32_t exceptions);

/*
 * thread_mask_exceptions() - Masks (disables) specified asynchronous exceptions
 * @exceptions	exceptions to mask
 * @returns old exception state
 */
uint32_t thread_mask_exceptions(uint32_t exceptions);

/*
 * thread_unmask_exceptions() - Unmasks asynchronous exceptions
 * @state	Old asynchronous exception state to restore (returned by
 *		thread_mask_exceptions())
 */
void thread_unmask_exceptions(uint32_t state);
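
/*
 * A typical pattern, sketched here for a critical section that must not be
 * preempted by foreign interrupts (THREAD_EXCP_FOREIGN_INTR is provided by
 * <kernel/thread_arch.h>):
 *
 *	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
 *
 *	... critical section ...
 *	thread_unmask_exceptions(exceptions);
 */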

static inline bool __nostackcheck thread_foreign_intr_disabled(void)
{
	return !!(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
}

/*
 * thread_enter_user_mode() - Enters user mode
 * @a0:		Passed in r/x0 for @entry_func
 * @a1:		Passed in r/x1 for @entry_func
 * @a2:		Passed in r/x2 for @entry_func
 * @a3:		Passed in r/x3 for @entry_func
 * @user_sp:	Assigned sp value in user mode
 * @entry_func:	Function to execute in user mode
 * @is_32bit:	True if the TA should execute in AArch32, false for AArch64
 * @exit_status0: Pointer to opaque exit status 0
 * @exit_status1: Pointer to opaque exit status 1
 *
 * This function enters user mode with the arguments described above.
 * @exit_status0 and @exit_status1 are filled in by thread_unwind_user_mode()
 * when returning back to the caller of this function through an exception
 * handler.
 *
 * Returns what's passed in @ret to thread_unwind_user_mode().
 */
uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
		unsigned long a2, unsigned long a3, unsigned long user_sp,
		unsigned long entry_func, bool is_32bit,
		uint32_t *exit_status0, uint32_t *exit_status1);

/*
 * thread_unwind_user_mode() - Unwinds kernel stack from user entry
 * @ret:	Value to return from thread_enter_user_mode()
 * @exit_status0: Exit status 0
 * @exit_status1: Exit status 1
 *
 * This is the function that exception handlers can return into
 * to resume execution in kernel mode instead of user mode.
 *
 * This function is closely coupled with thread_enter_user_mode() since it
 * needs to restore registers saved by thread_enter_user_mode() and, when it
 * returns, make it look like thread_enter_user_mode() just returned. It is
 * expected that the stack pointer is where thread_enter_user_mode() left
 * it. The stack will be unwound and the function will return to where
 * thread_enter_user_mode() was called from. @exit_status0 and @exit_status1
 * are written to the corresponding pointers supplied to
 * thread_enter_user_mode().
 */
void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
		uint32_t exit_status1);
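
/*
 * A hedged sketch of the call pair as seen from the caller; the values of
 * a0..a3, usr_sp, entry and is_32bit are placeholders for whatever the
 * user TA context provides:
 *
 *	uint32_t exit_status0 = 0;
 *	uint32_t exit_status1 = 0;
 *	uint32_t ret = thread_enter_user_mode(a0, a1, a2, a3, usr_sp, entry,
 *					      is_32bit, &exit_status0,
 *					      &exit_status1);
 *
 * ret holds whatever a later thread_unwind_user_mode() call passed as @ret.
 */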

/*
 * Returns the start address (bottom) of the stack for the current thread,
 * zero if there is no current thread.
 */
vaddr_t thread_stack_start(void);

/* Returns the stack size for the current thread */
size_t thread_stack_size(void);

/*
 * Returns the start (top, lowest address) and end (bottom, highest address) of
 * the current stack (thread, temporary or abort stack).
 * When CFG_CORE_DEBUG_CHECK_STACKS=y, the @hard parameter selects whether the
 * hard or soft limits are queried. The hard limits extend the stack start with
 * some additional space so that a function overflowing the soft limit still
 * has enough stack left to print a stack dump.
 */
bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard);

static inline bool __nostackcheck get_stack_soft_limits(vaddr_t *start,
							vaddr_t *end)
{
	return get_stack_limits(start, end, false);
}

static inline bool __nostackcheck get_stack_hard_limits(vaddr_t *start,
							vaddr_t *end)
{
	return get_stack_limits(start, end, true);
}
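
/*
 * Example (a minimal sketch): testing whether a hypothetical address va
 * lies within the soft limits of the current stack.
 *
 *	vaddr_t start = 0;
 *	vaddr_t end = 0;
 *
 *	if (get_stack_soft_limits(&start, &end) && va >= start && va < end)
 *		... va points into the current stack ...
 */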

bool thread_is_in_normal_mode(void);

/*
 * Returns true if the previous exception also was in abort mode.
 *
 * Note: it's only valid to call this function from an abort exception
 * handler before interrupts have been re-enabled.
 */
bool thread_is_from_abort_mode(void);

/**
 * Allocates data for payload buffers shared with a non-secure user space
 * application. Ensure consistency with the enumeration
 * THREAD_SHM_TYPE_APPLICATION.
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_payload(size_t size);

/**
 * Free physical memory previously allocated with thread_rpc_alloc_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_payload(struct mobj *mobj);
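
/*
 * A minimal sketch of the allocate/use/free pattern; the same pattern
 * applies to the kernel and global payload variants below:
 *
 *	struct mobj *mobj = thread_rpc_alloc_payload(size);
 *
 *	if (!mobj)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *	... pass the buffer to the normal world in an RPC ...
 *	thread_rpc_free_payload(mobj);
 */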

/**
 * Allocate data for payload buffers shared with the non-secure kernel.
 * Ensure consistency with the enumeration THREAD_SHM_TYPE_KERNEL_PRIVATE.
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_kernel_payload(size_t size);

/**
 * Free physical memory previously allocated with
 * thread_rpc_alloc_kernel_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_kernel_payload(struct mobj *mobj);

struct thread_param_memref {
	size_t offs;
	size_t size;
	struct mobj *mobj;
};

struct thread_param_value {
	uint64_t a;
	uint64_t b;
	uint64_t c;
};

/*
 * Note that some arithmetic is done on these values, so it's important to
 * keep the IN, OUT, INOUT order.
 */
enum thread_param_attr {
	THREAD_PARAM_ATTR_NONE = 0,
	THREAD_PARAM_ATTR_VALUE_IN,
	THREAD_PARAM_ATTR_VALUE_OUT,
	THREAD_PARAM_ATTR_VALUE_INOUT,
	THREAD_PARAM_ATTR_MEMREF_IN,
	THREAD_PARAM_ATTR_MEMREF_OUT,
	THREAD_PARAM_ATTR_MEMREF_INOUT,
};

struct thread_param {
	enum thread_param_attr attr;
	union {
		struct thread_param_memref memref;
		struct thread_param_value value;
	} u;
};

#define THREAD_PARAM_MEMREF(_direction, _mobj, _offs, _size) \
	(struct thread_param){ \
		.attr = THREAD_PARAM_ATTR_MEMREF_ ## _direction, .u.memref = { \
		.mobj = (_mobj), .offs = (_offs), .size = (_size) } \
	}

#define THREAD_PARAM_VALUE(_direction, _a, _b, _c) \
	(struct thread_param){ \
		.attr = THREAD_PARAM_ATTR_VALUE_ ## _direction, .u.value = { \
		.a = (_a), .b = (_b), .c = (_c) } \
	}

/**
 * Does an RPC using a preallocated argument buffer
 * @cmd: RPC cmd
 * @num_params: number of parameters
 * @params: RPC parameters
 * @returns RPC return value
 */
uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
		struct thread_param *params);
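
/*
 * A hedged sketch of building parameters and issuing an RPC; the command
 * value OPTEE_RPC_CMD_xxx is a placeholder for one of the commands defined
 * in optee_rpc_cmd.h:
 *
 *	struct thread_param params[2] = {
 *		THREAD_PARAM_VALUE(IN, kind, 0, 0),
 *		THREAD_PARAM_MEMREF(INOUT, mobj, 0, size),
 *	};
 *	uint32_t res = thread_rpc_cmd(OPTEE_RPC_CMD_xxx, 2, params);
 */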

/**
 * Allocate data for payload buffers shared with both user space applications
 * and the non-secure kernel. Ensure consistency with the enumeration
 * THREAD_SHM_TYPE_GLOBAL.
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_global_payload(size_t size);

/**
 * Free physical memory previously allocated with
 * thread_rpc_alloc_global_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_global_payload(struct mobj *mobj);

/*
 * enum thread_shm_type - type of non-secure shared memory
 * @THREAD_SHM_TYPE_APPLICATION - user space application shared memory
 * @THREAD_SHM_TYPE_KERNEL_PRIVATE - kernel private shared memory
 * @THREAD_SHM_TYPE_GLOBAL - user space and kernel shared memory
 */
enum thread_shm_type {
	THREAD_SHM_TYPE_APPLICATION,
	THREAD_SHM_TYPE_KERNEL_PRIVATE,
	THREAD_SHM_TYPE_GLOBAL,
};

/*
 * enum thread_shm_cache_user - user of a cache allocation
 * @THREAD_SHM_CACHE_USER_SOCKET - socket communication
 * @THREAD_SHM_CACHE_USER_FS - filesystem access
 * @THREAD_SHM_CACHE_USER_I2C - I2C communication
 * @THREAD_SHM_CACHE_USER_RPMB - RPMB communication
 *
 * To ensure that the users of the shared memory cache don't interfere with
 * one another, each user has a unique ID.
 */
enum thread_shm_cache_user {
	THREAD_SHM_CACHE_USER_SOCKET,
	THREAD_SHM_CACHE_USER_FS,
	THREAD_SHM_CACHE_USER_I2C,
	THREAD_SHM_CACHE_USER_RPMB,
};

/*
 * Returns a pointer to the cached RPC memory. Each (thread, @user) tuple
 * has its own cache. The pointer is guaranteed to point to an area large
 * enough for @size bytes, or to be NULL.
 */
void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user,
				 enum thread_shm_type shm_type,
				 size_t size, struct mobj **mobj);
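
/*
 * Example (a minimal sketch) of getting a cached buffer for filesystem RPC;
 * repeated calls from the same thread reuse the cached allocation:
 *
 *	struct mobj *mobj = NULL;
 *	void *va = thread_rpc_shm_cache_alloc(THREAD_SHM_CACHE_USER_FS,
 *					      THREAD_SHM_TYPE_APPLICATION,
 *					      size, &mobj);
 *
 *	if (!va)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 */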

#endif /*__ASSEMBLER__*/

#endif /*__KERNEL_THREAD_H*/