/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2020-2021, Arm Limited
 */

#ifndef KERNEL_THREAD_H
#define KERNEL_THREAD_H

#ifndef __ASSEMBLER__
#include <types_ext.h>
#include <compiler.h>
#include <mm/pgt_cache.h>
#endif
#include <util.h>
#include <kernel/thread_arch.h>

#define THREAD_FLAGS_COPY_ARGS_ON_RETURN	BIT(0)
#define THREAD_FLAGS_FOREIGN_INTR_ENABLE	BIT(1)
#define THREAD_FLAGS_EXIT_ON_FOREIGN_INTR	BIT(2)

#define THREAD_ID_0		0
#define THREAD_ID_INVALID	-1

#define THREAD_RPC_MAX_NUM_PARAMS	U(4)

#ifndef __ASSEMBLER__

struct thread_specific_data {
	TAILQ_HEAD(, ts_session) sess_stack;
	struct ts_ctx *ctx;
	struct pgt_cache pgt_cache;
#ifdef CFG_CORE_FFA
	uint32_t rpc_target_info;
#endif
	uint32_t abort_type;
	uint32_t abort_descr;
	vaddr_t abort_va;
	unsigned int abort_core;
	struct thread_abort_regs abort_regs;
#ifdef CFG_CORE_DEBUG_CHECK_STACKS
	bool stackcheck_recursion;
#endif
	unsigned int syscall_recursion;
};

void thread_init_canaries(void);
void thread_init_primary(void);
void thread_init_per_cpu(void);

struct thread_core_local *thread_get_core_local(void);

/*
 * Sets the stacks to be used by the different threads. Use THREAD_ID_0 for
 * the first stack, THREAD_ID_0 + 1 for the next, and so on.
 *
 * Returns true on success and false on errors.
 */
bool thread_init_stack(uint32_t stack_id, vaddr_t sp);
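
/*
 * Example (illustrative sketch, not part of the original header): assigning
 * one stack per thread slot. sp_of() is a hypothetical helper returning the
 * initial stack pointer for slot n.
 *
 *	for (n = 0; n < thread_count; n++)
 *		if (!thread_init_stack(THREAD_ID_0 + n, sp_of(n)))
 *			panic();
 */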

/*
 * Initializes thread contexts. Called in thread_init_boot_thread() if
 * virtualization is disabled. Otherwise, the virtualization subsystem
 * calls it for every new guest.
 */
void thread_init_threads(void);

/*
 * Called by the init CPU. Sets temporary stack mode for all CPUs
 * (curr_thread = -1 and THREAD_CLF_TMP) and sets the temporary stack limit for
 * the init CPU.
 */
void thread_init_thread_core_local(void);
void thread_init_core_local_stacks(void);

#if defined(CFG_CORE_PAUTH)
void thread_init_thread_pauth_keys(void);
void thread_init_core_local_pauth_keys(void);
#else
static inline void thread_init_thread_pauth_keys(void) { }
static inline void thread_init_core_local_pauth_keys(void) { }
#endif

/*
 * Initializes a thread to be used during boot.
 */
void thread_init_boot_thread(void);

/*
 * Clears the current thread id.
 * Only supposed to be used during initialization.
 */
void thread_clr_boot_thread(void);

/*
 * Returns current thread id.
 */
short int thread_get_id(void);

/*
 * Returns current thread id, or -1 on failure.
 */
short int thread_get_id_may_fail(void);

/* Returns Thread Specific Data (TSD) pointer. */
struct thread_specific_data *thread_get_tsd(void);

/*
 * Sets the foreign interrupt status for the current thread; must only be
 * called from an active thread context.
 *
 * enable == true  -> enable foreign interrupts
 * enable == false -> disable foreign interrupts
 */
void thread_set_foreign_intr(bool enable);

/*
 * Restores the foreign interrupt status (in CPSR) for the current thread;
 * must only be called from an active thread context.
 */
void thread_restore_foreign_intr(void);

/*
 * thread_get_exceptions() - return current exception mask
 */
uint32_t thread_get_exceptions(void);

/*
 * thread_set_exceptions() - set exception mask
 * @exceptions: exception mask to set
 *
 * Any previous exception mask is replaced by this exception mask; that is,
 * old bits are cleared and replaced by these.
 */
void thread_set_exceptions(uint32_t exceptions);

/*
 * thread_mask_exceptions() - Masks (disables) specified asynchronous exceptions
 * @exceptions:	exceptions to mask
 * @returns old exception state
 */
uint32_t thread_mask_exceptions(uint32_t exceptions);

/*
 * thread_unmask_exceptions() - Unmasks asynchronous exceptions
 * @state:	Old asynchronous exception state to restore (returned by
 *		thread_mask_exceptions())
 */
void thread_unmask_exceptions(uint32_t state);
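
/*
 * Example (illustrative sketch): the usual critical-section pattern built
 * on the two functions above. Foreign interrupts are masked, the old state
 * is kept and restored afterwards.
 *
 *	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
 *
 *	... code that must not be preempted by foreign interrupts ...
 *
 *	thread_unmask_exceptions(exceptions);
 */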

static inline bool __nostackcheck thread_foreign_intr_disabled(void)
{
	return !!(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
}

/*
 * thread_enter_user_mode() - Enters user mode
 * @a0:		Passed in r/x0 for entry_func
 * @a1:		Passed in r/x1 for entry_func
 * @a2:		Passed in r/x2 for entry_func
 * @a3:		Passed in r/x3 for entry_func
 * @user_sp:	Assigned sp value in user mode
 * @entry_func:	Function to execute in user mode
 * @is_32bit:	True if the TA should execute in AArch32, false if AArch64
 * @exit_status0: Pointer to opaque exit status 0
 * @exit_status1: Pointer to opaque exit status 1
 *
 * This function enters user mode with the arguments described above.
 * @exit_status0 and @exit_status1 are filled in by thread_unwind_user_mode()
 * when returning back to the caller of this function through an exception
 * handler.
 *
 * Returns what's passed in @ret to thread_unwind_user_mode()
 */
uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
		unsigned long a2, unsigned long a3, unsigned long user_sp,
		unsigned long entry_func, bool is_32bit,
		uint32_t *exit_status0, uint32_t *exit_status1);

/*
 * thread_unwind_user_mode() - Unwinds kernel stack from user entry
 * @ret:	Value to return from thread_enter_user_mode()
 * @exit_status0: Exit status 0
 * @exit_status1: Exit status 1
 *
 * This is the function that exception handlers can return into
 * to resume execution in kernel mode instead of user mode.
 *
 * This function is closely coupled with thread_enter_user_mode() since it
 * needs to restore registers saved by thread_enter_user_mode() and, when it
 * returns, make it look like thread_enter_user_mode() just returned. It is
 * expected that the stack pointer is where thread_enter_user_mode() left
 * it. The stack will be unwound and the function will return to where
 * thread_enter_user_mode() was called from. @exit_status0 and @exit_status1
 * are written to the corresponding pointers supplied to
 * thread_enter_user_mode().
 */
void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
		uint32_t exit_status1);
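
/*
 * Example (illustrative sketch): how the two functions above pair up from
 * the caller's point of view.
 *
 *	uint32_t exit_status0 = 0;
 *	uint32_t exit_status1 = 0;
 *	uint32_t ret = thread_enter_user_mode(a0, a1, a2, a3, user_sp,
 *					      entry_func, is_32bit,
 *					      &exit_status0, &exit_status1);
 *
 * Execution resumes after the call when an exception handler later invokes
 * thread_unwind_user_mode(); @ret holds the value passed there and the two
 * exit statuses have been written through the supplied pointers.
 */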

/*
 * Returns the start address (bottom) of the stack for the current thread,
 * or zero if there is no current thread.
 */
vaddr_t thread_stack_start(void);

/* Returns the stack size for the current thread */
size_t thread_stack_size(void);

/*
 * Returns the start (top, lowest address) and end (bottom, highest address) of
 * the current stack (thread, temporary or abort stack).
 * When CFG_CORE_DEBUG_CHECK_STACKS=y, the @hard parameter tells whether the
 * hard or soft limits are queried. The difference between soft and hard is
 * that for the latter, the stack start includes some additional space to let
 * any function overflow the soft limit and still be able to print a stack
 * dump in this case.
 */
bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard);

static inline bool __nostackcheck get_stack_soft_limits(vaddr_t *start,
							vaddr_t *end)
{
	return get_stack_limits(start, end, false);
}

static inline bool __nostackcheck get_stack_hard_limits(vaddr_t *start,
							vaddr_t *end)
{
	return get_stack_limits(start, end, true);
}
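
/*
 * Example (illustrative sketch): using the soft limits to test whether an
 * address lies on the current stack. va_is_on_current_stack() is a
 * hypothetical helper, not part of this header.
 *
 *	static bool va_is_on_current_stack(vaddr_t va)
 *	{
 *		vaddr_t start = 0;
 *		vaddr_t end = 0;
 *
 *		if (!get_stack_soft_limits(&start, &end))
 *			return false;
 *		return va >= start && va < end;
 *	}
 */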

bool thread_is_in_normal_mode(void);

/*
 * Returns true if the previous exception also was in abort mode.
 *
 * Note: it's only valid to call this function from an abort exception
 * handler before interrupts have been re-enabled.
 */
bool thread_is_from_abort_mode(void);

/**
 * Allocates data for payload buffers.
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_payload(size_t size);

/**
 * Free physical memory previously allocated with thread_rpc_alloc_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_payload(struct mobj *mobj);
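
/*
 * Example (illustrative sketch): the expected allocate/use/free pairing.
 *
 *	struct mobj *mobj = thread_rpc_alloc_payload(size);
 *
 *	if (!mobj)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *	... fill the buffer and pass it to normal world via RPC ...
 *	thread_rpc_free_payload(mobj);
 */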

/**
 * Allocate data for payload buffers only shared with the non-secure kernel
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_kernel_payload(size_t size);

/**
 * Free physical memory previously allocated with
 * thread_rpc_alloc_kernel_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_kernel_payload(struct mobj *mobj);

struct thread_param_memref {
	size_t offs;
	size_t size;
	struct mobj *mobj;
};

struct thread_param_value {
	uint64_t a;
	uint64_t b;
	uint64_t c;
};

/*
 * Note that there's arithmetic done on the values, so it's important to
 * keep the IN, OUT, INOUT order.
 */
enum thread_param_attr {
	THREAD_PARAM_ATTR_NONE = 0,
	THREAD_PARAM_ATTR_VALUE_IN,
	THREAD_PARAM_ATTR_VALUE_OUT,
	THREAD_PARAM_ATTR_VALUE_INOUT,
	THREAD_PARAM_ATTR_MEMREF_IN,
	THREAD_PARAM_ATTR_MEMREF_OUT,
	THREAD_PARAM_ATTR_MEMREF_INOUT,
};

struct thread_param {
	enum thread_param_attr attr;
	union {
		struct thread_param_memref memref;
		struct thread_param_value value;
	} u;
};

#define THREAD_PARAM_MEMREF(_direction, _mobj, _offs, _size) \
	(struct thread_param){ \
		.attr = THREAD_PARAM_ATTR_MEMREF_ ## _direction, .u.memref = { \
		.mobj = (_mobj), .offs = (_offs), .size = (_size) } \
	}

#define THREAD_PARAM_VALUE(_direction, _a, _b, _c) \
	(struct thread_param){ \
		.attr = THREAD_PARAM_ATTR_VALUE_ ## _direction, .u.value = { \
		.a = (_a), .b = (_b), .c = (_c) } \
	}

/**
 * Does an RPC using a preallocated argument buffer
 * @cmd: RPC cmd
 * @num_params: number of parameters
 * @params: RPC parameters
 * @returns RPC return value
 */
uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
		struct thread_param *params);
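
/*
 * Example (illustrative sketch): issuing an RPC with one value parameter
 * and one memory reference using the macros above. OPTEE_RPC_CMD_XYZ is a
 * placeholder command, not a real constant.
 *
 *	struct thread_param params[2] = {
 *		THREAD_PARAM_VALUE(INOUT, 0, 0, 0),
 *		THREAD_PARAM_MEMREF(IN, mobj, 0, size),
 *	};
 *
 *	uint32_t res = thread_rpc_cmd(OPTEE_RPC_CMD_XYZ, 2, params);
 */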

/**
 * Allocate data for payload buffers.
 * Buffer is exported to user mode applications.
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_global_payload(size_t size);

/**
 * Free physical memory previously allocated with
 * thread_rpc_alloc_global_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_global_payload(struct mobj *mobj);

/*
 * enum thread_shm_type - type of non-secure shared memory
 * @THREAD_SHM_TYPE_APPLICATION - user space application shared memory
 * @THREAD_SHM_TYPE_KERNEL_PRIVATE - kernel private shared memory
 * @THREAD_SHM_TYPE_GLOBAL - user space and kernel shared memory
 */
enum thread_shm_type {
	THREAD_SHM_TYPE_APPLICATION,
	THREAD_SHM_TYPE_KERNEL_PRIVATE,
	THREAD_SHM_TYPE_GLOBAL,
};

/*
 * enum thread_shm_cache_user - user of a cache allocation
 * @THREAD_SHM_CACHE_USER_SOCKET - socket communication
 * @THREAD_SHM_CACHE_USER_FS - filesystem access
 * @THREAD_SHM_CACHE_USER_I2C - I2C communication
 *
 * To ensure that the users of the shared memory cache don't interfere with
 * each other, a unique ID per user is used.
 */
enum thread_shm_cache_user {
	THREAD_SHM_CACHE_USER_SOCKET,
	THREAD_SHM_CACHE_USER_FS,
	THREAD_SHM_CACHE_USER_I2C,
};

/*
 * Returns a pointer to the cached RPC memory. Each thread and @user tuple
 * has a unique cache. The pointer is guaranteed to point to a large enough
 * area or to be NULL.
 */
void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user,
				 enum thread_shm_type shm_type,
				 size_t size, struct mobj **mobj);
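
/*
 * Example (illustrative sketch): fetching a cached buffer for filesystem
 * RPC. The same thread and user tuple returns the same cached allocation.
 *
 *	struct mobj *mobj = NULL;
 *	void *va = thread_rpc_shm_cache_alloc(THREAD_SHM_CACHE_USER_FS,
 *					      THREAD_SHM_TYPE_APPLICATION,
 *					      size, &mobj);
 *
 *	if (!va)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 */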

#endif /*__ASSEMBLER__*/

#endif /*KERNEL_THREAD_H*/