/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2020-2021, Arm Limited
 */

#ifndef __KERNEL_THREAD_H
#define __KERNEL_THREAD_H

#ifndef __ASSEMBLER__
#include <types_ext.h>
#include <compiler.h>
#include <mm/pgt_cache.h>
#endif
#include <util.h>
#include <kernel/thread_arch.h>

#define THREAD_FLAGS_COPY_ARGS_ON_RETURN	BIT(0)
#define THREAD_FLAGS_FOREIGN_INTR_ENABLE	BIT(1)
#define THREAD_FLAGS_EXIT_ON_FOREIGN_INTR	BIT(2)
#define THREAD_FLAGS_FFA_ONLY			BIT(3)

#define THREAD_ID_0		0
#define THREAD_ID_INVALID	-1

#define THREAD_RPC_MAX_NUM_PARAMS	U(4)

#ifndef __ASSEMBLER__

struct thread_specific_data {
	TAILQ_HEAD(, ts_session) sess_stack;
	struct ts_ctx *ctx;
#ifdef CFG_CORE_FFA
	uint32_t rpc_target_info;
#endif
	uint32_t abort_type;
	uint32_t abort_descr;
	vaddr_t abort_va;
	unsigned int abort_core;
	struct thread_abort_regs abort_regs;
#ifdef CFG_CORE_DEBUG_CHECK_STACKS
	bool stackcheck_recursion;
#endif
	unsigned int syscall_recursion;
#ifdef CFG_FAULT_MITIGATION
	struct ftmn_func_arg *ftmn_arg;
#endif
};

void thread_init_canaries(void);
void thread_init_primary(void);
void thread_init_per_cpu(void);

#if defined(CFG_WITH_STACK_CANARIES)
void thread_update_canaries(void);
#else
static inline void thread_update_canaries(void) { }
#endif

struct thread_core_local *thread_get_core_local(void);

/*
 * thread_init_threads() - Initialize threads
 * @thread_count: Number of threads to configure
 *
 * Initializes thread contexts. Called from thread_init_boot_thread() when
 * virtualization is disabled, otherwise the virtualization subsystem calls
 * it for every new guest. @thread_count must be equal to CFG_NUM_THREADS.
 */
void thread_init_threads(size_t thread_count);

vaddr_t thread_get_abt_stack(void);

/*
 * thread_init_thread_core_local() - Initialize thread_core_local
 * @core_count:	Number of cores in the system
 *
 * Called by the init CPU. Sets temporary stack mode for all CPUs
 * (curr_thread = -1 and THREAD_CLF_TMP) and sets the temporary stack limit
 * for the init CPU. @core_count must be <= CFG_TEE_CORE_NB_CORE. With
 * CFG_DYN_CONFIG=y the number of supported cores is set to @core_count,
 * otherwise @core_count must equal CFG_TEE_CORE_NB_CORE.
 */
void thread_init_thread_core_local(size_t core_count);
void thread_init_core_local_stacks(void);

#if defined(CFG_CORE_PAUTH)
void thread_init_thread_pauth_keys(void);
void thread_init_core_local_pauth_keys(void);
#else
static inline void thread_init_thread_pauth_keys(void) { }
static inline void thread_init_core_local_pauth_keys(void) { }
#endif

/*
 * Initializes a thread to be used during boot
 */
void thread_init_boot_thread(void);

/*
 * Clears the current thread id
 * Only supposed to be used during initialization.
 */
void thread_clr_boot_thread(void);

/*
 * Returns current thread id.
 */
short int thread_get_id(void);

/*
 * Returns current thread id, or -1 on failure.
 */
short int thread_get_id_may_fail(void);

/* Returns Thread Specific Data (TSD) pointer. */
struct thread_specific_data *thread_get_tsd(void);
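
/*
 * Example (illustrative sketch only, not part of this API): the thread
 * specific data gives access to the stack of trusted service sessions for
 * the current thread. The variable names below are hypothetical.
 *
 *	struct thread_specific_data *tsd = thread_get_tsd();
 *	struct ts_session *sess = TAILQ_FIRST(&tsd->sess_stack);
 */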

/*
 * Sets the foreign interrupt status for the current thread, must only be
 * called from an active thread context.
 *
 * enable == true  -> enable foreign interrupts
 * enable == false -> disable foreign interrupts
 */
void thread_set_foreign_intr(bool enable);

/*
 * Restores the foreign interrupt status (in CPSR) for the current thread,
 * must only be called from an active thread context.
 */
void thread_restore_foreign_intr(void);

/*
 * thread_get_exceptions() - return current exception mask
 */
uint32_t thread_get_exceptions(void);

/*
 * thread_set_exceptions() - set exception mask
 * @exceptions: exception mask to set
 *
 * Any previous exception mask is replaced by this exception mask, that is,
 * old bits are cleared and replaced by these.
 */
void thread_set_exceptions(uint32_t exceptions);

/*
 * thread_mask_exceptions() - Masks (disables) specified asynchronous exceptions
 * @exceptions	exceptions to mask
 * @returns old exception state
 */
uint32_t thread_mask_exceptions(uint32_t exceptions);

/*
 * thread_unmask_exceptions() - Unmasks asynchronous exceptions
 * @state	Old asynchronous exception state to restore (returned by
 *		thread_mask_exceptions())
 */
void thread_unmask_exceptions(uint32_t state);
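
/*
 * Example (illustrative sketch only): the usual pattern is to mask
 * exceptions around a critical section and restore the previous state
 * afterwards. THREAD_EXCP_ALL is defined in <kernel/thread_arch.h>.
 *
 *	uint32_t excep = thread_mask_exceptions(THREAD_EXCP_ALL);
 *
 *	... critical section ...
 *
 *	thread_unmask_exceptions(excep);
 */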


static inline bool __nostackcheck thread_foreign_intr_disabled(void)
{
	return !!(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
}

/*
 * thread_enter_user_mode() - Enters user mode
 * @a0:		Passed in r/x0 for @entry_func
 * @a1:		Passed in r/x1 for @entry_func
 * @a2:		Passed in r/x2 for @entry_func
 * @a3:		Passed in r/x3 for @entry_func
 * @user_sp:	Assigned sp value in user mode
 * @entry_func:	Function to execute in user mode
 * @is_32bit:	True if TA should execute in AArch32, false if AArch64
 * @exit_status0: Pointer to opaque exit status 0
 * @exit_status1: Pointer to opaque exit status 1
 *
 * This function enters user mode with the arguments described above,
 * @exit_status0 and @exit_status1 are filled in by thread_unwind_user_mode()
 * when returning back to the caller of this function through an exception
 * handler.
 *
 * @Returns what's passed in "ret" to thread_unwind_user_mode()
 */
uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
		unsigned long a2, unsigned long a3, unsigned long user_sp,
		unsigned long entry_func, bool is_32bit,
		uint32_t *exit_status0, uint32_t *exit_status1);

/*
 * thread_unwind_user_mode() - Unwinds kernel stack from user entry
 * @ret:	Value to return from thread_enter_user_mode()
 * @exit_status0: Exit status 0
 * @exit_status1: Exit status 1
 *
 * This is the function that exception handlers can return into
 * to resume execution in kernel mode instead of user mode.
 *
 * This function is closely coupled with thread_enter_user_mode() since it
 * needs to restore registers saved by thread_enter_user_mode() and, when it
 * returns, make it look like thread_enter_user_mode() just returned. It is
 * expected that the stack pointer is where thread_enter_user_mode() left
 * it. The stack will be unwound and the function will return to where
 * thread_enter_user_mode() was called from. @exit_status0 and @exit_status1
 * are filled in via the corresponding pointers supplied to
 * thread_enter_user_mode().
 */
void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
		uint32_t exit_status1);

/*
 * Returns the start address (bottom) of the stack for the current thread,
 * zero if there is no current thread.
 */
vaddr_t thread_stack_start(void);


/* Returns the stack size for the current thread */
size_t thread_stack_size(void);

/*
 * Returns the start (top, lowest address) and end (bottom, highest address) of
 * the current stack (thread, temporary or abort stack).
 * When CFG_CORE_DEBUG_CHECK_STACKS=y, the @hard parameter tells whether the
 * hard or soft limits are queried. The difference is that for the hard limits
 * the stack start includes some additional space, so that a function which
 * overflows the soft limit can still print a stack dump.
 */
bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard);

static inline bool __nostackcheck get_stack_soft_limits(vaddr_t *start,
							vaddr_t *end)
{
	return get_stack_limits(start, end, false);
}

static inline bool __nostackcheck get_stack_hard_limits(vaddr_t *start,
							vaddr_t *end)
{
	return get_stack_limits(start, end, true);
}
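
/*
 * Example (illustrative sketch only, the helper name is hypothetical):
 * using the soft limits to check whether an address lies within the
 * current stack.
 *
 *	static bool va_is_on_current_stack(vaddr_t va)
 *	{
 *		vaddr_t start = 0;
 *		vaddr_t end = 0;
 *
 *		if (!get_stack_soft_limits(&start, &end))
 *			return false;
 *
 *		return va >= start && va < end;
 *	}
 */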

bool thread_is_in_normal_mode(void);

/*
 * Returns true if the previous exception was also taken in abort mode.
 *
 * Note: it's only valid to call this function from an abort exception
 * handler before interrupts have been re-enabled.
 */
bool thread_is_from_abort_mode(void);

/**
 * Allocates data for payload buffers shared with a non-secure user space
 * application. Ensure consistency with the enumeration
 * THREAD_SHM_TYPE_APPLICATION.
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_payload(size_t size);

/**
 * Free physical memory previously allocated with thread_rpc_alloc_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_payload(struct mobj *mobj);
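
/*
 * Example (illustrative sketch only, error handling reduced to a NULL
 * check): allocate a shared payload buffer, use it for an RPC and free it.
 * The buffer's virtual address is obtained through the mobj API declared
 * in <mm/mobj.h>.
 *
 *	struct mobj *mobj = thread_rpc_alloc_payload(4096);
 *
 *	if (!mobj)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *	... fill the buffer and pass it with THREAD_PARAM_MEMREF() ...
 *	thread_rpc_free_payload(mobj);
 */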

/**
 * Allocate data for payload buffers shared with the non-secure kernel.
 * Ensure consistency with the enumeration THREAD_SHM_TYPE_KERNEL_PRIVATE.
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_kernel_payload(size_t size);

/**
 * Free physical memory previously allocated with
 * thread_rpc_alloc_kernel_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_kernel_payload(struct mobj *mobj);

struct thread_param_memref {
	size_t offs;
	size_t size;
	struct mobj *mobj;
};

struct thread_param_value {
	uint64_t a;
	uint64_t b;
	uint64_t c;
};

/*
 * Note that some arithmetic is done on the value, so it is important to
 * keep the IN, OUT, INOUT order.
 */
enum thread_param_attr {
	THREAD_PARAM_ATTR_NONE = 0,
	THREAD_PARAM_ATTR_VALUE_IN,
	THREAD_PARAM_ATTR_VALUE_OUT,
	THREAD_PARAM_ATTR_VALUE_INOUT,
	THREAD_PARAM_ATTR_MEMREF_IN,
	THREAD_PARAM_ATTR_MEMREF_OUT,
	THREAD_PARAM_ATTR_MEMREF_INOUT,
};

struct thread_param {
	enum thread_param_attr attr;
	union {
		struct thread_param_memref memref;
		struct thread_param_value value;
	} u;
};

#define THREAD_PARAM_MEMREF(_direction, _mobj, _offs, _size) \
	(struct thread_param){ \
		.attr = THREAD_PARAM_ATTR_MEMREF_ ## _direction, .u.memref = { \
		.mobj = (_mobj), .offs = (_offs), .size = (_size) } \
	}

#define THREAD_PARAM_VALUE(_direction, _a, _b, _c) \
	(struct thread_param){ \
		.attr = THREAD_PARAM_ATTR_VALUE_ ## _direction, .u.value = { \
		.a = (_a), .b = (_b), .c = (_c) } \
	}

/**
 * Does an RPC using a preallocated argument buffer
 * @cmd: RPC cmd
 * @num_params: number of parameters
 * @params: RPC parameters
 * @returns RPC return value
 */
uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
		struct thread_param *params);
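
/*
 * Example (illustrative sketch only, the command ID, opcode and mobj are
 * placeholders): issuing an RPC with one value and one memref parameter.
 *
 *	struct thread_param params[] = {
 *		THREAD_PARAM_VALUE(IN, opcode, 0, 0),
 *		THREAD_PARAM_MEMREF(INOUT, mobj, 0, size),
 *	};
 *	uint32_t res = thread_rpc_cmd(OPTEE_RPC_CMD_XYZ, ARRAY_SIZE(params),
 *				      params);
 */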

/**
 * Allocate data for payload buffers shared with both user space applications
 * and the non-secure kernel. Ensure consistency with the enumeration
 * THREAD_SHM_TYPE_GLOBAL.
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_global_payload(size_t size);

/**
 * Free physical memory previously allocated with
 * thread_rpc_alloc_global_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_global_payload(struct mobj *mobj);

/*
 * enum thread_shm_type - type of non-secure shared memory
 * @THREAD_SHM_TYPE_APPLICATION - user space application shared memory
 * @THREAD_SHM_TYPE_KERNEL_PRIVATE - kernel private shared memory
 * @THREAD_SHM_TYPE_GLOBAL - user space and kernel shared memory
 */
enum thread_shm_type {
	THREAD_SHM_TYPE_APPLICATION,
	THREAD_SHM_TYPE_KERNEL_PRIVATE,
	THREAD_SHM_TYPE_GLOBAL,
};

/*
 * enum thread_shm_cache_user - user of a cache allocation
 * @THREAD_SHM_CACHE_USER_SOCKET - socket communication
 * @THREAD_SHM_CACHE_USER_FS - filesystem access
 * @THREAD_SHM_CACHE_USER_I2C - I2C communication
 * @THREAD_SHM_CACHE_USER_RPMB - RPMB communication
 *
 * To ensure that the users of the shared memory cache don't interfere
 * with each other, a unique ID per user is used.
 */
enum thread_shm_cache_user {
	THREAD_SHM_CACHE_USER_SOCKET,
	THREAD_SHM_CACHE_USER_FS,
	THREAD_SHM_CACHE_USER_I2C,
	THREAD_SHM_CACHE_USER_RPMB,
};

/*
 * Returns a pointer to the cached RPC memory. Each thread and @user tuple
 * has a unique cache. The pointer is guaranteed to point to a large enough
 * area or to be NULL.
 */
void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user,
				 enum thread_shm_type shm_type,
				 size_t size, struct mobj **mobj);
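
/*
 * Example (illustrative sketch only): requesting a cached, kernel private
 * shared memory buffer for filesystem RPCs.
 *
 *	struct mobj *mobj = NULL;
 *	void *va = thread_rpc_shm_cache_alloc(THREAD_SHM_CACHE_USER_FS,
 *					      THREAD_SHM_TYPE_KERNEL_PRIVATE,
 *					      size, &mobj);
 *
 *	if (!va)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 */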

#endif /*__ASSEMBLER__*/

#endif /*__KERNEL_THREAD_H*/