/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2020-2021, Arm Limited
 */

#ifndef KERNEL_THREAD_H
#define KERNEL_THREAD_H

#ifndef __ASSEMBLER__
#include <types_ext.h>
#include <compiler.h>
#include <mm/pgt_cache.h>
#endif
#include <kernel/thread_arch.h>

#define THREAD_ID_0		0
#define THREAD_ID_INVALID	-1

#define THREAD_RPC_MAX_NUM_PARAMS	U(4)

#ifndef __ASSEMBLER__

struct thread_specific_data {
	TAILQ_HEAD(, ts_session) sess_stack;
	struct ts_ctx *ctx;
	struct pgt_cache pgt_cache;
#ifdef CFG_CORE_FFA
	uint32_t rpc_target_info;
#endif
	uint32_t abort_type;
	uint32_t abort_descr;
	vaddr_t abort_va;
	unsigned int abort_core;
	struct thread_abort_regs abort_regs;
#ifdef CFG_CORE_DEBUG_CHECK_STACKS
	bool stackcheck_recursion;
#endif
	unsigned int syscall_recursion;
};

void thread_init_primary(void);
void thread_init_per_cpu(void);

struct thread_core_local *thread_get_core_local(void);

/*
 * Sets the stacks to be used by the different threads. Use THREAD_ID_0 for
 * the first stack, THREAD_ID_0 + 1 for the next and so on.
 *
 * Returns true on success and false on errors.
 */
bool thread_init_stack(uint32_t stack_id, vaddr_t sp);

/*
 * Initializes thread contexts. Called in thread_init_boot_thread() if
 * virtualization is disabled. Virtualization subsystem calls it for
 * every new guest otherwise.
 */
void thread_init_threads(void);

/*
 * Called by the init CPU. Sets temporary stack mode for all CPUs
 * (curr_thread = -1 and THREAD_CLF_TMP) and sets the temporary stack limit for
 * the init CPU.
 */
void thread_init_thread_core_local(void);

/*
 * Initializes a thread to be used during boot
 */
void thread_init_boot_thread(void);

/*
 * Clears the current thread id
 * Only supposed to be used during initialization.
 */
void thread_clr_boot_thread(void);

/*
 * Returns current thread id.
 */
short int thread_get_id(void);

/*
 * Returns current thread id, or -1 on failure.
 */
short int thread_get_id_may_fail(void);

/* Returns Thread Specific Data (TSD) pointer. */
struct thread_specific_data *thread_get_tsd(void);

/*
 * Sets foreign interrupts status for current thread, must only be called
 * from an active thread context.
 *
 * enable == true  -> enable foreign interrupts
 * enable == false -> disable foreign interrupts
 */
void thread_set_foreign_intr(bool enable);

/*
 * Restores the foreign interrupts status (in CPSR) for current thread, must
 * only be called from an active thread context.
 */
void thread_restore_foreign_intr(void);

/*
 * thread_get_exceptions() - return current exception mask
 */
uint32_t thread_get_exceptions(void);

/*
 * thread_set_exceptions() - set exception mask
 * @exceptions: exception mask to set
 *
 * Any previous exception mask is replaced by this exception mask, that is,
 * old bits are cleared and replaced by these.
 */
void thread_set_exceptions(uint32_t exceptions);

/*
 * thread_mask_exceptions() - Masks (disables) specified asynchronous exceptions
 * @exceptions	exceptions to mask
 * @returns old exception state
 */
uint32_t thread_mask_exceptions(uint32_t exceptions);

/*
 * thread_unmask_exceptions() - Unmasks asynchronous exceptions
 * @state	Old asynchronous exception state to restore (returned by
 *		thread_mask_exceptions())
 */
void thread_unmask_exceptions(uint32_t state);
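
/*
 * Example (illustrative sketch, not part of the API): masking foreign
 * interrupts around a short critical section and restoring the previous
 * state afterwards. do_critical_work() is a hypothetical helper used only
 * for illustration.
 *
 *	uint32_t excep = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
 *
 *	do_critical_work();
 *	thread_unmask_exceptions(excep);
 */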


static inline bool __nostackcheck thread_foreign_intr_disabled(void)
{
	return !!(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
}

/*
 * thread_enter_user_mode() - Enters user mode
 * @a0:		Passed in r/x0 for @entry_func
 * @a1:		Passed in r/x1 for @entry_func
 * @a2:		Passed in r/x2 for @entry_func
 * @a3:		Passed in r/x3 for @entry_func
 * @user_sp:	Assigned sp value in user mode
 * @entry_func:	Function to execute in user mode
 * @is_32bit:	True if TA should execute in AArch32, false if AArch64
 * @exit_status0: Pointer to opaque exit status 0
 * @exit_status1: Pointer to opaque exit status 1
 *
 * This function enters user mode with the arguments described above.
 * @exit_status0 and @exit_status1 are filled in by thread_unwind_user_mode()
 * when returning back to the caller of this function through an exception
 * handler.
 *
 * @Returns what's passed in "ret" to thread_unwind_user_mode()
 */
uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
		unsigned long a2, unsigned long a3, unsigned long user_sp,
		unsigned long entry_func, bool is_32bit,
		uint32_t *exit_status0, uint32_t *exit_status1);

/*
 * thread_unwind_user_mode() - Unwinds kernel stack from user entry
 * @ret:	Value to return from thread_enter_user_mode()
 * @exit_status0: Exit status 0
 * @exit_status1: Exit status 1
 *
 * This is the function that exception handlers can return into
 * to resume execution in kernel mode instead of user mode.
 *
 * This function is closely coupled with thread_enter_user_mode() since it
 * needs to restore registers saved by thread_enter_user_mode() and, when it
 * returns, make it look like thread_enter_user_mode() just returned. It is
 * expected that the stack pointer is where thread_enter_user_mode() left
 * it. The stack will be unwound and the function will return to where
 * thread_enter_user_mode() was called from. @exit_status0 and @exit_status1
 * are filled in via the corresponding pointers supplied to
 * thread_enter_user_mode().
 */
void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
		uint32_t exit_status1);
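
/*
 * Illustrative sketch (hypothetical caller, heavily simplified) of how the
 * two functions above pair up: the caller blocks in thread_enter_user_mode()
 * until an exception handler ends the user mode excursion by calling
 * thread_unwind_user_mode().
 *
 *	uint32_t exit0 = 0;
 *	uint32_t exit1 = 0;
 *	uint32_t ret = thread_enter_user_mode(a0, a1, a2, a3, user_sp,
 *					      entry_func, is_32bit,
 *					      &exit0, &exit1);
 *
 *	// Execution resumes here once an exception handler has called
 *	// thread_unwind_user_mode(); the values it passed show up in
 *	// ret, exit0 and exit1.
 */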

/*
 * Returns the start address (bottom) of the stack for the current thread,
 * zero if there is no current thread.
 */
vaddr_t thread_stack_start(void);


/* Returns the stack size for the current thread */
size_t thread_stack_size(void);

/*
 * Returns the start (top, lowest address) and end (bottom, highest address) of
 * the current stack (thread, temporary or abort stack).
 * When CFG_CORE_DEBUG_CHECK_STACKS=y, the @hard parameter tells whether the
 * hard or soft limits are queried. The difference is that the hard limits
 * include some additional space beyond the soft limit, so that a function
 * overflowing the soft limit still has room to print a stack dump.
 */
bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard);

static inline bool __nostackcheck get_stack_soft_limits(vaddr_t *start,
							vaddr_t *end)
{
	return get_stack_limits(start, end, false);
}

static inline bool __nostackcheck get_stack_hard_limits(vaddr_t *start,
							vaddr_t *end)
{
	return get_stack_limits(start, end, true);
}
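
/*
 * Example (illustrative sketch): checking whether an address lies on the
 * current stack using the soft limits. @va is a hypothetical address.
 *
 *	vaddr_t start = 0;
 *	vaddr_t end = 0;
 *	bool on_stack = false;
 *
 *	if (get_stack_soft_limits(&start, &end))
 *		on_stack = va >= start && va < end;
 */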

bool thread_is_in_normal_mode(void);

/*
 * Returns true if the previous exception also was in abort mode.
 *
 * Note: it's only valid to call this function from an abort exception
 * handler before interrupts have been re-enabled.
 */
bool thread_is_from_abort_mode(void);

/**
 * Allocates data for payload buffers.
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_payload(size_t size);

/**
 * Free physical memory previously allocated with thread_rpc_alloc_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_payload(struct mobj *mobj);

/**
 * Allocate data for payload buffers only shared with the non-secure kernel
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_kernel_payload(size_t size);

/**
 * Free physical memory previously allocated with
 * thread_rpc_alloc_kernel_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_kernel_payload(struct mobj *mobj);

struct thread_param_memref {
	size_t offs;
	size_t size;
	struct mobj *mobj;
};

struct thread_param_value {
	uint64_t a;
	uint64_t b;
	uint64_t c;
};

/*
 * Note that some arithmetic is done on these values, so it's important to
 * keep them in IN, OUT, INOUT order.
 */
enum thread_param_attr {
	THREAD_PARAM_ATTR_NONE = 0,
	THREAD_PARAM_ATTR_VALUE_IN,
	THREAD_PARAM_ATTR_VALUE_OUT,
	THREAD_PARAM_ATTR_VALUE_INOUT,
	THREAD_PARAM_ATTR_MEMREF_IN,
	THREAD_PARAM_ATTR_MEMREF_OUT,
	THREAD_PARAM_ATTR_MEMREF_INOUT,
};
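
/*
 * Illustrative sketch (hypothetical, not the actual conversion code) of the
 * kind of arithmetic the ordering above permits: deriving a zero-based
 * direction index where 0 = IN, 1 = OUT and 2 = INOUT.
 *
 *	unsigned int dir = attr - THREAD_PARAM_ATTR_MEMREF_IN;
 */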

struct thread_param {
	enum thread_param_attr attr;
	union {
		struct thread_param_memref memref;
		struct thread_param_value value;
	} u;
};

#define THREAD_PARAM_MEMREF(_direction, _mobj, _offs, _size) \
	(struct thread_param){ \
		.attr = THREAD_PARAM_ATTR_MEMREF_ ## _direction, .u.memref = { \
		.mobj = (_mobj), .offs = (_offs), .size = (_size) } \
	}

#define THREAD_PARAM_VALUE(_direction, _a, _b, _c) \
	(struct thread_param){ \
		.attr = THREAD_PARAM_ATTR_VALUE_ ## _direction, .u.value = { \
		.a = (_a), .b = (_b), .c = (_c) } \
	}

/**
 * Does an RPC using a preallocated argument buffer
 * @cmd: RPC cmd
 * @num_params: number of parameters
 * @params: RPC parameters
 * @returns RPC return value
 */
uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
		struct thread_param *params);
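
/*
 * Example (illustrative sketch): issuing an RPC with one value parameter
 * and one memref parameter. @cmd, @kind and @size are hypothetical; real
 * callers use command identifiers defined elsewhere.
 *
 *	struct mobj *mobj = thread_rpc_alloc_payload(size);
 *	struct thread_param params[2] = {
 *		THREAD_PARAM_VALUE(IN, kind, 0, 0),
 *		THREAD_PARAM_MEMREF(INOUT, mobj, 0, size),
 *	};
 *	uint32_t res = 0;
 *
 *	if (mobj) {
 *		res = thread_rpc_cmd(cmd, 2, params);
 *		thread_rpc_free_payload(mobj);
 *	}
 */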

/**
 * Allocate data for payload buffers.
 * Buffer is exported to user mode applications.
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_global_payload(size_t size);

/**
 * Free physical memory previously allocated with
 * thread_rpc_alloc_global_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_global_payload(struct mobj *mobj);

/*
 * enum thread_shm_type - type of non-secure shared memory
 * @THREAD_SHM_TYPE_APPLICATION - user space application shared memory
 * @THREAD_SHM_TYPE_KERNEL_PRIVATE - kernel private shared memory
 * @THREAD_SHM_TYPE_GLOBAL - user space and kernel shared memory
 */
enum thread_shm_type {
	THREAD_SHM_TYPE_APPLICATION,
	THREAD_SHM_TYPE_KERNEL_PRIVATE,
	THREAD_SHM_TYPE_GLOBAL,
};

/*
 * enum thread_shm_cache_user - user of a cache allocation
 * @THREAD_SHM_CACHE_USER_SOCKET - socket communication
 * @THREAD_SHM_CACHE_USER_FS - filesystem access
 * @THREAD_SHM_CACHE_USER_I2C - I2C communication
 *
 * To ensure that the users of the shared memory cache don't interfere
 * with each other, a unique ID per user is used.
 */
enum thread_shm_cache_user {
	THREAD_SHM_CACHE_USER_SOCKET,
	THREAD_SHM_CACHE_USER_FS,
	THREAD_SHM_CACHE_USER_I2C,
};

/*
 * Returns a pointer to the cached RPC memory. Each thread and @user tuple
 * has a unique cache. The pointer is guaranteed to point to a large enough
 * area or to be NULL.
 */
void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user,
				 enum thread_shm_type shm_type,
				 size_t size, struct mobj **mobj);
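
/*
 * Example (illustrative sketch): getting a cached kernel-private buffer for
 * filesystem RPCs. @size is hypothetical; the returned buffer is reused on
 * later calls for the same thread and user, so the caller is assumed not to
 * free it.
 *
 *	struct mobj *mobj = NULL;
 *	void *va = thread_rpc_shm_cache_alloc(THREAD_SHM_CACHE_USER_FS,
 *					      THREAD_SHM_TYPE_KERNEL_PRIVATE,
 *					      size, &mobj);
 *
 *	if (!va)
 *		return NULL;
 */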

#endif /*__ASSEMBLER__*/

#endif /*KERNEL_THREAD_H*/