xref: /optee_os/core/kernel/thread.c (revision a4c2e0cb4e4fcb0c760fb3daf9172e682a4e3628)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2022, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <config.h>
#include <crypto/crypto.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/mobj.h>

struct thread_ctx threads[CFG_NUM_THREADS];

static struct thread_core_local
	__thread_core_local[CFG_TEE_CORE_NB_CORE] __nex_bss;
struct thread_core_local *thread_core_local __nex_data = __thread_core_local;
size_t thread_core_count __nex_data = CFG_TEE_CORE_NB_CORE;
unsigned long thread_core_local_pa __nex_bss;

/*
 * Stacks
 *
 * [Lower addresses on the left]
 *
 * [ STACK_CANARY_SIZE/2 | STACK_CHECK_EXTRA | STACK_XXX_SIZE | STACK_CANARY_SIZE/2 ]
 * ^                     ^                   ^                ^
 * stack_xxx[n]          "hard" top          "soft" top       bottom
 */

static uint32_t start_canary_value = 0xdedede00;
static uint32_t end_canary_value = 0xababab00;

#define DECLARE_STACK(name, num_stacks, stack_size, linkage) \
linkage uint32_t name[num_stacks] \
		[ROUNDUP(stack_size + STACK_CANARY_SIZE + STACK_CHECK_EXTRA, \
			 STACK_ALIGNMENT) / sizeof(uint32_t)] \
		__attribute__((section(".nozi_stack." # name), \
			       aligned(STACK_ALIGNMENT)))

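/*
 * A sketch of what the macro above expands to for the stack_abt
 * declaration below, assuming purely illustrative values
 * STACK_CANARY_SIZE == 8, STACK_CHECK_EXTRA == 0 and STACK_ALIGNMENT == 16
 * (the real values are configuration dependent):
 *
 *	static uint32_t stack_abt[CFG_TEE_CORE_NB_CORE]
 *		[ROUNDUP(STACK_ABT_SIZE + 8 + 0, 16) / sizeof(uint32_t)]
 *		__attribute__((section(".nozi_stack.stack_abt"),
 *			       aligned(16)));
 *
 * That is, one fixed-size, alignment-padded stack per core (or per
 * thread), placed in a dedicated ".nozi_stack.*" section so the linker
 * script can position it.
 */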
DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE,
	      /* global linkage */);
DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static);
#ifndef CFG_WITH_PAGER
DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE, static);
#endif

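/*
 * Per the layout diagram above, the usable stack bottom is the end of the
 * backing array minus the trailing half canary, which occupies the very
 * last bytes of the array.
 */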
#define GET_STACK_BOTTOM(stack, n) ((vaddr_t)&(stack)[n] + sizeof(stack[n]) - \
				    STACK_CANARY_SIZE / 2)

const uint32_t stack_tmp_stride __section(".identity_map.stack_tmp_stride") =
	sizeof(stack_tmp[0]);

/*
 * This stack setup info is required by secondary boot cores before they
 * each locally enable the pager (the MMU). Hence it is kept in pager
 * sections.
 */
DECLARE_KEEP_PAGER(stack_tmp_stride);

static unsigned int thread_global_lock __nex_bss = SPINLOCK_UNLOCK;

static size_t stack_size_to_alloc_size(size_t stack_size)
{
	return ROUNDUP(stack_size + STACK_CANARY_SIZE + STACK_CHECK_EXTRA,
		       STACK_ALIGNMENT);
}

static vaddr_t stack_end_va_to_top_hard(size_t stack_size, vaddr_t end_va)
{
	size_t l = stack_size_to_alloc_size(stack_size);

	return end_va - l + STACK_CANARY_SIZE;
}

static vaddr_t stack_end_va_to_top_soft(size_t stack_size, vaddr_t end_va)
{
	return stack_end_va_to_top_hard(stack_size, end_va) + STACK_CHECK_EXTRA;
}

static vaddr_t stack_end_va_to_bottom(size_t stack_size __unused,
				      vaddr_t end_va)
{
	return end_va;
}

static uint32_t *stack_end_va_to_start_canary(size_t stack_size, vaddr_t end_va)
{
	return (uint32_t *)(stack_end_va_to_top_hard(stack_size, end_va) -
			    STACK_CANARY_SIZE / 2);
}

static uint32_t *stack_end_va_to_end_canary(size_t stack_size __unused,
					    vaddr_t end_va)
{
	return (uint32_t *)(end_va + STACK_CANARY_SIZE / 2 - sizeof(uint32_t));
}

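/*
 * Worked example of the helpers above, with the same illustrative values
 * as before (STACK_CANARY_SIZE == 8, STACK_CHECK_EXTRA == 0,
 * STACK_ALIGNMENT == 16) and a hypothetical 2048-byte stack whose backing
 * array starts at VA 0x1000:
 *
 *	alloc size = ROUNDUP(2048 + 8 + 0, 16) = 2064 (0x810) bytes
 *	end_va (bottom) = 0x1000 + 0x810 - 8/2 = 0x180c
 *	top hard        = 0x180c - 0x810 + 8   = 0x1004
 *	top soft        = 0x1004 + 0           = 0x1004
 *	start canary at 0x1004 - 4 = 0x1000 (first word of the array)
 *	end canary at 0x180c + 4 - 4 = 0x180c (last word of the array)
 *
 * which matches the layout diagram at the top of the file.
 */
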
static void init_canaries(size_t stack_size, vaddr_t va_end)
{
	uint32_t *canary = NULL;

	assert(va_end);
	canary = stack_end_va_to_start_canary(stack_size, va_end);
	*canary = start_canary_value;
	canary = stack_end_va_to_end_canary(stack_size, va_end);
	*canary = end_canary_value;
}

void thread_init_canaries(void)
{
	vaddr_t va = 0;
	size_t n = 0;

	if (IS_ENABLED(CFG_WITH_STACK_CANARIES)) {
		for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
			if (thread_core_local[n].tmp_stack_va_end) {
				va = thread_core_local[n].tmp_stack_va_end +
				     STACK_TMP_OFFS;
				init_canaries(STACK_TMP_SIZE, va);
			}
			va = thread_core_local[n].abt_stack_va_end;
			if (va)
				init_canaries(STACK_ABT_SIZE, va);
		}
	}

	if (IS_ENABLED(CFG_WITH_STACK_CANARIES) &&
	    !IS_ENABLED(CFG_WITH_PAGER) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		for (n = 0; n < CFG_NUM_THREADS; n++) {
			va = threads[n].stack_va_end;
			if (va)
				init_canaries(STACK_THREAD_SIZE, va);
		}
	}
}

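/*
 * Replaces the boot-time canary constants with randomly generated values.
 * The old canaries are verified first, and exceptions are masked across
 * the swap so this core cannot be preempted while the stored canaries and
 * the expected values would disagree.
 */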
#if defined(CFG_WITH_STACK_CANARIES)
void thread_update_canaries(void)
{
	uint32_t canary[2] = { };
	uint32_t exceptions = 0;

	plat_get_random_stack_canaries(canary, ARRAY_SIZE(canary),
				       sizeof(canary[0]));

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	thread_check_canaries();

	start_canary_value = canary[0];
	end_canary_value = canary[1];
	thread_init_canaries();

	thread_unmask_exceptions(exceptions);
}
#endif

static void check_stack_canary(const char *stack_name __maybe_unused,
			       size_t n __maybe_unused,
			       size_t stack_size, vaddr_t end_va)
{
	uint32_t *canary = NULL;

	canary = stack_end_va_to_start_canary(stack_size, end_va);
	if (*canary != start_canary_value) {
		EMSG_RAW("Dead canary at start of '%s[%zu]' (%p)",
			 stack_name, n, (void *)canary);
		panic();
	}

	canary = stack_end_va_to_end_canary(stack_size, end_va);
	if (*canary != end_canary_value) {
		EMSG_RAW("Dead canary at end of '%s[%zu]' (%p)",
			 stack_name, n, (void *)canary);
		panic();
	}
}

void thread_check_canaries(void)
{
	vaddr_t va = 0;
	size_t n = 0;

	if (IS_ENABLED(CFG_WITH_STACK_CANARIES)) {
		for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
			if (thread_core_local[n].tmp_stack_va_end) {
				va = thread_core_local[n].tmp_stack_va_end +
				     STACK_TMP_OFFS;
				check_stack_canary("tmp_stack", n,
						   STACK_TMP_SIZE, va);
			}

			va = thread_core_local[n].abt_stack_va_end;
			if (va)
				check_stack_canary("abt_stack", n,
						   STACK_ABT_SIZE, va);
		}
	}

	if (IS_ENABLED(CFG_WITH_STACK_CANARIES) &&
	    !IS_ENABLED(CFG_WITH_PAGER) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		for (n = 0; n < CFG_NUM_THREADS; n++) {
			va = threads[n].stack_va_end;
			if (va)
				check_stack_canary("thread_stack", n,
						   STACK_THREAD_SIZE, va);
		}
	}
}

void thread_lock_global(void)
{
	cpu_spin_lock(&thread_global_lock);
}

void thread_unlock_global(void)
{
	cpu_spin_unlock(&thread_global_lock);
}

static struct thread_core_local * __nostackcheck
get_core_local(unsigned int pos)
{
	/*
	 * Foreign interrupts must be disabled before playing with core_local
	 * since we otherwise may be rescheduled to a different core in the
	 * middle of this function.
	 */
	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);

	assert(pos < CFG_TEE_CORE_NB_CORE);
	return &thread_core_local[pos];
}

struct thread_core_local * __nostackcheck thread_get_core_local(void)
{
	unsigned int pos = get_core_pos();

	return get_core_local(pos);
}

#ifdef CFG_CORE_DEBUG_CHECK_STACKS
static void print_stack_limits(void)
{
	size_t n = 0;
	vaddr_t __maybe_unused start = 0;
	vaddr_t __maybe_unused end = 0;
	vaddr_t va = 0;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		va = thread_core_local[n].tmp_stack_va_end + STACK_TMP_OFFS;
		start = stack_end_va_to_top_soft(STACK_TMP_SIZE, va);
		end = stack_end_va_to_bottom(STACK_TMP_SIZE, va);
		DMSG("tmp [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);

		va = thread_core_local[n].abt_stack_va_end;
		start = stack_end_va_to_top_soft(STACK_ABT_SIZE, va);
		end = stack_end_va_to_bottom(STACK_ABT_SIZE, va);
		DMSG("abt [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
	}

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		va = threads[n].stack_va_end;
		start = stack_end_va_to_top_soft(STACK_THREAD_SIZE, va);
		end = stack_end_va_to_bottom(STACK_THREAD_SIZE, va);
		DMSG("thr [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
	}
}

static void check_stack_limits(void)
{
	vaddr_t stack_start = 0;
	vaddr_t stack_end = 0;
	/* Any value in the current stack frame will do */
	vaddr_t current_sp = (vaddr_t)&stack_start;

	if (!get_stack_soft_limits(&stack_start, &stack_end))
		panic("Unknown stack limits");
	if (current_sp < stack_start || current_sp > stack_end) {
		EMSG("Stack pointer out of range: 0x%" PRIxVA " not in [0x%"
		     PRIxVA " .. 0x%" PRIxVA "]", current_sp, stack_start,
		     stack_end);
		print_stack_limits();
		panic();
	}
}

static bool * __nostackcheck get_stackcheck_recursion_flag(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	unsigned int pos = get_core_pos();
	struct thread_core_local *l = get_core_local(pos);
	int ct = l->curr_thread;
	bool *p = NULL;

	if (l->flags & (THREAD_CLF_ABORT | THREAD_CLF_TMP))
		p = &l->stackcheck_recursion;
	else if (!l->flags)
		p = &threads[ct].tsd.stackcheck_recursion;

	thread_unmask_exceptions(exceptions);
	return p;
}

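/*
 * With CFG_CORE_DEBUG_CHECK_STACKS the kernel is presumably built with
 * GCC's -finstrument-functions, which inserts a call to
 * __cyg_profile_func_enter() at every function entry. The recursion flag
 * fetched above keeps the stack check from instrumenting itself:
 * functions called from check_stack_limits() would otherwise re-enter
 * this hook indefinitely.
 */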
void __cyg_profile_func_enter(void *this_fn, void *call_site);
void __nostackcheck __cyg_profile_func_enter(void *this_fn __unused,
					     void *call_site __unused)
{
	bool *p = get_stackcheck_recursion_flag();

	assert(p);
	if (*p)
		return;
	*p = true;
	check_stack_limits();
	*p = false;
}

void __cyg_profile_func_exit(void *this_fn, void *call_site);
void __nostackcheck __cyg_profile_func_exit(void *this_fn __unused,
					    void *call_site __unused)
{
}
#else
static void print_stack_limits(void)
{
}
#endif

void thread_init_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	thread_init_threads();

	l->curr_thread = 0;
	threads[0].state = THREAD_STATE_ACTIVE;
}

void __nostackcheck thread_clr_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS);
	assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
	threads[l->curr_thread].state = THREAD_STATE_FREE;
	l->curr_thread = THREAD_ID_INVALID;
}

void __nostackcheck *thread_get_tmp_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();

	/*
	 * Called from assembly when switching to the temporary stack, so flags
	 * need updating
	 */
	l->flags |= THREAD_CLF_TMP;

	return (void *)l->tmp_stack_va_end;
}

vaddr_t thread_stack_start(void)
{
	struct thread_ctx *thr;
	int ct = thread_get_id_may_fail();

	if (ct == THREAD_ID_INVALID)
		return 0;

	thr = threads + ct;
	return stack_end_va_to_top_soft(STACK_THREAD_SIZE, thr->stack_va_end);
}

size_t thread_stack_size(void)
{
	return STACK_THREAD_SIZE;
}

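/*
 * Reports the usable range of the stack the caller is currently running
 * on. With hard set, the range extends down to the start canary; the
 * soft start leaves the STACK_CHECK_EXTRA headroom in place so that the
 * stack checking machinery itself still has room to run once the soft
 * limit is crossed.
 */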
bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	unsigned int pos = get_core_pos();
	struct thread_core_local *l = get_core_local(pos);
	int ct = l->curr_thread;
	size_t stack_size = 0;
	bool ret = true;
	vaddr_t va = 0;

	if (l->flags & THREAD_CLF_TMP) {
		va = l->tmp_stack_va_end + STACK_TMP_OFFS;
		stack_size = STACK_TMP_SIZE;
	} else if (l->flags & THREAD_CLF_ABORT) {
		va = l->abt_stack_va_end;
		stack_size = STACK_ABT_SIZE;
	} else if (!l->flags && ct >= 0 && ct < CFG_NUM_THREADS) {
		va = threads[ct].stack_va_end;
		stack_size = STACK_THREAD_SIZE;
	} else {
		ret = false;
		goto out;
	}

	*end = stack_end_va_to_bottom(stack_size, va);
	if (hard)
		*start = stack_end_va_to_top_hard(stack_size, va);
	else
		*start = stack_end_va_to_top_soft(stack_size, va);
out:
	thread_unmask_exceptions(exceptions);
	return ret;
}

bool thread_is_from_abort_mode(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (l->flags >> THREAD_CLF_SAVED_SHIFT) & THREAD_CLF_ABORT;
}

/*
 * This function should always be accurate, but it might be possible to
 * implement a more efficient version depending on the CPU architecture.
 */
bool __weak __noprof thread_is_in_normal_mode(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	bool ret;

	/*
	 * If any bit in l->flags is set aside from THREAD_CLF_TMP we're
	 * handling some exception.
	 */
	ret = (l->curr_thread != THREAD_ID_INVALID) &&
	      !(l->flags & ~THREAD_CLF_TMP);
	thread_unmask_exceptions(exceptions);

	return ret;
}

short int __noprof thread_get_id_may_fail(void)
{
	/*
	 * thread_get_core_local() requires foreign interrupts to be disabled
	 */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	short int ct = l->curr_thread;

	thread_unmask_exceptions(exceptions);
	return ct;
}

short int __noprof thread_get_id(void)
{
	short int ct = thread_get_id_may_fail();

	/* Thread ID has to fit in a short int */
	COMPILE_TIME_ASSERT(CFG_NUM_THREADS <= SHRT_MAX);
	assert(ct >= 0 && ct < CFG_NUM_THREADS);
	return ct;
}

#ifdef CFG_WITH_PAGER
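/*
 * With the pager enabled, each thread stack is backed by locked pages and
 * preceded by one small page that is never mapped, roughly:
 *
 * [ guard page (unmapped) | STACK_THREAD_SIZE of locked, paged memory ]
 * ^ tee_mm_get_smem(mm)                                 stack_va_end ^
 *
 * An overflow past the top of the stack then faults in the guard page
 * instead of silently corrupting a neighbouring allocation.
 */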
static void init_thread_stacks(void)
{
	size_t n = 0;

	/*
	 * Allocate virtual memory for thread stacks.
	 */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		tee_mm_entry_t *mm = NULL;
		vaddr_t sp = 0;
		size_t num_pages = 0;
		struct fobj *fobj = NULL;

		/* Find vmem for thread stack and its protection gap */
		mm = tee_mm_alloc(&core_virt_mem_pool,
				  SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
		assert(mm);

		/* Claim any physical pages that may back this range */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);

		num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE - 1;
		fobj = fobj_locked_paged_alloc(num_pages);

		/* Add the region to the pager */
		tee_pager_add_core_region(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE,
					  PAGED_REGION_TYPE_LOCK, fobj);
		fobj_put(fobj);

		/* Init effective stack */
		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
		asan_tag_access((void *)tee_mm_get_smem(mm), (void *)sp);
		threads[n].stack_va_end = sp;
	}
}
#else
static void init_thread_stacks(void)
{
	vaddr_t va = 0;
	size_t n = 0;

	/* Assign the thread stacks */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		va = GET_STACK_BOTTOM(stack_thread, n);
		threads[n].stack_va_end = va;
		if (IS_ENABLED(CFG_WITH_STACK_CANARIES))
			init_canaries(STACK_THREAD_SIZE, va);
	}
}
#endif /*CFG_WITH_PAGER*/

void thread_init_threads(void)
{
	size_t n = 0;

	init_thread_stacks();
	print_stack_limits();
	pgt_init();

	mutex_lockdep_init();

	for (n = 0; n < CFG_NUM_THREADS; n++)
		TAILQ_INIT(&threads[n].tsd.sess_stack);
}

vaddr_t __nostackcheck thread_get_abt_stack(void)
{
	return GET_STACK_BOTTOM(stack_abt, get_core_pos());
}

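/*
 * With CFG_BOOT_INIT_CURRENT_THREAD_CORE_LOCAL the booting core's own
 * entry is presumably set up by earlier boot code, so the loop below only
 * fills in the entries of the remaining cores.
 */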
#ifdef CFG_BOOT_INIT_CURRENT_THREAD_CORE_LOCAL
void thread_init_thread_core_local(size_t core_count __maybe_unused)
{
	struct thread_core_local *tcl = thread_core_local;
	const size_t core_pos = get_core_pos();
	vaddr_t va = 0;
	size_t n = 0;

	assert(core_count == CFG_TEE_CORE_NB_CORE);
	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		if (n == core_pos)
			continue;	/* Already initialized */
		tcl[n].curr_thread = THREAD_ID_INVALID;
		tcl[n].flags = THREAD_CLF_TMP;

		va = GET_STACK_BOTTOM(stack_tmp, n);
		tcl[n].tmp_stack_va_end = va - STACK_TMP_OFFS;
		if (IS_ENABLED(CFG_WITH_STACK_CANARIES))
			init_canaries(STACK_TMP_SIZE, va);
		va = GET_STACK_BOTTOM(stack_abt, n);
		tcl[n].abt_stack_va_end = va;
		if (IS_ENABLED(CFG_WITH_STACK_CANARIES))
			init_canaries(STACK_ABT_SIZE, va);
	}

	/* Might be needed when resuming from suspend on ARMv7. */
	if (IS_ENABLED(CFG_ARM32_core) && !IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW))
		thread_core_local_pa = virt_to_phys(tcl);
}
#else
void __nostackcheck thread_init_thread_core_local(size_t core_count)
{
	size_t n = 0;
	struct thread_core_local *tcl = thread_core_local;

	assert(core_count == CFG_TEE_CORE_NB_CORE);
	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		tcl[n].curr_thread = THREAD_ID_INVALID;
		tcl[n].flags = THREAD_CLF_TMP;
	}
	tcl[0].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, 0);
}

void __nostackcheck thread_init_core_local_stacks(void)
{
	size_t n = 0;
	struct thread_core_local *tcl = thread_core_local;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		tcl[n].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, n) -
					  STACK_TMP_OFFS;
		tcl[n].abt_stack_va_end = GET_STACK_BOTTOM(stack_abt, n);
	}
}
#endif /*CFG_BOOT_INIT_CURRENT_THREAD_CORE_LOCAL*/

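/*
 * With CFG_CORE_PAUTH every thread and every core gets its own randomly
 * generated pointer authentication keys, so a pointer signed in one
 * context cannot be replayed in another.
 */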
#if defined(CFG_CORE_PAUTH)
void thread_init_thread_pauth_keys(void)
{
	size_t n = 0;

	for (n = 0; n < CFG_NUM_THREADS; n++)
		if (crypto_rng_read(&threads[n].keys, sizeof(threads[n].keys)))
			panic("Failed to init thread pauth keys");
}

void thread_init_core_local_pauth_keys(void)
{
	struct thread_core_local *tcl = thread_core_local;
	size_t n = 0;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
		if (crypto_rng_read(&tcl[n].keys, sizeof(tcl[n].keys)))
			panic("Failed to init core local pauth keys");
}
#endif

struct thread_specific_data * __noprof thread_get_tsd(void)
{
	return &threads[thread_get_id()].tsd;
}

struct thread_ctx_regs * __nostackcheck thread_get_ctx_regs(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);
	return &threads[l->curr_thread].regs;
}

void thread_set_foreign_intr(bool enable)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);

	if (enable) {
		threads[l->curr_thread].flags |=
					THREAD_FLAGS_FOREIGN_INTR_ENABLE;
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
	} else {
		/*
		 * No need to disable foreign interrupts here since they're
		 * already disabled above.
		 */
		threads[l->curr_thread].flags &=
					~THREAD_FLAGS_FOREIGN_INTR_ENABLE;
	}
}

void thread_restore_foreign_intr(void)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);

	if (threads[l->curr_thread].flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE)
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
}

static struct mobj *alloc_shm(enum thread_shm_type shm_type, size_t size)
{
	switch (shm_type) {
	case THREAD_SHM_TYPE_APPLICATION:
		return thread_rpc_alloc_payload(size);
	case THREAD_SHM_TYPE_KERNEL_PRIVATE:
		return thread_rpc_alloc_kernel_payload(size);
	case THREAD_SHM_TYPE_GLOBAL:
		return thread_rpc_alloc_global_payload(size);
	default:
		return NULL;
	}
}

static void clear_shm_cache_entry(struct thread_shm_cache_entry *ce)
{
	if (ce->mobj) {
		switch (ce->type) {
		case THREAD_SHM_TYPE_APPLICATION:
			thread_rpc_free_payload(ce->mobj);
			break;
		case THREAD_SHM_TYPE_KERNEL_PRIVATE:
			thread_rpc_free_kernel_payload(ce->mobj);
			break;
		case THREAD_SHM_TYPE_GLOBAL:
			thread_rpc_free_global_payload(ce->mobj);
			break;
		default:
			assert(0); /* "can't happen" */
			break;
		}
	}
	ce->mobj = NULL;
	ce->size = 0;
}

static struct thread_shm_cache_entry *
get_shm_cache_entry(enum thread_shm_cache_user user)
{
	struct thread_shm_cache *cache = &threads[thread_get_id()].shm_cache;
	struct thread_shm_cache_entry *ce = NULL;

	SLIST_FOREACH(ce, cache, link)
		if (ce->user == user)
			return ce;

	ce = calloc(1, sizeof(*ce));
	if (ce) {
		ce->user = user;
		SLIST_INSERT_HEAD(cache, ce, link);
	}

	return ce;
}

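/*
 * A minimal usage sketch for the function below, with a hypothetical
 * caller that needs 512 bytes of application shared memory (the cache
 * user value is illustrative):
 *
 *	struct mobj *mobj = NULL;
 *	void *buf = NULL;
 *
 *	buf = thread_rpc_shm_cache_alloc(THREAD_SHM_CACHE_USER_SOCKET,
 *					 THREAD_SHM_TYPE_APPLICATION,
 *					 512, &mobj);
 *	if (!buf)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *
 * Repeated calls with the same user reuse the cached mobj as long as the
 * type matches and the rounded-up size still fits.
 */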
void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user,
				 enum thread_shm_type shm_type,
				 size_t size, struct mobj **mobj)
{
	struct thread_shm_cache_entry *ce = NULL;
	size_t sz = size;
	paddr_t p = 0;
	void *va = NULL;

	if (!size)
		return NULL;

	ce = get_shm_cache_entry(user);
	if (!ce)
		return NULL;

	/*
	 * Always allocate in page chunks as normal world allocates payload
	 * memory as complete pages.
	 */
	sz = ROUNDUP(size, SMALL_PAGE_SIZE);

	if (ce->type != shm_type || sz > ce->size) {
		clear_shm_cache_entry(ce);

		ce->mobj = alloc_shm(shm_type, sz);
		if (!ce->mobj)
			return NULL;

		if (mobj_get_pa(ce->mobj, 0, 0, &p))
			goto err;

		if (!IS_ALIGNED_WITH_TYPE(p, uint64_t))
			goto err;

		va = mobj_get_va(ce->mobj, 0, sz);
		if (!va)
			goto err;

		ce->size = sz;
		ce->type = shm_type;
	} else {
		va = mobj_get_va(ce->mobj, 0, sz);
		if (!va)
			goto err;
	}
	*mobj = ce->mobj;

	return va;
err:
	clear_shm_cache_entry(ce);
	return NULL;
}

void thread_rpc_shm_cache_clear(struct thread_shm_cache *cache)
{
	while (true) {
		struct thread_shm_cache_entry *ce = SLIST_FIRST(cache);

		if (!ce)
			break;
		SLIST_REMOVE_HEAD(cache, link);
		clear_shm_cache_entry(ce);
		free(ce);
	}
}