// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2022, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <config.h>
#include <crypto/crypto.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/mobj.h>
#include <mm/page_alloc.h>
#include <stdalign.h>

#if defined(CFG_DYN_CONFIG)
struct thread_core_local *thread_core_local __nex_bss;
size_t thread_core_count __nex_bss;
struct thread_ctx *threads;
size_t thread_count;
#else
static struct thread_core_local
	__thread_core_local[CFG_TEE_CORE_NB_CORE] __nex_bss;
struct thread_core_local *thread_core_local __nex_data = __thread_core_local;
size_t thread_core_count __nex_data = CFG_TEE_CORE_NB_CORE;
static struct thread_ctx __threads[CFG_NUM_THREADS];
struct thread_ctx *threads = __threads;
size_t thread_count = CFG_NUM_THREADS;
#endif
unsigned long thread_core_local_pa __nex_bss;
struct thread_core_local *__thread_core_local_new __nex_bss;
size_t __thread_core_count_new __nex_bss;
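
/*
 * With CFG_DYN_CONFIG the per-core and per-thread arrays are allocated
 * at runtime instead of being statically sized by CFG_TEE_CORE_NB_CORE
 * and CFG_NUM_THREADS. __thread_core_local_new and
 * __thread_core_count_new stage the array set up by
 * thread_init_thread_core_local(); until thread_core_local has been
 * switched over to that array, get_core_local() below treats
 * thread_core_local as the single boot-core entry.
 */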

/*
 * Stacks
 *
 * [Lower addresses on the left]
 *
 * [ STACK_CANARY_SIZE/2 | STACK_CHECK_EXTRA | STACK_XXX_SIZE | STACK_CANARY_SIZE/2 ]
 * ^                     ^                   ^                ^
 * stack_xxx[n]          "hard" top          "soft" top       bottom
 */
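
/*
 * Illustrative example with hypothetical values: STACK_CANARY_SIZE = 8,
 * STACK_CHECK_EXTRA = 0 and STACK_XXX_SIZE = 2048 give a 2056 byte
 * array (before rounding up to STACK_ALIGNMENT):
 *
 * [ 4 canary bytes | 2048 usable bytes | 4 canary bytes ]
 *
 * With no STACK_CHECK_EXTRA the "hard" and "soft" tops coincide, and
 * the bottom sits 4 bytes below the end of the array, which is what
 * GET_STACK_BOTTOM() below computes.
 */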

static uint32_t start_canary_value = 0xdedede00;
static uint32_t end_canary_value = 0xababab00;

#define DECLARE_STACK(name, num_stacks, stack_size, linkage) \
linkage uint32_t name[num_stacks] \
		[ROUNDUP(stack_size + STACK_CANARY_SIZE + STACK_CHECK_EXTRA, \
			 STACK_ALIGNMENT) / sizeof(uint32_t)] \
		__attribute__((section(".nozi_stack." # name), \
			       aligned(STACK_ALIGNMENT)))
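
/*
 * For illustration (hypothetical arguments), DECLARE_STACK(stack_abt, 2,
 * 1024, static) expands to:
 *
 *   static uint32_t stack_abt[2]
 *	[ROUNDUP(1024 + STACK_CANARY_SIZE + STACK_CHECK_EXTRA,
 *		 STACK_ALIGNMENT) / sizeof(uint32_t)]
 *	__attribute__((section(".nozi_stack.stack_abt"),
 *		       aligned(STACK_ALIGNMENT)));
 *
 * i.e. one canary- and alignment-padded stack per entry, placed in its
 * own ".nozi_stack" section.
 */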

#ifndef CFG_DYN_CONFIG
DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE,
	      /* global linkage */);
DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static);
#define GET_STACK_BOTTOM(stack, n) ((vaddr_t)&(stack)[n] + sizeof(stack[n]) - \
				    STACK_CANARY_SIZE / 2)
#else
/* Not used */
#define GET_STACK_BOTTOM(stack, n) 0
#endif

#if defined(CFG_DYN_CONFIG) || defined(CFG_WITH_PAGER)
/* Not used */
#define GET_STACK_THREAD_BOTTOM(n) 0
#else
DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE, static);
#define GET_STACK_THREAD_BOTTOM(n) \
	((vaddr_t)&stack_thread[n] + sizeof(stack_thread[n]) - \
	 STACK_CANARY_SIZE / 2)
#endif

#ifndef CFG_DYN_CONFIG
const uint32_t stack_tmp_stride __section(".identity_map.stack_tmp_stride") =
	sizeof(stack_tmp[0]);

/*
 * This stack setup info is required by secondary boot cores before they
 * each locally enable the pager (the MMU). Hence kept in pager sections.
 */
DECLARE_KEEP_PAGER(stack_tmp_stride);
#endif

static unsigned int thread_global_lock __nex_bss = SPINLOCK_UNLOCK;

static size_t stack_size_to_alloc_size(size_t stack_size)
{
	return ROUNDUP(stack_size + STACK_CANARY_SIZE + STACK_CHECK_EXTRA,
		       STACK_ALIGNMENT);
}

static vaddr_t stack_end_va_to_top_hard(size_t stack_size, vaddr_t end_va)
{
	size_t l = stack_size_to_alloc_size(stack_size);

	return end_va - l + STACK_CANARY_SIZE;
}

static vaddr_t stack_end_va_to_top_soft(size_t stack_size, vaddr_t end_va)
{
	return stack_end_va_to_top_hard(stack_size, end_va) + STACK_CHECK_EXTRA;
}

static vaddr_t stack_end_va_to_bottom(size_t stack_size __unused,
				      vaddr_t end_va)
{
	return end_va;
}

static uint32_t *stack_end_va_to_start_canary(size_t stack_size, vaddr_t end_va)
{
	return (uint32_t *)(stack_end_va_to_top_hard(stack_size, end_va) -
			    STACK_CANARY_SIZE / 2);
}

static uint32_t *stack_end_va_to_end_canary(size_t stack_size __unused,
					    vaddr_t end_va)
{
	return (uint32_t *)(end_va + STACK_CANARY_SIZE / 2 - sizeof(uint32_t));
}
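
/*
 * Worked example with hypothetical values: stack_size = 2048,
 * STACK_CANARY_SIZE = 8, STACK_CHECK_EXTRA = 512, STACK_ALIGNMENT = 8.
 * stack_size_to_alloc_size() gives ROUNDUP(2048 + 8 + 512, 8) = 2568,
 * so for end_va = 0x2000:
 *
 *   top (hard)       = 0x2000 - 2568 + 8 = 0x1600
 *   top (soft)       = 0x1600 + 512      = 0x1800
 *   bottom           = end_va            = 0x2000
 *   start canary at    0x1600 - 4        = 0x15fc
 *   end canary at      0x2000 + 4 - 4    = 0x2000
 *
 * Each canary word thus occupies the STACK_CANARY_SIZE/2 bytes just
 * outside the usable [top, bottom) range.
 */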

static void init_canaries(size_t stack_size, vaddr_t va_end)
{
	uint32_t *canary = NULL;

	assert(va_end);
	canary = stack_end_va_to_start_canary(stack_size, va_end);
	*canary = start_canary_value;
	canary = stack_end_va_to_end_canary(stack_size, va_end);
	*canary = end_canary_value;
}

void thread_init_canaries(void)
{
	vaddr_t va = 0;
	size_t n = 0;

	if (IS_ENABLED(CFG_WITH_STACK_CANARIES)) {
		for (n = 0; n < thread_core_count; n++) {
			if (thread_core_local[n].tmp_stack_va_end) {
				va = thread_core_local[n].tmp_stack_va_end +
				     STACK_TMP_OFFS;
				init_canaries(STACK_TMP_SIZE, va);
			}
			va = thread_core_local[n].abt_stack_va_end;
			if (va)
				init_canaries(STACK_ABT_SIZE, va);
		}
	}

	if (IS_ENABLED(CFG_WITH_STACK_CANARIES) &&
	    !IS_ENABLED(CFG_WITH_PAGER) &&
	    !IS_ENABLED(CFG_NS_VIRTUALIZATION) && threads) {
		for (n = 0; n < thread_count; n++) {
			va = threads[n].stack_va_end;
			if (va)
				init_canaries(STACK_THREAD_SIZE, va);
		}
	}
}
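
/*
 * Re-key the stack canaries with fresh random values. The old values
 * are verified first, and all exceptions are masked so the update
 * cannot be preempted on this core while the stacks briefly hold a mix
 * of old and new canary values.
 */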

#if defined(CFG_WITH_STACK_CANARIES)
void thread_update_canaries(void)
{
	uint32_t canary[2] = { };
	uint32_t exceptions = 0;

	plat_get_random_stack_canaries(canary, ARRAY_SIZE(canary),
				       sizeof(canary[0]));

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	thread_check_canaries();

	start_canary_value = canary[0];
	end_canary_value = canary[1];
	thread_init_canaries();

	thread_unmask_exceptions(exceptions);
}
#endif

static void check_stack_canary(const char *stack_name __maybe_unused,
			       size_t n __maybe_unused,
			       size_t stack_size, vaddr_t end_va)
{
	uint32_t *canary = NULL;

	canary = stack_end_va_to_start_canary(stack_size, end_va);
	if (*canary != start_canary_value) {
		EMSG_RAW("Dead canary at start of '%s[%zu]' (%p)",
			 stack_name, n, (void *)canary);
		panic();
	}

	canary = stack_end_va_to_end_canary(stack_size, end_va);
	if (*canary != end_canary_value) {
		EMSG_RAW("Dead canary at end of '%s[%zu]' (%p)",
			 stack_name, n, (void *)canary);
		panic();
	}
}

void thread_check_canaries(void)
{
	vaddr_t va = 0;
	size_t n = 0;

	if (IS_ENABLED(CFG_WITH_STACK_CANARIES)) {
		for (n = 0; n < thread_core_count; n++) {
			if (thread_core_local[n].tmp_stack_va_end) {
				va = thread_core_local[n].tmp_stack_va_end +
				     STACK_TMP_OFFS;
				check_stack_canary("tmp_stack", n,
						   STACK_TMP_SIZE, va);
			}

			va = thread_core_local[n].abt_stack_va_end;
			if (va)
				check_stack_canary("abt_stack", n,
						   STACK_ABT_SIZE, va);
		}
	}

	if (IS_ENABLED(CFG_WITH_STACK_CANARIES) &&
	    !IS_ENABLED(CFG_WITH_PAGER) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		for (n = 0; n < thread_count; n++) {
			va = threads[n].stack_va_end;
			if (va)
				check_stack_canary("thread_stack", n,
						   STACK_THREAD_SIZE, va);
		}
	}
}

void thread_lock_global(void)
{
	cpu_spin_lock(&thread_global_lock);
}

void thread_unlock_global(void)
{
	cpu_spin_unlock(&thread_global_lock);
}

static struct thread_core_local * __nostackcheck
get_core_local(unsigned int pos)
{
	/*
	 * Foreign interrupts must be disabled before playing with core_local
	 * since we otherwise may be rescheduled to a different core in the
	 * middle of this function.
	 */
	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);

	/*
	 * We boot on a single core and have allocated only one struct
	 * thread_core_local, so we return that one regardless of pos.
	 */
	if (IS_ENABLED(CFG_DYN_CONFIG) &&
	    thread_core_local != __thread_core_local_new)
		return thread_core_local;

	assert(pos < thread_core_count);
	return &thread_core_local[pos];
}

struct thread_core_local * __nostackcheck thread_get_core_local(void)
{
	unsigned int pos = get_core_pos();

	return get_core_local(pos);
}

#ifdef CFG_CORE_DEBUG_CHECK_STACKS
static void print_stack_limits(void)
{
	size_t n = 0;
	vaddr_t __maybe_unused start = 0;
	vaddr_t __maybe_unused end = 0;
	vaddr_t va = 0;

	for (n = 0; n < thread_core_count; n++) {
		va = thread_core_local[n].tmp_stack_va_end + STACK_TMP_OFFS;
		start = stack_end_va_to_top_soft(STACK_TMP_SIZE, va);
		end = stack_end_va_to_bottom(STACK_TMP_SIZE, va);
		DMSG("tmp [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);

		va = thread_core_local[n].abt_stack_va_end;
		start = stack_end_va_to_top_soft(STACK_ABT_SIZE, va);
		end = stack_end_va_to_bottom(STACK_ABT_SIZE, va);
		DMSG("abt [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
	}

	for (n = 0; n < thread_count; n++) {
		va = threads[n].stack_va_end;
		start = stack_end_va_to_top_soft(STACK_THREAD_SIZE, va);
		end = stack_end_va_to_bottom(STACK_THREAD_SIZE, va);
		DMSG("thr [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
	}
}

static void check_stack_limits(void)
{
	vaddr_t stack_start = 0;
	vaddr_t stack_end = 0;
	/* Any value in the current stack frame will do */
	vaddr_t current_sp = (vaddr_t)&stack_start;

	if (!get_stack_soft_limits(&stack_start, &stack_end))
		panic("Unknown stack limits");
	if (current_sp < stack_start || current_sp > stack_end) {
		EMSG("Stack pointer out of range: 0x%" PRIxVA " not in [0x%"
		     PRIxVA " .. 0x%" PRIxVA "]", current_sp, stack_start,
		     stack_end);
		print_stack_limits();
		panic();
	}
}

static bool * __nostackcheck get_stackcheck_recursion_flag(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	unsigned int pos = get_core_pos();
	struct thread_core_local *l = get_core_local(pos);
	int ct = l->curr_thread;
	bool *p = NULL;

	if (l->flags & (THREAD_CLF_ABORT | THREAD_CLF_TMP))
		p = &l->stackcheck_recursion;
	else if (!l->flags)
		p = &threads[ct].tsd.stackcheck_recursion;

	thread_unmask_exceptions(exceptions);
	return p;
}
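
/*
 * With CFG_CORE_DEBUG_CHECK_STACKS the kernel is built with function
 * instrumentation (the compiler's -finstrument-functions), so each
 * instrumented function calls __cyg_profile_func_enter() on entry and
 * __cyg_profile_func_exit() on exit. Functions marked __nostackcheck
 * are excluded so that the check cannot recurse into itself.
 */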

void __cyg_profile_func_enter(void *this_fn, void *call_site);
void __nostackcheck __cyg_profile_func_enter(void *this_fn __unused,
					     void *call_site __unused)
{
	bool *p = get_stackcheck_recursion_flag();

	assert(p);
	if (*p)
		return;
	*p = true;
	check_stack_limits();
	*p = false;
}

void __cyg_profile_func_exit(void *this_fn, void *call_site);
void __nostackcheck __cyg_profile_func_exit(void *this_fn __unused,
					    void *call_site __unused)
{
}
#else
static void print_stack_limits(void)
{
}
#endif

void thread_init_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	l->curr_thread = 0;
	threads[0].state = THREAD_STATE_ACTIVE;
}

void __nostackcheck thread_clr_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS);
	assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
	threads[l->curr_thread].state = THREAD_STATE_FREE;
	l->curr_thread = THREAD_ID_INVALID;
	print_stack_limits();
}

void __nostackcheck *thread_get_tmp_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();

	/*
	 * Called from assembly when switching to the temporary stack, so flags
	 * need updating
	 */
	l->flags |= THREAD_CLF_TMP;

	return (void *)l->tmp_stack_va_end;
}

vaddr_t thread_stack_start(void)
{
	struct thread_ctx *thr;
	int ct = thread_get_id_may_fail();

	if (ct == THREAD_ID_INVALID)
		return 0;

	thr = threads + ct;
	return stack_end_va_to_top_soft(STACK_THREAD_SIZE, thr->stack_va_end);
}

size_t thread_stack_size(void)
{
	return STACK_THREAD_SIZE;
}
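
/*
 * Report the limits of the stack the caller is currently executing on:
 * the temporary stack, the abort stack or the current thread's stack.
 * The "soft" start leaves the STACK_CHECK_EXTRA headroom in place so
 * the stack-check error path has room to run; pass hard = true for the
 * true lowest usable address.
 */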

bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	unsigned int pos = get_core_pos();
	struct thread_core_local *l = get_core_local(pos);
	int ct = l->curr_thread;
	size_t stack_size = 0;
	bool ret = true;
	vaddr_t va = 0;

	if (l->flags & THREAD_CLF_TMP) {
		va = l->tmp_stack_va_end + STACK_TMP_OFFS;
		stack_size = STACK_TMP_SIZE;
	} else if (l->flags & THREAD_CLF_ABORT) {
		va = l->abt_stack_va_end;
		stack_size = STACK_ABT_SIZE;
	} else if (!l->flags && ct >= 0 && (size_t)ct < thread_count) {
		va = threads[ct].stack_va_end;
		stack_size = STACK_THREAD_SIZE;
	} else {
		ret = false;
		goto out;
	}

	*end = stack_end_va_to_bottom(stack_size, va);
	if (hard)
		*start = stack_end_va_to_top_hard(stack_size, va);
	else
		*start = stack_end_va_to_top_soft(stack_size, va);
out:
	thread_unmask_exceptions(exceptions);
	return ret;
}

bool thread_is_from_abort_mode(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (l->flags >> THREAD_CLF_SAVED_SHIFT) & THREAD_CLF_ABORT;
}

/*
 * This function should always be accurate, but it might be possible to
 * implement a more efficient one depending on the CPU architecture.
 */
bool __weak __noprof thread_is_in_normal_mode(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	bool ret;

	/*
	 * If any bit in l->flags is set aside from THREAD_CLF_TMP we're
	 * handling some exception.
	 */
	ret = (l->curr_thread != THREAD_ID_INVALID) &&
	      !(l->flags & ~THREAD_CLF_TMP);
	thread_unmask_exceptions(exceptions);

	return ret;
}

short int __noprof thread_get_id_may_fail(void)
{
	/*
	 * thread_get_core_local() requires foreign interrupts to be disabled
	 */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	short int ct = l->curr_thread;

	thread_unmask_exceptions(exceptions);
	return ct;
}

short int __noprof thread_get_id(void)
{
	short int ct = thread_get_id_may_fail();

	/* Thread ID has to fit in a short int */
	COMPILE_TIME_ASSERT(CFG_NUM_THREADS <= SHRT_MAX);
	assert(ct >= 0 && ct < CFG_NUM_THREADS);
	return ct;
}
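
/*
 * Allocate a stack with virt_page_alloc(), from the nexus pool when nex
 * is true. MAF_GUARD_HEAD asks for a guard area at the head (low end) of
 * the allocation, so overflowing past the hard top should fault rather
 * than silently corrupt a neighbouring allocation. The returned value is
 * the "bottom" end_va that the canary helpers above operate on.
 */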

static vaddr_t alloc_stack(size_t stack_size, bool nex)
{
	size_t l = stack_size_to_alloc_size(stack_size);
	size_t rl = ROUNDUP(l, SMALL_PAGE_SIZE);
	uint32_t flags = MAF_GUARD_HEAD;
	vaddr_t end_va = 0;
	vaddr_t va = 0;

	if (nex)
		flags |= MAF_NEX;
	va = virt_page_alloc(rl / SMALL_PAGE_SIZE, flags);
	if (!va)
		panic();

	end_va = va + l - STACK_CANARY_SIZE / 2;
	if (IS_ENABLED(CFG_WITH_STACK_CANARIES))
		init_canaries(stack_size, end_va);

	return end_va;
}

#ifdef CFG_WITH_PAGER
static void init_thread_stacks(void)
{
	size_t n = 0;

	/*
	 * Allocate virtual memory for thread stacks.
	 */
	for (n = 0; n < thread_count; n++) {
		tee_mm_entry_t *mm = NULL;
		vaddr_t sp = 0;
		size_t num_pages = 0;
		struct fobj *fobj = NULL;

		/* Find vmem for thread stack and its protection gap */
		mm = tee_mm_alloc(&core_virt_mem_pool,
				  SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
		assert(mm);

		/* Claim any physical pages */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);

		num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE - 1;
		fobj = fobj_locked_paged_alloc(num_pages);

		/* Add the region to the pager */
		tee_pager_add_core_region(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE,
					  PAGED_REGION_TYPE_LOCK, fobj);
		fobj_put(fobj);

		/* init effective stack */
		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
		asan_tag_access((void *)tee_mm_get_smem(mm), (void *)sp);
		threads[n].stack_va_end = sp;
	}
}
#else
static void init_thread_stacks(void)
{
	vaddr_t va = 0;
	size_t n = 0;

	/* Assign the thread stacks */
	for (n = 0; n < thread_count; n++) {
		if (IS_ENABLED(CFG_DYN_CONFIG))
			va = alloc_stack(STACK_THREAD_SIZE, false);
		else
			va = GET_STACK_THREAD_BOTTOM(n);
		threads[n].stack_va_end = va;
		if (IS_ENABLED(CFG_WITH_STACK_CANARIES))
			init_canaries(STACK_THREAD_SIZE, va);
	}
}
#endif /*CFG_WITH_PAGER*/

void thread_init_threads(size_t count)
{
	size_t n = 0;

	if (IS_ENABLED(CFG_DYN_CONFIG)) {
		assert(count <= CFG_NUM_THREADS);
		threads = calloc(count, sizeof(*threads));
		if (!threads)
			panic();
		thread_count = count;
	} else {
		assert(count == CFG_NUM_THREADS);
	}

	init_thread_stacks();
	print_stack_limits();
	pgt_init();

	mutex_lockdep_init();

	for (n = 0; n < thread_count; n++)
		TAILQ_INIT(&threads[n].tsd.sess_stack);
}

#ifndef CFG_DYN_CONFIG
vaddr_t __nostackcheck thread_get_abt_stack(void)
{
	return GET_STACK_BOTTOM(stack_abt, get_core_pos());
}
#endif

void thread_init_thread_core_local(size_t core_count)
{
	struct thread_core_local *tcl = NULL;
	const size_t core_pos = get_core_pos();
	vaddr_t va = 0;
	size_t n = 0;

	if (IS_ENABLED(CFG_DYN_CONFIG)) {
		assert(core_count <= CFG_TEE_CORE_NB_CORE);
		tcl = nex_calloc(core_count, sizeof(*tcl));
		if (!tcl)
			panic();
		__thread_core_local_new = tcl;
		__thread_core_count_new = core_count;
	} else {
		tcl = thread_core_local;
		assert(core_count == CFG_TEE_CORE_NB_CORE);

		for (n = 0; n < thread_core_count; n++) {
			init_canaries(STACK_TMP_SIZE,
				      GET_STACK_BOTTOM(stack_tmp, n));
			init_canaries(STACK_ABT_SIZE,
				      GET_STACK_BOTTOM(stack_abt, n));
		}
	}
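
	/*
	 * The boot core already has a live entry: with CFG_DYN_CONFIG its
	 * early single-entry state is copied into the new array, while in
	 * the static configuration its already initialized entry is left
	 * untouched (the continue below). All other cores start with no
	 * current thread and with the tmp-stack flag set.
	 */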

	for (n = 0; n < core_count; n++) {
		if (n == core_pos) {
			if (IS_ENABLED(CFG_DYN_CONFIG))
				tcl[n] = thread_core_local[0];
			else
				continue;
		} else {
			tcl[n].curr_thread = THREAD_ID_INVALID;
			tcl[n].flags = THREAD_CLF_TMP;
		}

		if (IS_ENABLED(CFG_DYN_CONFIG))
			va = alloc_stack(STACK_TMP_SIZE, true);
		else
			va = GET_STACK_BOTTOM(stack_tmp, n);
		tcl[n].tmp_stack_va_end = va - STACK_TMP_OFFS;
#ifdef ARM32
		tcl[n].tmp_stack_pa_end =
			vaddr_to_phys(tcl[n].tmp_stack_va_end);
#endif

		if (IS_ENABLED(CFG_DYN_CONFIG))
			va = alloc_stack(STACK_ABT_SIZE, true);
		else
			va = GET_STACK_BOTTOM(stack_abt, n);
		tcl[n].abt_stack_va_end = va;
	}
}

#if defined(CFG_CORE_PAUTH)
void thread_init_thread_pauth_keys(void)
{
	size_t n = 0;

	for (n = 0; n < thread_count; n++)
		if (crypto_rng_read(&threads[n].keys, sizeof(threads[n].keys)))
			panic("Failed to init thread pauth keys");
}

void thread_init_core_local_pauth_keys(void)
{
	struct thread_core_local *tcl = thread_core_local;
	size_t n = 0;

	for (n = 0; n < thread_core_count; n++)
		if (crypto_rng_read(&tcl[n].keys, sizeof(tcl[n].keys)))
			panic("Failed to init core local pauth keys");
}
#endif

struct thread_specific_data * __noprof thread_get_tsd(void)
{
	return &threads[thread_get_id()].tsd;
}

struct thread_ctx_regs * __nostackcheck thread_get_ctx_regs(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);
	return &threads[l->curr_thread].regs;
}

void thread_set_foreign_intr(bool enable)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);

	if (enable) {
		threads[l->curr_thread].flags |=
					THREAD_FLAGS_FOREIGN_INTR_ENABLE;
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
	} else {
		/*
		 * No need to disable foreign interrupts here since they're
		 * already disabled above.
		 */
		threads[l->curr_thread].flags &=
					~THREAD_FLAGS_FOREIGN_INTR_ENABLE;
	}
}

void thread_restore_foreign_intr(void)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);

	if (threads[l->curr_thread].flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE)
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
}

static struct mobj *alloc_shm(enum thread_shm_type shm_type, size_t size)
{
	switch (shm_type) {
	case THREAD_SHM_TYPE_APPLICATION:
		return thread_rpc_alloc_payload(size);
	case THREAD_SHM_TYPE_KERNEL_PRIVATE:
		return thread_rpc_alloc_kernel_payload(size);
	case THREAD_SHM_TYPE_GLOBAL:
		return thread_rpc_alloc_global_payload(size);
	default:
		return NULL;
	}
}

static void clear_shm_cache_entry(struct thread_shm_cache_entry *ce)
{
	if (ce->mobj) {
		switch (ce->type) {
		case THREAD_SHM_TYPE_APPLICATION:
			thread_rpc_free_payload(ce->mobj);
			break;
		case THREAD_SHM_TYPE_KERNEL_PRIVATE:
			thread_rpc_free_kernel_payload(ce->mobj);
			break;
		case THREAD_SHM_TYPE_GLOBAL:
			thread_rpc_free_global_payload(ce->mobj);
			break;
		default:
			assert(0); /* "can't happen" */
			break;
		}
	}
	ce->mobj = NULL;
	ce->size = 0;
}

static struct thread_shm_cache_entry *
get_shm_cache_entry(enum thread_shm_cache_user user)
{
	struct thread_shm_cache *cache = &threads[thread_get_id()].shm_cache;
	struct thread_shm_cache_entry *ce = NULL;

	SLIST_FOREACH(ce, cache, link)
		if (ce->user == user)
			return ce;

	ce = calloc(1, sizeof(*ce));
	if (ce) {
		ce->user = user;
		SLIST_INSERT_HEAD(cache, ce, link);
	}

	return ce;
}
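
/*
 * Minimal usage sketch (hypothetical caller): get a page of
 * kernel-private shared memory that stays cached on the current thread
 * across calls:
 *
 *   struct mobj *mobj = NULL;
 *   void *buf = thread_rpc_shm_cache_alloc(THREAD_SHM_CACHE_USER_FS,
 *					    THREAD_SHM_TYPE_KERNEL_PRIVATE,
 *					    SMALL_PAGE_SIZE, &mobj);
 *   if (!buf)
 *	return TEE_ERROR_OUT_OF_MEMORY;
 *
 * The cached entry is reused while the requested size fits and the type
 * matches, and is released via thread_rpc_shm_cache_clear() when the
 * thread's cache is torn down.
 */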

void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user,
				 enum thread_shm_type shm_type,
				 size_t size, struct mobj **mobj)
{
	struct thread_shm_cache_entry *ce = NULL;
	size_t sz = size;
	paddr_t p = 0;
	void *va = NULL;

	if (!size)
		return NULL;

	ce = get_shm_cache_entry(user);
	if (!ce)
		return NULL;

	/*
	 * Always allocate in page chunks as normal world allocates payload
	 * memory as complete pages.
	 */
	sz = ROUNDUP(size, SMALL_PAGE_SIZE);

	if (ce->type != shm_type || sz > ce->size) {
		clear_shm_cache_entry(ce);

		ce->mobj = alloc_shm(shm_type, sz);
		if (!ce->mobj)
			return NULL;

		if (mobj_get_pa(ce->mobj, 0, 0, &p))
			goto err;

		if (!IS_ALIGNED_WITH_TYPE(p, uint64_t))
			goto err;

		va = mobj_get_va(ce->mobj, 0, sz);
		if (!va)
			goto err;

		ce->size = sz;
		ce->type = shm_type;
	} else {
		va = mobj_get_va(ce->mobj, 0, sz);
		if (!va)
			goto err;
	}
	*mobj = ce->mobj;

	return va;
err:
	clear_shm_cache_entry(ce);
	return NULL;
}

void thread_rpc_shm_cache_clear(struct thread_shm_cache *cache)
{
	while (true) {
		struct thread_shm_cache_entry *ce = SLIST_FIRST(cache);

		if (!ce)
			break;
		SLIST_REMOVE_HEAD(cache, link);
		clear_shm_cache_entry(ce);
		free(ce);
	}
}