xref: /optee_os/core/kernel/thread.c (revision a4c2e0cb4e4fcb0c760fb3daf9172e682a4e3628)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2022, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  * Copyright (c) 2020-2021, Arm Limited
6  */
7 
8 #include <config.h>
9 #include <crypto/crypto.h>
10 #include <kernel/asan.h>
11 #include <kernel/boot.h>
12 #include <kernel/lockdep.h>
13 #include <kernel/misc.h>
14 #include <kernel/panic.h>
15 #include <kernel/spinlock.h>
16 #include <kernel/thread.h>
17 #include <kernel/thread_private.h>
18 #include <mm/mobj.h>
19 
20 struct thread_ctx threads[CFG_NUM_THREADS];
21 
22 static struct thread_core_local
23 	__thread_core_local[CFG_TEE_CORE_NB_CORE] __nex_bss;
24 struct thread_core_local *thread_core_local __nex_data = __thread_core_local;
25 size_t thread_core_count __nex_data = CFG_TEE_CORE_NB_CORE;
26 unsigned long thread_core_local_pa __nex_bss;
27 
28 /*
29  * Stacks
30  *
31  * [Lower addresses on the left]
32  *
33  * [ STACK_CANARY_SIZE/2 | STACK_CHECK_EXTRA | STACK_XXX_SIZE | STACK_CANARY_SIZE/2 ]
34  * ^                     ^                   ^                ^
35  * stack_xxx[n]          "hard" top          "soft" top       bottom
36  */
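
/*
 * Worked example of the layout above, with purely illustrative numbers
 * (not the values of any particular build): assume STACK_CANARY_SIZE = 32,
 * STACK_CHECK_EXTRA = 512 and STACK_XXX_SIZE = 2048, with the sum already
 * a multiple of STACK_ALIGNMENT, so each array element spans 2592 bytes.
 * If the bottom ("end_va") is 0x3000 then:
 *   hard top     = 0x3000 - 2592 + 32 = 0x2600
 *   soft top     = 0x2600 + 512       = 0x2800
 *   start canary = 0x2600 - 16        = 0x25f0 (first word of the array)
 *   end canary   = 0x3000 + 16 - 4    = 0x300c (last word of the array)
 */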
37 
38 static uint32_t start_canary_value = 0xdedede00;
39 static uint32_t end_canary_value = 0xababab00;
40 
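/*
 * DECLARE_STACK() defines "num_stacks" uint32_t arrays, each large
 * enough to hold the requested stack plus the canaries and the
 * STACK_CHECK_EXTRA slack, aligned to STACK_ALIGNMENT and placed in a
 * dedicated .nozi_stack.<name> section.
 */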
41 #define DECLARE_STACK(name, num_stacks, stack_size, linkage) \
42 linkage uint32_t name[num_stacks] \
43 		[ROUNDUP(stack_size + STACK_CANARY_SIZE + STACK_CHECK_EXTRA, \
44 			 STACK_ALIGNMENT) / sizeof(uint32_t)] \
45 		__attribute__((section(".nozi_stack." # name), \
46 			       aligned(STACK_ALIGNMENT)))
47 
48 DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE,
49 	      /* global linkage */);
50 DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static);
51 #ifndef CFG_WITH_PAGER
52 DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE, static);
53 #endif
54 
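/*
 * The bottom of a stack is its highest usable address: the end of the
 * backing array minus the trailing half of the canary area (see the
 * layout above).
 */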
55 #define GET_STACK_BOTTOM(stack, n) ((vaddr_t)&(stack)[n] + sizeof(stack[n]) - \
56 				    STACK_CANARY_SIZE / 2)
57 
58 const uint32_t stack_tmp_stride __section(".identity_map.stack_tmp_stride") =
59 	sizeof(stack_tmp[0]);
60 
61 /*
62  * This stack setup info is required by secondary boot cores before they
63  * each locally enable the pager (the MMU). Hence it is kept in pager sections.
64  */
65 DECLARE_KEEP_PAGER(stack_tmp_stride);
66 
67 static unsigned int thread_global_lock __nex_bss = SPINLOCK_UNLOCK;
68 
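/*
 * Helpers translating a stack's end VA (the "bottom" in the layout
 * above) into the other landmarks: the hard and soft tops and the
 * locations of the two canary words.
 */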
69 static size_t stack_size_to_alloc_size(size_t stack_size)
70 {
71 	return ROUNDUP(stack_size + STACK_CANARY_SIZE + STACK_CHECK_EXTRA,
72 		       STACK_ALIGNMENT);
73 }
74 
75 static vaddr_t stack_end_va_to_top_hard(size_t stack_size, vaddr_t end_va)
76 {
77 	size_t l = stack_size_to_alloc_size(stack_size);
78 
79 	return end_va - l + STACK_CANARY_SIZE;
80 }
81 
82 static vaddr_t stack_end_va_to_top_soft(size_t stack_size, vaddr_t end_va)
83 {
84 	return stack_end_va_to_top_hard(stack_size, end_va) + STACK_CHECK_EXTRA;
85 }
86 
87 static vaddr_t stack_end_va_to_bottom(size_t stack_size __unused,
88 				      vaddr_t end_va)
89 {
90 	return end_va;
91 }
92 
93 static uint32_t *stack_end_va_to_start_canary(size_t stack_size, vaddr_t end_va)
94 {
95 	return (uint32_t *)(stack_end_va_to_top_hard(stack_size, end_va) -
96 			    STACK_CANARY_SIZE / 2);
97 }
98 
99 static uint32_t *stack_end_va_to_end_canary(size_t stack_size __unused,
100 					    vaddr_t end_va)
101 {
102 	return (uint32_t *)(end_va + STACK_CANARY_SIZE / 2 - sizeof(uint32_t));
103 }
104 
105 static void init_canaries(size_t stack_size, vaddr_t va_end)
106 {
107 	uint32_t *canary = NULL;
108 
109 	assert(va_end);
110 	canary = stack_end_va_to_start_canary(stack_size, va_end);
111 	*canary = start_canary_value;
112 	canary = stack_end_va_to_end_canary(stack_size, va_end);
113 	*canary = end_canary_value;
114 }
115 
116 void thread_init_canaries(void)
117 {
118 	vaddr_t va = 0;
119 	size_t n = 0;
120 
121 	if (IS_ENABLED(CFG_WITH_STACK_CANARIES)) {
122 		for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
123 			if (thread_core_local[n].tmp_stack_va_end) {
124 				va = thread_core_local[n].tmp_stack_va_end +
125 				     STACK_TMP_OFFS;
126 				init_canaries(STACK_TMP_SIZE, va);
127 			}
128 			va = thread_core_local[n].abt_stack_va_end;
129 			if (va)
130 				init_canaries(STACK_ABT_SIZE, va);
131 		}
132 
133 	}
134 
135 	if (IS_ENABLED(CFG_WITH_STACK_CANARIES) &&
136 	    !IS_ENABLED(CFG_WITH_PAGER) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
137 		for (n = 0; n < CFG_NUM_THREADS; n++) {
138 			va = threads[n].stack_va_end;
139 			if (va)
140 				init_canaries(STACK_THREAD_SIZE, va);
141 		}
142 	}
143 }
144 
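/*
 * Replace the compile-time default canary values with random values
 * from the platform: the current canaries are verified first, then all
 * known stacks are rewritten with the new values. Exceptions are masked
 * so the swap cannot be interrupted on this core halfway through.
 */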
145 #if defined(CFG_WITH_STACK_CANARIES)
146 void thread_update_canaries(void)
147 {
148 	uint32_t canary[2] = { };
149 	uint32_t exceptions = 0;
150 
151 	plat_get_random_stack_canaries(canary, ARRAY_SIZE(canary),
152 				       sizeof(canary[0]));
153 
154 	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
155 
156 	thread_check_canaries();
157 
158 	start_canary_value = canary[0];
159 	end_canary_value = canary[1];
160 	thread_init_canaries();
161 
162 	thread_unmask_exceptions(exceptions);
163 }
164 #endif
165 
166 static void check_stack_canary(const char *stack_name __maybe_unused,
167 			       size_t n __maybe_unused,
168 			       size_t stack_size, vaddr_t end_va)
169 {
170 	uint32_t *canary = NULL;
171 
172 	canary = stack_end_va_to_start_canary(stack_size, end_va);
173 	if (*canary != start_canary_value) {
174 		EMSG_RAW("Dead canary at start of '%s[%zu]' (%p)",
175 			 stack_name, n, (void *)canary);
176 		panic();
177 	}
178 
179 	canary = stack_end_va_to_end_canary(stack_size, end_va);
180 	if (*canary != end_canary_value) {
181 		EMSG_RAW("Dead canary at end of '%s[%zu]' (%p)",
182 			 stack_name, n, (void *)canary);
183 		panic();
184 	}
185 }
186 
187 void thread_check_canaries(void)
188 {
189 	vaddr_t va = 0;
190 	size_t n = 0;
191 
192 	if (IS_ENABLED(CFG_WITH_STACK_CANARIES)) {
193 		for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
194 			if (thread_core_local[n].tmp_stack_va_end) {
195 				va = thread_core_local[n].tmp_stack_va_end +
196 				     STACK_TMP_OFFS;
197 				check_stack_canary("tmp_stack", n,
198 						   STACK_TMP_SIZE, va);
199 			}
200 
201 			va = thread_core_local[n].abt_stack_va_end;
202 			if (va)
203 				check_stack_canary("abt_stack", n,
204 						   STACK_ABT_SIZE, va);
205 		}
206 	}
207 
208 	if (IS_ENABLED(CFG_WITH_STACK_CANARIES) &&
209 	    !IS_ENABLED(CFG_WITH_PAGER) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
210 		for (n = 0; n < CFG_NUM_THREADS; n++) {
211 			va = threads[n].stack_va_end;
212 			if (va)
213 				check_stack_canary("thread_stack", n,
214 						   STACK_THREAD_SIZE, va);
215 		}
216 	}
217 }
218 
219 void thread_lock_global(void)
220 {
221 	cpu_spin_lock(&thread_global_lock);
222 }
223 
224 void thread_unlock_global(void)
225 {
226 	cpu_spin_unlock(&thread_global_lock);
227 }
228 
229 static struct thread_core_local * __nostackcheck
230 get_core_local(unsigned int pos)
231 {
232 	/*
233 	 * Foreign interrupts must be disabled before playing with core_local
234 	 * since we otherwise may be rescheduled to a different core in the
235 	 * middle of this function.
236 	 */
237 	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
238 
239 	assert(pos < CFG_TEE_CORE_NB_CORE);
240 	return &thread_core_local[pos];
241 }
242 
243 struct thread_core_local * __nostackcheck thread_get_core_local(void)
244 {
245 	unsigned int pos = get_core_pos();
246 
247 	return get_core_local(pos);
248 }
249 
250 #ifdef CFG_CORE_DEBUG_CHECK_STACKS
251 static void print_stack_limits(void)
252 {
253 	size_t n = 0;
254 	vaddr_t __maybe_unused start = 0;
255 	vaddr_t __maybe_unused end = 0;
256 	vaddr_t va = 0;
257 
258 	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
259 		va = thread_core_local[n].tmp_stack_va_end + STACK_TMP_OFFS;
260 		start = stack_end_va_to_top_soft(STACK_TMP_SIZE, va);
261 		end = stack_end_va_to_bottom(STACK_TMP_SIZE, va);
262 		DMSG("tmp [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
263 
264 		va = thread_core_local[n].abt_stack_va_end;
265 		start = stack_end_va_to_top_soft(STACK_ABT_SIZE, va);
266 		end = stack_end_va_to_bottom(STACK_ABT_SIZE, va);
267 		DMSG("abt [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
268 	}
269 
270 	for (n = 0; n < CFG_NUM_THREADS; n++) {
271 		va = threads[n].stack_va_end;
272 		start = stack_end_va_to_top_soft(STACK_THREAD_SIZE, va);
273 		end = stack_end_va_to_bottom(STACK_THREAD_SIZE, va);
274 		DMSG("thr [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
275 	}
276 }
277 
278 static void check_stack_limits(void)
279 {
280 	vaddr_t stack_start = 0;
281 	vaddr_t stack_end = 0;
282 	/* Any value in the current stack frame will do */
283 	vaddr_t current_sp = (vaddr_t)&stack_start;
284 
285 	if (!get_stack_soft_limits(&stack_start, &stack_end))
286 		panic("Unknown stack limits");
287 	if (current_sp < stack_start || current_sp > stack_end) {
288 		EMSG("Stack pointer out of range: 0x%" PRIxVA " not in [0x%"
289 		     PRIxVA " .. 0x%" PRIxVA "]", current_sp, stack_start,
290 		     stack_end);
291 		print_stack_limits();
292 		panic();
293 	}
294 }
295 
296 static bool * __nostackcheck get_stackcheck_recursion_flag(void)
297 {
298 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
299 	unsigned int pos = get_core_pos();
300 	struct thread_core_local *l = get_core_local(pos);
301 	int ct = l->curr_thread;
302 	bool *p = NULL;
303 
304 	if (l->flags & (THREAD_CLF_ABORT | THREAD_CLF_TMP))
305 		p = &l->stackcheck_recursion;
306 	else if (!l->flags)
307 		p = &threads[ct].tsd.stackcheck_recursion;
308 
309 	thread_unmask_exceptions(exceptions);
310 	return p;
311 }
312 
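/*
 * Hook called on entry to every instrumented function when the core is
 * built with CFG_CORE_DEBUG_CHECK_STACKS. The recursion flag prevents
 * the check from instrumenting itself: the functions called from
 * check_stack_limits() would otherwise trigger this hook again.
 */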
313 void __cyg_profile_func_enter(void *this_fn, void *call_site);
314 void __nostackcheck __cyg_profile_func_enter(void *this_fn __unused,
315 					     void *call_site __unused)
316 {
317 	bool *p = get_stackcheck_recursion_flag();
318 
319 	assert(p);
320 	if (*p)
321 		return;
322 	*p = true;
323 	check_stack_limits();
324 	*p = false;
325 }
326 
327 void __cyg_profile_func_exit(void *this_fn, void *call_site);
328 void __nostackcheck __cyg_profile_func_exit(void *this_fn __unused,
329 					    void *call_site __unused)
330 {
331 }
332 #else
333 static void print_stack_limits(void)
334 {
335 }
336 #endif
337 
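/*
 * The boot CPU runs the boot sequence as thread 0: the slot is marked
 * active here and released again by thread_clr_boot_thread() once the
 * boot code no longer needs a current thread.
 */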
338 void thread_init_boot_thread(void)
339 {
340 	struct thread_core_local *l = thread_get_core_local();
341 
342 	thread_init_threads();
343 
344 	l->curr_thread = 0;
345 	threads[0].state = THREAD_STATE_ACTIVE;
346 }
347 
348 void __nostackcheck thread_clr_boot_thread(void)
349 {
350 	struct thread_core_local *l = thread_get_core_local();
351 
352 	assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS);
353 	assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
354 	threads[l->curr_thread].state = THREAD_STATE_FREE;
355 	l->curr_thread = THREAD_ID_INVALID;
356 }
357 
358 void __nostackcheck *thread_get_tmp_sp(void)
359 {
360 	struct thread_core_local *l = thread_get_core_local();
361 
362 	/*
363 	 * Called from assembly when switching to the temporary stack, so flags
364 	 * need updating
365 	 */
366 	l->flags |= THREAD_CLF_TMP;
367 
368 	return (void *)l->tmp_stack_va_end;
369 }
370 
371 vaddr_t thread_stack_start(void)
372 {
373 	struct thread_ctx *thr;
374 	int ct = thread_get_id_may_fail();
375 
376 	if (ct == THREAD_ID_INVALID)
377 		return 0;
378 
379 	thr = threads + ct;
380 	return stack_end_va_to_top_soft(STACK_THREAD_SIZE, thr->stack_va_end);
381 }
382 
383 size_t thread_stack_size(void)
384 {
385 	return STACK_THREAD_SIZE;
386 }
387 
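/*
 * Report the limits of the stack the calling core is currently running
 * on: the temporary stack, the abort stack or the current thread's
 * stack, depending on the core-local flags. With hard set the returned
 * range also covers the STACK_CHECK_EXTRA slack between the hard and
 * soft tops.
 */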
388 bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard)
389 {
390 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
391 	unsigned int pos = get_core_pos();
392 	struct thread_core_local *l = get_core_local(pos);
393 	int ct = l->curr_thread;
394 	size_t stack_size = 0;
395 	bool ret = true;
396 	vaddr_t va = 0;
397 
398 	if (l->flags & THREAD_CLF_TMP) {
399 		va = l->tmp_stack_va_end + STACK_TMP_OFFS;
400 		stack_size = STACK_TMP_SIZE;
401 	} else if (l->flags & THREAD_CLF_ABORT) {
402 		va = l->abt_stack_va_end;
403 		stack_size = STACK_ABT_SIZE;
404 	} else if (!l->flags && ct >= 0 && ct < CFG_NUM_THREADS) {
405 		va = threads[ct].stack_va_end;
406 		stack_size = STACK_THREAD_SIZE;
407 	} else {
408 		ret = false;
409 		goto out;
410 	}
411 
412 	*end = stack_end_va_to_bottom(stack_size, va);
413 	if (hard)
414 		*start = stack_end_va_to_top_hard(stack_size, va);
415 	else
416 		*start = stack_end_va_to_top_soft(stack_size, va);
417 out:
418 	thread_unmask_exceptions(exceptions);
419 	return ret;
420 }
421 
422 bool thread_is_from_abort_mode(void)
423 {
424 	struct thread_core_local *l = thread_get_core_local();
425 
426 	return (l->flags >> THREAD_CLF_SAVED_SHIFT) & THREAD_CLF_ABORT;
427 }
428 
429 /*
430  * This function should always be accurate, but it might be possible to
431  * implement a more efficient version depending on the CPU architecture.
432  */
433 bool __weak __noprof thread_is_in_normal_mode(void)
434 {
435 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
436 	struct thread_core_local *l = thread_get_core_local();
437 	bool ret;
438 
439 	/*
440 	 * If any bit other than THREAD_CLF_TMP is set in l->flags, we're
441 	 * handling some exception.
442 	 */
443 	ret = (l->curr_thread != THREAD_ID_INVALID) &&
444 	      !(l->flags & ~THREAD_CLF_TMP);
445 	thread_unmask_exceptions(exceptions);
446 
447 	return ret;
448 }
449 
450 short int __noprof thread_get_id_may_fail(void)
451 {
452 	/*
453 	 * thread_get_core_local() requires foreign interrupts to be disabled
454 	 */
455 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
456 	struct thread_core_local *l = thread_get_core_local();
457 	short int ct = l->curr_thread;
458 
459 	thread_unmask_exceptions(exceptions);
460 	return ct;
461 }
462 
463 short int __noprof thread_get_id(void)
464 {
465 	short int ct = thread_get_id_may_fail();
466 
467 	/* Thread ID has to fit in a short int */
468 	COMPILE_TIME_ASSERT(CFG_NUM_THREADS <= SHRT_MAX);
469 	assert(ct >= 0 && ct < CFG_NUM_THREADS);
470 	return ct;
471 }
472 
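/*
 * With the pager, each thread stack is carved out of the core virtual
 * memory pool with one unmapped guard page below it and is backed by a
 * locked paged fobj. Without the pager the statically allocated
 * stack_thread arrays declared above are used directly.
 */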
473 #ifdef CFG_WITH_PAGER
474 static void init_thread_stacks(void)
475 {
476 	size_t n = 0;
477 
478 	/*
479 	 * Allocate virtual memory for thread stacks.
480 	 */
481 	for (n = 0; n < CFG_NUM_THREADS; n++) {
482 		tee_mm_entry_t *mm = NULL;
483 		vaddr_t sp = 0;
484 		size_t num_pages = 0;
485 		struct fobj *fobj = NULL;
486 
487 		/* Find vmem for thread stack and its protection gap */
488 		mm = tee_mm_alloc(&core_virt_mem_pool,
489 				  SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
490 		assert(mm);
491 
492 		/* Claim any physical pages */
493 		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
494 				    true);
495 
496 		num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE - 1;
497 		fobj = fobj_locked_paged_alloc(num_pages);
498 
499 		/* Add the region to the pager */
500 		tee_pager_add_core_region(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE,
501 					  PAGED_REGION_TYPE_LOCK, fobj);
502 		fobj_put(fobj);
503 
504 		/* init effective stack */
505 		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
506 		asan_tag_access((void *)tee_mm_get_smem(mm), (void *)sp);
507 		threads[n].stack_va_end = sp;
508 	}
509 }
510 #else
511 static void init_thread_stacks(void)
512 {
513 	vaddr_t va = 0;
514 	size_t n = 0;
515 
516 	/* Assign the thread stacks */
517 	for (n = 0; n < CFG_NUM_THREADS; n++) {
518 		va = GET_STACK_BOTTOM(stack_thread, n);
519 		threads[n].stack_va_end = va;
520 		if (IS_ENABLED(CFG_WITH_STACK_CANARIES))
521 			init_canaries(STACK_THREAD_SIZE, va);
522 	}
523 }
524 #endif /*CFG_WITH_PAGER*/
525 
526 void thread_init_threads(void)
527 {
528 	size_t n = 0;
529 
530 	init_thread_stacks();
531 	print_stack_limits();
532 	pgt_init();
533 
534 	mutex_lockdep_init();
535 
536 	for (n = 0; n < CFG_NUM_THREADS; n++)
537 		TAILQ_INIT(&threads[n].tsd.sess_stack);
538 }
539 
540 vaddr_t __nostackcheck thread_get_abt_stack(void)
541 {
542 	return GET_STACK_BOTTOM(stack_abt, get_core_pos());
543 }
544 
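/*
 * Record the temporary and abort stack ends in each core's
 * thread_core_local entry. With CFG_BOOT_INIT_CURRENT_THREAD_CORE_LOCAL
 * the entry of the booting core is assumed to be set up already (hence
 * the "continue" for core_pos); otherwise the core-local stacks are
 * filled in separately by thread_init_core_local_stacks().
 */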
545 #ifdef CFG_BOOT_INIT_CURRENT_THREAD_CORE_LOCAL
546 void thread_init_thread_core_local(size_t core_count __maybe_unused)
547 {
548 	struct thread_core_local *tcl = thread_core_local;
549 	const size_t core_pos = get_core_pos();
550 	vaddr_t va = 0;
551 	size_t n = 0;
552 
553 	assert(core_count == CFG_TEE_CORE_NB_CORE);
554 	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
555 		if (n == core_pos)
556 			continue;	/* Already initialized */
557 		tcl[n].curr_thread = THREAD_ID_INVALID;
558 		tcl[n].flags = THREAD_CLF_TMP;
559 
560 		va = GET_STACK_BOTTOM(stack_tmp, n);
561 		tcl[n].tmp_stack_va_end = va - STACK_TMP_OFFS;
562 		if (IS_ENABLED(CFG_WITH_STACK_CANARIES))
563 			init_canaries(STACK_TMP_SIZE, va);
564 		va = GET_STACK_BOTTOM(stack_abt, n);
565 		tcl[n].abt_stack_va_end = va;
566 		if (IS_ENABLED(CFG_WITH_STACK_CANARIES))
567 			init_canaries(STACK_ABT_SIZE, va);
568 	}
569 
570 	/* Might be needed when resuming from suspend on ARMv7. */
571 	if (IS_ENABLED(CFG_ARM32_core) && !IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW))
572 		thread_core_local_pa = virt_to_phys(tcl);
573 }
574 #else
575 void __nostackcheck thread_init_thread_core_local(size_t core_count)
576 {
577 	size_t n = 0;
578 	struct thread_core_local *tcl = thread_core_local;
579 
580 	assert(core_count == CFG_TEE_CORE_NB_CORE);
581 	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
582 		tcl[n].curr_thread = THREAD_ID_INVALID;
583 		tcl[n].flags = THREAD_CLF_TMP;
584 	}
585 	tcl[0].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, 0);
586 }
587 
588 void __nostackcheck thread_init_core_local_stacks(void)
589 {
590 	size_t n = 0;
591 	struct thread_core_local *tcl = thread_core_local;
592 
593 	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
594 		tcl[n].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, n) -
595 					  STACK_TMP_OFFS;
596 		tcl[n].abt_stack_va_end = GET_STACK_BOTTOM(stack_abt, n);
597 	}
598 }
599 #endif /*CFG_BOOT_INIT_CURRENT_THREAD_CORE_LOCAL*/
600 
601 #if defined(CFG_CORE_PAUTH)
602 void thread_init_thread_pauth_keys(void)
603 {
604 	size_t n = 0;
605 
606 	for (n = 0; n < CFG_NUM_THREADS; n++)
607 		if (crypto_rng_read(&threads[n].keys, sizeof(threads[n].keys)))
608 			panic("Failed to init thread pauth keys");
609 }
610 
611 void thread_init_core_local_pauth_keys(void)
612 {
613 	struct thread_core_local *tcl = thread_core_local;
614 	size_t n = 0;
615 
616 	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
617 		if (crypto_rng_read(&tcl[n].keys, sizeof(tcl[n].keys)))
618 			panic("Failed to init core local pauth keys");
619 }
620 #endif
621 
622 struct thread_specific_data * __noprof thread_get_tsd(void)
623 {
624 	return &threads[thread_get_id()].tsd;
625 }
626 
627 struct thread_ctx_regs * __nostackcheck thread_get_ctx_regs(void)
628 {
629 	struct thread_core_local *l = thread_get_core_local();
630 
631 	assert(l->curr_thread != THREAD_ID_INVALID);
632 	return &threads[l->curr_thread].regs;
633 }
634 
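/*
 * Record in the current thread's flags whether foreign interrupts
 * (interrupts left to the normal world) should be enabled while the
 * thread runs, and update the exception mask accordingly.
 * thread_restore_foreign_intr() re-applies the recorded choice after
 * the mask has been forced elsewhere.
 */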
635 void thread_set_foreign_intr(bool enable)
636 {
637 	/* thread_get_core_local() requires foreign interrupts to be disabled */
638 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
639 	struct thread_core_local *l;
640 
641 	l = thread_get_core_local();
642 
643 	assert(l->curr_thread != THREAD_ID_INVALID);
644 
645 	if (enable) {
646 		threads[l->curr_thread].flags |=
647 					THREAD_FLAGS_FOREIGN_INTR_ENABLE;
648 		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
649 	} else {
650 		/*
651 		 * No need to disable foreign interrupts here since they're
652 		 * already disabled above.
653 		 */
654 		threads[l->curr_thread].flags &=
655 					~THREAD_FLAGS_FOREIGN_INTR_ENABLE;
656 	}
657 }
658 
659 void thread_restore_foreign_intr(void)
660 {
661 	/* thread_get_core_local() requires foreign interrupts to be disabled */
662 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
663 	struct thread_core_local *l;
664 
665 	l = thread_get_core_local();
666 
667 	assert(l->curr_thread != THREAD_ID_INVALID);
668 
669 	if (threads[l->curr_thread].flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE)
670 		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
671 }
672 
673 static struct mobj *alloc_shm(enum thread_shm_type shm_type, size_t size)
674 {
675 	switch (shm_type) {
676 	case THREAD_SHM_TYPE_APPLICATION:
677 		return thread_rpc_alloc_payload(size);
678 	case THREAD_SHM_TYPE_KERNEL_PRIVATE:
679 		return thread_rpc_alloc_kernel_payload(size);
680 	case THREAD_SHM_TYPE_GLOBAL:
681 		return thread_rpc_alloc_global_payload(size);
682 	default:
683 		return NULL;
684 	}
685 }
686 
687 static void clear_shm_cache_entry(struct thread_shm_cache_entry *ce)
688 {
689 	if (ce->mobj) {
690 		switch (ce->type) {
691 		case THREAD_SHM_TYPE_APPLICATION:
692 			thread_rpc_free_payload(ce->mobj);
693 			break;
694 		case THREAD_SHM_TYPE_KERNEL_PRIVATE:
695 			thread_rpc_free_kernel_payload(ce->mobj);
696 			break;
697 		case THREAD_SHM_TYPE_GLOBAL:
698 			thread_rpc_free_global_payload(ce->mobj);
699 			break;
700 		default:
701 			assert(0); /* "can't happen" */
702 			break;
703 		}
704 	}
705 	ce->mobj = NULL;
706 	ce->size = 0;
707 }
708 
709 static struct thread_shm_cache_entry *
710 get_shm_cache_entry(enum thread_shm_cache_user user)
711 {
712 	struct thread_shm_cache *cache = &threads[thread_get_id()].shm_cache;
713 	struct thread_shm_cache_entry *ce = NULL;
714 
715 	SLIST_FOREACH(ce, cache, link)
716 		if (ce->user == user)
717 			return ce;
718 
719 	ce = calloc(1, sizeof(*ce));
720 	if (ce) {
721 		ce->user = user;
722 		SLIST_INSERT_HEAD(cache, ce, link);
723 	}
724 
725 	return ce;
726 }
727 
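/*
 * Return a per-thread cached piece of RPC shared memory for the given
 * user. The cached entry is reused as long as the requested type
 * matches and the page-rounded size still fits; otherwise it is freed
 * and reallocated. Minimal usage sketch (the user and type below are
 * only examples of the enum values defined in the thread headers):
 *
 *	struct mobj *mobj = NULL;
 *	void *va = thread_rpc_shm_cache_alloc(THREAD_SHM_CACHE_USER_FS,
 *					      THREAD_SHM_TYPE_APPLICATION,
 *					      len, &mobj);
 *	if (!va)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *
 * The buffer stays owned by the cache: it is released by
 * thread_rpc_shm_cache_clear() when the thread's cache is torn down,
 * not by the caller.
 */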
728 void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user,
729 				 enum thread_shm_type shm_type,
730 				 size_t size, struct mobj **mobj)
731 {
732 	struct thread_shm_cache_entry *ce = NULL;
733 	size_t sz = size;
734 	paddr_t p = 0;
735 	void *va = NULL;
736 
737 	if (!size)
738 		return NULL;
739 
740 	ce = get_shm_cache_entry(user);
741 	if (!ce)
742 		return NULL;
743 
744 	/*
745 	 * Always allocate in page chunks as normal world allocates payload
746 	 * memory as complete pages.
747 	 */
748 	sz = ROUNDUP(size, SMALL_PAGE_SIZE);
749 
750 	if (ce->type != shm_type || sz > ce->size) {
751 		clear_shm_cache_entry(ce);
752 
753 		ce->mobj = alloc_shm(shm_type, sz);
754 		if (!ce->mobj)
755 			return NULL;
756 
757 		if (mobj_get_pa(ce->mobj, 0, 0, &p))
758 			goto err;
759 
760 		if (!IS_ALIGNED_WITH_TYPE(p, uint64_t))
761 			goto err;
762 
763 		va = mobj_get_va(ce->mobj, 0, sz);
764 		if (!va)
765 			goto err;
766 
767 		ce->size = sz;
768 		ce->type = shm_type;
769 	} else {
770 		va = mobj_get_va(ce->mobj, 0, sz);
771 		if (!va)
772 			goto err;
773 	}
774 	*mobj = ce->mobj;
775 
776 	return va;
777 err:
778 	clear_shm_cache_entry(ce);
779 	return NULL;
780 }
781 
782 void thread_rpc_shm_cache_clear(struct thread_shm_cache *cache)
783 {
784 	while (true) {
785 		struct thread_shm_cache_entry *ce = SLIST_FIRST(cache);
786 
787 		if (!ce)
788 			break;
789 		SLIST_REMOVE_HEAD(cache, link);
790 		clear_shm_cache_entry(ce);
791 		free(ce);
792 	}
793 }
794