xref: /optee_os/core/kernel/thread.c (revision 05994c760d5d792c6c9c98342ff4957425567953)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2022, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  * Copyright (c) 2020-2021, Arm Limited
6  */
7 
8 #include <config.h>
9 #include <crypto/crypto.h>
10 #include <kernel/asan.h>
11 #include <kernel/boot.h>
12 #include <kernel/lockdep.h>
13 #include <kernel/misc.h>
14 #include <kernel/panic.h>
15 #include <kernel/spinlock.h>
16 #include <kernel/thread.h>
17 #include <kernel/thread_private.h>
18 #include <mm/mobj.h>
19 
20 struct thread_ctx threads[CFG_NUM_THREADS];
21 
22 struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE] __nex_bss;
23 
24 /*
25  * Stacks
26  *
27  * [Lower addresses on the left]
28  *
29  * [ STACK_CANARY_SIZE/2 | STACK_CHECK_EXTRA | STACK_XXX_SIZE | STACK_CANARY_SIZE/2 ]
30  * ^                     ^                   ^                ^
31  * stack_xxx[n]          "hard" top          "soft" top       bottom
32  */
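/*
 * "bottom" above is the end_va value passed to the stack_end_va_to_*()
 * helpers below: from it they recover the "hard" top, the "soft" top
 * (hard top plus STACK_CHECK_EXTRA) and the two canary words, which
 * occupy the first and last uint32_t of stack_xxx[n].
 */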
33 
34 static uint32_t start_canary_value = 0xdedede00;
35 static uint32_t end_canary_value = 0xababab00;
36 #ifdef CFG_WITH_STACK_CANARIES
37 #define GET_START_CANARY(name, stack_num) name[stack_num][0]
38 #define GET_END_CANARY(name, stack_num) \
39 	name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1]
40 #endif
41 
42 #define DECLARE_STACK(name, num_stacks, stack_size, linkage) \
43 linkage uint32_t name[num_stacks] \
44 		[ROUNDUP(stack_size + STACK_CANARY_SIZE + STACK_CHECK_EXTRA, \
45 			 STACK_ALIGNMENT) / sizeof(uint32_t)] \
46 		__attribute__((section(".nozi_stack." # name), \
47 			       aligned(STACK_ALIGNMENT)))
48 
49 DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE,
50 	      /* global linkage */);
51 DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static);
52 #ifndef CFG_WITH_PAGER
53 DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE, static);
54 #endif
55 
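/*
 * Address of the "bottom" in the figure above: the end of the usable
 * stack area, just below the trailing half-canary. This is what the
 * *_va_end fields for the statically allocated stacks are initialized
 * from.
 */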
56 #define GET_STACK_BOTTOM(stack, n) ((vaddr_t)&(stack)[n] + sizeof(stack[n]) - \
57 				    STACK_CANARY_SIZE / 2)
58 
59 const uint32_t stack_tmp_stride __section(".identity_map.stack_tmp_stride") =
60 	sizeof(stack_tmp[0]);
61 
62 /*
63  * This stack setup info is required by secondary boot cores before they
64  * each locally enable the pager (the mmu). Hence kept in pager sections.
65  */
66 DECLARE_KEEP_PAGER(stack_tmp_stride);
67 
68 static unsigned int thread_global_lock __nex_bss = SPINLOCK_UNLOCK;
69 
70 static size_t stack_size_to_alloc_size(size_t stack_size)
71 {
72 	return ROUNDUP(stack_size + STACK_CANARY_SIZE + STACK_CHECK_EXTRA,
73 		       STACK_ALIGNMENT);
74 }
75 
76 static vaddr_t stack_end_va_to_top_hard(size_t stack_size, vaddr_t end_va)
77 {
78 	size_t l = stack_size_to_alloc_size(stack_size);
79 
80 	return end_va - l + STACK_CANARY_SIZE;
81 }
82 
83 static vaddr_t stack_end_va_to_top_soft(size_t stack_size, vaddr_t end_va)
84 {
85 	return stack_end_va_to_top_hard(stack_size, end_va) + STACK_CHECK_EXTRA;
86 }
87 
88 static vaddr_t stack_end_va_to_bottom(size_t stack_size __unused,
89 				      vaddr_t end_va)
90 {
91 	return end_va;
92 }
93 
94 static uint32_t *stack_end_va_to_start_canary(size_t stack_size, vaddr_t end_va)
95 {
96 	return (uint32_t *)(stack_end_va_to_top_hard(stack_size, end_va) -
97 			    STACK_CANARY_SIZE / 2);
98 }
99 
100 static uint32_t *stack_end_va_to_end_canary(size_t stack_size __unused,
101 					    vaddr_t end_va)
102 {
103 	return (uint32_t *)(end_va + STACK_CANARY_SIZE / 2 - sizeof(uint32_t));
104 }
105 
106 void thread_init_canaries(void)
107 {
108 #ifdef CFG_WITH_STACK_CANARIES
109 	size_t n;
110 #define INIT_CANARY(name)						\
111 	for (n = 0; n < ARRAY_SIZE(name); n++) {			\
112 		uint32_t *start_canary = &GET_START_CANARY(name, n);	\
113 		uint32_t *end_canary = &GET_END_CANARY(name, n);	\
114 									\
115 		*start_canary = start_canary_value;			\
116 		*end_canary = end_canary_value;				\
117 	}
118 
119 	INIT_CANARY(stack_tmp);
120 	INIT_CANARY(stack_abt);
121 #if !defined(CFG_WITH_PAGER) && !defined(CFG_NS_VIRTUALIZATION)
122 	INIT_CANARY(stack_thread);
123 #endif
124 #endif/*CFG_WITH_STACK_CANARIES*/
125 }
126 
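/*
 * Replace the compile-time canary values with random values from the
 * platform and rewrite all stack canaries. The current canaries are
 * checked before they are overwritten.
 */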
127 #if defined(CFG_WITH_STACK_CANARIES)
128 void thread_update_canaries(void)
129 {
130 	uint32_t canary[2] = { };
131 	uint32_t exceptions = 0;
132 
133 	plat_get_random_stack_canaries(canary, ARRAY_SIZE(canary),
134 				       sizeof(canary[0]));
135 
136 	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
137 
138 	thread_check_canaries();
139 
140 	start_canary_value = canary[0];
141 	end_canary_value = canary[1];
142 	thread_init_canaries();
143 
144 	thread_unmask_exceptions(exceptions);
145 }
146 #endif
147 
148 static void check_stack_canary(const char *stack_name __maybe_unused,
149 			       size_t n __maybe_unused,
150 			       size_t stack_size, vaddr_t end_va)
151 {
152 	uint32_t *canary = NULL;
153 
154 	canary = stack_end_va_to_start_canary(stack_size, end_va);
155 	if (*canary != start_canary_value) {
156 		EMSG_RAW("Dead canary at start of '%s[%zu]' (%p)",
157 			 stack_name, n, (void *)canary);
158 		panic();
159 	}
160 
161 	canary = stack_end_va_to_end_canary(stack_size, end_va);
162 	if (*canary != end_canary_value) {
163 		EMSG_RAW("Dead canary at end of '%s[%zu]' (%p)",
164 			 stack_name, n, (void *)canary);
165 		panic();
166 	}
167 }
168 
169 void thread_check_canaries(void)
170 {
171 	vaddr_t va = 0;
172 	size_t n = 0;
173 
174 	if (IS_ENABLED(CFG_WITH_STACK_CANARIES)) {
175 		for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
176 			va = thread_core_local[n].tmp_stack_va_end +
177 			     STACK_TMP_OFFS;
178 			check_stack_canary("tmp_stack", n, STACK_TMP_SIZE, va);
179 
180 			va = thread_core_local[n].abt_stack_va_end;
181 			check_stack_canary("abt_stack", n, STACK_ABT_SIZE, va);
182 		}
183 	}
184 
185 	if (IS_ENABLED(CFG_WITH_STACK_CANARIES) &&
186 	    !IS_ENABLED(CFG_WITH_PAGER) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
187 		for (n = 0; n < CFG_NUM_THREADS; n++) {
188 			va = threads[n].stack_va_end;
189 			check_stack_canary("thread_stack", n,
190 					   STACK_THREAD_SIZE, va);
191 		}
192 	}
193 }
194 
195 void thread_lock_global(void)
196 {
197 	cpu_spin_lock(&thread_global_lock);
198 }
199 
200 void thread_unlock_global(void)
201 {
202 	cpu_spin_unlock(&thread_global_lock);
203 }
204 
205 static struct thread_core_local * __nostackcheck
206 get_core_local(unsigned int pos)
207 {
208 	/*
209 	 * Foreign interrupts must be disabled before playing with core_local
210 	 * since we otherwise may be rescheduled to a different core in the
211 	 * middle of this function.
212 	 */
213 	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
214 
215 	assert(pos < CFG_TEE_CORE_NB_CORE);
216 	return &thread_core_local[pos];
217 }
218 
219 struct thread_core_local * __nostackcheck thread_get_core_local(void)
220 {
221 	unsigned int pos = get_core_pos();
222 
223 	return get_core_local(pos);
224 }
225 
226 #ifdef CFG_CORE_DEBUG_CHECK_STACKS
227 static void print_stack_limits(void)
228 {
229 	size_t n = 0;
230 	vaddr_t __maybe_unused start = 0;
231 	vaddr_t __maybe_unused end = 0;
232 	vaddr_t va = 0;
233 
234 	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
235 		va = thread_core_local[n].tmp_stack_va_end + STACK_TMP_OFFS;
236 		start = stack_end_va_to_top_soft(STACK_TMP_SIZE, va);
237 		end = stack_end_va_to_bottom(STACK_TMP_SIZE, va);
238 		DMSG("tmp [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
239 
240 		va = thread_core_local[n].abt_stack_va_end;
241 		start = stack_end_va_to_top_soft(STACK_ABT_SIZE, va);
242 		end = stack_end_va_to_bottom(STACK_ABT_SIZE, va);
243 		DMSG("abt [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
244 	}
245 
246 	for (n = 0; n < CFG_NUM_THREADS; n++) {
247 		va = threads[n].stack_va_end;
248 		start = stack_end_va_to_top_soft(STACK_THREAD_SIZE, va);
249 		end = stack_end_va_to_bottom(STACK_THREAD_SIZE, va);
250 		DMSG("thr [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
251 	}
252 }
253 
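/*
 * Verify that the caller's stack pointer lies within the soft limits of
 * the stack currently active on this core.
 */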
254 static void check_stack_limits(void)
255 {
256 	vaddr_t stack_start = 0;
257 	vaddr_t stack_end = 0;
258 	/* Any value in the current stack frame will do */
259 	vaddr_t current_sp = (vaddr_t)&stack_start;
260 
261 	if (!get_stack_soft_limits(&stack_start, &stack_end))
262 		panic("Unknown stack limits");
263 	if (current_sp < stack_start || current_sp > stack_end) {
264 		EMSG("Stack pointer out of range: 0x%" PRIxVA " not in [0x%"
265 		     PRIxVA " .. 0x%" PRIxVA "]", current_sp, stack_start,
266 		     stack_end);
267 		print_stack_limits();
268 		panic();
269 	}
270 }
271 
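/*
 * Return the recursion guard used by the stack check instrumentation:
 * the core-local flag while this core handles an abort or runs on the
 * temporary stack, the current thread's flag when no core-local flags
 * are set, or NULL otherwise.
 */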
272 static bool * __nostackcheck get_stackcheck_recursion_flag(void)
273 {
274 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
275 	unsigned int pos = get_core_pos();
276 	struct thread_core_local *l = get_core_local(pos);
277 	int ct = l->curr_thread;
278 	bool *p = NULL;
279 
280 	if (l->flags & (THREAD_CLF_ABORT | THREAD_CLF_TMP))
281 		p = &l->stackcheck_recursion;
282 	else if (!l->flags)
283 		p = &threads[ct].tsd.stackcheck_recursion;
284 
285 	thread_unmask_exceptions(exceptions);
286 	return p;
287 }
288 
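/*
 * Hook emitted by the compiler for -finstrument-functions builds. The
 * recursion flag keeps check_stack_limits() and its callees, which are
 * themselves instrumented, from re-entering the check.
 */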
289 void __cyg_profile_func_enter(void *this_fn, void *call_site);
290 void __nostackcheck __cyg_profile_func_enter(void *this_fn __unused,
291 					     void *call_site __unused)
292 {
293 	bool *p = get_stackcheck_recursion_flag();
294 
295 	assert(p);
296 	if (*p)
297 		return;
298 	*p = true;
299 	check_stack_limits();
300 	*p = false;
301 }
302 
303 void __cyg_profile_func_exit(void *this_fn, void *call_site);
304 void __nostackcheck __cyg_profile_func_exit(void *this_fn __unused,
305 					    void *call_site __unused)
306 {
307 }
308 #else
309 static void print_stack_limits(void)
310 {
311 }
312 #endif
313 
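/*
 * Let the boot core run in the context of thread 0 during init, until
 * thread_clr_boot_thread() releases the thread again.
 */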
314 void thread_init_boot_thread(void)
315 {
316 	struct thread_core_local *l = thread_get_core_local();
317 
318 	thread_init_threads();
319 
320 	l->curr_thread = 0;
321 	threads[0].state = THREAD_STATE_ACTIVE;
322 }
323 
324 void __nostackcheck thread_clr_boot_thread(void)
325 {
326 	struct thread_core_local *l = thread_get_core_local();
327 
328 	assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS);
329 	assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
330 	threads[l->curr_thread].state = THREAD_STATE_FREE;
331 	l->curr_thread = THREAD_ID_INVALID;
332 }
333 
334 void __nostackcheck *thread_get_tmp_sp(void)
335 {
336 	struct thread_core_local *l = thread_get_core_local();
337 
338 	/*
339 	 * Called from assembly when switching to the temporary stack, so flags
340 	 * need updating
341 	 */
342 	l->flags |= THREAD_CLF_TMP;
343 
344 	return (void *)l->tmp_stack_va_end;
345 }
346 
347 vaddr_t thread_stack_start(void)
348 {
349 	struct thread_ctx *thr;
350 	int ct = thread_get_id_may_fail();
351 
352 	if (ct == THREAD_ID_INVALID)
353 		return 0;
354 
355 	thr = threads + ct;
356 	return stack_end_va_to_top_soft(STACK_THREAD_SIZE, thr->stack_va_end);
357 }
358 
359 size_t thread_stack_size(void)
360 {
361 	return STACK_THREAD_SIZE;
362 }
363 
364 bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard)
365 {
366 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
367 	unsigned int pos = get_core_pos();
368 	struct thread_core_local *l = get_core_local(pos);
369 	int ct = l->curr_thread;
370 	size_t stack_size = 0;
371 	bool ret = true;
372 	vaddr_t va = 0;
373 
374 	if (l->flags & THREAD_CLF_TMP) {
375 		va = l->tmp_stack_va_end + STACK_TMP_OFFS;
376 		stack_size = STACK_TMP_SIZE;
377 	} else if (l->flags & THREAD_CLF_ABORT) {
378 		va = l->abt_stack_va_end;
379 		stack_size = STACK_ABT_SIZE;
380 	} else if (!l->flags && ct >= 0 && ct < CFG_NUM_THREADS) {
381 		va = threads[ct].stack_va_end;
382 		stack_size = STACK_THREAD_SIZE;
383 	} else {
384 		ret = false;
385 		goto out;
386 	}
387 
388 	*end = stack_end_va_to_bottom(stack_size, va);
389 	if (hard)
390 		*start = stack_end_va_to_top_hard(stack_size, va);
391 	else
392 		*start = stack_end_va_to_top_soft(stack_size, va);
393 out:
394 	thread_unmask_exceptions(exceptions);
395 	return ret;
396 }
397 
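/*
 * Return true if the previously saved core-local flags have
 * THREAD_CLF_ABORT set, i.e. the current exception was taken from abort
 * mode.
 */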
398 bool thread_is_from_abort_mode(void)
399 {
400 	struct thread_core_local *l = thread_get_core_local();
401 
402 	return (l->flags >> THREAD_CLF_SAVED_SHIFT) & THREAD_CLF_ABORT;
403 }
404 
405 /*
406  * This function should always be accurate, but it might be possible to
407  * implement a more efficient one, depending on the CPU architecture.
408  */
409 bool __weak thread_is_in_normal_mode(void)
410 {
411 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
412 	struct thread_core_local *l = thread_get_core_local();
413 	bool ret;
414 
415 	/*
416 	 * If any bit in l->flags is set aside from THREAD_CLF_TMP we're
417 	 * handling some exception.
418 	 */
419 	ret = (l->curr_thread != THREAD_ID_INVALID) &&
420 	      !(l->flags & ~THREAD_CLF_TMP);
421 	thread_unmask_exceptions(exceptions);
422 
423 	return ret;
424 }
425 
426 short int __noprof thread_get_id_may_fail(void)
427 {
428 	/*
429 	 * thread_get_core_local() requires foreign interrupts to be disabled
430 	 */
431 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
432 	struct thread_core_local *l = thread_get_core_local();
433 	short int ct = l->curr_thread;
434 
435 	thread_unmask_exceptions(exceptions);
436 	return ct;
437 }
438 
439 short int __noprof thread_get_id(void)
440 {
441 	short int ct = thread_get_id_may_fail();
442 
443 	/* Thread ID has to fit in a short int */
444 	COMPILE_TIME_ASSERT(CFG_NUM_THREADS <= SHRT_MAX);
445 	assert(ct >= 0 && ct < CFG_NUM_THREADS);
446 	return ct;
447 }
448 
449 #ifdef CFG_WITH_PAGER
450 static void init_thread_stacks(void)
451 {
452 	size_t n = 0;
453 
454 	/*
455 	 * Allocate virtual memory for thread stacks.
456 	 */
457 	for (n = 0; n < CFG_NUM_THREADS; n++) {
458 		tee_mm_entry_t *mm = NULL;
459 		vaddr_t sp = 0;
460 		size_t num_pages = 0;
461 		struct fobj *fobj = NULL;
462 
463 		/* Find vmem for thread stack and its protection gap */
464 		mm = tee_mm_alloc(&core_virt_mem_pool,
465 				  SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
466 		assert(mm);
467 
468 		/* Claim any physical pages */
469 		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
470 				    true);
471 
472 		num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE - 1;
473 		fobj = fobj_locked_paged_alloc(num_pages);
474 
475 		/* Add the region to the pager */
476 		tee_pager_add_core_region(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE,
477 					  PAGED_REGION_TYPE_LOCK, fobj);
478 		fobj_put(fobj);
479 
480 		/* init effective stack */
481 		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
482 		asan_tag_access((void *)tee_mm_get_smem(mm), (void *)sp);
483 		threads[n].stack_va_end = sp;
484 	}
485 }
486 #else
487 static void init_thread_stacks(void)
488 {
489 	size_t n;
490 
491 	/* Assign the thread stacks */
492 	for (n = 0; n < CFG_NUM_THREADS; n++)
493 		threads[n].stack_va_end = GET_STACK_BOTTOM(stack_thread, n);
494 }
495 #endif /*CFG_WITH_PAGER*/
496 
497 void thread_init_threads(void)
498 {
499 	size_t n = 0;
500 
501 	init_thread_stacks();
502 	print_stack_limits();
503 	pgt_init();
504 
505 	mutex_lockdep_init();
506 
507 	for (n = 0; n < CFG_NUM_THREADS; n++)
508 		TAILQ_INIT(&threads[n].tsd.sess_stack);
509 }
510 
511 vaddr_t __nostackcheck thread_get_abt_stack(void)
512 {
513 	return GET_STACK_BOTTOM(stack_abt, get_core_pos());
514 }
515 
516 #ifdef CFG_BOOT_INIT_THREAD_CORE_LOCAL0
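/*
 * Entry 0 is skipped below: with CFG_BOOT_INIT_THREAD_CORE_LOCAL0 the
 * boot core's thread_core_local entry is assumed to already have been
 * initialized by the early boot code.
 */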
517 void thread_init_thread_core_local(void)
518 {
519 	size_t n = 0;
520 	struct thread_core_local *tcl = thread_core_local;
521 
522 	for (n = 1; n < CFG_TEE_CORE_NB_CORE; n++) {
523 		tcl[n].curr_thread = THREAD_ID_INVALID;
524 		tcl[n].flags = THREAD_CLF_TMP;
525 		tcl[n].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, n) -
526 					  STACK_TMP_OFFS;
527 		tcl[n].abt_stack_va_end = GET_STACK_BOTTOM(stack_abt, n);
528 	}
529 }
530 #else
531 void __nostackcheck thread_init_thread_core_local(void)
532 {
533 	size_t n = 0;
534 	struct thread_core_local *tcl = thread_core_local;
535 
536 	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
537 		tcl[n].curr_thread = THREAD_ID_INVALID;
538 		tcl[n].flags = THREAD_CLF_TMP;
539 	}
540 	tcl[0].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, 0);
541 }
542 
543 void __nostackcheck thread_init_core_local_stacks(void)
544 {
545 	size_t n = 0;
546 	struct thread_core_local *tcl = thread_core_local;
547 
548 	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
549 		tcl[n].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, n) -
550 					  STACK_TMP_OFFS;
551 		tcl[n].abt_stack_va_end = GET_STACK_BOTTOM(stack_abt, n);
552 	}
553 }
554 #endif /*CFG_BOOT_INIT_THREAD_CORE_LOCAL0*/
555 
556 #if defined(CFG_CORE_PAUTH)
557 void thread_init_thread_pauth_keys(void)
558 {
559 	size_t n = 0;
560 
561 	for (n = 0; n < CFG_NUM_THREADS; n++)
562 		if (crypto_rng_read(&threads[n].keys, sizeof(threads[n].keys)))
563 			panic("Failed to init thread pauth keys");
564 }
565 
566 void thread_init_core_local_pauth_keys(void)
567 {
568 	struct thread_core_local *tcl = thread_core_local;
569 	size_t n = 0;
570 
571 	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
572 		if (crypto_rng_read(&tcl[n].keys, sizeof(tcl[n].keys)))
573 			panic("Failed to init core local pauth keys");
574 }
575 #endif
576 
577 struct thread_specific_data * __noprof thread_get_tsd(void)
578 {
579 	return &threads[thread_get_id()].tsd;
580 }
581 
582 struct thread_ctx_regs * __nostackcheck thread_get_ctx_regs(void)
583 {
584 	struct thread_core_local *l = thread_get_core_local();
585 
586 	assert(l->curr_thread != THREAD_ID_INVALID);
587 	return &threads[l->curr_thread].regs;
588 }
589 
590 void thread_set_foreign_intr(bool enable)
591 {
592 	/* thread_get_core_local() requires foreign interrupts to be disabled */
593 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
594 	struct thread_core_local *l;
595 
596 	l = thread_get_core_local();
597 
598 	assert(l->curr_thread != THREAD_ID_INVALID);
599 
600 	if (enable) {
601 		threads[l->curr_thread].flags |=
602 					THREAD_FLAGS_FOREIGN_INTR_ENABLE;
603 		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
604 	} else {
605 		/*
606 		 * No need to disable foreign interrupts here since they're
607 		 * already disabled above.
608 		 */
609 		threads[l->curr_thread].flags &=
610 					~THREAD_FLAGS_FOREIGN_INTR_ENABLE;
611 	}
612 }
613 
614 void thread_restore_foreign_intr(void)
615 {
616 	/* thread_get_core_local() requires foreign interrupts to be disabled */
617 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
618 	struct thread_core_local *l;
619 
620 	l = thread_get_core_local();
621 
622 	assert(l->curr_thread != THREAD_ID_INVALID);
623 
624 	if (threads[l->curr_thread].flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE)
625 		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
626 }
627 
628 static struct mobj *alloc_shm(enum thread_shm_type shm_type, size_t size)
629 {
630 	switch (shm_type) {
631 	case THREAD_SHM_TYPE_APPLICATION:
632 		return thread_rpc_alloc_payload(size);
633 	case THREAD_SHM_TYPE_KERNEL_PRIVATE:
634 		return thread_rpc_alloc_kernel_payload(size);
635 	case THREAD_SHM_TYPE_GLOBAL:
636 		return thread_rpc_alloc_global_payload(size);
637 	default:
638 		return NULL;
639 	}
640 }
641 
642 static void clear_shm_cache_entry(struct thread_shm_cache_entry *ce)
643 {
644 	if (ce->mobj) {
645 		switch (ce->type) {
646 		case THREAD_SHM_TYPE_APPLICATION:
647 			thread_rpc_free_payload(ce->mobj);
648 			break;
649 		case THREAD_SHM_TYPE_KERNEL_PRIVATE:
650 			thread_rpc_free_kernel_payload(ce->mobj);
651 			break;
652 		case THREAD_SHM_TYPE_GLOBAL:
653 			thread_rpc_free_global_payload(ce->mobj);
654 			break;
655 		default:
656 			assert(0); /* "can't happen" */
657 			break;
658 		}
659 	}
660 	ce->mobj = NULL;
661 	ce->size = 0;
662 }
663 
664 static struct thread_shm_cache_entry *
665 get_shm_cache_entry(enum thread_shm_cache_user user)
666 {
667 	struct thread_shm_cache *cache = &threads[thread_get_id()].shm_cache;
668 	struct thread_shm_cache_entry *ce = NULL;
669 
670 	SLIST_FOREACH(ce, cache, link)
671 		if (ce->user == user)
672 			return ce;
673 
674 	ce = calloc(1, sizeof(*ce));
675 	if (ce) {
676 		ce->user = user;
677 		SLIST_INSERT_HEAD(cache, ce, link);
678 	}
679 
680 	return ce;
681 }
682 
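/*
 * Return a mapped buffer of at least 'size' bytes of RPC shared memory
 * for the given cache user. The per-thread cache entry is reused when
 * its type matches and it is large enough, otherwise it is replaced by a
 * new allocation of the requested type. The backing mobj is returned
 * through '*mobj'.
 */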
683 void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user,
684 				 enum thread_shm_type shm_type,
685 				 size_t size, struct mobj **mobj)
686 {
687 	struct thread_shm_cache_entry *ce = NULL;
688 	size_t sz = size;
689 	paddr_t p = 0;
690 	void *va = NULL;
691 
692 	if (!size)
693 		return NULL;
694 
695 	ce = get_shm_cache_entry(user);
696 	if (!ce)
697 		return NULL;
698 
699 	/*
700 	 * Always allocate in page-sized chunks since the normal world allocates
701 	 * payload memory as complete pages.
702 	 */
703 	sz = ROUNDUP(size, SMALL_PAGE_SIZE);
704 
705 	if (ce->type != shm_type || sz > ce->size) {
706 		clear_shm_cache_entry(ce);
707 
708 		ce->mobj = alloc_shm(shm_type, sz);
709 		if (!ce->mobj)
710 			return NULL;
711 
712 		if (mobj_get_pa(ce->mobj, 0, 0, &p))
713 			goto err;
714 
715 		if (!IS_ALIGNED_WITH_TYPE(p, uint64_t))
716 			goto err;
717 
718 		va = mobj_get_va(ce->mobj, 0, sz);
719 		if (!va)
720 			goto err;
721 
722 		ce->size = sz;
723 		ce->type = shm_type;
724 	} else {
725 		va = mobj_get_va(ce->mobj, 0, sz);
726 		if (!va)
727 			goto err;
728 	}
729 	*mobj = ce->mobj;
730 
731 	return va;
732 err:
733 	clear_shm_cache_entry(ce);
734 	return NULL;
735 }
736 
737 void thread_rpc_shm_cache_clear(struct thread_shm_cache *cache)
738 {
739 	while (true) {
740 		struct thread_shm_cache_entry *ce = SLIST_FIRST(cache);
741 
742 		if (!ce)
743 			break;
744 		SLIST_REMOVE_HEAD(cache, link);
745 		clear_shm_cache_entry(ce);
746 		free(ce);
747 	}
748 }
749