xref: /optee_os/core/kernel/thread.c (revision c95d740ab3604844575dc99dad8bd512781c5d07)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2022, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <config.h>
#include <crypto/crypto.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/mobj.h>

struct thread_ctx threads[CFG_NUM_THREADS];

struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE] __nex_bss;

/*
 * Stacks
 *
 * [Lower addresses on the left]
 *
 * [ STACK_CANARY_SIZE/2 | STACK_CHECK_EXTRA | STACK_XXX_SIZE | STACK_CANARY_SIZE/2 ]
 * ^                     ^                   ^                ^
 * stack_xxx[n]          "hard" top          "soft" top       bottom
 */

static uint32_t start_canary_value = 0xdedede00;
static uint32_t end_canary_value = 0xababab00;

#define DECLARE_STACK(name, num_stacks, stack_size, linkage) \
linkage uint32_t name[num_stacks] \
		[ROUNDUP(stack_size + STACK_CANARY_SIZE + STACK_CHECK_EXTRA, \
			 STACK_ALIGNMENT) / sizeof(uint32_t)] \
		__attribute__((section(".nozi_stack." # name), \
			       aligned(STACK_ALIGNMENT)))

DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE,
	      /* global linkage */);
DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static);
#ifndef CFG_WITH_PAGER
DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE, static);
#endif

#define GET_STACK_BOTTOM(stack, n) ((vaddr_t)&(stack)[n] + sizeof(stack[n]) - \
				    STACK_CANARY_SIZE / 2)

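/*
 * Note that sizeof(stack[n]) covers the whole allocation, canaries
 * included, so GET_STACK_BOTTOM() yields the address just past the
 * usable stack, where the end-canary half begins. The stack itself
 * grows downwards from this bottom.
 */
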
const uint32_t stack_tmp_stride __section(".identity_map.stack_tmp_stride") =
	sizeof(stack_tmp[0]);

/*
 * This stack setup information is required by secondary boot cores
 * before they each locally enable the pager (the MMU). Hence it is
 * kept in pager sections.
 */
DECLARE_KEEP_PAGER(stack_tmp_stride);

static unsigned int thread_global_lock __nex_bss = SPINLOCK_UNLOCK;

static size_t stack_size_to_alloc_size(size_t stack_size)
{
	return ROUNDUP(stack_size + STACK_CANARY_SIZE + STACK_CHECK_EXTRA,
		       STACK_ALIGNMENT);
}

static vaddr_t stack_end_va_to_top_hard(size_t stack_size, vaddr_t end_va)
{
	size_t l = stack_size_to_alloc_size(stack_size);

	return end_va - l + STACK_CANARY_SIZE;
}

static vaddr_t stack_end_va_to_top_soft(size_t stack_size, vaddr_t end_va)
{
	return stack_end_va_to_top_hard(stack_size, end_va) + STACK_CHECK_EXTRA;
}

static vaddr_t stack_end_va_to_bottom(size_t stack_size __unused,
				      vaddr_t end_va)
{
	return end_va;
}

static uint32_t *stack_end_va_to_start_canary(size_t stack_size, vaddr_t end_va)
{
	return (uint32_t *)(stack_end_va_to_top_hard(stack_size, end_va) -
			    STACK_CANARY_SIZE / 2);
}

static uint32_t *stack_end_va_to_end_canary(size_t stack_size __unused,
					    vaddr_t end_va)
{
	return (uint32_t *)(end_va + STACK_CANARY_SIZE / 2 - sizeof(uint32_t));
}

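/*
 * The helpers above translate a stack's end VA (the "bottom" in the
 * layout diagram) into the other positions: the "hard" top just past
 * the start-canary half, the "soft" top STACK_CHECK_EXTRA bytes beyond
 * that, and the locations of the start and end canary words.
 */
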
static void init_canaries(size_t stack_size, vaddr_t va_end)
{
	uint32_t *canary = NULL;

	assert(va_end);
	canary = stack_end_va_to_start_canary(stack_size, va_end);
	*canary = start_canary_value;
	canary = stack_end_va_to_end_canary(stack_size, va_end);
	*canary = end_canary_value;
}

void thread_init_canaries(void)
{
	vaddr_t va = 0;
	size_t n = 0;

	if (IS_ENABLED(CFG_WITH_STACK_CANARIES)) {
		for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
			if (thread_core_local[n].tmp_stack_va_end) {
				va = thread_core_local[n].tmp_stack_va_end +
				     STACK_TMP_OFFS;
				init_canaries(STACK_TMP_SIZE, va);
			}
			va = thread_core_local[n].abt_stack_va_end;
			if (va)
				init_canaries(STACK_ABT_SIZE, va);
		}
	}

	if (IS_ENABLED(CFG_WITH_STACK_CANARIES) &&
	    !IS_ENABLED(CFG_WITH_PAGER) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		for (n = 0; n < CFG_NUM_THREADS; n++) {
			va = threads[n].stack_va_end;
			if (va)
				init_canaries(STACK_THREAD_SIZE, va);
		}
	}
}

#if defined(CFG_WITH_STACK_CANARIES)
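/*
 * Replace the compile-time canary values with random ones and rewrite
 * all canaries. Exceptions are masked so the update cannot be
 * preempted on this core between the check and the rewrite.
 */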
void thread_update_canaries(void)
{
	uint32_t canary[2] = { };
	uint32_t exceptions = 0;

	plat_get_random_stack_canaries(canary, ARRAY_SIZE(canary),
				       sizeof(canary[0]));

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	thread_check_canaries();

	start_canary_value = canary[0];
	end_canary_value = canary[1];
	thread_init_canaries();

	thread_unmask_exceptions(exceptions);
}
#endif

static void check_stack_canary(const char *stack_name __maybe_unused,
			       size_t n __maybe_unused,
			       size_t stack_size, vaddr_t end_va)
{
	uint32_t *canary = NULL;

	canary = stack_end_va_to_start_canary(stack_size, end_va);
	if (*canary != start_canary_value) {
		EMSG_RAW("Dead canary at start of '%s[%zu]' (%p)",
			 stack_name, n, (void *)canary);
		panic();
	}

	canary = stack_end_va_to_end_canary(stack_size, end_va);
	if (*canary != end_canary_value) {
		EMSG_RAW("Dead canary at end of '%s[%zu]' (%p)",
			 stack_name, n, (void *)canary);
		panic();
	}
}

void thread_check_canaries(void)
{
	vaddr_t va = 0;
	size_t n = 0;

	if (IS_ENABLED(CFG_WITH_STACK_CANARIES)) {
		for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
			if (thread_core_local[n].tmp_stack_va_end) {
				va = thread_core_local[n].tmp_stack_va_end +
				     STACK_TMP_OFFS;
				check_stack_canary("tmp_stack", n,
						   STACK_TMP_SIZE, va);
			}

			va = thread_core_local[n].abt_stack_va_end;
			if (va)
				check_stack_canary("abt_stack", n,
						   STACK_ABT_SIZE, va);
		}
	}

	if (IS_ENABLED(CFG_WITH_STACK_CANARIES) &&
	    !IS_ENABLED(CFG_WITH_PAGER) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		for (n = 0; n < CFG_NUM_THREADS; n++) {
			va = threads[n].stack_va_end;
			if (va)
				check_stack_canary("thread_stack", n,
						   STACK_THREAD_SIZE, va);
		}
	}
}

void thread_lock_global(void)
{
	cpu_spin_lock(&thread_global_lock);
}

void thread_unlock_global(void)
{
	cpu_spin_unlock(&thread_global_lock);
}

static struct thread_core_local * __nostackcheck
get_core_local(unsigned int pos)
{
	/*
	 * Foreign interrupts must be disabled before playing with core_local
	 * since we otherwise may be rescheduled to a different core in the
	 * middle of this function.
	 */
	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);

	assert(pos < CFG_TEE_CORE_NB_CORE);
	return &thread_core_local[pos];
}

struct thread_core_local * __nostackcheck thread_get_core_local(void)
{
	unsigned int pos = get_core_pos();

	return get_core_local(pos);
}

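/*
 * Typical calling pattern, used throughout this file: callers that may
 * run with foreign interrupts enabled mask them around the access:
 *
 *	uint32_t exceptions =
 *		thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
 *	struct thread_core_local *l = thread_get_core_local();
 *	...
 *	thread_unmask_exceptions(exceptions);
 */
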
#ifdef CFG_CORE_DEBUG_CHECK_STACKS
static void print_stack_limits(void)
{
	size_t n = 0;
	vaddr_t __maybe_unused start = 0;
	vaddr_t __maybe_unused end = 0;
	vaddr_t va = 0;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		va = thread_core_local[n].tmp_stack_va_end + STACK_TMP_OFFS;
		start = stack_end_va_to_top_soft(STACK_TMP_SIZE, va);
		end = stack_end_va_to_bottom(STACK_TMP_SIZE, va);
		DMSG("tmp [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);

		va = thread_core_local[n].abt_stack_va_end;
		start = stack_end_va_to_top_soft(STACK_ABT_SIZE, va);
		end = stack_end_va_to_bottom(STACK_ABT_SIZE, va);
		DMSG("abt [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
	}

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		va = threads[n].stack_va_end;
		start = stack_end_va_to_top_soft(STACK_THREAD_SIZE, va);
		end = stack_end_va_to_bottom(STACK_THREAD_SIZE, va);
		DMSG("thr [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
	}
}

static void check_stack_limits(void)
{
	vaddr_t stack_start = 0;
	vaddr_t stack_end = 0;
	/* Any value in the current stack frame will do */
	vaddr_t current_sp = (vaddr_t)&stack_start;

	if (!get_stack_soft_limits(&stack_start, &stack_end))
		panic("Unknown stack limits");
	if (current_sp < stack_start || current_sp > stack_end) {
		EMSG("Stack pointer out of range: 0x%" PRIxVA " not in [0x%"
		     PRIxVA " .. 0x%" PRIxVA "]", current_sp, stack_start,
		     stack_end);
		print_stack_limits();
		panic();
	}
}

static bool * __nostackcheck get_stackcheck_recursion_flag(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	unsigned int pos = get_core_pos();
	struct thread_core_local *l = get_core_local(pos);
	int ct = l->curr_thread;
	bool *p = NULL;

	if (l->flags & (THREAD_CLF_ABORT | THREAD_CLF_TMP))
		p = &l->stackcheck_recursion;
	else if (!l->flags)
		p = &threads[ct].tsd.stackcheck_recursion;

	thread_unmask_exceptions(exceptions);
	return p;
}

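/*
 * __cyg_profile_func_enter() and __cyg_profile_func_exit() are the
 * hooks the compiler emits calls to in every function prologue and
 * epilogue when building with -finstrument-functions. The recursion
 * flag above keeps check_stack_limits() from recursing through its own
 * instrumented callees.
 */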
void __cyg_profile_func_enter(void *this_fn, void *call_site);
void __nostackcheck __cyg_profile_func_enter(void *this_fn __unused,
					     void *call_site __unused)
{
	bool *p = get_stackcheck_recursion_flag();

	assert(p);
	if (*p)
		return;
	*p = true;
	check_stack_limits();
	*p = false;
}

void __cyg_profile_func_exit(void *this_fn, void *call_site);
void __nostackcheck __cyg_profile_func_exit(void *this_fn __unused,
					    void *call_site __unused)
{
}
#else
static void print_stack_limits(void)
{
}
#endif

void thread_init_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	thread_init_threads();

	l->curr_thread = 0;
	threads[0].state = THREAD_STATE_ACTIVE;
}

void __nostackcheck thread_clr_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS);
	assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
	threads[l->curr_thread].state = THREAD_STATE_FREE;
	l->curr_thread = THREAD_ID_INVALID;
}

void __nostackcheck *thread_get_tmp_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();

	/*
	 * Called from assembly when switching to the temporary stack, so flags
	 * need updating
	 */
	l->flags |= THREAD_CLF_TMP;

	return (void *)l->tmp_stack_va_end;
}

vaddr_t thread_stack_start(void)
{
	struct thread_ctx *thr = NULL;
	int ct = thread_get_id_may_fail();

	if (ct == THREAD_ID_INVALID)
		return 0;

	thr = threads + ct;
	return stack_end_va_to_top_soft(STACK_THREAD_SIZE, thr->stack_va_end);
}

size_t thread_stack_size(void)
{
	return STACK_THREAD_SIZE;
}

bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	unsigned int pos = get_core_pos();
	struct thread_core_local *l = get_core_local(pos);
	int ct = l->curr_thread;
	size_t stack_size = 0;
	bool ret = true;
	vaddr_t va = 0;

	if (l->flags & THREAD_CLF_TMP) {
		va = l->tmp_stack_va_end + STACK_TMP_OFFS;
		stack_size = STACK_TMP_SIZE;
	} else if (l->flags & THREAD_CLF_ABORT) {
		va = l->abt_stack_va_end;
		stack_size = STACK_ABT_SIZE;
	} else if (!l->flags && ct >= 0 && ct < CFG_NUM_THREADS) {
		va = threads[ct].stack_va_end;
		stack_size = STACK_THREAD_SIZE;
	} else {
		ret = false;
		goto out;
	}

	*end = stack_end_va_to_bottom(stack_size, va);
	if (hard)
		*start = stack_end_va_to_top_hard(stack_size, va);
	else
		*start = stack_end_va_to_top_soft(stack_size, va);
out:
	thread_unmask_exceptions(exceptions);
	return ret;
}

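/*
 * check_stack_limits() above reaches this function via
 * get_stack_soft_limits(), presumably a thin inline wrapper around
 * get_stack_limits(start, end, false) in the thread API header.
 */
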
bool thread_is_from_abort_mode(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (l->flags >> THREAD_CLF_SAVED_SHIFT) & THREAD_CLF_ABORT;
}

/*
 * This function should always be accurate, but it might be possible to
 * implement a more efficient version of it depending on the CPU
 * architecture.
 */
bool __weak thread_is_in_normal_mode(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	bool ret = false;

	/*
	 * If any bit in l->flags aside from THREAD_CLF_TMP is set we're
	 * handling some exception.
	 */
	ret = (l->curr_thread != THREAD_ID_INVALID) &&
	      !(l->flags & ~THREAD_CLF_TMP);
	thread_unmask_exceptions(exceptions);

	return ret;
}

short int __noprof thread_get_id_may_fail(void)
{
	/*
	 * thread_get_core_local() requires foreign interrupts to be disabled
	 */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	short int ct = l->curr_thread;

	thread_unmask_exceptions(exceptions);
	return ct;
}

short int __noprof thread_get_id(void)
{
	short int ct = thread_get_id_may_fail();

	/* Thread ID has to fit in a short int */
	COMPILE_TIME_ASSERT(CFG_NUM_THREADS <= SHRT_MAX);
	assert(ct >= 0 && ct < CFG_NUM_THREADS);
	return ct;
}

#ifdef CFG_WITH_PAGER
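/*
 * With the pager, each thread stack is backed by a locked paged fobj
 * and preceded by one unmapped guard page, so overflowing the stack
 * faults instead of silently corrupting a neighbouring allocation.
 */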
static void init_thread_stacks(void)
{
	size_t n = 0;

	/*
	 * Allocate virtual memory for thread stacks.
	 */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		tee_mm_entry_t *mm = NULL;
		vaddr_t sp = 0;
		size_t num_pages = 0;
		struct fobj *fobj = NULL;

		/* Find vmem for thread stack and its protection gap */
		mm = tee_mm_alloc(&core_virt_mem_pool,
				  SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
		assert(mm);

		/* Claim any physical pages backing this area */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);

		num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE - 1;
		fobj = fobj_locked_paged_alloc(num_pages);

		/* Add the region to the pager */
		tee_pager_add_core_region(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE,
					  PAGED_REGION_TYPE_LOCK, fobj);
		fobj_put(fobj);

		/* Initialize the effective stack */
		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
		asan_tag_access((void *)tee_mm_get_smem(mm), (void *)sp);
		threads[n].stack_va_end = sp;
	}
}
#else
static void init_thread_stacks(void)
{
	vaddr_t va = 0;
	size_t n = 0;

	/* Assign the thread stacks */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		va = GET_STACK_BOTTOM(stack_thread, n);
		threads[n].stack_va_end = va;
		if (IS_ENABLED(CFG_WITH_STACK_CANARIES))
			init_canaries(STACK_THREAD_SIZE, va);
	}
}
#endif /*CFG_WITH_PAGER*/

void thread_init_threads(void)
{
	size_t n = 0;

	init_thread_stacks();
	print_stack_limits();
	pgt_init();

	mutex_lockdep_init();

	for (n = 0; n < CFG_NUM_THREADS; n++)
		TAILQ_INIT(&threads[n].tsd.sess_stack);
}

vaddr_t __nostackcheck thread_get_abt_stack(void)
{
	return GET_STACK_BOTTOM(stack_abt, get_core_pos());
}

#ifdef CFG_BOOT_INIT_THREAD_CORE_LOCAL0
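/*
 * The loop below starts at core 1: with CFG_BOOT_INIT_THREAD_CORE_LOCAL0
 * the entry for core 0 is assumed to have been initialized earlier
 * during boot.
 */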
void thread_init_thread_core_local(void)
{
	struct thread_core_local *tcl = thread_core_local;
	vaddr_t va = 0;
	size_t n = 0;

	for (n = 1; n < CFG_TEE_CORE_NB_CORE; n++) {
		tcl[n].curr_thread = THREAD_ID_INVALID;
		tcl[n].flags = THREAD_CLF_TMP;

		va = GET_STACK_BOTTOM(stack_tmp, n);
		tcl[n].tmp_stack_va_end = va - STACK_TMP_OFFS;
		if (IS_ENABLED(CFG_WITH_STACK_CANARIES))
			init_canaries(STACK_TMP_SIZE, va);
		va = GET_STACK_BOTTOM(stack_abt, n);
		tcl[n].abt_stack_va_end = va;
		if (IS_ENABLED(CFG_WITH_STACK_CANARIES))
			init_canaries(STACK_ABT_SIZE, va);
	}
}
#else
void __nostackcheck thread_init_thread_core_local(void)
{
	size_t n = 0;
	struct thread_core_local *tcl = thread_core_local;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		tcl[n].curr_thread = THREAD_ID_INVALID;
		tcl[n].flags = THREAD_CLF_TMP;
	}
	tcl[0].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, 0);
}

void __nostackcheck thread_init_core_local_stacks(void)
{
	size_t n = 0;
	struct thread_core_local *tcl = thread_core_local;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		tcl[n].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, n) -
					  STACK_TMP_OFFS;
		tcl[n].abt_stack_va_end = GET_STACK_BOTTOM(stack_abt, n);
	}
}
#endif /*CFG_BOOT_INIT_THREAD_CORE_LOCAL0*/

#if defined(CFG_CORE_PAUTH)
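/*
 * With pointer authentication (FEAT_PAuth) each thread and each core
 * gets its own random PAC key set, so pointers signed in one context
 * cannot be reused in another.
 */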
void thread_init_thread_pauth_keys(void)
{
	size_t n = 0;

	for (n = 0; n < CFG_NUM_THREADS; n++)
		if (crypto_rng_read(&threads[n].keys, sizeof(threads[n].keys)))
			panic("Failed to init thread pauth keys");
}

void thread_init_core_local_pauth_keys(void)
{
	struct thread_core_local *tcl = thread_core_local;
	size_t n = 0;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
		if (crypto_rng_read(&tcl[n].keys, sizeof(tcl[n].keys)))
			panic("Failed to init core local pauth keys");
}
#endif

struct thread_specific_data * __noprof thread_get_tsd(void)
{
	return &threads[thread_get_id()].tsd;
}

struct thread_ctx_regs * __nostackcheck thread_get_ctx_regs(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);
	return &threads[l->curr_thread].regs;
}

void thread_set_foreign_intr(bool enable)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);

	if (enable) {
		threads[l->curr_thread].flags |=
					THREAD_FLAGS_FOREIGN_INTR_ENABLE;
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
	} else {
		/*
		 * No need to disable foreign interrupts here since they're
		 * already disabled above.
		 */
		threads[l->curr_thread].flags &=
					~THREAD_FLAGS_FOREIGN_INTR_ENABLE;
	}
}

void thread_restore_foreign_intr(void)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);

	if (threads[l->curr_thread].flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE)
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
}

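/*
 * Loosely, and assuming the usual semantics of the RPC allocators
 * below: "application" SHM is regular payload memory shared with the
 * normal world client, "kernel private" SHM is only used by the
 * kernels, and "global" SHM is not tied to a single TA session.
 */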
static struct mobj *alloc_shm(enum thread_shm_type shm_type, size_t size)
{
	switch (shm_type) {
	case THREAD_SHM_TYPE_APPLICATION:
		return thread_rpc_alloc_payload(size);
	case THREAD_SHM_TYPE_KERNEL_PRIVATE:
		return thread_rpc_alloc_kernel_payload(size);
	case THREAD_SHM_TYPE_GLOBAL:
		return thread_rpc_alloc_global_payload(size);
	default:
		return NULL;
	}
}

static void clear_shm_cache_entry(struct thread_shm_cache_entry *ce)
{
	if (ce->mobj) {
		switch (ce->type) {
		case THREAD_SHM_TYPE_APPLICATION:
			thread_rpc_free_payload(ce->mobj);
			break;
		case THREAD_SHM_TYPE_KERNEL_PRIVATE:
			thread_rpc_free_kernel_payload(ce->mobj);
			break;
		case THREAD_SHM_TYPE_GLOBAL:
			thread_rpc_free_global_payload(ce->mobj);
			break;
		default:
			assert(0); /* "can't happen" */
			break;
		}
	}
	ce->mobj = NULL;
	ce->size = 0;
}

static struct thread_shm_cache_entry *
get_shm_cache_entry(enum thread_shm_cache_user user)
{
	struct thread_shm_cache *cache = &threads[thread_get_id()].shm_cache;
	struct thread_shm_cache_entry *ce = NULL;

	SLIST_FOREACH(ce, cache, link)
		if (ce->user == user)
			return ce;

	ce = calloc(1, sizeof(*ce));
	if (ce) {
		ce->user = user;
		SLIST_INSERT_HEAD(cache, ce, link);
	}

	return ce;
}

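/*
 * Usage sketch (hypothetical caller, enum values assumed to come from
 * the thread API header):
 *
 *	struct mobj *mobj = NULL;
 *	void *va = thread_rpc_shm_cache_alloc(THREAD_SHM_CACHE_USER_FS,
 *					      THREAD_SHM_TYPE_APPLICATION,
 *					      size, &mobj);
 *	if (!va)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *
 * The buffer stays cached on the current thread until
 * thread_rpc_shm_cache_clear() is called on its cache.
 */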
void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user,
				 enum thread_shm_type shm_type,
				 size_t size, struct mobj **mobj)
{
	struct thread_shm_cache_entry *ce = NULL;
	size_t sz = size;
	paddr_t p = 0;
	void *va = NULL;

	if (!size)
		return NULL;

	ce = get_shm_cache_entry(user);
	if (!ce)
		return NULL;

	/*
	 * Always allocate in page chunks as normal world allocates payload
	 * memory as complete pages.
	 */
	sz = ROUNDUP(size, SMALL_PAGE_SIZE);

	if (ce->type != shm_type || sz > ce->size) {
		clear_shm_cache_entry(ce);

		ce->mobj = alloc_shm(shm_type, sz);
		if (!ce->mobj)
			return NULL;

		if (mobj_get_pa(ce->mobj, 0, 0, &p))
			goto err;

		if (!IS_ALIGNED_WITH_TYPE(p, uint64_t))
			goto err;

		va = mobj_get_va(ce->mobj, 0, sz);
		if (!va)
			goto err;

		ce->size = sz;
		ce->type = shm_type;
	} else {
		va = mobj_get_va(ce->mobj, 0, sz);
		if (!va)
			goto err;
	}
	*mobj = ce->mobj;

	return va;
err:
	clear_shm_cache_entry(ce);
	return NULL;
}

void thread_rpc_shm_cache_clear(struct thread_shm_cache *cache)
{
	while (true) {
		struct thread_shm_cache_entry *ce = SLIST_FIRST(cache);

		if (!ce)
			break;
		SLIST_REMOVE_HEAD(cache, link);
		clear_shm_cache_entry(ce);
		free(ce);
	}
}