xref: /optee_os/core/arch/arm/kernel/thread.c (revision 0d77037f5943c86560dd7c8f473fbc6a55d60a34)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <platform_config.h>
8 
9 #include <arm.h>
10 #include <assert.h>
11 #include <io.h>
12 #include <keep.h>
13 #include <kernel/asan.h>
14 #include <kernel/linker.h>
15 #include <kernel/lockdep.h>
16 #include <kernel/misc.h>
17 #include <kernel/panic.h>
18 #include <kernel/spinlock.h>
19 #include <kernel/tee_ta_manager.h>
20 #include <kernel/thread_defs.h>
21 #include <kernel/thread.h>
22 #include <kernel/virtualization.h>
23 #include <mm/core_memprot.h>
24 #include <mm/mobj.h>
25 #include <mm/tee_mm.h>
26 #include <mm/tee_mmu.h>
27 #include <mm/tee_pager.h>
28 #include <smccc.h>
29 #include <sm/sm.h>
30 #include <trace.h>
31 #include <util.h>
32 
33 #include "thread_private.h"
34 
35 #ifdef CFG_WITH_ARM_TRUSTED_FW
36 #define STACK_TMP_OFFS		0
37 #else
38 #define STACK_TMP_OFFS		SM_STACK_TMP_RESERVE_SIZE
39 #endif
40 
41 
42 #ifdef ARM32
43 #ifdef CFG_CORE_SANITIZE_KADDRESS
44 #define STACK_TMP_SIZE		(3072 + STACK_TMP_OFFS)
45 #else
46 #define STACK_TMP_SIZE		(2048 + STACK_TMP_OFFS)
47 #endif
48 #define STACK_THREAD_SIZE	8192
49 
50 #if defined(CFG_CORE_SANITIZE_KADDRESS) || defined(__clang__)
51 #define STACK_ABT_SIZE		3072
52 #else
53 #define STACK_ABT_SIZE		2048
54 #endif
55 
56 #endif /*ARM32*/
57 
58 #ifdef ARM64
59 #define STACK_TMP_SIZE		(2048 + STACK_TMP_OFFS)
60 #define STACK_THREAD_SIZE	8192
61 
62 #if TRACE_LEVEL > 0
63 #define STACK_ABT_SIZE		3072
64 #else
65 #define STACK_ABT_SIZE		1024
66 #endif
67 #endif /*ARM64*/
68 
69 struct thread_ctx threads[CFG_NUM_THREADS];
70 
71 struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE] __nex_bss;
72 
73 #ifdef CFG_WITH_STACK_CANARIES
74 #ifdef ARM32
75 #define STACK_CANARY_SIZE	(4 * sizeof(uint32_t))
76 #endif
77 #ifdef ARM64
78 #define STACK_CANARY_SIZE	(8 * sizeof(uint32_t))
79 #endif
80 #define START_CANARY_VALUE	0xdededede
81 #define END_CANARY_VALUE	0xabababab
82 #define GET_START_CANARY(name, stack_num) name[stack_num][0]
83 #define GET_END_CANARY(name, stack_num) \
84 	name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1]
85 #else
86 #define STACK_CANARY_SIZE	0
87 #endif
88 
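/*
 * Declares an array of 'num_stacks' stacks of (at least) 'stack_size' bytes
 * each, rounded up to STACK_ALIGNMENT and placed in a dedicated
 * .nozi_stack.<name> section. With CFG_WITH_STACK_CANARIES the first and
 * last 32-bit words of each stack hold the canary values written by
 * init_canaries().
 */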
89 #define DECLARE_STACK(name, num_stacks, stack_size, linkage) \
90 linkage uint32_t name[num_stacks] \
91 		[ROUNDUP(stack_size + STACK_CANARY_SIZE, STACK_ALIGNMENT) / \
92 		sizeof(uint32_t)] \
93 		__attribute__((section(".nozi_stack." # name), \
94 			       aligned(STACK_ALIGNMENT)))
95 
96 #define STACK_SIZE(stack) (sizeof(stack) - STACK_CANARY_SIZE / 2)
97 
98 #define GET_STACK(stack) \
99 	((vaddr_t)(stack) + STACK_SIZE(stack))
100 
101 DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE, static);
102 DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static);
103 #ifndef CFG_WITH_PAGER
104 DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE, static);
105 #endif
106 
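/*
 * stack_tmp_export is the initial stack pointer value inside the first
 * core's temporary stack (excluding the end canary and the STACK_TMP_OFFS
 * reserve) and stack_tmp_stride is the distance between two consecutive
 * cores' temporary stacks. Both are exported so the boot/entry assembly can
 * set up the per-core temporary stacks.
 */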
107 const void *stack_tmp_export = (uint8_t *)stack_tmp + sizeof(stack_tmp[0]) -
108 			       (STACK_TMP_OFFS + STACK_CANARY_SIZE / 2);
109 const uint32_t stack_tmp_stride = sizeof(stack_tmp[0]);
110 
111 /*
112  * This stack setup information is required by secondary boot cores before they
113  * each locally enable the pager (the MMU), hence it is kept in pager sections.
114  */
115 KEEP_PAGER(stack_tmp_export);
116 KEEP_PAGER(stack_tmp_stride);
117 
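/*
 * Power management handler callbacks registered by the platform through
 * thread_init_primary() and invoked on events such as CPU on/off,
 * suspend/resume and system off/reset.
 */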
118 thread_pm_handler_t thread_cpu_on_handler_ptr __nex_bss;
119 thread_pm_handler_t thread_cpu_off_handler_ptr __nex_bss;
120 thread_pm_handler_t thread_cpu_suspend_handler_ptr __nex_bss;
121 thread_pm_handler_t thread_cpu_resume_handler_ptr __nex_bss;
122 thread_pm_handler_t thread_system_off_handler_ptr __nex_bss;
123 thread_pm_handler_t thread_system_reset_handler_ptr __nex_bss;
124 
125 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0
126 static vaddr_t thread_user_kcode_va __nex_bss;
127 long thread_user_kcode_offset __nex_bss;
128 static size_t thread_user_kcode_size __nex_bss;
129 #endif
130 
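/*
 * With CFG_CORE_UNMAP_CORE_AT_EL0 and the Spectre (CVE-2017-5715) workaround
 * on ARM64, this page-aligned area (sized to hold thread_core_local) is also
 * mapped into the user address space (see thread_get_user_kdata()).
 * thread_user_kdata_sp_offset is subtracted from SP when entering EL0 so
 * that SP points into this page while the rest of the core is unmapped.
 */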
131 #if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
132 	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
133 long thread_user_kdata_sp_offset __nex_bss;
134 static uint8_t thread_user_kdata_page[
135 	ROUNDUP(sizeof(thread_core_local), SMALL_PAGE_SIZE)]
136 	__aligned(SMALL_PAGE_SIZE)
137 #ifndef CFG_VIRTUALIZATION
138 	__section(".nozi.kdata_page");
139 #else
140 	__section(".nex_nozi.kdata_page");
141 #endif
142 #endif
143 
144 static unsigned int thread_global_lock __nex_bss = SPINLOCK_UNLOCK;
145 
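/* Write the start/end canary values into each declared stack */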
146 static void init_canaries(void)
147 {
148 #ifdef CFG_WITH_STACK_CANARIES
149 	size_t n;
150 #define INIT_CANARY(name)						\
151 	for (n = 0; n < ARRAY_SIZE(name); n++) {			\
152 		uint32_t *start_canary = &GET_START_CANARY(name, n);	\
153 		uint32_t *end_canary = &GET_END_CANARY(name, n);	\
154 									\
155 		*start_canary = START_CANARY_VALUE;			\
156 		*end_canary = END_CANARY_VALUE;				\
157 		DMSG("#Stack canaries for %s[%zu] with top at %p",	\
158 			#name, n, (void *)(end_canary - 1));		\
159 		DMSG("watch *%p", (void *)end_canary);			\
160 	}
161 
162 	INIT_CANARY(stack_tmp);
163 	INIT_CANARY(stack_abt);
164 #if !defined(CFG_WITH_PAGER) && !defined(CFG_VIRTUALIZATION)
165 	INIT_CANARY(stack_thread);
166 #endif
167 #endif/*CFG_WITH_STACK_CANARIES*/
168 }
169 
170 #define CANARY_DIED(stack, loc, n) \
171 	do { \
172 		EMSG_RAW("Dead canary at %s of '%s[%zu]'", #loc, #stack, n); \
173 		panic(); \
174 	} while (0)
175 
176 void thread_check_canaries(void)
177 {
178 #ifdef CFG_WITH_STACK_CANARIES
179 	size_t n;
180 
181 	for (n = 0; n < ARRAY_SIZE(stack_tmp); n++) {
182 		if (GET_START_CANARY(stack_tmp, n) != START_CANARY_VALUE)
183 			CANARY_DIED(stack_tmp, start, n);
184 		if (GET_END_CANARY(stack_tmp, n) != END_CANARY_VALUE)
185 			CANARY_DIED(stack_tmp, end, n);
186 	}
187 
188 	for (n = 0; n < ARRAY_SIZE(stack_abt); n++) {
189 		if (GET_START_CANARY(stack_abt, n) != START_CANARY_VALUE)
190 			CANARY_DIED(stack_abt, start, n);
191 		if (GET_END_CANARY(stack_abt, n) != END_CANARY_VALUE)
192 			CANARY_DIED(stack_abt, end, n);
193 
194 	}
195 #if !defined(CFG_WITH_PAGER) && !defined(CFG_VIRTUALIZATION)
196 	for (n = 0; n < ARRAY_SIZE(stack_thread); n++) {
197 		if (GET_START_CANARY(stack_thread, n) != START_CANARY_VALUE)
198 			CANARY_DIED(stack_thread, start, n);
199 		if (GET_END_CANARY(stack_thread, n) != END_CANARY_VALUE)
200 			CANARY_DIED(stack_thread, end, n);
201 	}
202 #endif
203 #endif/*CFG_WITH_STACK_CANARIES*/
204 }
205 
206 void thread_lock_global(void)
207 {
208 	cpu_spin_lock(&thread_global_lock);
209 }
210 
211 void thread_unlock_global(void)
212 {
213 	cpu_spin_unlock(&thread_global_lock);
214 }
215 
216 #ifdef ARM32
217 uint32_t thread_get_exceptions(void)
218 {
219 	uint32_t cpsr = read_cpsr();
220 
221 	return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
222 }
223 
224 void thread_set_exceptions(uint32_t exceptions)
225 {
226 	uint32_t cpsr = read_cpsr();
227 
228 	/* Foreign interrupts must not be unmasked while holding a spinlock */
229 	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
230 		assert_have_no_spinlock();
231 
232 	cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
233 	cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);
234 	write_cpsr(cpsr);
235 }
236 #endif /*ARM32*/
237 
238 #ifdef ARM64
239 uint32_t thread_get_exceptions(void)
240 {
241 	uint32_t daif = read_daif();
242 
243 	return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL;
244 }
245 
246 void thread_set_exceptions(uint32_t exceptions)
247 {
248 	uint32_t daif = read_daif();
249 
250 	/* Foreign interrupts must not be unmasked while holding a spinlock */
251 	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
252 		assert_have_no_spinlock();
253 
254 	daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
255 	daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT);
256 	write_daif(daif);
257 }
258 #endif /*ARM64*/
259 
260 uint32_t thread_mask_exceptions(uint32_t exceptions)
261 {
262 	uint32_t state = thread_get_exceptions();
263 
264 	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
265 	return state;
266 }
267 
268 void thread_unmask_exceptions(uint32_t state)
269 {
270 	thread_set_exceptions(state & THREAD_EXCP_ALL);
271 }
272 
273 
274 struct thread_core_local *thread_get_core_local(void)
275 {
276 	uint32_t cpu_id = get_core_pos();
277 
278 	/*
279 	 * Foreign interrupts must be disabled before playing with core_local
280 	 * since we otherwise may be rescheduled to a different core in the
281 	 * middle of this function.
282 	 */
283 	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
284 
285 	assert(cpu_id < CFG_TEE_CORE_NB_CORE);
286 	return &thread_core_local[cpu_id];
287 }
288 
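/*
 * Arm lazy saving of the non-secure VFP state when entering a thread. The
 * register bank is only saved for real if secure world enables VFP later,
 * see thread_kernel_enable_vfp() and thread_user_enable_vfp().
 */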
289 static void thread_lazy_save_ns_vfp(void)
290 {
291 #ifdef CFG_WITH_VFP
292 	struct thread_ctx *thr = threads + thread_get_id();
293 
294 	thr->vfp_state.ns_saved = false;
295 	vfp_lazy_save_state_init(&thr->vfp_state.ns);
296 #endif /*CFG_WITH_VFP*/
297 }
298 
299 static void thread_lazy_restore_ns_vfp(void)
300 {
301 #ifdef CFG_WITH_VFP
302 	struct thread_ctx *thr = threads + thread_get_id();
303 	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
304 
305 	assert(!thr->vfp_state.sec_lazy_saved && !thr->vfp_state.sec_saved);
306 
307 	if (tuv && tuv->lazy_saved && !tuv->saved) {
308 		vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/);
309 		tuv->saved = true;
310 	}
311 
312 	vfp_lazy_restore_state(&thr->vfp_state.ns, thr->vfp_state.ns_saved);
313 	thr->vfp_state.ns_saved = false;
314 #endif /*CFG_WITH_VFP*/
315 }
316 
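/*
 * Prepare the initial register context for a standard SMC call: the thread
 * starts at thread_std_smc_entry() with the SMC arguments in the first four
 * argument registers, foreign interrupts and asynchronous aborts masked and
 * native interrupts unmasked.
 */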
317 #ifdef ARM32
318 static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
319 		      uint32_t a2, uint32_t a3)
320 {
321 	thread->regs.pc = (uint32_t)thread_std_smc_entry;
322 
323 	/*
324 	 * Stdcalls start in SVC mode with foreign interrupts masked, asynchronous
325 	 * aborts masked and native interrupts unmasked.
326 	 */
327 	thread->regs.cpsr = read_cpsr() & ARM32_CPSR_E;
328 	thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_A |
329 			(THREAD_EXCP_FOREIGN_INTR << ARM32_CPSR_F_SHIFT);
330 	/* Enable thumb mode if it's a thumb instruction */
331 	if (thread->regs.pc & 1)
332 		thread->regs.cpsr |= CPSR_T;
333 	/* Reinitialize stack pointer */
334 	thread->regs.svc_sp = thread->stack_va_end;
335 
336 	/*
337 	 * Copy arguments into the context. This will make the arguments
338 	 * appear in r0-r3 (r4-r7 are cleared) when the thread is started.
339 	 */
340 	thread->regs.r0 = a0;
341 	thread->regs.r1 = a1;
342 	thread->regs.r2 = a2;
343 	thread->regs.r3 = a3;
344 	thread->regs.r4 = 0;
345 	thread->regs.r5 = 0;
346 	thread->regs.r6 = 0;
347 	thread->regs.r7 = 0;
348 }
349 #endif /*ARM32*/
350 
351 #ifdef ARM64
352 static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
353 		      uint32_t a2, uint32_t a3)
354 {
355 	thread->regs.pc = (uint64_t)thread_std_smc_entry;
356 
357 	/*
358 	 * Stdcalls start at EL1 with foreign interrupts masked, asynchronous
359 	 * aborts masked and native interrupts unmasked.
360 	 */
361 	thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
362 				THREAD_EXCP_FOREIGN_INTR | DAIFBIT_ABT);
363 	/* Reinitialize stack pointer */
364 	thread->regs.sp = thread->stack_va_end;
365 
366 	/*
367 	 * Copy arguments into the context. This will make the arguments
368 	 * appear in x0-x3 (x4-x7 are cleared) when the thread is started.
369 	 */
370 	thread->regs.x[0] = a0;
371 	thread->regs.x[1] = a1;
372 	thread->regs.x[2] = a2;
373 	thread->regs.x[3] = a3;
374 	thread->regs.x[4] = 0;
375 	thread->regs.x[5] = 0;
376 	thread->regs.x[6] = 0;
377 	thread->regs.x[7] = 0;
378 
379 	/* Set up the frame pointer as per the AArch64 AAPCS */
380 	thread->regs.x[29] = 0;
381 }
382 #endif /*ARM64*/
383 
384 void thread_init_boot_thread(void)
385 {
386 	struct thread_core_local *l = thread_get_core_local();
387 
388 	thread_init_threads();
389 
390 	l->curr_thread = 0;
391 	threads[0].state = THREAD_STATE_ACTIVE;
392 }
393 
394 void thread_clr_boot_thread(void)
395 {
396 	struct thread_core_local *l = thread_get_core_local();
397 
398 	assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS);
399 	assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
400 	threads[l->curr_thread].state = THREAD_STATE_FREE;
401 	l->curr_thread = -1;
402 }
403 
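/*
 * Pick a free thread context, mark it active and start executing the
 * standard SMC call in it. If no free thread is found the function returns
 * to the caller, which is expected to report the thread limit to the normal
 * world.
 */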
404 void thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3)
405 {
406 	size_t n;
407 	struct thread_core_local *l = thread_get_core_local();
408 	bool found_thread = false;
409 
410 	assert(l->curr_thread == -1);
411 
412 	thread_lock_global();
413 
414 	for (n = 0; n < CFG_NUM_THREADS; n++) {
415 		if (threads[n].state == THREAD_STATE_FREE) {
416 			threads[n].state = THREAD_STATE_ACTIVE;
417 			found_thread = true;
418 			break;
419 		}
420 	}
421 
422 	thread_unlock_global();
423 
424 	if (!found_thread)
425 		return;
426 
427 	l->curr_thread = n;
428 
429 	threads[n].flags = 0;
430 	init_regs(threads + n, a0, a1, a2, a3);
431 
432 	thread_lazy_save_ns_vfp();
433 	thread_resume(&threads[n].regs);
434 	/*NOTREACHED*/
435 	panic();
436 }
437 
438 #ifdef ARM32
439 static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
440 			  uint32_t a1, uint32_t a2, uint32_t a3)
441 {
442 	/*
443 	 * Update the values returned from the RPC; they will appear in
444 	 * r0-r3 when the thread is resumed.
445 	 */
446 	regs->r0 = a0;
447 	regs->r1 = a1;
448 	regs->r2 = a2;
449 	regs->r3 = a3;
450 }
451 #endif /*ARM32*/
452 
453 #ifdef ARM64
454 static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
455 			  uint32_t a1, uint32_t a2, uint32_t a3)
456 {
457 	/*
458 	 * Update the values returned from the RPC; they will appear in
459 	 * x0-x3 when the thread is resumed.
460 	 */
461 	regs->x[0] = a0;
462 	regs->x[1] = a1;
463 	regs->x[2] = a2;
464 	regs->x[3] = a3;
465 }
466 #endif /*ARM64*/
467 
468 #ifdef ARM32
469 static bool is_from_user(uint32_t cpsr)
470 {
471 	return (cpsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
472 }
473 #endif
474 
475 #ifdef ARM64
476 static bool is_from_user(uint32_t cpsr)
477 {
478 	if (cpsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
479 		return true;
480 	if (((cpsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
481 	     SPSR_64_MODE_EL0)
482 		return true;
483 	return false;
484 }
485 #endif
486 
487 #ifdef CFG_SYSCALL_FTRACE
488 static void __noprof ftrace_suspend(void)
489 {
490 	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);
491 
492 	if (!s)
493 		return;
494 
495 	if (s->fbuf)
496 		s->fbuf->syscall_trace_suspended = true;
497 }
498 
499 static void __noprof ftrace_resume(void)
500 {
501 	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);
502 
503 	if (!s)
504 		return;
505 
506 	if (s->fbuf)
507 		s->fbuf->syscall_trace_suspended = false;
508 }
509 #else
510 static void __noprof ftrace_suspend(void)
511 {
512 }
513 
514 static void __noprof ftrace_resume(void)
515 {
516 }
517 #endif
518 
519 static bool is_user_mode(struct thread_ctx_regs *regs)
520 {
521 	return is_from_user((uint32_t)regs->cpsr);
522 }
523 
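/*
 * Resume a thread that was suspended waiting for an RPC: restore its user
 * mapping and VFP state and, when THREAD_FLAGS_COPY_ARGS_ON_RETURN is set,
 * copy the RPC return values into the first four argument registers.
 */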
524 void thread_resume_from_rpc(uint32_t thread_id, uint32_t a0, uint32_t a1,
525 			    uint32_t a2, uint32_t a3)
526 {
527 	size_t n = thread_id;
528 	struct thread_core_local *l = thread_get_core_local();
529 	bool found_thread = false;
530 
531 	assert(l->curr_thread == -1);
532 
533 	thread_lock_global();
534 
535 	if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) {
536 		threads[n].state = THREAD_STATE_ACTIVE;
537 		found_thread = true;
538 	}
539 
540 	thread_unlock_global();
541 
542 	if (!found_thread)
543 		return;
544 
545 	l->curr_thread = n;
546 
547 	if (threads[n].have_user_map) {
548 		core_mmu_set_user_map(&threads[n].user_map);
549 		if (threads[n].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
550 			tee_ta_ftrace_update_times_resume();
551 	}
552 
553 	if (is_user_mode(&threads[n].regs))
554 		tee_ta_update_session_utime_resume();
555 
556 	/*
557 	 * A return from an RPC that requested service of a foreign interrupt
558 	 * must not take parameters from the non-secure world.
559 	 */
560 	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
561 		copy_a0_to_a3(&threads[n].regs, a0, a1, a2, a3);
562 		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
563 	}
564 
565 	thread_lazy_save_ns_vfp();
566 
567 	if (threads[n].have_user_map)
568 		ftrace_resume();
569 
570 	thread_resume(&threads[n].regs);
571 	/*NOTREACHED*/
572 	panic();
573 }
574 
575 void *thread_get_tmp_sp(void)
576 {
577 	struct thread_core_local *l = thread_get_core_local();
578 
579 	return (void *)l->tmp_stack_va_end;
580 }
581 
582 #ifdef ARM64
583 vaddr_t thread_get_saved_thread_sp(void)
584 {
585 	struct thread_core_local *l = thread_get_core_local();
586 	int ct = l->curr_thread;
587 
588 	assert(ct != -1);
589 	return threads[ct].kern_sp;
590 }
591 #endif /*ARM64*/
592 
593 vaddr_t thread_stack_start(void)
594 {
595 	struct thread_ctx *thr;
596 	int ct = thread_get_id_may_fail();
597 
598 	if (ct == -1)
599 		return 0;
600 
601 	thr = threads + ct;
602 	return thr->stack_va_end - STACK_THREAD_SIZE;
603 }
604 
605 size_t thread_stack_size(void)
606 {
607 	return STACK_THREAD_SIZE;
608 }
609 
610 bool thread_is_from_abort_mode(void)
611 {
612 	struct thread_core_local *l = thread_get_core_local();
613 
614 	return (l->flags >> THREAD_CLF_SAVED_SHIFT) & THREAD_CLF_ABORT;
615 }
616 
617 #ifdef ARM32
618 bool thread_is_in_normal_mode(void)
619 {
620 	return (read_cpsr() & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_SVC;
621 }
622 #endif
623 
624 #ifdef ARM64
625 bool thread_is_in_normal_mode(void)
626 {
627 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
628 	struct thread_core_local *l = thread_get_core_local();
629 	bool ret;
630 
631 	/* If any bit in l->flags is set we're handling some exception. */
632 	ret = !l->flags;
633 	thread_unmask_exceptions(exceptions);
634 
635 	return ret;
636 }
637 #endif
638 
639 void thread_state_free(void)
640 {
641 	struct thread_core_local *l = thread_get_core_local();
642 	int ct = l->curr_thread;
643 
644 	assert(ct != -1);
645 
646 	thread_lazy_restore_ns_vfp();
647 	tee_pager_release_phys(
648 		(void *)(threads[ct].stack_va_end - STACK_THREAD_SIZE),
649 		STACK_THREAD_SIZE);
650 
651 	thread_lock_global();
652 
653 	assert(threads[ct].state == THREAD_STATE_ACTIVE);
654 	threads[ct].state = THREAD_STATE_FREE;
655 	threads[ct].flags = 0;
656 	l->curr_thread = -1;
657 
658 #ifdef CFG_VIRTUALIZATION
659 	virt_unset_guest();
660 #endif
661 	thread_unlock_global();
662 }
663 
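/*
 * With the pager enabled, release the physical pages backing the currently
 * unused (lower) part of the suspended thread's kernel stack.
 */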
664 #ifdef CFG_WITH_PAGER
665 static void release_unused_kernel_stack(struct thread_ctx *thr,
666 					uint32_t cpsr __maybe_unused)
667 {
668 #ifdef ARM64
669 	/*
670 	 * If we're from user mode then thr->regs.sp is the saved user
671 	 * stack pointer and thr->kern_sp holds the last kernel stack
672 	 * pointer. But if we're from kernel mode then thr->kern_sp isn't
673 	 * up to date so we need to read from thr->regs.sp instead.
674 	 */
675 	vaddr_t sp = is_from_user(cpsr) ? thr->kern_sp : thr->regs.sp;
676 #else
677 	vaddr_t sp = thr->regs.svc_sp;
678 #endif
679 	vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE;
680 	size_t len = sp - base;
681 
682 	tee_pager_release_phys((void *)base, len);
683 }
684 #else
685 static void release_unused_kernel_stack(struct thread_ctx *thr __unused,
686 					uint32_t cpsr __unused)
687 {
688 }
689 #endif
690 
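/*
 * Suspend the current thread: record cpsr/pc and the user mapping, mark the
 * thread THREAD_STATE_SUSPENDED, detach it from this core and return its
 * index so it can be resumed later with thread_resume_from_rpc().
 */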
691 int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
692 {
693 	struct thread_core_local *l = thread_get_core_local();
694 	int ct = l->curr_thread;
695 
696 	assert(ct != -1);
697 
698 	if (core_mmu_user_mapping_is_active())
699 		ftrace_suspend();
700 
701 	thread_check_canaries();
702 
703 	release_unused_kernel_stack(threads + ct, cpsr);
704 
705 	if (is_from_user(cpsr)) {
706 		thread_user_save_vfp();
707 		tee_ta_update_session_utime_suspend();
708 		tee_ta_gprof_sample_pc(pc);
709 	}
710 	thread_lazy_restore_ns_vfp();
711 
712 	thread_lock_global();
713 
714 	assert(threads[ct].state == THREAD_STATE_ACTIVE);
715 	threads[ct].flags |= flags;
716 	threads[ct].regs.cpsr = cpsr;
717 	threads[ct].regs.pc = pc;
718 	threads[ct].state = THREAD_STATE_SUSPENDED;
719 
720 	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
721 	if (threads[ct].have_user_map) {
722 		if (threads[ct].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
723 			tee_ta_ftrace_update_times_suspend();
724 		core_mmu_get_user_map(&threads[ct].user_map);
725 		core_mmu_set_user_map(NULL);
726 	}
727 
728 	l->curr_thread = -1;
729 
730 #ifdef CFG_VIRTUALIZATION
731 	virt_unset_guest();
732 #endif
733 
734 	thread_unlock_global();
735 
736 	return ct;
737 }
738 
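/*
 * Record the per-core temporary and abort stacks. On ARM32 the IRQ and FIQ
 * banked stack pointers are set to the temporary stack, while the abort and
 * undef banked stack pointers are pointed at the thread_core_local structure
 * so the exception entry code can find the actual stacks from there.
 */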
739 #ifdef ARM32
740 static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
741 {
742 	l->tmp_stack_va_end = sp;
743 	thread_set_irq_sp(sp);
744 	thread_set_fiq_sp(sp);
745 }
746 
747 static void set_abt_stack(struct thread_core_local *l, vaddr_t sp)
748 {
749 	l->abt_stack_va_end = sp;
750 	thread_set_abt_sp((vaddr_t)l);
751 	thread_set_und_sp((vaddr_t)l);
752 }
753 #endif /*ARM32*/
754 
755 #ifdef ARM64
756 static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
757 {
758 	/*
759 	 * We're already using the tmp stack when this function is called
760 	 * so there's no need to assign it to any stack pointer. However,
761 	 * we'll need to restore it at different times so store it here.
762 	 */
763 	l->tmp_stack_va_end = sp;
764 }
765 
766 static void set_abt_stack(struct thread_core_local *l, vaddr_t sp)
767 {
768 	l->abt_stack_va_end = sp;
769 }
770 #endif /*ARM64*/
771 
772 bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
773 {
774 	if (thread_id >= CFG_NUM_THREADS)
775 		return false;
776 	threads[thread_id].stack_va_end = sp;
777 	return true;
778 }
779 
780 int thread_get_id_may_fail(void)
781 {
782 	/*
783 	 * thread_get_core_local() requires foreign interrupts to be disabled
784 	 */
785 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
786 	struct thread_core_local *l = thread_get_core_local();
787 	int ct = l->curr_thread;
788 
789 	thread_unmask_exceptions(exceptions);
790 	return ct;
791 }
792 
793 int thread_get_id(void)
794 {
795 	int ct = thread_get_id_may_fail();
796 
797 	assert(ct >= 0 && ct < CFG_NUM_THREADS);
798 	return ct;
799 }
800 
801 static void init_handlers(const struct thread_handlers *handlers)
802 {
803 	thread_cpu_on_handler_ptr = handlers->cpu_on;
804 	thread_cpu_off_handler_ptr = handlers->cpu_off;
805 	thread_cpu_suspend_handler_ptr = handlers->cpu_suspend;
806 	thread_cpu_resume_handler_ptr = handlers->cpu_resume;
807 	thread_system_off_handler_ptr = handlers->system_off;
808 	thread_system_reset_handler_ptr = handlers->system_reset;
809 }
810 
811 #ifdef CFG_WITH_PAGER
812 static void init_thread_stacks(void)
813 {
814 	size_t n = 0;
815 
816 	/*
817 	 * Allocate virtual memory for thread stacks.
818 	 */
819 	for (n = 0; n < CFG_NUM_THREADS; n++) {
820 		tee_mm_entry_t *mm = NULL;
821 		vaddr_t sp = 0;
822 		size_t num_pages = 0;
823 		struct fobj *fobj = NULL;
824 
825 		/* Find vmem for thread stack and its protection gap */
826 		mm = tee_mm_alloc(&tee_mm_vcore,
827 				  SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
828 		assert(mm);
829 
830 		/* Claim any physical pages backing this range for the pager */
831 		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
832 				    true);
833 
834 		num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE - 1;
835 		fobj = fobj_locked_paged_alloc(num_pages);
836 
837 		/* Add the area to the pager */
838 		tee_pager_add_core_area(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE,
839 					PAGER_AREA_TYPE_LOCK, fobj);
840 		fobj_put(fobj);
841 
842 		/* Initialize the effective stack */
843 		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
844 		asan_tag_access((void *)tee_mm_get_smem(mm), (void *)sp);
845 		if (!thread_init_stack(n, sp))
846 			panic("init stack failed");
847 	}
848 }
849 #else
850 static void init_thread_stacks(void)
851 {
852 	size_t n;
853 
854 	/* Assign the thread stacks */
855 	for (n = 0; n < CFG_NUM_THREADS; n++) {
856 		if (!thread_init_stack(n, GET_STACK(stack_thread[n])))
857 			panic("thread_init_stack failed");
858 	}
859 }
860 #endif /*CFG_WITH_PAGER*/
861 
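/*
 * With CFG_CORE_UNMAP_CORE_AT_EL0, compute the address and size of the
 * exception vector code that must remain mapped while user mode runs, and
 * its offset from the user VA range where it is aliased.
 */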
862 static void init_user_kcode(void)
863 {
864 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0
865 	vaddr_t v = (vaddr_t)thread_excp_vect;
866 	vaddr_t ve = (vaddr_t)thread_excp_vect_end;
867 
868 	thread_user_kcode_va = ROUNDDOWN(v, CORE_MMU_USER_CODE_SIZE);
869 	ve = ROUNDUP(ve, CORE_MMU_USER_CODE_SIZE);
870 	thread_user_kcode_size = ve - thread_user_kcode_va;
871 
872 	core_mmu_get_user_va_range(&v, NULL);
873 	thread_user_kcode_offset = thread_user_kcode_va - v;
874 
875 #if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
876 	/*
877 	 * When transitioning to EL0, subtract this much from SP so that it
878 	 * points into this special kdata page instead. SP is restored by
879 	 * adding the same amount when transitioning back to EL1.
880 	 */
881 	v += thread_user_kcode_size;
882 	thread_user_kdata_sp_offset = (vaddr_t)thread_core_local - v;
883 #endif
884 #endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
885 }
886 
887 void thread_init_threads(void)
888 {
889 	size_t n;
890 
891 	init_thread_stacks();
892 	pgt_init();
893 
894 	mutex_lockdep_init();
895 
896 	for (n = 0; n < CFG_NUM_THREADS; n++) {
897 		TAILQ_INIT(&threads[n].tsd.sess_stack);
898 		SLIST_INIT(&threads[n].tsd.pgt_cache);
899 	}
900 
901 	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
902 		thread_core_local[n].curr_thread = -1;
903 }
904 
905 void thread_init_primary(const struct thread_handlers *handlers)
906 {
907 	init_handlers(handlers);
908 
909 	/* Initialize canaries around the stacks */
910 	init_canaries();
911 
912 	init_user_kcode();
913 }
914 
915 static void init_sec_mon(size_t pos __maybe_unused)
916 {
917 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
918 	/* Initialize secure monitor */
919 	sm_init(GET_STACK(stack_tmp[pos]));
920 #endif
921 }
922 
923 static uint32_t __maybe_unused get_midr_implementer(uint32_t midr)
924 {
925 	return (midr >> MIDR_IMPLEMENTER_SHIFT) & MIDR_IMPLEMENTER_MASK;
926 }
927 
928 static uint32_t __maybe_unused get_midr_primary_part(uint32_t midr)
929 {
930 	return (midr >> MIDR_PRIMARY_PART_NUM_SHIFT) &
931 	       MIDR_PRIMARY_PART_NUM_MASK;
932 }
933 
934 #ifdef ARM64
935 static bool probe_workaround_available(void)
936 {
937 	int32_t r;
938 
939 	r = thread_smc(SMCCC_VERSION, 0, 0, 0);
940 	if (r < 0)
941 		return false;
942 	if (r < 0x10001)	/* compare with version 1.1 */
943 		return false;
944 
945 	/* Version >= 1.1, so SMCCC_ARCH_FEATURES is available */
946 	r = thread_smc(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_1, 0, 0);
947 	return r >= 0;
948 }
949 
950 static vaddr_t __maybe_unused select_vector(vaddr_t a)
951 {
952 	if (probe_workaround_available()) {
953 		DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") available",
954 		     SMCCC_ARCH_WORKAROUND_1);
955 		DMSG("SMC Workaround for CVE-2017-5715 used");
956 		return a;
957 	}
958 
959 	DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") unavailable",
960 	     SMCCC_ARCH_WORKAROUND_1);
961 	DMSG("SMC Workaround for CVE-2017-5715 not needed (if ARM-TF is up to date)");
962 	return (vaddr_t)thread_excp_vect;
963 }
964 #else
965 static vaddr_t __maybe_unused select_vector(vaddr_t a)
966 {
967 	return a;
968 }
969 #endif
970 
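/*
 * Select the exception vector. With CFG_CORE_WORKAROUND_SPECTRE_BP_SEC,
 * cores known to be affected by CVE-2017-5715 get a vector with the branch
 * predictor workaround; on ARM64 the SMCCC_ARCH_WORKAROUND_1 based vector is
 * only used when the secure monitor advertises it (SMCCC >= 1.1).
 */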
971 static vaddr_t get_excp_vect(void)
972 {
973 #ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
974 	uint32_t midr = read_midr();
975 
976 	if (get_midr_implementer(midr) != MIDR_IMPLEMENTER_ARM)
977 		return (vaddr_t)thread_excp_vect;
978 
979 	switch (get_midr_primary_part(midr)) {
980 #ifdef ARM32
981 	case CORTEX_A8_PART_NUM:
982 	case CORTEX_A9_PART_NUM:
983 	case CORTEX_A17_PART_NUM:
984 #endif
985 	case CORTEX_A57_PART_NUM:
986 	case CORTEX_A72_PART_NUM:
987 	case CORTEX_A73_PART_NUM:
988 	case CORTEX_A75_PART_NUM:
989 		return select_vector((vaddr_t)thread_excp_vect_workaround);
990 #ifdef ARM32
991 	case CORTEX_A15_PART_NUM:
992 		return select_vector((vaddr_t)thread_excp_vect_workaround_a15);
993 #endif
994 	default:
995 		return (vaddr_t)thread_excp_vect;
996 	}
997 #endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/
998 
999 	return (vaddr_t)thread_excp_vect;
1000 }
1001 
1002 void thread_init_per_cpu(void)
1003 {
1004 	size_t pos = get_core_pos();
1005 	struct thread_core_local *l = thread_get_core_local();
1006 
1007 	init_sec_mon(pos);
1008 
1009 	set_tmp_stack(l, GET_STACK(stack_tmp[pos]) - STACK_TMP_OFFS);
1010 	set_abt_stack(l, GET_STACK(stack_abt[pos]));
1011 
1012 	thread_init_vbar(get_excp_vect());
1013 
1014 #ifdef CFG_FTRACE_SUPPORT
1015 	/*
1016 	 * Enable EL0/PL0 access to the frequency register and the physical
1017 	 * counter register, which is required for timestamping during
1018 	 * function tracing.
1019 	 */
1020 	write_cntkctl(read_cntkctl() | CNTKCTL_PL0PCTEN);
1021 #endif
1022 }
1023 
1024 struct thread_specific_data *thread_get_tsd(void)
1025 {
1026 	return &threads[thread_get_id()].tsd;
1027 }
1028 
1029 struct thread_ctx_regs *thread_get_ctx_regs(void)
1030 {
1031 	struct thread_core_local *l = thread_get_core_local();
1032 
1033 	assert(l->curr_thread != -1);
1034 	return &threads[l->curr_thread].regs;
1035 }
1036 
1037 void thread_set_foreign_intr(bool enable)
1038 {
1039 	/* thread_get_core_local() requires foreign interrupts to be disabled */
1040 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
1041 	struct thread_core_local *l;
1042 
1043 	l = thread_get_core_local();
1044 
1045 	assert(l->curr_thread != -1);
1046 
1047 	if (enable) {
1048 		threads[l->curr_thread].flags |=
1049 					THREAD_FLAGS_FOREIGN_INTR_ENABLE;
1050 		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
1051 	} else {
1052 		/*
1053 		 * No need to disable foreign interrupts here since they're
1054 		 * already disabled above.
1055 		 */
1056 		threads[l->curr_thread].flags &=
1057 					~THREAD_FLAGS_FOREIGN_INTR_ENABLE;
1058 	}
1059 }
1060 
1061 void thread_restore_foreign_intr(void)
1062 {
1063 	/* thread_get_core_local() requires foreign interrupts to be disabled */
1064 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
1065 	struct thread_core_local *l;
1066 
1067 	l = thread_get_core_local();
1068 
1069 	assert(l->curr_thread != -1);
1070 
1071 	if (threads[l->curr_thread].flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE)
1072 		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
1073 }
1074 
1075 #ifdef CFG_WITH_VFP
1076 uint32_t thread_kernel_enable_vfp(void)
1077 {
1078 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
1079 	struct thread_ctx *thr = threads + thread_get_id();
1080 	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
1081 
1082 	assert(!vfp_is_enabled());
1083 
1084 	if (!thr->vfp_state.ns_saved) {
1085 		vfp_lazy_save_state_final(&thr->vfp_state.ns,
1086 					  true /*force_save*/);
1087 		thr->vfp_state.ns_saved = true;
1088 	} else if (thr->vfp_state.sec_lazy_saved &&
1089 		   !thr->vfp_state.sec_saved) {
1090 		/*
1091 		 * This happens when we're handling an abort while the
1092 		 * thread was using the VFP state.
1093 		 */
1094 		vfp_lazy_save_state_final(&thr->vfp_state.sec,
1095 					  false /*!force_save*/);
1096 		thr->vfp_state.sec_saved = true;
1097 	} else if (tuv && tuv->lazy_saved && !tuv->saved) {
1098 		/*
1099 		 * This can happen either during syscall processing or while
1100 		 * handling an abort that interrupted a syscall.
1101 		 */
1102 		vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/);
1103 		tuv->saved = true;
1104 	}
1105 
1106 	vfp_enable();
1107 	return exceptions;
1108 }
1109 
1110 void thread_kernel_disable_vfp(uint32_t state)
1111 {
1112 	uint32_t exceptions;
1113 
1114 	assert(vfp_is_enabled());
1115 
1116 	vfp_disable();
1117 	exceptions = thread_get_exceptions();
1118 	assert(exceptions & THREAD_EXCP_FOREIGN_INTR);
1119 	exceptions &= ~THREAD_EXCP_FOREIGN_INTR;
1120 	exceptions |= state & THREAD_EXCP_FOREIGN_INTR;
1121 	thread_set_exceptions(exceptions);
1122 }
1123 
1124 void thread_kernel_save_vfp(void)
1125 {
1126 	struct thread_ctx *thr = threads + thread_get_id();
1127 
1128 	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
1129 	if (vfp_is_enabled()) {
1130 		vfp_lazy_save_state_init(&thr->vfp_state.sec);
1131 		thr->vfp_state.sec_lazy_saved = true;
1132 	}
1133 }
1134 
1135 void thread_kernel_restore_vfp(void)
1136 {
1137 	struct thread_ctx *thr = threads + thread_get_id();
1138 
1139 	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
1140 	assert(!vfp_is_enabled());
1141 	if (thr->vfp_state.sec_lazy_saved) {
1142 		vfp_lazy_restore_state(&thr->vfp_state.sec,
1143 				       thr->vfp_state.sec_saved);
1144 		thr->vfp_state.sec_saved = false;
1145 		thr->vfp_state.sec_lazy_saved = false;
1146 	}
1147 }
1148 
1149 void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp)
1150 {
1151 	struct thread_ctx *thr = threads + thread_get_id();
1152 	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
1153 
1154 	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
1155 	assert(!vfp_is_enabled());
1156 
1157 	if (!thr->vfp_state.ns_saved) {
1158 		vfp_lazy_save_state_final(&thr->vfp_state.ns,
1159 					  true /*force_save*/);
1160 		thr->vfp_state.ns_saved = true;
1161 	} else if (tuv && uvfp != tuv) {
1162 		if (tuv->lazy_saved && !tuv->saved) {
1163 			vfp_lazy_save_state_final(&tuv->vfp,
1164 						  false /*!force_save*/);
1165 			tuv->saved = true;
1166 		}
1167 	}
1168 
1169 	if (uvfp->lazy_saved)
1170 		vfp_lazy_restore_state(&uvfp->vfp, uvfp->saved);
1171 	uvfp->lazy_saved = false;
1172 	uvfp->saved = false;
1173 
1174 	thr->vfp_state.uvfp = uvfp;
1175 	vfp_enable();
1176 }
1177 
1178 void thread_user_save_vfp(void)
1179 {
1180 	struct thread_ctx *thr = threads + thread_get_id();
1181 	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
1182 
1183 	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
1184 	if (!vfp_is_enabled())
1185 		return;
1186 
1187 	assert(tuv && !tuv->lazy_saved && !tuv->saved);
1188 	vfp_lazy_save_state_init(&tuv->vfp);
1189 	tuv->lazy_saved = true;
1190 }
1191 
1192 void thread_user_clear_vfp(struct thread_user_vfp_state *uvfp)
1193 {
1194 	struct thread_ctx *thr = threads + thread_get_id();
1195 
1196 	if (uvfp == thr->vfp_state.uvfp)
1197 		thr->vfp_state.uvfp = NULL;
1198 	uvfp->lazy_saved = false;
1199 	uvfp->saved = false;
1200 }
1201 #endif /*CFG_WITH_VFP*/
1202 
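/*
 * Build the SPSR used when entering user mode. On ARM32 only 32-bit user
 * contexts are supported and the Thumb bit is taken from the entry address;
 * on ARM64 a 64-bit context runs at EL0 with the current DAIF mask
 * inherited.
 */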
1203 #ifdef ARM32
1204 static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
1205 {
1206 	uint32_t s;
1207 
1208 	if (!is_32bit)
1209 		return false;
1210 
1211 	s = read_spsr();
1212 	s &= ~(CPSR_MODE_MASK | CPSR_T | CPSR_IT_MASK1 | CPSR_IT_MASK2);
1213 	s |= CPSR_MODE_USR;
1214 	if (entry_func & 1)
1215 		s |= CPSR_T;
1216 	*spsr = s;
1217 	return true;
1218 }
1219 #endif
1220 
1221 #ifdef ARM64
1222 static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
1223 {
1224 	uint32_t s;
1225 
1226 	if (is_32bit) {
1227 		s = read_daif() & (SPSR_32_AIF_MASK << SPSR_32_AIF_SHIFT);
1228 		s |= SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT;
1229 		s |= (entry_func & SPSR_32_T_MASK) << SPSR_32_T_SHIFT;
1230 	} else {
1231 		s = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT);
1232 	}
1233 
1234 	*spsr = s;
1235 	return true;
1236 }
1237 #endif
1238 
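/*
 * Enter user mode at entry_func with arguments a0-a3 and the given user
 * stack pointer. Returns when user mode exits, with exit_status0/1
 * describing the cause; an unsupported configuration (e.g. a 64-bit context
 * on ARM32) is reported as a panic exit.
 */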
1239 uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
1240 		unsigned long a2, unsigned long a3, unsigned long user_sp,
1241 		unsigned long entry_func, bool is_32bit,
1242 		uint32_t *exit_status0, uint32_t *exit_status1)
1243 {
1244 	uint32_t spsr;
1245 
1246 	tee_ta_update_session_utime_resume();
1247 
1248 	if (!get_spsr(is_32bit, entry_func, &spsr)) {
1249 		*exit_status0 = 1; /* panic */
1250 		*exit_status1 = 0xbadbadba;
1251 		return 0;
1252 	}
1253 	return __thread_enter_user_mode(a0, a1, a2, a3, user_sp, entry_func,
1254 					spsr, exit_status0, exit_status1);
1255 }
1256 
1257 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0
1258 void thread_get_user_kcode(struct mobj **mobj, size_t *offset,
1259 			   vaddr_t *va, size_t *sz)
1260 {
1261 	core_mmu_get_user_va_range(va, NULL);
1262 	*mobj = mobj_tee_ram;
1263 	*offset = thread_user_kcode_va - VCORE_START_VA;
1264 	*sz = thread_user_kcode_size;
1265 }
1266 #endif
1267 
1268 #if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
1269 	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
1270 void thread_get_user_kdata(struct mobj **mobj, size_t *offset,
1271 			   vaddr_t *va, size_t *sz)
1272 {
1273 	vaddr_t v;
1274 
1275 	core_mmu_get_user_va_range(&v, NULL);
1276 	*va = v + thread_user_kcode_size;
1277 	*mobj = mobj_tee_ram;
1278 	*offset = (vaddr_t)thread_user_kdata_page - VCORE_START_VA;
1279 	*sz = sizeof(thread_user_kdata_page);
1280 }
1281 #endif
1282