xref: /optee_os/core/arch/arm/kernel/thread.c (revision ba2a6adb764f1310ad3c3091d89de84274f86b02)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2022, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  * Copyright (c) 2020-2021, Arm Limited
6  */
7 
8 #include <platform_config.h>
9 
10 #include <arm.h>
11 #include <assert.h>
12 #include <config.h>
13 #include <io.h>
14 #include <keep.h>
15 #include <kernel/asan.h>
16 #include <kernel/boot.h>
17 #include <kernel/linker.h>
18 #include <kernel/lockdep.h>
19 #include <kernel/misc.h>
20 #include <kernel/panic.h>
21 #include <kernel/spinlock.h>
22 #include <kernel/spmc_sp_handler.h>
23 #include <kernel/tee_ta_manager.h>
24 #include <kernel/thread.h>
25 #include <kernel/thread_private.h>
26 #include <kernel/user_mode_ctx_struct.h>
27 #include <kernel/virtualization.h>
28 #include <mm/core_memprot.h>
29 #include <mm/mobj.h>
30 #include <mm/tee_mm.h>
31 #include <mm/tee_pager.h>
32 #include <mm/vm.h>
33 #include <smccc.h>
34 #include <sm/sm.h>
35 #include <trace.h>
36 #include <util.h>
37 
38 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0
39 static vaddr_t thread_user_kcode_va __nex_bss;
40 long thread_user_kcode_offset __nex_bss;
41 static size_t thread_user_kcode_size __nex_bss;
42 #endif
43 
44 #if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
45 	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
46 long thread_user_kdata_sp_offset __nex_bss;
47 static uint8_t thread_user_kdata_page[
48 	ROUNDUP(sizeof(struct thread_core_local) * CFG_TEE_CORE_NB_CORE,
49 		SMALL_PAGE_SIZE)]
50 	__aligned(SMALL_PAGE_SIZE)
51 #ifndef CFG_NS_VIRTUALIZATION
52 	__section(".nozi.kdata_page");
53 #else
54 	__section(".nex_nozi.kdata_page");
55 #endif
56 #endif
57 
58 #ifdef ARM32
59 uint32_t __nostackcheck thread_get_exceptions(void)
60 {
61 	uint32_t cpsr = read_cpsr();
62 
63 	return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
64 }
65 
66 void __nostackcheck thread_set_exceptions(uint32_t exceptions)
67 {
68 	uint32_t cpsr = read_cpsr();
69 
70 	/* Foreign interrupts must not be unmasked while holding a spinlock */
71 	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
72 		assert_have_no_spinlock();
73 
74 	cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
75 	cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);
76 
77 	barrier();
78 	write_cpsr(cpsr);
79 	barrier();
80 }
81 #endif /*ARM32*/
82 
83 #ifdef ARM64
84 uint32_t __nostackcheck thread_get_exceptions(void)
85 {
86 	uint32_t daif = read_daif();
87 
88 	return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL;
89 }
90 
91 void __nostackcheck thread_set_exceptions(uint32_t exceptions)
92 {
93 	uint32_t daif = read_daif();
94 
95 	/* Foreign interrupts must not be unmasked while holding a spinlock */
96 	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
97 		assert_have_no_spinlock();
98 
99 	daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
100 	daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT);
101 
102 	barrier();
103 	write_daif(daif);
104 	barrier();
105 }
106 #endif /*ARM64*/
107 
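/*
 * Mask the exceptions given in @exceptions in addition to those already
 * masked and return the previous exception mask so that it can be
 * restored later with thread_unmask_exceptions().
 */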
108 uint32_t __nostackcheck thread_mask_exceptions(uint32_t exceptions)
109 {
110 	uint32_t state = thread_get_exceptions();
111 
112 	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
113 	return state;
114 }
115 
116 void __nostackcheck thread_unmask_exceptions(uint32_t state)
117 {
118 	thread_set_exceptions(state & THREAD_EXCP_ALL);
119 }
120 
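/*
 * Start a lazy save of the non-secure world VFP state so that the VFP
 * registers can be used while in secure world. The register content is
 * only written out later if actually needed, see
 * thread_kernel_enable_vfp() and thread_lazy_restore_ns_vfp().
 */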
121 static void thread_lazy_save_ns_vfp(void)
122 {
123 #ifdef CFG_WITH_VFP
124 	struct thread_ctx *thr = threads + thread_get_id();
125 
126 	thr->vfp_state.ns_saved = false;
127 	vfp_lazy_save_state_init(&thr->vfp_state.ns);
128 #endif /*CFG_WITH_VFP*/
129 }
130 
131 static void thread_lazy_restore_ns_vfp(void)
132 {
133 #ifdef CFG_WITH_VFP
134 	struct thread_ctx *thr = threads + thread_get_id();
135 	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
136 
137 	assert(!thr->vfp_state.sec_lazy_saved && !thr->vfp_state.sec_saved);
138 
139 	if (tuv && tuv->lazy_saved && !tuv->saved) {
140 		vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/);
141 		tuv->saved = true;
142 	}
143 
144 	vfp_lazy_restore_state(&thr->vfp_state.ns, thr->vfp_state.ns_saved);
145 	thr->vfp_state.ns_saved = false;
146 #endif /*CFG_WITH_VFP*/
147 }
148 
149 #ifdef ARM32
150 static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
151 		      uint32_t a2, uint32_t a3, uint32_t a4, uint32_t a5,
152 		      uint32_t a6, uint32_t a7, void *pc)
153 {
154 	thread->regs.pc = (uint32_t)pc;
155 
156 	/*
157 	 * Stdcalls start in SVC mode with foreign interrupts masked,
158 	 * asynchronous aborts masked and native interrupts unmasked.
159 	 */
160 	thread->regs.cpsr = read_cpsr() & ARM32_CPSR_E;
161 	thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_A |
162 			(THREAD_EXCP_FOREIGN_INTR << ARM32_CPSR_F_SHIFT);
163 	/* Enable Thumb mode if the entry address has the Thumb bit set */
164 	if (thread->regs.pc & 1)
165 		thread->regs.cpsr |= CPSR_T;
166 	/* Reinitialize stack pointer */
167 	thread->regs.svc_sp = thread->stack_va_end;
168 
169 	/*
170 	 * Copy arguments into context. This will make the
171 	 * arguments appear in r0-r7 when the thread is started.
172 	 */
173 	thread->regs.r0 = a0;
174 	thread->regs.r1 = a1;
175 	thread->regs.r2 = a2;
176 	thread->regs.r3 = a3;
177 	thread->regs.r4 = a4;
178 	thread->regs.r5 = a5;
179 	thread->regs.r6 = a6;
180 	thread->regs.r7 = a7;
181 }
182 #endif /*ARM32*/
183 
184 #ifdef ARM64
185 static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
186 		      uint32_t a2, uint32_t a3, uint32_t a4, uint32_t a5,
187 		      uint32_t a6, uint32_t a7, void *pc)
188 {
189 	thread->regs.pc = (uint64_t)pc;
190 
191 	/*
192 	 * Stdcalls start at EL1 with foreign interrupts masked,
193 	 * asynchronous aborts masked and native interrupts unmasked.
194 	 */
195 	thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
196 				THREAD_EXCP_FOREIGN_INTR | DAIFBIT_ABT);
197 	/* Reinitialize stack pointer */
198 	thread->regs.sp = thread->stack_va_end;
199 
200 	/*
201 	 * Copy arguments into context. This will make the
202 	 * arguments appear in x0-x7 when the thread is started.
203 	 */
204 	thread->regs.x[0] = a0;
205 	thread->regs.x[1] = a1;
206 	thread->regs.x[2] = a2;
207 	thread->regs.x[3] = a3;
208 	thread->regs.x[4] = a4;
209 	thread->regs.x[5] = a5;
210 	thread->regs.x[6] = a6;
211 	thread->regs.x[7] = a7;
212 
213 	/* Set up frame pointer as per the AArch64 AAPCS */
214 	thread->regs.x[29] = 0;
215 }
216 #endif /*ARM64*/
217 
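/*
 * Allocate a free thread context, initialize its registers with the
 * supplied arguments and entry point and resume execution in it. Returns
 * to the caller only if no free thread context was found.
 */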
218 static void __thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2,
219 				   uint32_t a3, uint32_t a4, uint32_t a5,
220 				   uint32_t a6, uint32_t a7,
221 				   void *pc, uint32_t flags)
222 {
223 	struct thread_core_local *l = thread_get_core_local();
224 	bool found_thread = false;
225 	size_t n = 0;
226 
227 	assert(l->curr_thread == THREAD_ID_INVALID);
228 
229 	thread_lock_global();
230 
231 	for (n = 0; n < CFG_NUM_THREADS; n++) {
232 		if (threads[n].state == THREAD_STATE_FREE) {
233 			threads[n].state = THREAD_STATE_ACTIVE;
234 			found_thread = true;
235 			break;
236 		}
237 	}
238 
239 	thread_unlock_global();
240 
241 	if (!found_thread)
242 		return;
243 
244 	l->curr_thread = n;
245 
246 	threads[n].flags = flags;
247 	init_regs(threads + n, a0, a1, a2, a3, a4, a5, a6, a7, pc);
248 #ifdef CFG_CORE_PAUTH
249 	/*
250 	 * Copy the APIA key into the registers to be restored with
251 	 * thread_resume().
252 	 */
253 	threads[n].regs.apiakey_hi = threads[n].keys.apia_hi;
254 	threads[n].regs.apiakey_lo = threads[n].keys.apia_lo;
255 #endif
256 
257 	thread_lazy_save_ns_vfp();
258 
259 	l->flags &= ~THREAD_CLF_TMP;
260 	thread_resume(&threads[n].regs);
261 	/*NOTREACHED*/
262 	panic();
263 }
264 
265 void thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
266 			  uint32_t a4, uint32_t a5)
267 {
268 	__thread_alloc_and_run(a0, a1, a2, a3, a4, a5, 0, 0,
269 			       thread_std_smc_entry, 0);
270 }
271 
272 #ifdef CFG_SECURE_PARTITION
273 void thread_sp_alloc_and_run(struct thread_smc_args *args __maybe_unused)
274 {
275 	__thread_alloc_and_run(args->a0, args->a1, args->a2, args->a3, args->a4,
276 			       args->a5, args->a6, args->a7,
277 			       spmc_sp_thread_entry, THREAD_FLAGS_FFA_ONLY);
278 }
279 #endif
280 
281 #ifdef ARM32
282 static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
283 			  uint32_t a1, uint32_t a2, uint32_t a3)
284 {
285 	/*
286 	 * Update the values returned from RPC; they will appear in
287 	 * r0-r3 when the thread is resumed.
288 	 */
289 	regs->r0 = a0;
290 	regs->r1 = a1;
291 	regs->r2 = a2;
292 	regs->r3 = a3;
293 }
294 #endif /*ARM32*/
295 
296 #ifdef ARM64
297 static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
298 			  uint32_t a1, uint32_t a2, uint32_t a3)
299 {
300 	/*
301 	 * Update the values returned from RPC; they will appear in
302 	 * x0-x3 when the thread is resumed.
303 	 */
304 	regs->x[0] = a0;
305 	regs->x[1] = a1;
306 	regs->x[2] = a2;
307 	regs->x[3] = a3;
308 }
309 #endif /*ARM64*/
310 
311 #ifdef ARM32
312 static bool is_from_user(uint32_t cpsr)
313 {
314 	return (cpsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
315 }
316 #endif
317 
318 #ifdef ARM64
319 static bool is_from_user(uint32_t cpsr)
320 {
321 	if (cpsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
322 		return true;
323 	if (((cpsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
324 	     SPSR_64_MODE_EL0)
325 		return true;
326 	return false;
327 }
328 #endif
329 
330 #ifdef CFG_SYSCALL_FTRACE
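/* Suspend/resume syscall function tracing for the current TA session, if any */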
331 static void __noprof ftrace_suspend(void)
332 {
333 	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);
334 
335 	if (s && s->fbuf)
336 		s->fbuf->syscall_trace_suspended = true;
337 }
338 
339 static void __noprof ftrace_resume(void)
340 {
341 	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);
342 
343 	if (s && s->fbuf)
344 		s->fbuf->syscall_trace_suspended = false;
345 }
346 #else
347 static void __noprof ftrace_suspend(void)
348 {
349 }
350 
351 static void __noprof ftrace_resume(void)
352 {
353 }
354 #endif
355 
356 static bool is_user_mode(struct thread_ctx_regs *regs)
357 {
358 	return is_from_user((uint32_t)regs->cpsr);
359 }
360 
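/*
 * Resume the suspended thread identified by @thread_id after a return
 * from RPC in the normal world. Returns to the caller only if the thread
 * wasn't found in the suspended state.
 */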
361 void thread_resume_from_rpc(uint32_t thread_id, uint32_t a0, uint32_t a1,
362 			    uint32_t a2, uint32_t a3)
363 {
364 	size_t n = thread_id;
365 	struct thread_core_local *l = thread_get_core_local();
366 	bool found_thread = false;
367 
368 	assert(l->curr_thread == THREAD_ID_INVALID);
369 
370 	thread_lock_global();
371 
372 	if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) {
373 		threads[n].state = THREAD_STATE_ACTIVE;
374 		found_thread = true;
375 	}
376 
377 	thread_unlock_global();
378 
379 	if (!found_thread)
380 		return;
381 
382 	l->curr_thread = n;
383 
384 	if (threads[n].have_user_map) {
385 		core_mmu_set_user_map(&threads[n].user_map);
386 		if (threads[n].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
387 			tee_ta_ftrace_update_times_resume();
388 	}
389 
390 	if (is_user_mode(&threads[n].regs))
391 		tee_ta_update_session_utime_resume();
392 
393 	/*
394 	 * A return from RPC that requested service of a foreign interrupt
395 	 * must not take parameters from the non-secure world.
396 	 */
397 	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
398 		copy_a0_to_a3(&threads[n].regs, a0, a1, a2, a3);
399 		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
400 	}
401 
402 	thread_lazy_save_ns_vfp();
403 
404 	if (threads[n].have_user_map)
405 		ftrace_resume();
406 
407 	l->flags &= ~THREAD_CLF_TMP;
408 	thread_resume(&threads[n].regs);
409 	/*NOTREACHED*/
410 	panic();
411 }
412 
413 #ifdef ARM64
414 vaddr_t thread_get_saved_thread_sp(void)
415 {
416 	struct thread_core_local *l = thread_get_core_local();
417 	int ct = l->curr_thread;
418 
419 	assert(ct != THREAD_ID_INVALID);
420 	return threads[ct].kern_sp;
421 }
422 #endif /*ARM64*/
423 
424 #ifdef ARM32
425 bool thread_is_in_normal_mode(void)
426 {
427 	return (read_cpsr() & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_SVC;
428 }
429 #endif
430 
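/*
 * Release the current thread context: restore the non-secure VFP state,
 * let the pager release the physical pages of the thread stack and mark
 * the context as free again.
 */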
431 void thread_state_free(void)
432 {
433 	struct thread_core_local *l = thread_get_core_local();
434 	int ct = l->curr_thread;
435 
436 	assert(ct != THREAD_ID_INVALID);
437 
438 	thread_lazy_restore_ns_vfp();
439 	tee_pager_release_phys(
440 		(void *)(threads[ct].stack_va_end - STACK_THREAD_SIZE),
441 		STACK_THREAD_SIZE);
442 
443 	thread_lock_global();
444 
445 	assert(threads[ct].state == THREAD_STATE_ACTIVE);
446 	threads[ct].state = THREAD_STATE_FREE;
447 	threads[ct].flags = 0;
448 	l->curr_thread = THREAD_ID_INVALID;
449 
450 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
451 		virt_unset_guest();
452 	thread_unlock_global();
453 }
454 
455 #ifdef CFG_WITH_PAGER
456 static void release_unused_kernel_stack(struct thread_ctx *thr,
457 					uint32_t cpsr __maybe_unused)
458 {
459 #ifdef ARM64
460 	/*
461 	 * If we're from user mode then thr->regs.sp is the saved user
462 	 * stack pointer and thr->kern_sp holds the last kernel stack
463 	 * pointer. But if we're from kernel mode then thr->kern_sp isn't
464 	 * up to date so we need to read from thr->regs.sp instead.
465 	 */
466 	vaddr_t sp = is_from_user(cpsr) ?  thr->kern_sp : thr->regs.sp;
467 #else
468 	vaddr_t sp = thr->regs.svc_sp;
469 #endif
470 	vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE;
471 	size_t len = sp - base;
472 
473 	tee_pager_release_phys((void *)base, len);
474 }
475 #else
476 static void release_unused_kernel_stack(struct thread_ctx *thr __unused,
477 					uint32_t cpsr __unused)
478 {
479 }
480 #endif
481 
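/*
 * Save the state of the current thread, mark it as suspended and detach
 * it from this core. Returns the thread ID to be used later with
 * thread_resume_from_rpc().
 */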
482 int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
483 {
484 	struct thread_core_local *l = thread_get_core_local();
485 	int ct = l->curr_thread;
486 
487 	assert(ct != THREAD_ID_INVALID);
488 
489 	if (core_mmu_user_mapping_is_active())
490 		ftrace_suspend();
491 
492 	thread_check_canaries();
493 
494 	release_unused_kernel_stack(threads + ct, cpsr);
495 
496 	if (is_from_user(cpsr)) {
497 		thread_user_save_vfp();
498 		tee_ta_update_session_utime_suspend();
499 		tee_ta_gprof_sample_pc(pc);
500 	}
501 	thread_lazy_restore_ns_vfp();
502 
503 	thread_lock_global();
504 
505 	assert(threads[ct].state == THREAD_STATE_ACTIVE);
506 	threads[ct].flags |= flags;
507 	threads[ct].regs.cpsr = cpsr;
508 	threads[ct].regs.pc = pc;
509 	threads[ct].state = THREAD_STATE_SUSPENDED;
510 
511 	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
512 	if (threads[ct].have_user_map) {
513 		if (threads[ct].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
514 			tee_ta_ftrace_update_times_suspend();
515 		core_mmu_get_user_map(&threads[ct].user_map);
516 		core_mmu_set_user_map(NULL);
517 	}
518 
519 	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
520 		struct ts_session *ts_sess =
521 			TAILQ_FIRST(&threads[ct].tsd.sess_stack);
522 
523 		spmc_sp_set_to_preempted(ts_sess);
524 	}
525 
526 	l->curr_thread = THREAD_ID_INVALID;
527 
528 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
529 		virt_unset_guest();
530 
531 	thread_unlock_global();
532 
533 	return ct;
534 }
535 
536 bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
537 {
538 	if (thread_id >= CFG_NUM_THREADS)
539 		return false;
540 	threads[thread_id].stack_va_end = sp;
541 	return true;
542 }
543 
544 static void __maybe_unused
545 set_core_local_kcode_offset(struct thread_core_local *cls, long offset)
546 {
547 	size_t n = 0;
548 
549 	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
550 		cls[n].kcode_offset = offset;
551 }
552 
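/*
 * With CFG_CORE_UNMAP_CORE_AT_EL0 only a small part of the kernel, the
 * exception vector and the code around it, stays mapped while in user
 * mode. Record where that code is and at which offset it appears in the
 * user mapping.
 */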
553 static void init_user_kcode(void)
554 {
555 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0
556 	vaddr_t v = (vaddr_t)thread_excp_vect;
557 	vaddr_t ve = (vaddr_t)thread_excp_vect_end;
558 
559 	thread_user_kcode_va = ROUNDDOWN(v, CORE_MMU_USER_CODE_SIZE);
560 	ve = ROUNDUP(ve, CORE_MMU_USER_CODE_SIZE);
561 	thread_user_kcode_size = ve - thread_user_kcode_va;
562 
563 	core_mmu_get_user_va_range(&v, NULL);
564 	thread_user_kcode_offset = thread_user_kcode_va - v;
565 
566 	set_core_local_kcode_offset(thread_core_local,
567 				    thread_user_kcode_offset);
568 #if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
569 	set_core_local_kcode_offset((void *)thread_user_kdata_page,
570 				    thread_user_kcode_offset);
571 	/*
572 	 * When transitioning to EL0, subtract this much from SP to make it
573 	 * point at this special kdata page instead. SP is restored by adding
574 	 * the same amount when transitioning back to EL1.
575 	 */
576 	v += thread_user_kcode_size;
577 	thread_user_kdata_sp_offset = (vaddr_t)thread_core_local - v;
578 #endif
579 #endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
580 }
581 
582 void thread_init_primary(void)
583 {
584 	/* Initialize canaries around the stacks */
585 	thread_init_canaries();
586 
587 	init_user_kcode();
588 }
589 
590 static uint32_t __maybe_unused get_midr_implementer(uint32_t midr)
591 {
592 	return (midr >> MIDR_IMPLEMENTER_SHIFT) & MIDR_IMPLEMENTER_MASK;
593 }
594 
595 static uint32_t __maybe_unused get_midr_primary_part(uint32_t midr)
596 {
597 	return (midr >> MIDR_PRIMARY_PART_NUM_SHIFT) &
598 	       MIDR_PRIMARY_PART_NUM_MASK;
599 }
600 
601 static uint32_t __maybe_unused get_midr_variant(uint32_t midr)
602 {
603 	return (midr >> MIDR_VARIANT_SHIFT) & MIDR_VARIANT_MASK;
604 }
605 
606 static uint32_t __maybe_unused get_midr_revision(uint32_t midr)
607 {
608 	return (midr >> MIDR_REVISION_SHIFT) & MIDR_REVISION_MASK;
609 }
610 
611 #ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
612 #ifdef ARM64
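/*
 * Use SMCCC_VERSION and SMCCC_ARCH_FEATURES (available from SMCCC 1.1)
 * to probe whether the secure monitor implements the given workaround.
 */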
613 static bool probe_workaround_available(uint32_t wa_id)
614 {
615 	int32_t r;
616 
617 	r = thread_smc(SMCCC_VERSION, 0, 0, 0);
618 	if (r < 0)
619 		return false;
620 	if (r < 0x10001)	/* compare with version 1.1 */
621 		return false;
622 
623 	/* Version >= 1.1, so SMCCC_ARCH_FEATURES is available */
624 	r = thread_smc(SMCCC_ARCH_FEATURES, wa_id, 0, 0);
625 	return r >= 0;
626 }
627 
628 static vaddr_t __maybe_unused select_vector_wa_spectre_v2(void)
629 {
630 	if (probe_workaround_available(SMCCC_ARCH_WORKAROUND_1)) {
631 		DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") available",
632 		     SMCCC_ARCH_WORKAROUND_1);
633 		DMSG("SMC Workaround for CVE-2017-5715 used");
634 		return (vaddr_t)thread_excp_vect_wa_spectre_v2;
635 	}
636 
637 	DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") unavailable",
638 	     SMCCC_ARCH_WORKAROUND_1);
639 	DMSG("SMC Workaround for CVE-2017-5715 not needed (if ARM-TF is up to date)");
640 	return (vaddr_t)thread_excp_vect;
641 }
642 #else
643 static vaddr_t __maybe_unused select_vector_wa_spectre_v2(void)
644 {
645 	return (vaddr_t)thread_excp_vect_wa_spectre_v2;
646 }
647 #endif
648 #endif
649 
650 #ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
651 static vaddr_t select_vector_wa_spectre_bhb(uint8_t loop_count __maybe_unused)
652 {
653 	/*
654 	 * Spectre-BHB has only been analyzed for AArch64 so far. For
655 	 * AArch32, fall back to the Spectre-V2 workaround, which is likely
656 	 * to work even if it is somewhat more expensive than a more
657 	 * targeted workaround.
658 	 */
659 #ifdef ARM64
660 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0
661 	struct thread_core_local *cl = (void *)thread_user_kdata_page;
662 
663 	cl[get_core_pos()].bhb_loop_count = loop_count;
664 #endif
665 	thread_get_core_local()->bhb_loop_count = loop_count;
666 
667 	DMSG("Spectre-BHB CVE-2022-23960 workaround enabled with \"K\" = %u",
668 	     loop_count);
669 
670 	return (vaddr_t)thread_excp_vect_wa_spectre_bhb;
671 #else
672 	return select_vector_wa_spectre_v2();
673 #endif
674 }
675 #endif
676 
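/*
 * Select the exception vector for this CPU. With
 * CFG_CORE_WORKAROUND_SPECTRE_BP_SEC the choice is based on the MIDR so
 * that affected Arm cores get a Spectre-V2 or Spectre-BHB hardened
 * vector, otherwise the default vector is used.
 */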
677 static vaddr_t get_excp_vect(void)
678 {
679 #ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
680 	uint32_t midr = read_midr();
681 	uint8_t vers = 0;
682 
683 	if (get_midr_implementer(midr) != MIDR_IMPLEMENTER_ARM)
684 		return (vaddr_t)thread_excp_vect;
685 	/*
686 	 * Variant rx, Revision py, for instance
687 	 * Variant 2 Revision 0 = r2p0 = 0x20
688 	 */
689 	vers = (get_midr_variant(midr) << 4) | get_midr_revision(midr);
690 
691 	/*
692 	 * The Spectre-V2 (CVE-2017-5715) software workaround also covers
693 	 * what is needed for Spectre-BHB (CVE-2022-23960). The workaround
694 	 * for Spectre-V2 is more expensive than the one for Spectre-BHB, so
695 	 * select the Spectre-BHB workaround where possible.
696 	 */
697 	switch (get_midr_primary_part(midr)) {
698 #ifdef ARM32
699 	/* Spectre-V2 */
700 	case CORTEX_A8_PART_NUM:
701 	case CORTEX_A9_PART_NUM:
702 	case CORTEX_A17_PART_NUM:
703 #endif
704 	/* Spectre-V2 */
705 	case CORTEX_A57_PART_NUM:
706 	case CORTEX_A73_PART_NUM:
707 	case CORTEX_A75_PART_NUM:
708 		return select_vector_wa_spectre_v2();
709 #ifdef ARM32
710 	/* Spectre-V2 */
711 	case CORTEX_A15_PART_NUM:
712 		return (vaddr_t)thread_excp_vect_wa_a15_spectre_v2;
713 #endif
714 	/*
715 	 * Spectre-V2 for vers < r1p0
716 	 * Spectre-BHB for vers >= r1p0
717 	 */
718 	case CORTEX_A72_PART_NUM:
719 		if (vers < 0x10)
720 			return select_vector_wa_spectre_v2();
721 		return select_vector_wa_spectre_bhb(8);
722 
723 	/*
724 	 * Use the safer but more expensive Spectre-V2 workaround for CPUs
725 	 * where the best mitigation sequence is still being researched.
726 	 */
727 	case CORTEX_A65_PART_NUM:
728 	case CORTEX_A65AE_PART_NUM:
729 	case NEOVERSE_E1_PART_NUM:
730 		return select_vector_wa_spectre_v2();
731 
732 	/* Spectre-BHB */
733 	case CORTEX_A76_PART_NUM:
734 	case CORTEX_A76AE_PART_NUM:
735 	case CORTEX_A77_PART_NUM:
736 		return select_vector_wa_spectre_bhb(24);
737 	case CORTEX_A78_PART_NUM:
738 	case CORTEX_A78AE_PART_NUM:
739 	case CORTEX_A78C_PART_NUM:
740 	case CORTEX_A710_PART_NUM:
741 	case CORTEX_X1_PART_NUM:
742 	case CORTEX_X2_PART_NUM:
743 		return select_vector_wa_spectre_bhb(32);
744 	case NEOVERSE_N1_PART_NUM:
745 		return select_vector_wa_spectre_bhb(24);
746 	case NEOVERSE_N2_PART_NUM:
747 	case NEOVERSE_V1_PART_NUM:
748 		return select_vector_wa_spectre_bhb(32);
749 
750 	default:
751 		return (vaddr_t)thread_excp_vect;
752 	}
753 #endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/
754 
755 	return (vaddr_t)thread_excp_vect;
756 }
757 
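/*
 * Per-CPU initialization: on ARM32 set up the exception mode stack
 * pointers and, without CFG_WITH_ARM_TRUSTED_FW, the secure monitor; then
 * install the exception vector selected by get_excp_vect() and, with
 * CFG_FTRACE_SUPPORT, allow EL0/PL0 access to the physical counter used
 * for ftrace timestamps.
 */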
758 void thread_init_per_cpu(void)
759 {
760 #ifdef ARM32
761 	struct thread_core_local *l = thread_get_core_local();
762 
763 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
764 	/* Initialize secure monitor */
765 	sm_init(l->tmp_stack_va_end + STACK_TMP_OFFS);
766 #endif
767 	thread_set_irq_sp(l->tmp_stack_va_end);
768 	thread_set_fiq_sp(l->tmp_stack_va_end);
769 	thread_set_abt_sp((vaddr_t)l);
770 	thread_set_und_sp((vaddr_t)l);
771 #endif
772 
773 	thread_init_vbar(get_excp_vect());
774 
775 #ifdef CFG_FTRACE_SUPPORT
776 	/*
777 	 * Enable EL0/PL0 access to the frequency register and the physical
778 	 * counter register, which is required for timestamping during
779 	 * function tracing.
780 	 */
781 	write_cntkctl(read_cntkctl() | CNTKCTL_PL0PCTEN);
782 #endif
783 }
784 
785 #ifdef CFG_WITH_VFP
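/*
 * Enable VFP for use in kernel mode. Any lazily held VFP state belonging
 * to the non-secure world, a preempted secure context or a user mode
 * context is written out first. Returns the previous exception mask, to
 * be passed to thread_kernel_disable_vfp().
 */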
786 uint32_t thread_kernel_enable_vfp(void)
787 {
788 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
789 	struct thread_ctx *thr = threads + thread_get_id();
790 	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
791 
792 	assert(!vfp_is_enabled());
793 
794 	if (!thr->vfp_state.ns_saved) {
795 		vfp_lazy_save_state_final(&thr->vfp_state.ns,
796 					  true /*force_save*/);
797 		thr->vfp_state.ns_saved = true;
798 	} else if (thr->vfp_state.sec_lazy_saved &&
799 		   !thr->vfp_state.sec_saved) {
800 		/*
801 		 * This happens when we're handling an abort while the
802 		 * thread was using the VFP state.
803 		 */
804 		vfp_lazy_save_state_final(&thr->vfp_state.sec,
805 					  false /*!force_save*/);
806 		thr->vfp_state.sec_saved = true;
807 	} else if (tuv && tuv->lazy_saved && !tuv->saved) {
808 		/*
809 		 * This can happen either during syscall processing or during
810 		 * abort processing while a syscall is being handled.
811 		 */
812 		vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/);
813 		tuv->saved = true;
814 	}
815 
816 	vfp_enable();
817 	return exceptions;
818 }
819 
820 void thread_kernel_disable_vfp(uint32_t state)
821 {
822 	uint32_t exceptions;
823 
824 	assert(vfp_is_enabled());
825 
826 	vfp_disable();
827 	exceptions = thread_get_exceptions();
828 	assert(exceptions & THREAD_EXCP_FOREIGN_INTR);
829 	exceptions &= ~THREAD_EXCP_FOREIGN_INTR;
830 	exceptions |= state & THREAD_EXCP_FOREIGN_INTR;
831 	thread_set_exceptions(exceptions);
832 }
833 
834 void thread_kernel_save_vfp(void)
835 {
836 	struct thread_ctx *thr = threads + thread_get_id();
837 
838 	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
839 	if (vfp_is_enabled()) {
840 		vfp_lazy_save_state_init(&thr->vfp_state.sec);
841 		thr->vfp_state.sec_lazy_saved = true;
842 	}
843 }
844 
845 void thread_kernel_restore_vfp(void)
846 {
847 	struct thread_ctx *thr = threads + thread_get_id();
848 
849 	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
850 	assert(!vfp_is_enabled());
851 	if (thr->vfp_state.sec_lazy_saved) {
852 		vfp_lazy_restore_state(&thr->vfp_state.sec,
853 				       thr->vfp_state.sec_saved);
854 		thr->vfp_state.sec_saved = false;
855 		thr->vfp_state.sec_lazy_saved = false;
856 	}
857 }
858 
859 void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp)
860 {
861 	struct thread_ctx *thr = threads + thread_get_id();
862 	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
863 
864 	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
865 	assert(!vfp_is_enabled());
866 
867 	if (!thr->vfp_state.ns_saved) {
868 		vfp_lazy_save_state_final(&thr->vfp_state.ns,
869 					  true /*force_save*/);
870 		thr->vfp_state.ns_saved = true;
871 	} else if (tuv && uvfp != tuv) {
872 		if (tuv->lazy_saved && !tuv->saved) {
873 			vfp_lazy_save_state_final(&tuv->vfp,
874 						  false /*!force_save*/);
875 			tuv->saved = true;
876 		}
877 	}
878 
879 	if (uvfp->lazy_saved)
880 		vfp_lazy_restore_state(&uvfp->vfp, uvfp->saved);
881 	uvfp->lazy_saved = false;
882 	uvfp->saved = false;
883 
884 	thr->vfp_state.uvfp = uvfp;
885 	vfp_enable();
886 }
887 
888 void thread_user_save_vfp(void)
889 {
890 	struct thread_ctx *thr = threads + thread_get_id();
891 	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
892 
893 	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
894 	if (!vfp_is_enabled())
895 		return;
896 
897 	assert(tuv && !tuv->lazy_saved && !tuv->saved);
898 	vfp_lazy_save_state_init(&tuv->vfp);
899 	tuv->lazy_saved = true;
900 }
901 
902 void thread_user_clear_vfp(struct user_mode_ctx *uctx)
903 {
904 	struct thread_user_vfp_state *uvfp = &uctx->vfp;
905 	struct thread_ctx *thr = threads + thread_get_id();
906 
907 	if (uvfp == thr->vfp_state.uvfp)
908 		thr->vfp_state.uvfp = NULL;
909 	uvfp->lazy_saved = false;
910 	uvfp->saved = false;
911 }
912 #endif /*CFG_WITH_VFP*/
913 
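/*
 * Compute the SPSR used when entering user mode. The ARM32 variant only
 * supports 32-bit user contexts, the ARM64 variant handles both 32-bit
 * and 64-bit (EL0) user contexts.
 */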
914 #ifdef ARM32
915 static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
916 {
917 	uint32_t s;
918 
919 	if (!is_32bit)
920 		return false;
921 
922 	s = read_cpsr();
923 	s &= ~(CPSR_MODE_MASK | CPSR_T | CPSR_IT_MASK1 | CPSR_IT_MASK2);
924 	s |= CPSR_MODE_USR;
925 	if (entry_func & 1)
926 		s |= CPSR_T;
927 	*spsr = s;
928 	return true;
929 }
930 #endif
931 
932 #ifdef ARM64
933 static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
934 {
935 	uint32_t s;
936 
937 	if (is_32bit) {
938 		s = read_daif() & (SPSR_32_AIF_MASK << SPSR_32_AIF_SHIFT);
939 		s |= SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT;
940 		s |= (entry_func & SPSR_32_T_MASK) << SPSR_32_T_SHIFT;
941 	} else {
942 		s = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT);
943 	}
944 
945 	*spsr = s;
946 	return true;
947 }
948 #endif
949 
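/*
 * Initialize the context registers used to enter user mode: clear
 * everything first, then set the arguments, user stack pointer, entry
 * point, SPSR and, with CFG_TA_PAUTH, the PAuth keys.
 */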
950 static void set_ctx_regs(struct thread_ctx_regs *regs, unsigned long a0,
951 			 unsigned long a1, unsigned long a2, unsigned long a3,
952 			 unsigned long user_sp, unsigned long entry_func,
953 			 uint32_t spsr,
954 			 struct thread_pauth_keys *keys __maybe_unused)
955 {
956 	/*
957 	 * First clear all registers to avoid leaking information from
958 	 * other TAs or even the Core itself.
959 	 */
960 	*regs = (struct thread_ctx_regs){ };
961 #ifdef ARM32
962 	regs->r0 = a0;
963 	regs->r1 = a1;
964 	regs->r2 = a2;
965 	regs->r3 = a3;
966 	regs->usr_sp = user_sp;
967 	regs->pc = entry_func;
968 	regs->cpsr = spsr;
969 #endif
970 #ifdef ARM64
971 	regs->x[0] = a0;
972 	regs->x[1] = a1;
973 	regs->x[2] = a2;
974 	regs->x[3] = a3;
975 	regs->sp = user_sp;
976 	regs->pc = entry_func;
977 	regs->cpsr = spsr;
978 	regs->x[13] = user_sp;	/* Used when running TA in AArch32 */
979 	regs->sp = user_sp;	/* Used when running TA in AArch64 */
980 #ifdef CFG_TA_PAUTH
981 	assert(keys);
982 	regs->apiakey_hi = keys->apia_hi;
983 	regs->apiakey_lo = keys->apia_lo;
984 #endif
985 	/* Set frame pointer (user stack can't be unwound past this point) */
986 	regs->x[29] = 0;
987 #endif
988 }
989 
990 static struct thread_pauth_keys *thread_get_pauth_keys(void)
991 {
992 #if defined(CFG_TA_PAUTH)
993 	struct ts_session *s = ts_get_current_session();
994 	/* Only user TAs support the PAuth keys */
995 	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
996 
997 	return &utc->uctx.keys;
998 #else
999 	return NULL;
1000 #endif
1001 }
1002 
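/*
 * Enter user mode with the given arguments, stack pointer and entry
 * point. Returns once the user mode context has exited, with further
 * details in @exit_status0 and @exit_status1.
 */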
1003 uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
1004 		unsigned long a2, unsigned long a3, unsigned long user_sp,
1005 		unsigned long entry_func, bool is_32bit,
1006 		uint32_t *exit_status0, uint32_t *exit_status1)
1007 {
1008 	uint32_t spsr = 0;
1009 	uint32_t exceptions = 0;
1010 	uint32_t rc = 0;
1011 	struct thread_ctx_regs *regs = NULL;
1012 	struct thread_pauth_keys *keys = NULL;
1013 
1014 	tee_ta_update_session_utime_resume();
1015 
1016 	keys = thread_get_pauth_keys();
1017 
1018 	/* Derive SPSR from current CPSR/PSTATE readout. */
1019 	if (!get_spsr(is_32bit, entry_func, &spsr)) {
1020 		*exit_status0 = 1; /* panic */
1021 		*exit_status1 = 0xbadbadba;
1022 		return 0;
1023 	}
1024 
1025 	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
1026 	/*
1027 	 * We're using the per-thread location of saved context registers
1028 	 * for temporary storage. Now that exceptions are masked they will
1029 	 * not be used for anything else until they are eventually
1030 	 * unmasked when user mode has been entered.
1031 	 */
1032 	regs = thread_get_ctx_regs();
1033 	set_ctx_regs(regs, a0, a1, a2, a3, user_sp, entry_func, spsr, keys);
1034 	rc = __thread_enter_user_mode(regs, exit_status0, exit_status1);
1035 	thread_unmask_exceptions(exceptions);
1036 	return rc;
1037 }
1038 
1039 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0
1040 void thread_get_user_kcode(struct mobj **mobj, size_t *offset,
1041 			   vaddr_t *va, size_t *sz)
1042 {
1043 	core_mmu_get_user_va_range(va, NULL);
1044 	*mobj = mobj_tee_ram_rx;
1045 	*sz = thread_user_kcode_size;
1046 	*offset = thread_user_kcode_va - (vaddr_t)mobj_get_va(*mobj, 0, *sz);
1047 }
1048 #endif
1049 
1050 #if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
1051 	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
1052 void thread_get_user_kdata(struct mobj **mobj, size_t *offset,
1053 			   vaddr_t *va, size_t *sz)
1054 {
1055 	vaddr_t v;
1056 
1057 	core_mmu_get_user_va_range(&v, NULL);
1058 	*va = v + thread_user_kcode_size;
1059 	*mobj = mobj_tee_ram_rw;
1060 	*sz = sizeof(thread_user_kdata_page);
1061 	*offset = (vaddr_t)thread_user_kdata_page -
1062 		  (vaddr_t)mobj_get_va(*mobj, 0, *sz);
1063 }
1064 #endif
1065 
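/*
 * Rig the saved exception return state so that the return from the SVC
 * handler lands in thread_unwind_user_mode(), unwinding
 * __thread_enter_user_mode(), instead of going back to user mode.
 */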
1066 static void setup_unwind_user_mode(struct thread_scall_regs *regs)
1067 {
1068 #ifdef ARM32
1069 	regs->lr = (uintptr_t)thread_unwind_user_mode;
1070 	regs->spsr = read_cpsr();
1071 #endif
1072 #ifdef ARM64
1073 	regs->elr = (uintptr_t)thread_unwind_user_mode;
1074 	regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, 0);
1075 	regs->spsr |= read_daif();
1076 	/*
1077 	 * regs is the value of the stack pointer before the SVC handler was
1078 	 * called. Adding sizeof(*regs) matches the space reserved at the
1079 	 * beginning of el0_sync_svc(). This prepares the stack for
1080 	 * returning to thread_unwind_user_mode instead of doing a normal
1081 	 * exception return.
1082 	 */
1083 	regs->sp_el0 = (uint64_t)(regs + 1);
1084 #endif
1085 }
1086 
1087 static void gprof_set_status(struct ts_session *s __maybe_unused,
1088 			     enum ts_gprof_status status __maybe_unused)
1089 {
1090 #ifdef CFG_TA_GPROF_SUPPORT
1091 	if (s->ctx->ops->gprof_set_status)
1092 		s->ctx->ops->gprof_set_status(status);
1093 #endif
1094 }
1095 
1096 /*
1097  * Note: this function is weak just to make it possible to exclude it from
1098  * the unpaged area.
1099  */
1100 void __weak thread_scall_handler(struct thread_scall_regs *regs)
1101 {
1102 	struct ts_session *sess = NULL;
1103 	uint32_t state = 0;
1104 
1105 	/* Enable native interrupts */
1106 	state = thread_get_exceptions();
1107 	thread_unmask_exceptions(state & ~THREAD_EXCP_NATIVE_INTR);
1108 
1109 	thread_user_save_vfp();
1110 
1111 	sess = ts_get_current_session();
1112 	/*
1113 	 * A user mode service has just entered kernel mode, so suspend gprof
1114 	 * collection until we're about to switch back again.
1115 	 */
1116 	gprof_set_status(sess, TS_GPROF_SUSPEND);
1117 
1118 	/* Restore foreign interrupts which are disabled on exception entry */
1119 	thread_restore_foreign_intr();
1120 
1121 	assert(sess && sess->handle_scall);
1122 	if (sess->handle_scall(regs)) {
1123 		/* We're about to switch back to user mode */
1124 		gprof_set_status(sess, TS_GPROF_RESUME);
1125 	} else {
1126 		/* We're returning from __thread_enter_user_mode() */
1127 		setup_unwind_user_mode(regs);
1128 	}
1129 }
1130 
1131 #ifdef CFG_WITH_ARM_TRUSTED_FW
1132 /*
1133  * These five functions are __weak to allow platforms to override them if
1134  * needed.
1135  */
1136 unsigned long __weak thread_cpu_off_handler(unsigned long a0 __unused,
1137 					    unsigned long a1 __unused)
1138 {
1139 	return 0;
1140 }
1141 DECLARE_KEEP_PAGER(thread_cpu_off_handler);
1142 
1143 unsigned long __weak thread_cpu_suspend_handler(unsigned long a0 __unused,
1144 						unsigned long a1 __unused)
1145 {
1146 	return 0;
1147 }
1148 DECLARE_KEEP_PAGER(thread_cpu_suspend_handler);
1149 
1150 unsigned long __weak thread_cpu_resume_handler(unsigned long a0 __unused,
1151 					       unsigned long a1 __unused)
1152 {
1153 	return 0;
1154 }
1155 DECLARE_KEEP_PAGER(thread_cpu_resume_handler);
1156 
1157 unsigned long __weak thread_system_off_handler(unsigned long a0 __unused,
1158 					       unsigned long a1 __unused)
1159 {
1160 	return 0;
1161 }
1162 DECLARE_KEEP_PAGER(thread_system_off_handler);
1163 
1164 unsigned long __weak thread_system_reset_handler(unsigned long a0 __unused,
1165 						 unsigned long a1 __unused)
1166 {
1167 	return 0;
1168 }
1169 DECLARE_KEEP_PAGER(thread_system_reset_handler);
1170 #endif /*CFG_WITH_ARM_TRUSTED_FW*/
1171 
1172 #ifdef CFG_CORE_WORKAROUND_ARM_NMFI
1173 void __noreturn interrupt_main_handler(void)
1174 {
1175 	/*
1176 	 * Note: this overrides the default implementation of this function so
1177 	 * that if another handler were defined there would be a duplicate
1178 	 * symbol error during linking.
1179 	 */
1180 	panic("Secure interrupt received but it is not supported");
1181 }
1182 #endif
1183