xref: /optee_os/core/arch/arm/kernel/thread.c (revision 19a31ec40245ae01a9adcd206eec2a4bb4479fc9)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2022, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  * Copyright (c) 2020-2021, Arm Limited
6  */
7 
8 #include <platform_config.h>
9 
10 #include <arm.h>
11 #include <assert.h>
12 #include <config.h>
13 #include <io.h>
14 #include <keep.h>
15 #include <kernel/asan.h>
16 #include <kernel/boot.h>
17 #include <kernel/interrupt.h>
18 #include <kernel/linker.h>
19 #include <kernel/lockdep.h>
20 #include <kernel/misc.h>
21 #include <kernel/panic.h>
22 #include <kernel/spinlock.h>
23 #include <kernel/spmc_sp_handler.h>
24 #include <kernel/tee_ta_manager.h>
25 #include <kernel/thread.h>
26 #include <kernel/thread_private.h>
27 #include <kernel/user_access.h>
28 #include <kernel/user_mode_ctx_struct.h>
29 #include <kernel/virtualization.h>
30 #include <mm/core_memprot.h>
31 #include <mm/mobj.h>
32 #include <mm/tee_mm.h>
33 #include <mm/tee_pager.h>
34 #include <smccc.h>
35 #include <sm/sm.h>
36 #include <trace.h>
37 #include <util.h>
38 
39 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0
40 static vaddr_t thread_user_kcode_va __nex_bss;
41 long thread_user_kcode_offset __nex_bss;
42 static size_t thread_user_kcode_size __nex_bss;
43 #endif
44 
45 #if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
46 	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
47 long thread_user_kdata_sp_offset __nex_bss;
48 static uint8_t thread_user_kdata_page[
49 	ROUNDUP(sizeof(struct thread_core_local) * CFG_TEE_CORE_NB_CORE,
50 		SMALL_PAGE_SIZE)]
51 	__aligned(SMALL_PAGE_SIZE)
52 #ifndef CFG_NS_VIRTUALIZATION
53 	__section(".nozi.kdata_page");
54 #else
55 	__section(".nex_nozi.kdata_page");
56 #endif
57 #endif
58 
59 #ifdef ARM32
60 uint32_t __nostackcheck thread_get_exceptions(void)
61 {
62 	uint32_t cpsr = read_cpsr();
63 
64 	return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
65 }
66 
67 void __nostackcheck thread_set_exceptions(uint32_t exceptions)
68 {
69 	uint32_t cpsr = read_cpsr();
70 
71 	/* Foreign interrupts must not be unmasked while holding a spinlock */
72 	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
73 		assert_have_no_spinlock();
74 
75 	cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
76 	cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);
77 
78 	barrier();
79 	write_cpsr(cpsr);
80 	barrier();
81 }
82 #endif /*ARM32*/
83 
84 #ifdef ARM64
85 uint32_t __nostackcheck thread_get_exceptions(void)
86 {
87 	uint32_t daif = read_daif();
88 
89 	return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL;
90 }
91 
92 void __nostackcheck thread_set_exceptions(uint32_t exceptions)
93 {
94 	uint32_t daif = read_daif();
95 
96 	/* Foreign interrupts must not be unmasked while holding a spinlock */
97 	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
98 		assert_have_no_spinlock();
99 
100 	daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
101 	daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT);
102 
103 	barrier();
104 	write_daif(daif);
105 	barrier();
106 }
107 #endif /*ARM64*/
108 
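/*
 * thread_mask_exceptions() masks the given exceptions in addition to any
 * already masked and returns the previous mask; thread_unmask_exceptions()
 * restores a mask returned earlier. A minimal usage sketch (illustrative
 * only):
 *
 *	uint32_t excep = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
 *	... critical section with foreign interrupts masked ...
 *	thread_unmask_exceptions(excep);
 */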
109 uint32_t __nostackcheck thread_mask_exceptions(uint32_t exceptions)
110 {
111 	uint32_t state = thread_get_exceptions();
112 
113 	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
114 	return state;
115 }
116 
117 void __nostackcheck thread_unmask_exceptions(uint32_t state)
118 {
119 	thread_set_exceptions(state & THREAD_EXCP_ALL);
120 }
121 
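/*
 * Prepare a lazy save of the non-secure world's VFP state when entering a
 * thread. The VFP register bank is not copied here; it is saved for real
 * by vfp_lazy_save_state_final() only if secure code later needs VFP (see
 * thread_kernel_enable_vfp()). thread_lazy_restore_ns_vfp() restores the
 * non-secure state before returning to the normal world.
 */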
122 static void thread_lazy_save_ns_vfp(void)
123 {
124 #ifdef CFG_WITH_VFP
125 	struct thread_ctx *thr = threads + thread_get_id();
126 
127 	thr->vfp_state.ns_saved = false;
128 	vfp_lazy_save_state_init(&thr->vfp_state.ns);
129 #endif /*CFG_WITH_VFP*/
130 }
131 
132 static void thread_lazy_restore_ns_vfp(void)
133 {
134 #ifdef CFG_WITH_VFP
135 	struct thread_ctx *thr = threads + thread_get_id();
136 	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
137 
138 	assert(!thr->vfp_state.sec_lazy_saved && !thr->vfp_state.sec_saved);
139 
140 	if (tuv && tuv->lazy_saved && !tuv->saved) {
141 		vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/);
142 		tuv->saved = true;
143 	}
144 
145 	vfp_lazy_restore_state(&thr->vfp_state.ns, thr->vfp_state.ns_saved);
146 	thr->vfp_state.ns_saved = false;
147 #endif /*CFG_WITH_VFP*/
148 }
149 
150 #ifdef ARM32
151 static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
152 		      uint32_t a2, uint32_t a3, uint32_t a4, uint32_t a5,
153 		      uint32_t a6, uint32_t a7, void *pc)
154 {
155 	thread->regs.pc = (uint32_t)pc;
156 
157 	/*
158 	 * Stdcalls start in SVC mode with foreign interrupts masked,
159 	 * asynchronous aborts masked and native interrupts unmasked.
160 	 */
161 	thread->regs.cpsr = read_cpsr() & ARM32_CPSR_E;
162 	thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_A |
163 			(THREAD_EXCP_FOREIGN_INTR << ARM32_CPSR_F_SHIFT);
164 	/* Enable thumb mode if it's a thumb instruction */
165 	if (thread->regs.pc & 1)
166 		thread->regs.cpsr |= CPSR_T;
167 	/* Reinitialize stack pointer */
168 	thread->regs.svc_sp = thread->stack_va_end;
169 
170 	/*
171 	 * Copy the arguments into the context. This makes the
172 	 * arguments appear in r0-r7 when the thread is started.
173 	 */
174 	thread->regs.r0 = a0;
175 	thread->regs.r1 = a1;
176 	thread->regs.r2 = a2;
177 	thread->regs.r3 = a3;
178 	thread->regs.r4 = a4;
179 	thread->regs.r5 = a5;
180 	thread->regs.r6 = a6;
181 	thread->regs.r7 = a7;
182 }
183 #endif /*ARM32*/
184 
185 #ifdef ARM64
186 static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
187 		      uint32_t a2, uint32_t a3, uint32_t a4, uint32_t a5,
188 		      uint32_t a6, uint32_t a7, void *pc)
189 {
190 	thread->regs.pc = (uint64_t)pc;
191 
192 	/*
193 	 * Stdcalls start in EL1 (with SP_EL0) with foreign interrupts masked,
194 	 * asynchronous aborts masked and native interrupts unmasked.
195 	 */
196 	thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
197 				THREAD_EXCP_FOREIGN_INTR | DAIFBIT_ABT);
198 	/* Reinitialize stack pointer */
199 	thread->regs.sp = thread->stack_va_end;
200 
201 	/*
202 	 * Copy the arguments into the context. This makes the
203 	 * arguments appear in x0-x7 when the thread is started.
204 	 */
205 	thread->regs.x[0] = a0;
206 	thread->regs.x[1] = a1;
207 	thread->regs.x[2] = a2;
208 	thread->regs.x[3] = a3;
209 	thread->regs.x[4] = a4;
210 	thread->regs.x[5] = a5;
211 	thread->regs.x[6] = a6;
212 	thread->regs.x[7] = a7;
213 
214 	/* Set up frame pointer as per the Aarch64 AAPCS */
215 	thread->regs.x[29] = 0;
216 }
217 #endif /*ARM64*/
218 
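/*
 * Allocate a free thread context, initialize its registers with the
 * supplied arguments and entry point and switch to it. Execution only
 * returns to the caller (still on the temporary stack) if no free thread
 * context was available.
 */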
219 static void __thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2,
220 				   uint32_t a3, uint32_t a4, uint32_t a5,
221 				   uint32_t a6, uint32_t a7,
222 				   void *pc, uint32_t flags)
223 {
224 	struct thread_core_local *l = thread_get_core_local();
225 	bool found_thread = false;
226 	size_t n = 0;
227 
228 	assert(l->curr_thread == THREAD_ID_INVALID);
229 
230 	thread_lock_global();
231 
232 	for (n = 0; n < CFG_NUM_THREADS; n++) {
233 		if (threads[n].state == THREAD_STATE_FREE) {
234 			threads[n].state = THREAD_STATE_ACTIVE;
235 			found_thread = true;
236 			break;
237 		}
238 	}
239 
240 	thread_unlock_global();
241 
242 	if (!found_thread)
243 		return;
244 
245 	l->curr_thread = n;
246 
247 	threads[n].flags = flags;
248 	init_regs(threads + n, a0, a1, a2, a3, a4, a5, a6, a7, pc);
249 #ifdef CFG_CORE_PAUTH
250 	/*
251 	 * Copy the APIA key into the registers to be restored with
252 	 * thread_resume().
253 	 */
254 	threads[n].regs.apiakey_hi = threads[n].keys.apia_hi;
255 	threads[n].regs.apiakey_lo = threads[n].keys.apia_lo;
256 #endif
257 
258 	thread_lazy_save_ns_vfp();
259 
260 	l->flags &= ~THREAD_CLF_TMP;
261 	thread_resume(&threads[n].regs);
262 	/*NOTREACHED*/
263 	panic();
264 }
265 
266 void thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
267 			  uint32_t a4, uint32_t a5)
268 {
269 	__thread_alloc_and_run(a0, a1, a2, a3, a4, a5, 0, 0,
270 			       thread_std_smc_entry, 0);
271 }
272 
273 #ifdef CFG_SECURE_PARTITION
274 void thread_sp_alloc_and_run(struct thread_smc_args *args __maybe_unused)
275 {
276 	__thread_alloc_and_run(args->a0, args->a1, args->a2, args->a3, args->a4,
277 			       args->a5, args->a6, args->a7,
278 			       spmc_sp_thread_entry, THREAD_FLAGS_FFA_ONLY);
279 }
280 #endif
281 
282 #ifdef ARM32
283 static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
284 			  uint32_t a1, uint32_t a2, uint32_t a3)
285 {
286 	/*
287 	 * Update the values returned from RPC; they will appear in
288 	 * r0-r3 when the thread is resumed.
289 	 */
290 	regs->r0 = a0;
291 	regs->r1 = a1;
292 	regs->r2 = a2;
293 	regs->r3 = a3;
294 }
295 #endif /*ARM32*/
296 
297 #ifdef ARM64
298 static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
299 			  uint32_t a1, uint32_t a2, uint32_t a3)
300 {
301 	/*
302 	 * Update the values returned from RPC; they will appear in
303 	 * x0-x3 when the thread is resumed.
304 	 */
305 	regs->x[0] = a0;
306 	regs->x[1] = a1;
307 	regs->x[2] = a2;
308 	regs->x[3] = a3;
309 }
310 #endif /*ARM64*/
311 
312 #ifdef ARM32
313 static bool is_from_user(uint32_t cpsr)
314 {
315 	return (cpsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
316 }
317 #endif
318 
319 #ifdef ARM64
320 static bool is_from_user(uint32_t cpsr)
321 {
322 	if (cpsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
323 		return true;
324 	if (((cpsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
325 	     SPSR_64_MODE_EL0)
326 		return true;
327 	return false;
328 }
329 #endif
330 
331 #ifdef CFG_SYSCALL_FTRACE
332 static void __noprof ftrace_suspend(void)
333 {
334 	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);
335 
336 	if (s && s->fbuf)
337 		s->fbuf->syscall_trace_suspended = true;
338 }
339 
340 static void __noprof ftrace_resume(void)
341 {
342 	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);
343 
344 	if (s && s->fbuf)
345 		s->fbuf->syscall_trace_suspended = false;
346 }
347 #else
348 static void __noprof ftrace_suspend(void)
349 {
350 }
351 
352 static void __noprof ftrace_resume(void)
353 {
354 }
355 #endif
356 
357 static bool is_user_mode(struct thread_ctx_regs *regs)
358 {
359 	return is_from_user((uint32_t)regs->cpsr);
360 }
361 
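/*
 * Resume a thread previously suspended with thread_state_suspend() once
 * the normal world returns from an RPC. a0-a3 are copied into the thread's
 * registers only if THREAD_FLAGS_COPY_ARGS_ON_RETURN is set. Execution
 * only returns to the caller if the thread isn't found in the suspended
 * state.
 */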
362 void thread_resume_from_rpc(uint32_t thread_id, uint32_t a0, uint32_t a1,
363 			    uint32_t a2, uint32_t a3)
364 {
365 	size_t n = thread_id;
366 	struct thread_core_local *l = thread_get_core_local();
367 	bool found_thread = false;
368 
369 	assert(l->curr_thread == THREAD_ID_INVALID);
370 
371 	thread_lock_global();
372 
373 	if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) {
374 		threads[n].state = THREAD_STATE_ACTIVE;
375 		found_thread = true;
376 	}
377 
378 	thread_unlock_global();
379 
380 	if (!found_thread)
381 		return;
382 
383 	l->curr_thread = n;
384 
385 	if (threads[n].have_user_map) {
386 		core_mmu_set_user_map(&threads[n].user_map);
387 		if (threads[n].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
388 			tee_ta_ftrace_update_times_resume();
389 	}
390 
391 	if (is_user_mode(&threads[n].regs))
392 		tee_ta_update_session_utime_resume();
393 
394 	/*
395 	 * A return from an RPC issued to request service of a foreign
396 	 * interrupt must not take parameters from the non-secure world.
397 	 */
398 	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
399 		copy_a0_to_a3(&threads[n].regs, a0, a1, a2, a3);
400 		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
401 	}
402 
403 	thread_lazy_save_ns_vfp();
404 
405 	if (threads[n].have_user_map)
406 		ftrace_resume();
407 
408 	l->flags &= ~THREAD_CLF_TMP;
409 	thread_resume(&threads[n].regs);
410 	/*NOTREACHED*/
411 	panic();
412 }
413 
414 #ifdef ARM64
415 static uint64_t spsr_from_pstate(void)
416 {
417 	uint64_t spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, 0);
418 
419 	spsr |= read_daif();
420 	if (IS_ENABLED(CFG_PAN) && feat_pan_implemented() && read_pan())
421 		spsr |= SPSR_64_PAN;
422 
423 	return spsr;
424 }
425 
426 void __thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS])
427 {
428 	thread_rpc_spsr(rv, spsr_from_pstate());
429 }
430 
431 vaddr_t thread_get_saved_thread_sp(void)
432 {
433 	struct thread_core_local *l = thread_get_core_local();
434 	int ct = l->curr_thread;
435 
436 	assert(ct != THREAD_ID_INVALID);
437 	return threads[ct].kern_sp;
438 }
439 #endif /*ARM64*/
440 
441 #ifdef ARM32
442 bool thread_is_in_normal_mode(void)
443 {
444 	return (read_cpsr() & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_SVC;
445 }
446 #endif
447 
448 void thread_state_free(void)
449 {
450 	struct thread_core_local *l = thread_get_core_local();
451 	int ct = l->curr_thread;
452 
453 	assert(ct != THREAD_ID_INVALID);
454 
455 	thread_lazy_restore_ns_vfp();
456 	tee_pager_release_phys(
457 		(void *)(threads[ct].stack_va_end - STACK_THREAD_SIZE),
458 		STACK_THREAD_SIZE);
459 
460 	thread_lock_global();
461 
462 	assert(threads[ct].state == THREAD_STATE_ACTIVE);
463 	threads[ct].state = THREAD_STATE_FREE;
464 	threads[ct].flags = 0;
465 	l->curr_thread = THREAD_ID_INVALID;
466 
467 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
468 		virt_unset_guest();
469 	thread_unlock_global();
470 }
471 
472 #ifdef CFG_WITH_PAGER
473 static void release_unused_kernel_stack(struct thread_ctx *thr,
474 					uint32_t cpsr __maybe_unused)
475 {
476 #ifdef ARM64
477 	/*
478 	 * If we're from user mode then thr->regs.sp is the saved user
479 	 * stack pointer and thr->kern_sp holds the last kernel stack
480 	 * pointer. But if we're from kernel mode then thr->kern_sp isn't
481 	 * up to date so we need to read from thr->regs.sp instead.
482 	 */
483 	vaddr_t sp = is_from_user(cpsr) ?  thr->kern_sp : thr->regs.sp;
484 #else
485 	vaddr_t sp = thr->regs.svc_sp;
486 #endif
487 	vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE;
488 	size_t len = sp - base;
489 
490 	tee_pager_release_phys((void *)base, len);
491 }
492 #else
493 static void release_unused_kernel_stack(struct thread_ctx *thr __unused,
494 					uint32_t cpsr __unused)
495 {
496 }
497 #endif
498 
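/*
 * Save the state of the current thread (registers, user mapping, VFP),
 * mark it suspended and release this core. The returned thread id is used
 * later by thread_resume_from_rpc() to resume the thread.
 */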
499 int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
500 {
501 	struct thread_core_local *l = thread_get_core_local();
502 	int ct = l->curr_thread;
503 
504 	assert(ct != THREAD_ID_INVALID);
505 
506 	if (core_mmu_user_mapping_is_active())
507 		ftrace_suspend();
508 
509 	thread_check_canaries();
510 
511 	release_unused_kernel_stack(threads + ct, cpsr);
512 
513 	if (is_from_user(cpsr)) {
514 		thread_user_save_vfp();
515 		tee_ta_update_session_utime_suspend();
516 		tee_ta_gprof_sample_pc(pc);
517 	}
518 	thread_lazy_restore_ns_vfp();
519 
520 	thread_lock_global();
521 
522 	assert(threads[ct].state == THREAD_STATE_ACTIVE);
523 	threads[ct].flags |= flags;
524 	threads[ct].regs.cpsr = cpsr;
525 	threads[ct].regs.pc = pc;
526 	threads[ct].state = THREAD_STATE_SUSPENDED;
527 
528 	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
529 	if (threads[ct].have_user_map) {
530 		if (threads[ct].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
531 			tee_ta_ftrace_update_times_suspend();
532 		core_mmu_get_user_map(&threads[ct].user_map);
533 		core_mmu_set_user_map(NULL);
534 	}
535 
536 	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
537 		struct ts_session *ts_sess =
538 			TAILQ_FIRST(&threads[ct].tsd.sess_stack);
539 
540 		spmc_sp_set_to_preempted(ts_sess);
541 	}
542 
543 	l->curr_thread = THREAD_ID_INVALID;
544 
545 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
546 		virt_unset_guest();
547 
548 	thread_unlock_global();
549 
550 	return ct;
551 }
552 
553 bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
554 {
555 	if (thread_id >= CFG_NUM_THREADS)
556 		return false;
557 	threads[thread_id].stack_va_end = sp;
558 	return true;
559 }
560 
561 static void __maybe_unused
562 set_core_local_kcode_offset(struct thread_core_local *cls, long offset)
563 {
564 	size_t n = 0;
565 
566 	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
567 		cls[n].kcode_offset = offset;
568 }
569 
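/*
 * With CFG_CORE_UNMAP_CORE_AT_EL0, compute the virtual address and size of
 * the small piece of kernel code (the exception vector) that stays mapped
 * while user mode runs, and its offset within the user VA range. With the
 * Spectre workaround on ARM64 also record the SP offset used to reach the
 * special kdata page from EL0.
 */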
570 static void init_user_kcode(void)
571 {
572 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0
573 	vaddr_t v = (vaddr_t)thread_excp_vect;
574 	vaddr_t ve = (vaddr_t)thread_excp_vect_end;
575 
576 	thread_user_kcode_va = ROUNDDOWN(v, CORE_MMU_USER_CODE_SIZE);
577 	ve = ROUNDUP(ve, CORE_MMU_USER_CODE_SIZE);
578 	thread_user_kcode_size = ve - thread_user_kcode_va;
579 
580 	core_mmu_get_user_va_range(&v, NULL);
581 	thread_user_kcode_offset = thread_user_kcode_va - v;
582 
583 	set_core_local_kcode_offset(thread_core_local,
584 				    thread_user_kcode_offset);
585 #if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
586 	set_core_local_kcode_offset((void *)thread_user_kdata_page,
587 				    thread_user_kcode_offset);
588 	/*
589 	 * When transitioning to EL0, subtract this much from SP so that it
590 	 * points into this special kdata page instead. SP is restored by
591 	 * adding this much back while transitioning to EL1.
592 	 */
593 	v += thread_user_kcode_size;
594 	thread_user_kdata_sp_offset = (vaddr_t)thread_core_local - v;
595 #endif
596 #endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
597 }
598 
599 void thread_init_primary(void)
600 {
601 	/* Initialize canaries around the stacks */
602 	thread_init_canaries();
603 
604 	init_user_kcode();
605 }
606 
607 static uint32_t __maybe_unused get_midr_implementer(uint32_t midr)
608 {
609 	return (midr >> MIDR_IMPLEMENTER_SHIFT) & MIDR_IMPLEMENTER_MASK;
610 }
611 
612 static uint32_t __maybe_unused get_midr_primary_part(uint32_t midr)
613 {
614 	return (midr >> MIDR_PRIMARY_PART_NUM_SHIFT) &
615 	       MIDR_PRIMARY_PART_NUM_MASK;
616 }
617 
618 static uint32_t __maybe_unused get_midr_variant(uint32_t midr)
619 {
620 	return (midr >> MIDR_VARIANT_SHIFT) & MIDR_VARIANT_MASK;
621 }
622 
623 static uint32_t __maybe_unused get_midr_revision(uint32_t midr)
624 {
625 	return (midr >> MIDR_REVISION_SHIFT) & MIDR_REVISION_MASK;
626 }
627 
628 #ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
629 #ifdef ARM64
630 static bool probe_workaround_available(uint32_t wa_id)
631 {
632 	int32_t r;
633 
634 	r = thread_smc(SMCCC_VERSION, 0, 0, 0);
635 	if (r < 0)
636 		return false;
637 	if (r < 0x10001)	/* compare with version 1.1 */
638 		return false;
639 
640 	/* Version >= 1.1, so SMCCC_ARCH_FEATURES is available */
641 	r = thread_smc(SMCCC_ARCH_FEATURES, wa_id, 0, 0);
642 	return r >= 0;
643 }
644 
645 static vaddr_t __maybe_unused select_vector_wa_spectre_v2(void)
646 {
647 	if (probe_workaround_available(SMCCC_ARCH_WORKAROUND_1)) {
648 		DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") available",
649 		     SMCCC_ARCH_WORKAROUND_1);
650 		DMSG("SMC Workaround for CVE-2017-5715 used");
651 		return (vaddr_t)thread_excp_vect_wa_spectre_v2;
652 	}
653 
654 	DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") unavailable",
655 	     SMCCC_ARCH_WORKAROUND_1);
656 	DMSG("SMC Workaround for CVE-2017-5715 not needed (if ARM-TF is up to date)");
657 	return (vaddr_t)thread_excp_vect;
658 }
659 #else
660 static vaddr_t __maybe_unused select_vector_wa_spectre_v2(void)
661 {
662 	return (vaddr_t)thread_excp_vect_wa_spectre_v2;
663 }
664 #endif
665 #endif
666 
667 #ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
668 static vaddr_t select_vector_wa_spectre_bhb(uint8_t loop_count __maybe_unused)
669 {
670 	/*
671 	 * Spectre-BHB has only been analyzed for AArch64 so far. For
672 	 * AArch32 fall back to the Spectre-V2 workaround which is likely
673 	 * to work even if perhaps a bit more expensive than a more
674 	 * optimized workaround.
675 	 */
676 #ifdef ARM64
677 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0
678 	struct thread_core_local *cl = (void *)thread_user_kdata_page;
679 
680 	cl[get_core_pos()].bhb_loop_count = loop_count;
681 #endif
682 	thread_get_core_local()->bhb_loop_count = loop_count;
683 
684 	DMSG("Spectre-BHB CVE-2022-23960 workaround enabled with \"K\" = %u",
685 	     loop_count);
686 
687 	return (vaddr_t)thread_excp_vect_wa_spectre_bhb;
688 #else
689 	return select_vector_wa_spectre_v2();
690 #endif
691 }
692 #endif
693 
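/*
 * Select the exception vector to install on this core. With
 * CFG_CORE_WORKAROUND_SPECTRE_BP_SEC the choice is based on MIDR: a vector
 * with a Spectre-V2 or Spectre-BHB mitigation sequence for affected Arm
 * cores, otherwise the plain vector.
 */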
694 static vaddr_t get_excp_vect(void)
695 {
696 #ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
697 	uint32_t midr = read_midr();
698 	uint8_t vers = 0;
699 
700 	if (get_midr_implementer(midr) != MIDR_IMPLEMENTER_ARM)
701 		return (vaddr_t)thread_excp_vect;
702 	/*
703 	 * Variant x and Revision y encode as rxpy, for instance
704 	 * Variant 2 Revision 0 = r2p0 = 0x20
705 	 */
706 	vers = (get_midr_variant(midr) << 4) | get_midr_revision(midr);
707 
708 	/*
709 	 * The Spectre-V2 (CVE-2017-5715) software workaround covers what's
710 	 * needed for Spectre-BHB (CVE-2022-23960) too. The workaround for
711 	 * Spectre-V2 is more expensive than the one for Spectre-BHB, so
712 	 * select the Spectre-BHB workaround where possible.
713 	 */
714 	switch (get_midr_primary_part(midr)) {
715 #ifdef ARM32
716 	/* Spectre-V2 */
717 	case CORTEX_A8_PART_NUM:
718 	case CORTEX_A9_PART_NUM:
719 	case CORTEX_A17_PART_NUM:
720 #endif
721 	/* Spectre-V2 */
722 	case CORTEX_A57_PART_NUM:
723 	case CORTEX_A73_PART_NUM:
724 	case CORTEX_A75_PART_NUM:
725 		return select_vector_wa_spectre_v2();
726 #ifdef ARM32
727 	/* Spectre-V2 */
728 	case CORTEX_A15_PART_NUM:
729 		return (vaddr_t)thread_excp_vect_wa_a15_spectre_v2;
730 #endif
731 	/*
732 	 * Spectre-V2 for vers < r1p0
733 	 * Spectre-BHB for vers >= r1p0
734 	 */
735 	case CORTEX_A72_PART_NUM:
736 		if (vers < 0x10)
737 			return select_vector_wa_spectre_v2();
738 		return select_vector_wa_spectre_bhb(8);
739 
740 	/*
741 	 * Use the safer but more expensive Spectre-V2 workaround for CPUs
742 	 * where the best mitigation sequence is still being researched.
743 	 */
744 	case CORTEX_A65_PART_NUM:
745 	case CORTEX_A65AE_PART_NUM:
746 	case NEOVERSE_E1_PART_NUM:
747 		return select_vector_wa_spectre_v2();
748 
749 	/* Spectre-BHB */
750 	case CORTEX_A76_PART_NUM:
751 	case CORTEX_A76AE_PART_NUM:
752 	case CORTEX_A77_PART_NUM:
753 		return select_vector_wa_spectre_bhb(24);
754 	case CORTEX_A78_PART_NUM:
755 	case CORTEX_A78AE_PART_NUM:
756 	case CORTEX_A78C_PART_NUM:
757 	case CORTEX_A710_PART_NUM:
758 	case CORTEX_X1_PART_NUM:
759 	case CORTEX_X2_PART_NUM:
760 		return select_vector_wa_spectre_bhb(32);
761 	case NEOVERSE_N1_PART_NUM:
762 		return select_vector_wa_spectre_bhb(24);
763 	case NEOVERSE_N2_PART_NUM:
764 	case NEOVERSE_V1_PART_NUM:
765 		return select_vector_wa_spectre_bhb(32);
766 
767 	default:
768 		return (vaddr_t)thread_excp_vect;
769 	}
770 #endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/
771 
772 	return (vaddr_t)thread_excp_vect;
773 }
774 
775 void thread_init_per_cpu(void)
776 {
777 #ifdef ARM32
778 	struct thread_core_local *l = thread_get_core_local();
779 
780 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
781 	/* Initialize secure monitor */
782 	sm_init(l->tmp_stack_va_end + STACK_TMP_OFFS);
783 #endif
784 	thread_set_irq_sp(l->tmp_stack_va_end);
785 	thread_set_fiq_sp(l->tmp_stack_va_end);
786 	thread_set_abt_sp((vaddr_t)l);
787 	thread_set_und_sp((vaddr_t)l);
788 #endif
789 
790 	thread_init_vbar(get_excp_vect());
791 
792 #ifdef CFG_FTRACE_SUPPORT
793 	/*
794 	 * Enable accesses to frequency register and physical counter
795 	 * register in EL0/PL0 required for timestamping during
796 	 * function tracing.
797 	 */
798 	write_cntkctl(read_cntkctl() | CNTKCTL_PL0PCTEN);
799 #endif
800 }
801 
802 #ifdef CFG_WITH_VFP
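/*
 * thread_kernel_enable_vfp() and thread_kernel_disable_vfp() bracket use
 * of VFP in kernel mode. Enabling also masks foreign interrupts; the
 * returned state must be passed back when disabling. A minimal sketch
 * (illustrative only):
 *
 *	uint32_t vfp_excep = thread_kernel_enable_vfp();
 *	... use VFP/SIMD registers ...
 *	thread_kernel_disable_vfp(vfp_excep);
 */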
803 uint32_t thread_kernel_enable_vfp(void)
804 {
805 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
806 	struct thread_ctx *thr = threads + thread_get_id();
807 	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
808 
809 	assert(!vfp_is_enabled());
810 
811 	if (!thr->vfp_state.ns_saved) {
812 		vfp_lazy_save_state_final(&thr->vfp_state.ns,
813 					  true /*force_save*/);
814 		thr->vfp_state.ns_saved = true;
815 	} else if (thr->vfp_state.sec_lazy_saved &&
816 		   !thr->vfp_state.sec_saved) {
817 		/*
818 		 * This happens when we're handling an abort while the
819 		 * thread was using the VFP state.
820 		 */
821 		vfp_lazy_save_state_final(&thr->vfp_state.sec,
822 					  false /*!force_save*/);
823 		thr->vfp_state.sec_saved = true;
824 	} else if (tuv && tuv->lazy_saved && !tuv->saved) {
825 		/*
826 		 * This can happen either during syscall or abort
827 		 * processing (while processing a syscall).
828 		 */
829 		vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/);
830 		tuv->saved = true;
831 	}
832 
833 	vfp_enable();
834 	return exceptions;
835 }
836 
837 void thread_kernel_disable_vfp(uint32_t state)
838 {
839 	uint32_t exceptions;
840 
841 	assert(vfp_is_enabled());
842 
843 	vfp_disable();
844 	exceptions = thread_get_exceptions();
845 	assert(exceptions & THREAD_EXCP_FOREIGN_INTR);
846 	exceptions &= ~THREAD_EXCP_FOREIGN_INTR;
847 	exceptions |= state & THREAD_EXCP_FOREIGN_INTR;
848 	thread_set_exceptions(exceptions);
849 }
850 
851 void thread_kernel_save_vfp(void)
852 {
853 	struct thread_ctx *thr = threads + thread_get_id();
854 
855 	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
856 	if (vfp_is_enabled()) {
857 		vfp_lazy_save_state_init(&thr->vfp_state.sec);
858 		thr->vfp_state.sec_lazy_saved = true;
859 	}
860 }
861 
862 void thread_kernel_restore_vfp(void)
863 {
864 	struct thread_ctx *thr = threads + thread_get_id();
865 
866 	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
867 	assert(!vfp_is_enabled());
868 	if (thr->vfp_state.sec_lazy_saved) {
869 		vfp_lazy_restore_state(&thr->vfp_state.sec,
870 				       thr->vfp_state.sec_saved);
871 		thr->vfp_state.sec_saved = false;
872 		thr->vfp_state.sec_lazy_saved = false;
873 	}
874 }
875 
876 void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp)
877 {
878 	struct thread_ctx *thr = threads + thread_get_id();
879 	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
880 
881 	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
882 	assert(!vfp_is_enabled());
883 
884 	if (!thr->vfp_state.ns_saved) {
885 		vfp_lazy_save_state_final(&thr->vfp_state.ns,
886 					  true /*force_save*/);
887 		thr->vfp_state.ns_saved = true;
888 	} else if (tuv && uvfp != tuv) {
889 		if (tuv->lazy_saved && !tuv->saved) {
890 			vfp_lazy_save_state_final(&tuv->vfp,
891 						  false /*!force_save*/);
892 			tuv->saved = true;
893 		}
894 	}
895 
896 	if (uvfp->lazy_saved)
897 		vfp_lazy_restore_state(&uvfp->vfp, uvfp->saved);
898 	uvfp->lazy_saved = false;
899 	uvfp->saved = false;
900 
901 	thr->vfp_state.uvfp = uvfp;
902 	vfp_enable();
903 }
904 
905 void thread_user_save_vfp(void)
906 {
907 	struct thread_ctx *thr = threads + thread_get_id();
908 	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
909 
910 	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
911 	if (!vfp_is_enabled())
912 		return;
913 
914 	assert(tuv && !tuv->lazy_saved && !tuv->saved);
915 	vfp_lazy_save_state_init(&tuv->vfp);
916 	tuv->lazy_saved = true;
917 }
918 
919 void thread_user_clear_vfp(struct user_mode_ctx *uctx)
920 {
921 	struct thread_user_vfp_state *uvfp = &uctx->vfp;
922 	struct thread_ctx *thr = threads + thread_get_id();
923 
924 	if (uvfp == thr->vfp_state.uvfp)
925 		thr->vfp_state.uvfp = NULL;
926 	uvfp->lazy_saved = false;
927 	uvfp->saved = false;
928 }
929 #endif /*CFG_WITH_VFP*/
930 
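/*
 * Build the SPSR used when entering user mode at entry_func: user mode
 * (USR/EL0) with the current interrupt masks preserved and, for 32-bit
 * entry, the Thumb state taken from bit 0 of the entry address. The ARM32
 * variant rejects 64-bit entries.
 */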
931 #ifdef ARM32
932 static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
933 {
934 	uint32_t s;
935 
936 	if (!is_32bit)
937 		return false;
938 
939 	s = read_cpsr();
940 	s &= ~(CPSR_MODE_MASK | CPSR_T | CPSR_IT_MASK1 | CPSR_IT_MASK2);
941 	s |= CPSR_MODE_USR;
942 	if (entry_func & 1)
943 		s |= CPSR_T;
944 	*spsr = s;
945 	return true;
946 }
947 #endif
948 
949 #ifdef ARM64
950 static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
951 {
952 	uint32_t s;
953 
954 	if (is_32bit) {
955 		s = read_daif() & (SPSR_32_AIF_MASK << SPSR_32_AIF_SHIFT);
956 		s |= SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT;
957 		s |= (entry_func & SPSR_32_T_MASK) << SPSR_32_T_SHIFT;
958 	} else {
959 		s = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT);
960 	}
961 
962 	*spsr = s;
963 	return true;
964 }
965 #endif
966 
967 static void set_ctx_regs(struct thread_ctx_regs *regs, unsigned long a0,
968 			 unsigned long a1, unsigned long a2, unsigned long a3,
969 			 unsigned long user_sp, unsigned long entry_func,
970 			 uint32_t spsr,
971 			 struct thread_pauth_keys *keys __maybe_unused)
972 {
973 	/*
974 	 * First clear all registers to avoid leaking information from
975 	 * other TAs or even the Core itself.
976 	 */
977 	*regs = (struct thread_ctx_regs){ };
978 #ifdef ARM32
979 	regs->r0 = a0;
980 	regs->r1 = a1;
981 	regs->r2 = a2;
982 	regs->r3 = a3;
983 	regs->usr_sp = user_sp;
984 	regs->pc = entry_func;
985 	regs->cpsr = spsr;
986 #endif
987 #ifdef ARM64
988 	regs->x[0] = a0;
989 	regs->x[1] = a1;
990 	regs->x[2] = a2;
991 	regs->x[3] = a3;
992 	regs->sp = user_sp;
993 	regs->pc = entry_func;
994 	regs->cpsr = spsr;
995 	regs->x[13] = user_sp;	/* Used when running TA in Aarch32 */
996 	regs->sp = user_sp;	/* Used when running TA in Aarch64 */
997 #ifdef CFG_TA_PAUTH
998 	assert(keys);
999 	regs->apiakey_hi = keys->apia_hi;
1000 	regs->apiakey_lo = keys->apia_lo;
1001 #endif
1002 	/* Set frame pointer (user stack can't be unwound past this point) */
1003 	regs->x[29] = 0;
1004 #endif
1005 }
1006 
1007 static struct thread_pauth_keys *thread_get_pauth_keys(void)
1008 {
1009 #if defined(CFG_TA_PAUTH)
1010 	struct ts_session *s = ts_get_current_session();
1011 	/* Only user TAs support the PAUTH keys */
1012 	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);
1013 
1014 	return &utc->uctx.keys;
1015 #else
1016 	return NULL;
1017 #endif
1018 }
1019 
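/*
 * Enter user mode at entry_func with arguments a0-a3 and user_sp as stack
 * pointer. Returns when user mode exits again (via
 * thread_unwind_user_mode()), with exit_status0/exit_status1 describing
 * how it exited.
 */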
1020 uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
1021 		unsigned long a2, unsigned long a3, unsigned long user_sp,
1022 		unsigned long entry_func, bool is_32bit,
1023 		uint32_t *exit_status0, uint32_t *exit_status1)
1024 {
1025 	uint32_t spsr = 0;
1026 	uint32_t exceptions = 0;
1027 	uint32_t rc = 0;
1028 	struct thread_ctx_regs *regs = NULL;
1029 	struct thread_pauth_keys *keys = NULL;
1030 
1031 	tee_ta_update_session_utime_resume();
1032 
1033 	keys = thread_get_pauth_keys();
1034 
1035 	/* Derive SPSR from current CPSR/PSTATE readout. */
1036 	if (!get_spsr(is_32bit, entry_func, &spsr)) {
1037 		*exit_status0 = 1; /* panic */
1038 		*exit_status1 = 0xbadbadba;
1039 		return 0;
1040 	}
1041 
1042 	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
1043 	/*
1044 	 * We're using the per-thread location of saved context registers
1045 	 * for temporary storage. Now that exceptions are masked they will
1046 	 * not be used for anything else until they are eventually
1047 	 * unmasked when user mode has been entered.
1048 	 */
1049 	regs = thread_get_ctx_regs();
1050 	set_ctx_regs(regs, a0, a1, a2, a3, user_sp, entry_func, spsr, keys);
1051 	rc = __thread_enter_user_mode(regs, exit_status0, exit_status1);
1052 	thread_unmask_exceptions(exceptions);
1053 	return rc;
1054 }
1055 
1056 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0
1057 void thread_get_user_kcode(struct mobj **mobj, size_t *offset,
1058 			   vaddr_t *va, size_t *sz)
1059 {
1060 	core_mmu_get_user_va_range(va, NULL);
1061 	*mobj = mobj_tee_ram_rx;
1062 	*sz = thread_user_kcode_size;
1063 	*offset = thread_user_kcode_va - (vaddr_t)mobj_get_va(*mobj, 0, *sz);
1064 }
1065 #endif
1066 
1067 #if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
1068 	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
1069 void thread_get_user_kdata(struct mobj **mobj, size_t *offset,
1070 			   vaddr_t *va, size_t *sz)
1071 {
1072 	vaddr_t v;
1073 
1074 	core_mmu_get_user_va_range(&v, NULL);
1075 	*va = v + thread_user_kcode_size;
1076 	*mobj = mobj_tee_ram_rw;
1077 	*sz = sizeof(thread_user_kdata_page);
1078 	*offset = (vaddr_t)thread_user_kdata_page -
1079 		  (vaddr_t)mobj_get_va(*mobj, 0, *sz);
1080 }
1081 #endif
1082 
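/*
 * Patch the saved return state in the scall register frame so that the
 * exception return resumes in thread_unwind_user_mode(), i.e. back in the
 * caller of __thread_enter_user_mode(), instead of returning to user mode.
 */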
1083 static void setup_unwind_user_mode(struct thread_scall_regs *regs)
1084 {
1085 #ifdef ARM32
1086 	regs->lr = (uintptr_t)thread_unwind_user_mode;
1087 	regs->spsr = read_cpsr();
1088 #endif
1089 #ifdef ARM64
1090 	regs->elr = (uintptr_t)thread_unwind_user_mode;
1091 	regs->spsr = spsr_from_pstate();
1092 	/*
1093 	 * Regs is the value of the stack pointer before calling the SVC
1094 	 * handler. Adding sizeof(*regs) (regs + 1) matches the space
1095 	 * reserved at the beginning of el0_sync_svc(). This prepares the
1096 	 * stack for returning to thread_unwind_user_mode instead of a normal
1097 	 * exception return.
1098 	 */
1099 	regs->sp_el0 = (uint64_t)(regs + 1);
1100 #endif
1101 }
1102 
1103 static void gprof_set_status(struct ts_session *s __maybe_unused,
1104 			     enum ts_gprof_status status __maybe_unused)
1105 {
1106 #ifdef CFG_TA_GPROF_SUPPORT
1107 	if (s->ctx->ops->gprof_set_status)
1108 		s->ctx->ops->gprof_set_status(status);
1109 #endif
1110 }
1111 
1112 /*
1113  * Note: this function is weak just to make it possible to exclude it from
1114  * the unpaged area.
1115  */
1116 void __weak thread_scall_handler(struct thread_scall_regs *regs)
1117 {
1118 	struct ts_session *sess = NULL;
1119 	uint32_t state = 0;
1120 
1121 	/* Enable native interrupts */
1122 	state = thread_get_exceptions();
1123 	thread_unmask_exceptions(state & ~THREAD_EXCP_NATIVE_INTR);
1124 
1125 	thread_user_save_vfp();
1126 
1127 	sess = ts_get_current_session();
1128 	/*
1129 	 * The user mode service has just entered kernel mode; suspend gprof
1130 	 * collection until we're about to switch back again.
1131 	 */
1132 	gprof_set_status(sess, TS_GPROF_SUSPEND);
1133 
1134 	/* Restore foreign interrupts which are disabled on exception entry */
1135 	thread_restore_foreign_intr();
1136 
1137 	assert(sess && sess->handle_scall);
1138 	if (sess->handle_scall(regs)) {
1139 		/* We're about to switch back to user mode */
1140 		gprof_set_status(sess, TS_GPROF_RESUME);
1141 	} else {
1142 		/* We're returning from __thread_enter_user_mode() */
1143 		setup_unwind_user_mode(regs);
1144 	}
1145 }
1146 
1147 #ifdef CFG_WITH_ARM_TRUSTED_FW
1148 /*
1149  * These five functions are __weak to allow platforms to override them if
1150  * needed.
1151  */
1152 unsigned long __weak thread_cpu_off_handler(unsigned long a0 __unused,
1153 					    unsigned long a1 __unused)
1154 {
1155 	return 0;
1156 }
1157 DECLARE_KEEP_PAGER(thread_cpu_off_handler);
1158 
1159 unsigned long __weak thread_cpu_suspend_handler(unsigned long a0 __unused,
1160 						unsigned long a1 __unused)
1161 {
1162 	return 0;
1163 }
1164 DECLARE_KEEP_PAGER(thread_cpu_suspend_handler);
1165 
1166 unsigned long __weak thread_cpu_resume_handler(unsigned long a0 __unused,
1167 					       unsigned long a1 __unused)
1168 {
1169 	return 0;
1170 }
1171 DECLARE_KEEP_PAGER(thread_cpu_resume_handler);
1172 
1173 unsigned long __weak thread_system_off_handler(unsigned long a0 __unused,
1174 					       unsigned long a1 __unused)
1175 {
1176 	return 0;
1177 }
1178 DECLARE_KEEP_PAGER(thread_system_off_handler);
1179 
1180 unsigned long __weak thread_system_reset_handler(unsigned long a0 __unused,
1181 						 unsigned long a1 __unused)
1182 {
1183 	return 0;
1184 }
1185 DECLARE_KEEP_PAGER(thread_system_reset_handler);
1186 #endif /*CFG_WITH_ARM_TRUSTED_FW*/
1187 
1188 #ifdef CFG_CORE_WORKAROUND_ARM_NMFI
1189 void __noreturn interrupt_main_handler(void)
1190 {
1191 	/*
1192 	 * Note: this overrides the default implementation of this function,
1193 	 * so if another handler were defined it would cause a duplicate
1194 	 * symbol error during linking.
1195 	 */
1196 	panic("Secure interrupt received but it is not supported");
1197 }
1198 #endif
1199