/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/thread_defs.h>
#include <kernel/unwind.h>
#include <mm/core_mmu.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

#include "thread_private.h"

	.arch_extension sec

	.macro cmp_spsr_user_mode reg:req
		/*
		 * We're only testing the lower 4 bits as bit 4 (0x10)
		 * is always set.
		 */
		tst	\reg, #0x0f
	.endm
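
/*
 * For reference, the AArch32 mode encodings in M[4:0] are:
 * usr 0x10, fiq 0x11, irq 0x12, svc 0x13, mon 0x16, abt 0x17,
 * hyp 0x1a, und 0x1b and sys 0x1f. User mode is the only mode with
 * the low four bits all zero, so the "tst" in cmp_spsr_user_mode
 * above sets the Z flag exactly when the saved mode is user mode.
 */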
31
32LOCAL_FUNC vector_std_smc_entry , :
33UNWIND(	.fnstart)
34UNWIND(	.cantunwind)
35	push	{r0-r7}
36	mov	r0, sp
37	bl	thread_handle_std_smc
38	/*
39	 * Normally thread_handle_std_smc() should return via
40	 * thread_exit(), thread_rpc(), but if thread_handle_std_smc()
41	 * hasn't switched stack (error detected) it will do a normal "C"
42	 * return.
43	 */
44	pop	{r1-r8}
45	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
46	smc	#0
47	b	.	/* SMC should not return */
48UNWIND(	.fnend)
49END_FUNC vector_std_smc_entry
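
/*
 * The {r0-r7} pushed in vector_std_smc_entry above (and in
 * vector_fast_smc_entry below) are handed to the handler as a pointer
 * to eight consecutive 32-bit words. A rough sketch of the structure
 * this corresponds to (field names are assumptions, see struct
 * thread_smc_args in the thread headers for the authoritative
 * definition):
 *
 *	struct thread_smc_args {
 *		uint32_t a0;	// SMC function ID on entry, result on return
 *		uint32_t a1;
 *		uint32_t a2;
 *		uint32_t a3;
 *		uint32_t a4;
 *		uint32_t a5;
 *		uint32_t a6;
 *		uint32_t a7;
 *	};
 */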

LOCAL_FUNC vector_fast_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r0-r7}
	mov	r0, sp
	bl	thread_handle_fast_smc
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	ldr	lr, =thread_nintr_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_on_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_suspend_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_resume_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_system_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_system_reset_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.  Also used when compiled with the internal monitor, but
 * the cpu_*_entry and system_*_entry are not used then.
 *
 * Note that ARM-TF depends on the layout of this vector table; any change
 * in layout has to be synced with ARM-TF.
 */
FUNC thread_vector_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
UNWIND(	.fnend)
END_FUNC thread_vector_table
KEEP_PAGER thread_vector_table
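
/*
 * Each entry above is a single 4-byte branch instruction, so entry
 * number n lives at offset 4 * n from thread_vector_table:
 * 0 std_smc, 1 fast_smc, 2 cpu_on, 3 cpu_off, 4 cpu_resume,
 * 5 cpu_suspend, 6 fiq, 7 system_off, 8 system_reset. ARM-TF (the
 * opteed dispatcher) enters OP-TEE by branching to the entry matching
 * the event it is forwarding, which is why the order must never change
 * without a matching update on the ARM-TF side.
 */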

FUNC thread_set_abt_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_abt_sp

FUNC thread_set_und_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_UND
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_und_sp

FUNC thread_set_irq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_fiq_sp

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldm	r12!, {sp, lr}

	cps	#CPSR_MODE_SVC
	ldm	r12!, {r1, sp, lr}
	msr	spsr_fsxc, r1

	ldm	r12, {r1, r2}

	/*
	 * Switch to a mode other than SVC since we need to set spsr in
	 * order to return to the old state properly, and the mode we're
	 * returning to may itself be SVC.
	 */
	cps	#CPSR_MODE_ABT
	cmp_spsr_user_mode r2
	mov	lr, r1
	msr	spsr_fsxc, r2
	ldm	r0, {r0-r12}
	movnes	pc, lr
	b	eret_to_user_mode
UNWIND(	.fnend)
END_FUNC thread_resume
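
/*
 * A sketch of the register layout implied by the ldm sequence in
 * thread_resume() above and filled in (up to svc_lr) by
 * thread_save_state() below. The field names are assumptions; see
 * struct thread_ctx_regs in the thread headers for the authoritative
 * definition:
 *
 *	struct thread_ctx_regs {
 *		uint32_t r0_r12[13];	// words 0-12
 *		uint32_t usr_sp;	// word 13
 *		uint32_t usr_lr;	// word 14
 *		uint32_t svc_spsr;	// word 15
 *		uint32_t svc_sp;	// word 16
 *		uint32_t svc_lr;	// word 17
 *		uint32_t pc;		// word 18
 *		uint32_t cpsr;		// word 19
 *	};
 */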

/*
 * Disables IRQ and FIQ and saves the state of the thread when called
 * from FIQ mode, where r8-r12 are banked; returns the original CPSR.
 */
LOCAL_FUNC thread_save_state_fiq , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mov	r9, lr

	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r8, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}

	cps	#CPSR_MODE_SYS
	stm	r0!, {r8-r12}
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	stm	r0!, {r1, sp, lr}

	/* Back to FIQ mode */
	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r8			/* Return original CPSR */
	bx	r9
UNWIND(	.fnend)
END_FUNC thread_save_state_fiq

/*
 * Disables IRQ and FIQ and saves the state of the thread; returns the
 * original CPSR.
 */
LOCAL_FUNC thread_save_state , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r12, lr}
	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps	#CPSR_MODE_SYS
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	stm	r0!, {r1, sp, lr}

	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_save_state
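
/*
 * Note that thread_save_state() and thread_save_state_fiq() above stop
 * after svc_lr; the pc and cpsr words of the context are filled in
 * later by thread_state_suspend(), which is why callers such as
 * thread_rpc() below pass the CPSR to restore and the resume address to
 * thread_state_suspend() so that thread_resume() knows where and in
 * which state to continue the thread.
 */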

FUNC thread_std_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* Pass r0-r7 in a struct thread_smc_args */
	push	{r0-r7}
	mov	r0, sp
	bl	__thread_std_smc_entry
	/*
	 * Load the returned r0-r3 into preserved registers and skip the
	 * "returned" r4-r7 since they will not be returned to normal
	 * world.
	 */
	pop	{r4-r7}
	add	sp, #(4 * 4)

	/* Disable interrupts before switching to temporary stack */
	cpsid	aif
	bl	thread_get_tmp_sp
	mov	sp, r0

	bl	thread_state_free

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	r1, r4
	mov	r2, r5
	mov	r3, r6
	mov	r4, r7
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC thread_std_smc_entry
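
/*
 * On the SMC above r0 holds TEESMC_OPTEED_RETURN_CALL_DONE and r1-r4
 * hold the four return values. The assumption here (about the secure
 * monitor / ARM-TF dispatcher, not something enforced in this file) is
 * that r1-r4 are forwarded to the normal world as its r0-r3, which is
 * why the return values are shuffled up by one register.
 */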


/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
/*
 * r0-r2 are used to pass parameters to normal world
 * r0-r5 are used to pass return value back from normal world
 *
 * Note that r3 is used to pass "resume information", that is, which
 * thread should be resumed.
 *
 * Since this function follows the AAPCS we need to preserve r4-r5,
 * which are otherwise modified when returning from the normal world.
 */
UNWIND(	.fnstart)
	push	{r4-r5, lr}
UNWIND(	.save	{r4-r5, lr})
	push	{r0}
UNWIND(	.save	{r0})

	bl	thread_save_state
	mov	r4, r0			/* Save original CPSR */

	/*
	 * Switch to temporary stack and SVC mode. Save CPSR to resume into.
	 */
	bl	thread_get_tmp_sp
	ldr	r5, [sp]		/* Get pointer to rv[] */
	cps	#CPSR_MODE_SVC		/* Change to SVC mode */
	mov	sp, r0			/* Switch to tmp stack */

	mov	r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	mov	r1, r4			/* CPSR to restore */
	ldr	r2, =.thread_rpc_return
	bl	thread_state_suspend
	mov	r4, r0			/* Supply thread index */
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldm	r5, {r1-r3}		/* Load rv[0..2] into r1-r3 */
	smc	#0
	b	.	/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * it had when thread_save_state() was called above.
	 *
	 * Execution jumps here from thread_resume() above when the RPC has
	 * returned. The IRQ and FIQ bits are restored to what they were
	 * when this function was originally entered.
	 */
	pop	{r12}			/* Get pointer to rv[] */
	stm	r12, {r0-r5}		/* Store r0-r5 into rv[] */
	pop	{r4-r5, pc}
UNWIND(	.fnend)
END_FUNC thread_rpc
KEEP_PAGER thread_rpc
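
/*
 * Resume path for the RPC above: when the normal world returns from the
 * RPC it re-enters OP-TEE, the suspended thread is identified by the
 * index that was supplied in r4, and thread_resume() continues it at
 * .thread_rpc_return with the CPSR that was handed to
 * thread_state_suspend(). The values delivered by the normal world
 * arrive in r0-r5 and are copied into the caller's rv[].
 */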

/*
 * unsigned long thread_smc(unsigned long func_id, unsigned long a1,
 *			    unsigned long a2, unsigned long a3)
 */
FUNC thread_smc , :
UNWIND(	.fnstart)
	smc	#0
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_smc

FUNC thread_init_vbar , :
UNWIND(	.fnstart)
	/* Set vector (VBAR) */
	write_vbar r0
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * Below are low level routines handling entry and return from user mode.
 *
 * thread_enter_user_mode() saves all the registers that user mode can
 * change so kernel mode can restore the needed registers when resuming
 * execution after the call to thread_enter_user_mode() has returned.
 * thread_enter_user_mode() doesn't return directly since it enters user
 * mode instead; it's thread_unwind_user_mode() that does the
 * returning by restoring the registers saved by thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to its
 * caller: the user TA calls utee_return, the user TA calls utee_panic,
 * or an abort occurs.
 *
 * Calls to utee_return or utee_panic are handled as:
 * thread_svc_handler() -> tee_svc_handler() -> tee_svc_do_call() which
 * calls syscall_return() or syscall_panic().
 *
 * These function calls return normally, except thread_svc_handler()
 * which is an exception handling routine, so it reads the return address
 * and SPSR to restore from the stack. syscall_return() and syscall_panic()
 * change the return address and SPSR used by thread_svc_handler() so
 * that, instead of returning into user mode as with other syscalls, it
 * returns into thread_unwind_user_mode() in kernel mode. When
 * thread_svc_handler() returns, the stack pointer is at the point where
 * thread_enter_user_mode() left it, so this is where
 * thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way but by thread_abort_handler()
 * instead: when the pager sees that it's an abort from user mode that
 * can't be handled, it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode()
 * instead.
 */

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
 *               unsigned long user_func, unsigned long spsr,
 *               uint32_t *exit_status0, uint32_t *exit_status1)
 *
 */
FUNC __thread_enter_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/*
	 * Save all registers to allow syscall_return() to resume execution
	 * as if this function had returned. This is also used in
	 * syscall_panic().
	 *
	 * If the stack usage of this function is changed,
	 * thread_unwind_user_mode() has to be updated.
	 */
	push	{r4-r12,lr}

	ldr	r4, [sp, #(10 * 0x4)]	/* user stack pointer */
	ldr	r5, [sp, #(11 * 0x4)]	/* user function */
	ldr	r6, [sp, #(12 * 0x4)]	/* spsr */

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r7, sp
	mov	sp, r4
	cps	#CPSR_MODE_SVC
	push	{r7,r8}

	/* Prepare user mode entry via eret_to_user_mode */
	cpsid	aif
	msr	spsr_fsxc, r6
	mov	lr, r5

	b	eret_to_user_mode
UNWIND(	.fnend)
END_FUNC __thread_enter_user_mode
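
/*
 * Stack frame layout left behind by __thread_enter_user_mode() above,
 * as seen by thread_unwind_user_mode() below: the push {r4-r12,lr}
 * covers 10 words and the push {r7,r8} two more, so the stacked
 * arguments exit_status0 and exit_status1 (at sp + 13 * 4 and
 * sp + 14 * 4 after the first push) end up at sp + 15 * 4 and
 * sp + 16 * 4. This is where the #(15 * 0x4) and #(16 * 0x4) offsets
 * below come from.
 */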

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *              uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	ip, [sp, #(15 * 0x4)]	/* &ctx->panicked */
	str	r1, [ip]
	ldr	ip, [sp, #(16 * 0x4)]	/* &ctx->panic_code */
	str	r2, [ip]

	/* Restore old user sp */
	pop	{r4,r7}
	cps	#CPSR_MODE_SYS
	mov	sp, r4
	cps	#CPSR_MODE_SVC

	pop	{r4-r12,pc}	/* Match the push in thread_enter_user_mode() */
UNWIND(	.fnend)
END_FUNC thread_unwind_user_mode

	.macro maybe_restore_mapping
		/*
		 * This macro is a bit hard to read due to all the ifdefs;
		 * we're testing for two different configs, which makes four
		 * different combinations.
		 *
		 * - With LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 * - Without LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 */

		/*
		 * At this point we can't rely on any memory being writable
		 * yet, so we're using TPIDRPRW to store r0, and TPIDRURO to
		 * store r1 too when LPAE or CFG_CORE_UNMAP_CORE_AT_EL0 is
		 * enabled.
		 */
		write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		write_tpidruro r1
#endif

#ifdef CFG_WITH_LPAE
		read_ttbr0_64bit r0, r1
		tst	r1, #BIT(TTBR_ASID_SHIFT - 32)
		beq	11f

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/*
		 * Update the mapping to use the full kernel mode mapping.
		 * Since the translation table could reside above 4GB we'll
		 * have to use 64-bit arithmetic.
		 */
		subs	r0, r0, #CORE_MMU_L1_TBL_OFFSET
		sbc	r1, r1, #0
#endif
		bic	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
		write_ttbr0_64bit r0, r1
		isb

#else /*!CFG_WITH_LPAE*/
		read_contextidr r0
		tst	r0, #1
		beq	11f

		/* Update the mapping to use the full kernel mode mapping. */
		bic	r0, r0, #1
		write_contextidr r0
		isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		read_ttbr1 r0
		sub	r0, r0, #CORE_MMU_L1_TBL_OFFSET
		write_ttbr1 r0
		isb
#endif

#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		ldr	r0, =thread_user_kcode_offset
		ldr	r0, [r0]
		read_vbar r1
		add	r1, r1, r0
		write_vbar r1
		isb

	11:	/*
		 * The PC is adjusted unconditionally to guard against the
		 * case where there was an FIQ just before we did the
		 * "cpsid aif".
		 */
		ldr	r0, =22f
		bx	r0
	22:
#else
	11:
#endif
		read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		read_tpidruro r1
#endif
	.endm
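
/*
 * maybe_restore_mapping above is the mirror image of eret_to_user_mode
 * at the end of this file: eret_to_user_mode switches to the user ASID
 * (and, with CFG_CORE_UNMAP_CORE_AT_EL0, to the reduced kernel mapping
 * and relocated VBAR) before entering user mode, and this macro undoes
 * those changes on the way back into the kernel if it finds them
 * active.
 */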

/* The handler of native interrupt. */
.macro	native_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	/*
	 * FIQ and IRQ have a +4 offset for lr compared to the preferred
	 * return address.
	 */
	sub	lr, lr, #4

	/*
	 * We're always saving {r0-r3}. In IRQ mode we're saving r12 as
	 * well. In FIQ mode we're saving the banked FIQ registers {r8-r12}
	 * because the secure monitor doesn't save those. The treatment of
	 * the banked FIQ registers is somewhat analogous to the lazy save
	 * of VFP registers.
	 */
	.ifc	\mode\(),fiq
	push	{r0-r3, r8-r12, lr}
	.else
	push	{r0-r3, r12, lr}
	.endif

	bl	thread_check_canaries
	ldr	lr, =thread_nintr_handler_ptr
	ldr	lr, [lr]
	blx	lr

	mrs	r0, spsr
	cmp_spsr_user_mode r0

	.ifc	\mode\(),fiq
	pop	{r0-r3, r8-r12, lr}
	.else
	pop	{r0-r3, r12, lr}
	.endif

	movnes	pc, lr
	b	eret_to_user_mode
.endm

/* The handler of foreign interrupt. */
.macro foreign_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	sub	lr, lr, #4
	push	{r12}

	.ifc	\mode\(),fiq
	/*
	 * If a foreign (non-secure) interrupt is received as a FIQ we need
	 * to check whether we're in a state where it can be saved, or
	 * whether we need to mask the interrupt and handle it later.
	 *
	 * The window where this is needed is quite narrow: from entering
	 * the exception vector until the "cpsid" instruction of the
	 * handler has been executed.
	 *
	 * Currently we can save the state properly if the FIQ is received
	 * while in user or svc (kernel) mode.
	 *
	 * If we're returning to abort, undef or irq mode we're returning
	 * with the mapping restored. This is OK since the reduced mapping
	 * will be restored again before the handler we're returning to
	 * eventually returns to user mode.
	 */
	mrs	r12, spsr
	and	r12, r12, #ARM32_CPSR_MODE_MASK
	cmp	r12, #ARM32_CPSR_MODE_USR
	cmpne	r12, #ARM32_CPSR_MODE_SVC
	beq	1f
	mrs	r12, spsr
	orr	r12, r12, #ARM32_CPSR_F
	msr	spsr_fsxc, r12
	pop	{r12}
	movs	pc, lr
1:
	.endif

	push	{lr}

	.ifc	\mode\(),fiq
	bl	thread_save_state_fiq
	.else
	bl	thread_save_state
	.endif

	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	r1, spsr
	pop	{r2}
	pop	{r12}
	blx	thread_state_suspend
	mov	r4, r0		/* Supply thread index */

	/*
	 * Switch to SVC mode and keep the current stack pointer since it
	 * already is the tmp stack.
	 */
	mov	r0, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r0

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	r1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	r2, #0
	mov	r3, #0
	/* r4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
.endm

	.section .text.thread_excp_vect
	.align	5
FUNC thread_excp_vect , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.			/* Reset			*/
	b	thread_und_handler	/* Undefined instruction	*/
	b	thread_svc_handler	/* System call			*/
	b	thread_pabort_handler	/* Prefetch abort		*/
	b	thread_dabort_handler	/* Data abort			*/
	b	.			/* Reserved			*/
	b	thread_irq_handler	/* IRQ				*/
	b	thread_fiq_handler	/* FIQ				*/
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8 byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * To avoid unexpected speculation we need to invalidate
		 * the branch predictor before we do the first branch. It
		 * doesn't matter whether it's a conditional or an
		 * unconditional branch, speculation can still occur.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * we enter via.  This is done by adding 1 to SP in each
		 * entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset			*/
		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
		add	sp, sp, #1	/* 3:011 Data abort		*/
		add	sp, sp, #1	/* 2:010 Reserved		*/
		add	sp, sp, #1	/* 1:001 IRQ			*/
		write_tpidrprw r0	/* 0:000 FIQ			*/
	.endm
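
/*
 * The pattern accumulated in SP[2:0] by the macro above is consumed by
 * the dispatch code following the workaround entries below: it's masked
 * out of SP and "add pc, pc, r0, LSL #3" jumps to the matching 8-byte
 * stub (one read_tpidrprw plus one branch per exception type). The
 * stubs are listed in reverse vector order so that the accumulated
 * value, which equals 7 minus the entry index, lands on the right stub.
 */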

	.align	5
	.global thread_excp_vect_workaround_a15
thread_excp_vect_workaround_a15:
	vector_prologue_spectre
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.align	5
	.global thread_excp_vect_workaround
thread_excp_vect_workaround:
	vector_prologue_spectre
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:	and	r0, sp, #(BIT(0) | BIT(1) | BIT(2))
	bic	sp, sp, #(BIT(0) | BIT(1) | BIT(2))
	add	pc, pc, r0, LSL #3
	nop

	read_tpidrprw r0
	b	thread_fiq_handler	/* FIQ				*/
	read_tpidrprw r0
	b	thread_irq_handler	/* IRQ				*/
	read_tpidrprw r0
	b	.			/* Reserved			*/
	read_tpidrprw r0
	b	thread_dabort_handler	/* Data abort			*/
	read_tpidrprw r0
	b	thread_pabort_handler	/* Prefetch abort		*/
	read_tpidrprw r0
	b	thread_svc_handler	/* System call			*/
	read_tpidrprw r0
	b	thread_und_handler	/* Undefined instruction	*/
	read_tpidrprw r0
	b	.			/* Reset			*/
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/
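
/*
 * In the handlers below lr holds the exception return address with the
 * architectural offset still applied: for an undefined instruction lr
 * points 4 (ARM) or 2 (Thumb) bytes past the faulting instruction, for
 * a prefetch abort 4 bytes past it and for a data abort 8 bytes past
 * it, which is why the lr adjustments differ between the handlers.
 */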
thread_und_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	mrs	r1, spsr
	tst	r1, #CPSR_T
	subne	lr, lr, #2
	subeq	lr, lr, #4
	mov	r0, #ABORT_TYPE_UNDEF
	b	thread_abort_common

thread_dabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #8
	mov	r0, #ABORT_TYPE_DATA
	b	thread_abort_common

thread_pabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #4
	mov	r0, #ABORT_TYPE_PREFETCH

thread_abort_common:
	/*
	 * At this label:
	 * the CPU is in undef or abort mode
	 * sp is still pointing to struct thread_core_local belonging to
	 * this core.
	 * {r0, r1} are saved in struct thread_core_local pointed to by sp
	 * {r2-r11, ip} are untouched.
	 * r0 holds the first argument for abort_handler()
	 */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_ABORT

	/*
	 * Select stack and update flags accordingly.
	 *
	 * Normal case:
	 * If the abort stack is unused, select that.
	 *
	 * Fatal error handling:
	 * If we're already using the abort stack, as noted by bit
	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
	 * field, we select the temporary stack instead to be able to
	 * make a stack trace of the abort in abort mode.
	 *
	 * r1 is initialized as a temporary stack pointer until we've
	 * switched to system mode.
	 */
	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	orrne	r1, r1, #THREAD_CLF_TMP /* flags |= THREAD_CLF_TMP; */
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]

	/*
	 * Store registers on the stack matching struct thread_abort_regs,
	 * starting from the end of the struct with {r2-r11, ip}. Then
	 * load the previously saved {r0, r1} and store them together with
	 * {pad, spsr, elr}. After this only {usr_sp, usr_lr} are missing
	 * from the struct.
	 */
	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
	/* Push the original {r0-r1} on the selected stack */
	stmdb	r1!, {r2-r3}
	mrs	r3, spsr
	/* Push {pad, spsr, elr} on the selected stack */
	stmdb	r1!, {r2, r3, lr}

	cps	#CPSR_MODE_SYS
	str	lr, [r1, #-4]!
	str	sp, [r1, #-4]!
	mov	sp, r1

	bl	abort_handler

	mov	ip, sp
	ldr	sp, [ip], #4
	ldr	lr, [ip], #4

	/*
	 * Even if we entered via CPSR_MODE_UND, we are returning via
	 * CPSR_MODE_ABT. It doesn't matter as lr and spsr are assigned
	 * here.
	 */
	cps	#CPSR_MODE_ABT
	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
	msr	spsr_fsxc, r1

	/* Update core local flags */
	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	cmp_spsr_user_mode r1
	ldm	ip, {r0-r11, ip}
	movnes	pc, lr
	b	eret_to_user_mode
	/* end thread_abort_common */

thread_svc_handler:
	cpsid	aif

	maybe_restore_mapping

	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
	mov	r0, sp
	bl	tee_svc_handler
	cpsid	aif	/* In case something was unmasked */
	pop	{r0}
	msr	spsr_fsxc, r0
	cmp_spsr_user_mode r0
	pop	{r0-r7, lr}
	movnes	pc, lr
	b	eret_to_user_mode
	/* end thread_svc_handler */
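
/*
 * A sketch of what tee_svc_handler() above receives through r0: a
 * pointer to the pushed {spsr, r0-r7, lr}, which it treats as a
 * register structure roughly like the following (field names are
 * assumptions, see struct thread_svc_regs in the thread headers for
 * the authoritative definition):
 *
 *	struct thread_svc_regs {
 *		uint32_t spsr;
 *		uint32_t r0;
 *		uint32_t r1;
 *		uint32_t r2;
 *		uint32_t r3;
 *		uint32_t r4;
 *		uint32_t r5;
 *		uint32_t r6;
 *		uint32_t r7;
 *		uint32_t lr;
 *	};
 */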

thread_fiq_handler:
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
	/* end thread_fiq_handler */

thread_irq_handler:
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
	/* end thread_irq_handler */

	/*
	 * Returns to user mode.
	 * Expects to be entered with lr holding the user space address to
	 * jump to and spsr holding the desired CPSR. Async abort, IRQ and
	 * FIQ should be masked.
	 */
eret_to_user_mode:
	write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	write_tpidruro r1
#endif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r0, =thread_user_kcode_offset
	ldr	r0, [r0]
	read_vbar r1
	sub	r1, r1, r0
	write_vbar r1
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r1, =1f
	sub	r1, r1, r0
	bx	r1
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r0, r1
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
#endif
	/* switch to user ASID */
	orr	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r0, r1
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbr1 r0
	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
	write_ttbr1 r0
	isb
#endif
	read_contextidr r0
	orr	r0, r0, #BIT(0)
	write_contextidr r0
	isb
#endif /*!CFG_WITH_LPAE*/

	read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	read_tpidruro r1
#endif

	movs	pc, lr
	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
UNWIND(	.fnend)
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect