xref: /optee_os/core/arch/arm/kernel/thread_a32.S (revision 146a256b264a275a5930efa3989205b04216e984)
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/thread_defs.h>
#include <kernel/unwind.h>
#include <mm/core_mmu.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

#include "thread_private.h"

	.syntax unified
	.arch_extension sec

	.macro cmp_spsr_user_mode reg:req
		/*
		 * We only need to test the lower 4 bits as the 0x10 bit
		 * always is set. The Z flag is set when the tested mode
		 * is user mode.
		 */
		tst	\reg, #0x0f
	.endm

LOCAL_FUNC vector_std_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r0-r7}
	mov	r0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (error detected) it will do a normal "C"
	 * return.
	 */
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r0-r7}
	mov	r0, sp
	bl	thread_handle_fast_smc
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	ldr	lr, =thread_nintr_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_on_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_suspend_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_resume_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_system_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_system_reset_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization. Also used when compiled with the internal monitor, but
 * the cpu_*_entry and system_*_entry functions are not used then.
 *
 * Note that ARM-TF depends on the layout of this vector table; any change
 * in layout has to be synced with ARM-TF.
 */
FUNC thread_vector_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
UNWIND(	.fnend)
END_FUNC thread_vector_table
KEEP_PAGER thread_vector_table

FUNC thread_set_abt_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_abt_sp

FUNC thread_set_und_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_UND
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_und_sp

FUNC thread_set_irq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_fiq_sp

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */
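	/* r12 now points just past r0-r12, at the saved user sp/lr */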

	cps	#CPSR_MODE_SYS
	ldm	r12!, {sp, lr}

	cps	#CPSR_MODE_SVC
	ldm	r12!, {r1, sp, lr}
	msr	spsr_fsxc, r1

	ldm	r12, {r1, r2}

	/*
	 * Switch to a mode other than SVC since we need to set the SPSR
	 * in order to return into the old state properly, and it may be
	 * SVC mode we're returning to.
	 */
	cps	#CPSR_MODE_ABT
	cmp_spsr_user_mode r2
	mov	lr, r1
	msr	spsr_fsxc, r2
	ldm	r0, {r0-r12}
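	/*
	 * Return directly if we're not resuming user mode. Returns to
	 * user mode go via eret_to_user_mode which switches to the user
	 * mapping where needed before the final exception return.
	 */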
	movsne	pc, lr
	b	eret_to_user_mode
UNWIND(	.fnend)
END_FUNC thread_resume

/*
 * Disables IRQ and FIQ and saves the state of the thread when called
 * from FIQ mode, which has banked r8-r12 registers; returns the
 * original CPSR.
 */
LOCAL_FUNC thread_save_state_fiq , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mov	r9, lr

	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r8, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}

	cps	#CPSR_MODE_SYS
	stm	r0!, {r8-r12}
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	stm	r0!, {r1, sp, lr}

	/* back to fiq mode */
	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r8			/* Return original CPSR */
	bx	r9
UNWIND(	.fnend)
END_FUNC thread_save_state_fiq

/*
 * Disables IRQ and FIQ and saves the state of the thread; returns the
 * original CPSR.
 */
LOCAL_FUNC thread_save_state , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r12, lr}
	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs
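	/* r0 now points at the struct thread_ctx_regs of the current thread */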

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps	#CPSR_MODE_SYS
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	stm	r0!, {r1, sp, lr}

	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_save_state

FUNC thread_std_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* Pass r0-r7 in a struct thread_smc_args */
	push	{r0-r7}
	mov	r0, sp
	bl	__thread_std_smc_entry
	/*
	 * Load the returned r0-r3 into preserved registers and skip the
	 * "returned" r4-r7 since they will not be returned to normal
	 * world.
	 */
	pop	{r4-r7}
	add	sp, #(4 * 4)

	/* Disable interrupts before switching to temporary stack */
	cpsid	aif
	bl	thread_get_tmp_sp
	mov	sp, r0

	bl	thread_state_free

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
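	/* Pass the r0-r3 results saved in r4-r7 above as r1-r4 of the return SMC */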
	mov	r1, r4
	mov	r2, r5
	mov	r3, r6
	mov	r4, r7
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC thread_std_smc_entry


/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
/*
 * r0-r2 are used to pass parameters to normal world
 * r0-r5 are used to pass the return value back from normal world
 *
 * Note that r3 is used to pass "resume information", that is, which
 * thread it is that should resume.
 *
 * Since this function follows the AAPCS we need to preserve r4-r5,
 * which are otherwise modified when returning back from normal world.
 */
UNWIND(	.fnstart)
	push	{r4-r5, lr}
UNWIND(	.save	{r4-r5, lr})
	push	{r0}
UNWIND(	.save	{r0})

	bl	thread_save_state
	mov	r4, r0			/* Save original CPSR */

	/*
	 * Switch to temporary stack and SVC mode. Save CPSR to resume into.
	 */
	bl	thread_get_tmp_sp
	ldr	r5, [sp]		/* Get pointer to rv[] */
	cps	#CPSR_MODE_SVC		/* Change to SVC mode */
	mov	sp, r0			/* Switch to tmp stack */

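	/*
	 * Suspend this thread: r0 = flags, r1 = CPSR to resume with and
	 * r2 = PC to resume at when thread_resume() later restores it.
	 */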
	mov	r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	mov	r1, r4			/* CPSR to restore */
	ldr	r2, =.thread_rpc_return
	bl	thread_state_suspend
	mov	r4, r0			/* Supply thread index */
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldm	r5, {r1-r3}		/* Load rv[] into r1-r3 (r0-r2 in normal world) */
	smc	#0
	b	.	/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * it had when thread_save_state() was called above.
	 *
	 * We get here from thread_resume() above when the RPC has
	 * returned. The IRQ and FIQ bits are restored to what they were
	 * when this function was originally entered.
	 */
	pop	{r12}			/* Get pointer to rv[] */
	stm	r12, {r0-r5}		/* Store r0-r5 into rv[] */
	pop	{r4-r5, pc}
UNWIND(	.fnend)
END_FUNC thread_rpc
KEEP_PAGER thread_rpc

/*
 * unsigned long thread_smc(unsigned long func_id, unsigned long a1,
 *			    unsigned long a2, unsigned long a3)
 */
FUNC thread_smc , :
UNWIND(	.fnstart)
	smc	#0
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_smc

FUNC thread_init_vbar , :
UNWIND(	.fnstart)
	/* Set vector (VBAR) */
	write_vbar r0
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * Below are low level routines handling entry and return from user mode.
 *
 * thread_enter_user_mode() saves all registers that user mode can change
 * so kernel mode can restore needed registers when resuming execution
 * after the call to thread_enter_user_mode() has returned.
 * thread_enter_user_mode() doesn't return directly since it enters user
 * mode instead, it's thread_unwind_user_mode() that does the
 * returning by restoring the registers saved by thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to its
 * caller: the user TA calls utee_return, the user TA calls utee_panic or
 * an abort occurs.
 *
 * Calls to utee_return or utee_panic are handled as:
 * thread_svc_handler() -> tee_svc_handler() -> tee_svc_do_call() which
 * calls syscall_return() or syscall_panic().
 *
 * These function calls return normally, except thread_svc_handler()
 * which is an exception handling routine and therefore reads the return
 * address and SPSR to restore from the stack. syscall_return() and
 * syscall_panic() change the return address and SPSR used by
 * thread_svc_handler() so that, instead of returning into user mode as
 * with other syscalls, it returns into thread_unwind_user_mode() in
 * kernel mode. When thread_svc_handler() returns, the stack pointer is
 * at the point where thread_enter_user_mode() left it, so this is where
 * thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way, but by thread_abort_handler()
 * instead: when the pager sees that it's an abort from user mode that
 * can't be handled, it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode()
 * instead.
 */

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
 *               unsigned long user_func, unsigned long spsr,
 *               uint32_t *exit_status0, uint32_t *exit_status1)
 *
 */
FUNC __thread_enter_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/*
	 * Save all registers to allow syscall_return() to resume execution
	 * as if this function had returned. This is also used in
	 * syscall_panic().
	 *
	 * If the stack usage of this function is changed
	 * thread_unwind_user_mode() has to be updated.
	 */
	push    {r4-r12,lr}

	ldr     r4, [sp, #(10 * 0x4)]   /* user stack pointer */
	ldr     r5, [sp, #(11 * 0x4)]   /* user function */
	ldr     r6, [sp, #(12 * 0x4)]   /* spsr */

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r7, sp
	mov     sp, r4
	cps	#CPSR_MODE_SVC
	push	{r7,r8}

	/* Prepare user mode entry via eret_to_user_mode */
	cpsid	aif
	msr     spsr_fsxc, r6
	mov	lr, r5

	b	eret_to_user_mode
UNWIND(	.fnend)
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *              uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr     ip, [sp, #(15 * 0x4)]   /* &ctx->panicked */
	str	r1, [ip]
	ldr     ip, [sp, #(16 * 0x4)]   /* &ctx->panic_code */
	str	r2, [ip]

	/* Restore old user sp */
	pop	{r4,r7}
	cps	#CPSR_MODE_SYS
	mov	sp, r4
	cps	#CPSR_MODE_SVC

	pop     {r4-r12,pc}	/* Match the push in __thread_enter_user_mode() */
UNWIND(	.fnend)
END_FUNC thread_unwind_user_mode

	.macro maybe_restore_mapping
		/*
		 * This macro is a bit hard to read due to all the ifdefs,
		 * we're testing for two different configs which makes four
		 * different combinations.
		 *
		 * - With LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 * - Without LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 */

		/*
		 * At this point we can't rely on any memory being writable
		 * yet, so we're using TPIDRPRW to store r0, and if with
		 * LPAE TPIDRURO to store r1 too.
		 */
		write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		write_tpidruro r1
#endif

#ifdef CFG_WITH_LPAE
		read_ttbr0_64bit r0, r1
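		/*
		 * If the user ASID bit is clear we're already using the
		 * full kernel mapping, nothing needs to be restored.
		 */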
		tst	r1, #BIT(TTBR_ASID_SHIFT - 32)
		beq	11f

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/*
		 * Update the mapping to use the full kernel mode mapping.
		 * Since the translation table could reside above 4GB we'll
		 * have to use 64-bit arithmetic.
		 */
		subs	r0, r0, #CORE_MMU_L1_TBL_OFFSET
		sbc	r1, r1, #0
#endif
		bic	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
		write_ttbr0_64bit r0, r1
		isb

#else /*!CFG_WITH_LPAE*/
		read_contextidr r0
		tst	r0, #1
		beq	11f

		/* Update the mapping to use the full kernel mode mapping. */
		bic	r0, r0, #1
		write_contextidr r0
		isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		read_ttbr1 r0
		sub	r0, r0, #CORE_MMU_L1_TBL_OFFSET
		write_ttbr1 r0
		isb
#endif

#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		ldr	r0, =thread_user_kcode_offset
		ldr	r0, [r0]
		read_vbar r1
		add	r1, r1, r0
		write_vbar r1
		isb

	11:	/*
		 * The PC is adjusted unconditionally to guard against the
		 * case where there was an FIQ just before we did the
		 * "cpsid aif".
		 */
		ldr	r0, =22f
		bx	r0
	22:
#else
	11:
#endif
		read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		read_tpidruro r1
#endif
	.endm

/* The handler of native interrupt. */
.macro	native_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	/*
	 * FIQ and IRQ have a +4 offset for lr compared to preferred return
	 * address
	 */
	sub     lr, lr, #4

	/*
	 * We're always saving {r0-r3}. In IRQ mode we're saving r12 also.
	 * In FIQ mode we're saving the banked FIQ registers {r8-r12}
	 * because the secure monitor doesn't save those. The treatment of
	 * the banked FIQ registers is somewhat analogous to the lazy save
	 * of VFP registers.
	 */
	.ifc	\mode\(),fiq
	push	{r0-r3, r8-r12, lr}
	.else
	push	{r0-r3, r12, lr}
	.endif

	bl	thread_check_canaries
	ldr	lr, =thread_nintr_handler_ptr
	ldr	lr, [lr]
	blx	lr

	mrs	r0, spsr
	cmp_spsr_user_mode r0

	.ifc	\mode\(),fiq
	pop	{r0-r3, r8-r12, lr}
	.else
	pop	{r0-r3, r12, lr}
	.endif

	movsne	pc, lr
	b	eret_to_user_mode
.endm

/* The handler of foreign interrupt. */
.macro foreign_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	sub	lr, lr, #4
	push	{r12}

	.ifc	\mode\(),fiq
	/*
	 * If a foreign (non-secure) interrupt is received as a FIQ we need
	 * to check that we're in a saveable state or if we need to mask
	 * the interrupt to be handled later.
	 *
	 * The window when this is needed is quite narrow: it's from
	 * entering the exception vector until the "cpsid" instruction of
	 * the handler has been executed.
	 *
	 * Currently we can save the state properly if the FIQ is received
	 * while in user or svc (kernel) mode.
	 *
	 * If we're returning to abort, undef or irq mode we're returning
	 * with the mapping restored. This is OK since the reduced mapping
	 * will be restored before the handler we're returning to
	 * eventually returns to user mode.
	 */
	mrs	r12, spsr
	and	r12, r12, #ARM32_CPSR_MODE_MASK
	cmp	r12, #ARM32_CPSR_MODE_USR
	cmpne	r12, #ARM32_CPSR_MODE_SVC
	beq	1f
	mrs	r12, spsr
	orr	r12, r12, #ARM32_CPSR_F
	msr	spsr_fsxc, r12
	pop	{r12}
	movs	pc, lr
1:
	.endif

	push	{lr}

	.ifc	\mode\(),fiq
	bl	thread_save_state_fiq
	.else
	bl	thread_save_state
	.endif

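	/*
	 * Suspend the thread: r0 = flags, r1 = CPSR at the time of the
	 * exception and r2 = the return address, all passed to
	 * thread_state_suspend().
	 */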
	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	r1, spsr
	pop	{r2}
	pop	{r12}
	blx	thread_state_suspend
	mov	r4, r0		/* Supply thread index */

	/*
	 * Switch to SVC mode and copy current stack pointer as it already
	 * is the tmp stack.
	 */
	mov	r0, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r0

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	r1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	r2, #0
	mov	r3, #0
	/* r4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
.endm

	.section .text.thread_excp_vect
	.align	5
FUNC thread_excp_vect , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.			/* Reset			*/
	b	thread_und_handler	/* Undefined instruction	*/
	b	thread_svc_handler	/* System call			*/
	b	thread_pabort_handler	/* Prefetch abort		*/
	b	thread_dabort_handler	/* Data abort			*/
	b	.			/* Reserved			*/
	b	thread_irq_handler	/* IRQ				*/
	b	thread_fiq_handler	/* FIQ				*/
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8 byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * To avoid unexpected speculation we need to invalidate
		 * the branch predictor before we do the first branch. It
		 * doesn't matter whether it's a conditional or an
		 * unconditional branch, speculation can still occur.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * we enter via.  This is done by adding 1 to SP in each
		 * entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset			*/
		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
		add	sp, sp, #1	/* 3:011 Data abort		*/
		add	sp, sp, #1	/* 2:010 Reserved		*/
		add	sp, sp, #1	/* 1:001 IRQ			*/
		write_tpidrprw r0	/* 0:000 FIQ			*/
	.endm

	.align	5
	.global thread_excp_vect_workaround_a15
thread_excp_vect_workaround_a15:
	vector_prologue_spectre
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.align	5
	.global thread_excp_vect_workaround
thread_excp_vect_workaround:
	vector_prologue_spectre
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

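	/*
	 * The low three bits of SP encode which vector entry we came
	 * through (see vector_prologue_spectre above). Each entry below
	 * is two instructions (8 bytes) and PC reads as the address of
	 * the current instruction + 8, so "add pc, pc, r0, LSL #3"
	 * branches to the read_tpidrprw/branch pair matching the
	 * original exception.
	 */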
1:	and	r0, sp, #(BIT(0) | BIT(1) | BIT(2))
	bic	sp, sp, #(BIT(0) | BIT(1) | BIT(2))
	add	pc, pc, r0, LSL #3
	nop

	read_tpidrprw r0
	b	thread_fiq_handler	/* FIQ				*/
	read_tpidrprw r0
	b	thread_irq_handler	/* IRQ				*/
	read_tpidrprw r0
	b	.			/* Reserved			*/
	read_tpidrprw r0
	b	thread_dabort_handler	/* Data abort			*/
	read_tpidrprw r0
	b	thread_pabort_handler	/* Prefetch abort		*/
	read_tpidrprw r0
	b	thread_svc_handler	/* System call			*/
	read_tpidrprw r0
	b	thread_und_handler	/* Undefined instruction	*/
	read_tpidrprw r0
	b	.			/* Reset			*/
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

thread_und_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	mrs	r1, spsr
	tst	r1, #CPSR_T
	subne	lr, lr, #2
	subeq	lr, lr, #4
	mov	r0, #ABORT_TYPE_UNDEF
	b	thread_abort_common

thread_dabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #8
	mov	r0, #ABORT_TYPE_DATA
	b	thread_abort_common

thread_pabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #4
	mov	r0, #ABORT_TYPE_PREFETCH

thread_abort_common:
	/*
	 * At this label:
	 * cpsr is in mode undef or abort
	 * sp is still pointing to struct thread_core_local belonging to
	 * this core.
	 * {r0, r1} are saved in struct thread_core_local pointed to by sp
	 * {r2-r11, ip} are untouched.
	 * r0 holds the first argument for abort_handler()
	 */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_ABORT

	/*
	 * Select stack and update flags accordingly
	 *
	 * Normal case:
	 * If the abort stack is unused select that.
	 *
	 * Fatal error handling:
	 * If we're already using the abort stack as noted by bit
	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
	 * field we're selecting the temporary stack instead to be able to
	 * make a stack trace of the abort in abort mode.
	 *
	 * r1 is initialized as a temporary stack pointer until we've
	 * switched to system mode.
	 */
	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	orrne	r1, r1, #THREAD_CLF_TMP /* flags |= THREAD_CLF_TMP; */
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]

	/*
	 * Store registers on the stack in the layout of struct
	 * thread_abort_regs, starting from the end of the struct:
	 * first {r2-r11, ip}, then the previously saved {r0-r1},
	 * then {pad, spsr, elr}.
	 * After this only {usr_sp, usr_lr} are missing in the struct.
	 */
	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
	/* Push the original {r0-r1} on the selected stack */
	stmdb	r1!, {r2-r3}
	mrs	r3, spsr
	/* Push {pad, spsr, elr} on the selected stack */
	stmdb	r1!, {r2, r3, lr}

	cps	#CPSR_MODE_SYS
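	/*
	 * Store usr_lr and usr_sp to complete the struct; r1 then points
	 * at it and is used both as the new stack pointer and as the
	 * regs argument to abort_handler().
	 */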
	str	lr, [r1, #-4]!
	str	sp, [r1, #-4]!
	mov	sp, r1

	bl	abort_handler

	mov	ip, sp
	ldr	sp, [ip], #4
	ldr	lr, [ip], #4

	/*
	 * Even if we entered via CPSR_MODE_UND, we are returning via
	 * CPSR_MODE_ABT. It doesn't matter as lr and spsr are assigned
	 * here.
	 */
	cps	#CPSR_MODE_ABT
	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
	msr	spsr_fsxc, r1

	/* Update core local flags */
	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	cmp_spsr_user_mode r1
	ldm	ip, {r0-r11, ip}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_abort_common */

thread_svc_handler:
	cpsid	aif

	maybe_restore_mapping

	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
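	/*
	 * The pushed {spsr, r0-r7, lr} form the struct thread_svc_regs
	 * passed to tee_svc_handler() below.
	 */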
	mov	r0, sp
	bl	tee_svc_handler
	cpsid	aif	/* In case something was unmasked */
	pop	{r0}
	msr	spsr_fsxc, r0
	cmp_spsr_user_mode r0
	pop	{r0-r7, lr}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_svc_handler */

thread_fiq_handler:
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
	/* end thread_fiq_handler */

thread_irq_handler:
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
	/* end thread_irq_handler */

	/*
	 * Returns to user mode.
	 * Expects to be jumped to with lr pointing to the user space
	 * address to jump to and spsr holding the desired cpsr. Async
	 * abort, irq and fiq should be masked.
	 */
eret_to_user_mode:
	write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	write_tpidruro r1
#endif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r0, =thread_user_kcode_offset
	ldr	r0, [r0]
	read_vbar r1
	sub	r1, r1, r0
	write_vbar r1
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r1, =1f
	sub	r1, r1, r0
	bx	r1
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r0, r1
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
#endif
	/* switch to user ASID */
	orr	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r0, r1
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbr1 r0
	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
	write_ttbr1 r0
	isb
#endif
	read_contextidr r0
	orr	r0, r0, #BIT(0)
	write_contextidr r0
	isb
#endif /*!CFG_WITH_LPAE*/

	read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	read_tpidruro r1
#endif

	movs	pc, lr

	/*
	 * void icache_inv_user_range(void *addr, size_t size);
	 *
	 * This function has to execute with the user space ASID active;
	 * this means executing with the reduced mapping, so the code needs
	 * to be located here together with the vector.
	 */
	.global icache_inv_user_range
	.type icache_inv_user_range , %function
icache_inv_user_range:
	push	{r4-r7}

	/* Mask all exceptions */
	mrs	r4, cpsr	/* This register must be preserved */
	cpsid	aif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r2, =thread_user_kcode_offset
	ldr	r2, [r2]
	read_vbar r5		/* This register must be preserved */
	sub	r3, r5, r2
	write_vbar r3
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r3, =1f
	sub	r3, r3, r2
	bx	r3
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r6, r7	/* These registers must be preserved */
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r2, r6, #CORE_MMU_L1_TBL_OFFSET
#endif
	/* switch to user ASID */
	orr	r3, r7, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r2, r3
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbr1 r6		/* This register must be preserved */
	add	r2, r6, #CORE_MMU_L1_TBL_OFFSET
	write_ttbr1 r2
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	read_contextidr r7	/* This register must be preserved */
	orr	r2, r7, #BIT(0)
	write_contextidr r2
	isb
#endif /*!CFG_WITH_LPAE*/

	/*
	 * Do the actual icache invalidation
	 */

	/* Calculate minimum icache line size, result in r2 */
	read_ctr r3
	and     r3, r3, #CTR_IMINLINE_MASK
	mov     r2, #CTR_WORD_SIZE
	lsl     r2, r2, r3

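	/* r1 = end address, then align r0 down to the cache line size */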
	add	r1, r0, r1
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	write_icimvau r0
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b

	/* Invalidate entire branch predictor array inner shareable */
	write_bpiallis

	dsb	ishst
	isb

#ifdef CFG_WITH_LPAE
	write_ttbr0_64bit r6, r7
	isb
#else /*!CFG_WITH_LPAE*/
	write_contextidr r7
	isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_ttbr1 r6
	isb
#endif
#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_vbar r5
	isb
	/*
	 * The PC is adjusted unconditionally to guard against the case
	 * where there was an FIQ just before we did the "cpsid aif".
	 */
	ldr	r0, =1f
	bx	r0
1:
#endif

	msr	cpsr_fsxc, r4	/* Restore exceptions */
	pop	{r4-r7}
	bx	lr		/* End of icache_inv_user_range() */

	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
UNWIND(	.fnend)
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect
