xref: /optee_os/core/arch/arm/kernel/thread_a32.S (revision 757331fc1216e0c1742c00123cc8c3349de3e884)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2016-2017, Linaro Limited
4 * Copyright (c) 2014, STMicroelectronics International N.V.
5 */
6
7#include <arm32_macros.S>
8#include <arm.h>
9#include <asm.S>
10#include <generated/asm-defines.h>
11#include <keep.h>
12#include <kernel/abort.h>
13#include <kernel/cache_helpers.h>
14#include <kernel/thread_defs.h>
15#include <kernel/unwind.h>
16#include <mm/core_mmu.h>
17#include <sm/optee_smc.h>
18#include <sm/teesmc_opteed.h>
19#include <sm/teesmc_opteed_macros.h>
20
21#include "thread_private.h"
22
23	.syntax unified
24	.arch_extension sec
25
26	.macro cmp_spsr_user_mode reg:req
27		/*
28		 * We're only testing the lower 4 bits as bit 4 (0x10)
29		 * is always set in any 32-bit processor mode.
30		 */
31		tst	\reg, #0x0f
32	.endm
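/*
 * Worked example of the test above (mode encodings from the ARM ARM,
 * listed for illustration): user mode has SPSR.M = 0x10, so the low
 * four bits are zero and Z is set, while for instance svc mode has
 * SPSR.M = 0x13 and Z is cleared. Callers use "movsne pc, lr" to
 * return directly when coming from a kernel mode and fall through to
 * eret_to_user_mode when Z is set.
 */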
33
34LOCAL_FUNC vector_std_smc_entry , :
35UNWIND(	.fnstart)
36UNWIND(	.cantunwind)
37	push	{r0-r7}
38	mov	r0, sp
39	bl	thread_handle_std_smc
40	/*
41	 * Normally thread_handle_std_smc() should return via
42	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
43	 * hasn't switched stacks (an error was detected) it will do a
44	 * normal "C" return.
45	 */
46	pop	{r1-r8}
47	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
48	smc	#0
49	b	.	/* SMC should not return */
50UNWIND(	.fnend)
51END_FUNC vector_std_smc_entry
52
53LOCAL_FUNC vector_fast_smc_entry , :
54UNWIND(	.fnstart)
55UNWIND(	.cantunwind)
56	push	{r0-r7}
57	mov	r0, sp
58	bl	thread_handle_fast_smc
59	pop	{r1-r8}
60	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
61	smc	#0
62	b	.	/* SMC should not return */
63UNWIND(	.fnend)
64END_FUNC vector_fast_smc_entry
65
66LOCAL_FUNC vector_fiq_entry , :
67UNWIND(	.fnstart)
68UNWIND(	.cantunwind)
69	/* Secure Monitor received a FIQ and passed control to us. */
70	bl	thread_check_canaries
71	ldr	lr, =thread_nintr_handler_ptr
72	ldr	lr, [lr]
73	blx	lr
74	mov	r1, r0
75	ldr	r0, =TEESMC_OPTEED_RETURN_FIQ_DONE
76	smc	#0
77	b	.	/* SMC should not return */
78UNWIND(	.fnend)
79END_FUNC vector_fiq_entry
80
81LOCAL_FUNC vector_cpu_on_entry , :
82UNWIND(	.fnstart)
83UNWIND(	.cantunwind)
84	ldr	lr, =thread_cpu_on_handler_ptr
85	ldr	lr, [lr]
86	blx	lr
87	mov	r1, r0
88	ldr	r0, =TEESMC_OPTEED_RETURN_ON_DONE
89	smc	#0
90	b	.	/* SMC should not return */
91UNWIND(	.fnend)
92END_FUNC vector_cpu_on_entry
93
94LOCAL_FUNC vector_cpu_off_entry , :
95UNWIND(	.fnstart)
96UNWIND(	.cantunwind)
97	ldr	lr, =thread_cpu_off_handler_ptr
98	ldr	lr, [lr]
99	blx	lr
100	mov	r1, r0
101	ldr	r0, =TEESMC_OPTEED_RETURN_OFF_DONE
102	smc	#0
103	b	.	/* SMC should not return */
104UNWIND(	.fnend)
105END_FUNC vector_cpu_off_entry
106
107LOCAL_FUNC vector_cpu_suspend_entry , :
108UNWIND(	.fnstart)
109UNWIND(	.cantunwind)
110	ldr	lr, =thread_cpu_suspend_handler_ptr
111	ldr	lr, [lr]
112	blx	lr
113	mov	r1, r0
114	ldr	r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
115	smc	#0
116	b	.	/* SMC should not return */
117UNWIND(	.fnend)
118END_FUNC vector_cpu_suspend_entry
119
120LOCAL_FUNC vector_cpu_resume_entry , :
121UNWIND(	.fnstart)
122UNWIND(	.cantunwind)
123	ldr	lr, =thread_cpu_resume_handler_ptr
124	ldr	lr, [lr]
125	blx	lr
126	mov	r1, r0
127	ldr	r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
128	smc	#0
129	b	.	/* SMC should not return */
130UNWIND(	.fnend)
131END_FUNC vector_cpu_resume_entry
132
133LOCAL_FUNC vector_system_off_entry , :
134UNWIND(	.fnstart)
135UNWIND(	.cantunwind)
136	ldr	lr, =thread_system_off_handler_ptr
137	ldr	lr, [lr]
138	blx	lr
139	mov	r1, r0
140	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
141	smc	#0
142	b	.	/* SMC should not return */
143UNWIND(	.fnend)
144END_FUNC vector_system_off_entry
145
146LOCAL_FUNC vector_system_reset_entry , :
147UNWIND(	.fnstart)
148UNWIND(	.cantunwind)
149	ldr	lr, =thread_system_reset_handler_ptr
150	ldr	lr, [lr]
151	blx	lr
152	mov	r1, r0
153	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
154	smc	#0
155	b	.	/* SMC should not return */
156UNWIND(	.fnend)
157END_FUNC vector_system_reset_entry
158
159/*
160 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
161 * initialization.  Also used when compiled with the internal monitor, but
162 * the cpu_*_entry and system_*_entry are not used then.
163 *
164 * Note that ARM-TF depends on the layout of this vector table; any
165 * change in the layout has to be synced with ARM-TF.
166 */
167FUNC thread_vector_table , :
168UNWIND(	.fnstart)
169UNWIND(	.cantunwind)
170	b	vector_std_smc_entry
171	b	vector_fast_smc_entry
172	b	vector_cpu_on_entry
173	b	vector_cpu_off_entry
174	b	vector_cpu_resume_entry
175	b	vector_cpu_suspend_entry
176	b	vector_fiq_entry
177	b	vector_system_off_entry
178	b	vector_system_reset_entry
179UNWIND(	.fnend)
180END_FUNC thread_vector_table
181KEEP_PAGER thread_vector_table
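/*
 * For reference, a minimal sketch (in C) of how the dispatcher side
 * may mirror the layout above. The struct and field names below are
 * assumptions for illustration only; the authoritative definition
 * lives in ARM-TF (the OP-TEE dispatcher). Each slot holds a single
 * branch instruction and is entered by the dispatcher like a function:
 *
 * struct optee_vectors {
 *	uint32_t std_smc_entry;
 *	uint32_t fast_smc_entry;
 *	uint32_t cpu_on_entry;
 *	uint32_t cpu_off_entry;
 *	uint32_t cpu_resume_entry;
 *	uint32_t cpu_suspend_entry;
 *	uint32_t fiq_entry;
 *	uint32_t system_off_entry;
 *	uint32_t system_reset_entry;
 * };
 */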
182
183FUNC thread_set_abt_sp , :
184UNWIND(	.fnstart)
185UNWIND(	.cantunwind)
186	mrs	r1, cpsr
187	cps	#CPSR_MODE_ABT
188	mov	sp, r0
189	msr	cpsr, r1
190	bx	lr
191UNWIND(	.fnend)
192END_FUNC thread_set_abt_sp
193
194FUNC thread_set_und_sp , :
195UNWIND(	.fnstart)
196UNWIND(	.cantunwind)
197	mrs	r1, cpsr
198	cps	#CPSR_MODE_UND
199	mov	sp, r0
200	msr	cpsr, r1
201	bx	lr
202UNWIND(	.fnend)
203END_FUNC thread_set_und_sp
204
205FUNC thread_set_irq_sp , :
206UNWIND(	.fnstart)
207UNWIND(	.cantunwind)
208	mrs	r1, cpsr
209	cps	#CPSR_MODE_IRQ
210	mov	sp, r0
211	msr	cpsr, r1
212	bx	lr
213UNWIND(	.fnend)
214END_FUNC thread_set_irq_sp
215
216FUNC thread_set_fiq_sp , :
217UNWIND(	.fnstart)
218UNWIND(	.cantunwind)
219	mrs	r1, cpsr
220	cps	#CPSR_MODE_FIQ
221	mov	sp, r0
222	msr	cpsr, r1
223	bx	lr
224UNWIND(	.fnend)
225END_FUNC thread_set_fiq_sp
226
227/* void thread_resume(struct thread_ctx_regs *regs) */
228FUNC thread_resume , :
229UNWIND(	.fnstart)
230UNWIND(	.cantunwind)
231	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */
232
233	cps	#CPSR_MODE_SYS
234	ldr	sp, [r12], #4
235	ldr	lr, [r12], #4
236
237	cps	#CPSR_MODE_SVC
238	ldr	r1, [r12], #4
239	ldr	sp, [r12], #4
240	ldr	lr, [r12], #4
241	msr	spsr_fsxc, r1
242
243	ldm	r12, {r1, r2}
244
245	/*
246	 * Switching to some other mode than SVC as we need to set spsr in
247	 * order to return into the old state properly and it may be SVC
248	 * mode we're returning to.
249	 */
250	cps	#CPSR_MODE_ABT
251	cmp_spsr_user_mode r2
252	mov	lr, r1
253	msr	spsr_fsxc, r2
254	ldm	r0, {r0-r12}
255	movsne	pc, lr
256	b	eret_to_user_mode
257UNWIND(	.fnend)
258END_FUNC thread_resume
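/*
 * The loads in thread_resume() above imply the following layout of
 * struct thread_ctx_regs. The sketch below uses assumed field names
 * and is for illustration only; the authoritative definition is in
 * the C headers:
 *
 * struct thread_ctx_regs {
 *	uint32_t r[13];		// r0-r12
 *	uint32_t usr_sp;
 *	uint32_t usr_lr;
 *	uint32_t svc_spsr;
 *	uint32_t svc_sp;
 *	uint32_t svc_lr;
 *	uint32_t pc;
 *	uint32_t cpsr;
 * };
 */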
259
260/*
261 * Disables IRQ and FIQ and saves the state of the thread from FIQ
262 * mode, which has banked r8-r12 registers. Returns the original CPSR.
263 */
264LOCAL_FUNC thread_save_state_fiq , :
265UNWIND(	.fnstart)
266UNWIND(	.cantunwind)
267	mov	r9, lr
268
269	/*
270	 * Uses stack for temporary storage, while storing needed
271	 * context in the thread context struct.
272	 */
273
274	mrs	r8, cpsr
275
276	cpsid	aif			/* Disable Async abort, IRQ and FIQ */
277
278	push	{r4-r7}
279	push	{r0-r3}
280
281	mrs	r6, cpsr		/* Save current CPSR */
282
283	bl	thread_get_ctx_regs
284
285	pop	{r1-r4}			/* r0-r3 pushed above */
286	stm	r0!, {r1-r4}
287	pop	{r1-r4}			/* r4-r7 pushed above */
288	stm	r0!, {r1-r4}
289
290	cps     #CPSR_MODE_SYS
291	stm	r0!, {r8-r12}
292	str	sp, [r0], #4
293	str	lr, [r0], #4
294
295	cps     #CPSR_MODE_SVC
296	mrs     r1, spsr
297	str	r1, [r0], #4
298	str	sp, [r0], #4
299	str	lr, [r0], #4
300
301	/* back to fiq mode */
302	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
303	msr	cpsr, r6		/* Restore mode */
304
305	mov	r0, r8			/* Return original CPSR */
306	bx	r9
307UNWIND(	.fnend)
308END_FUNC thread_save_state_fiq
309
310/*
311 * Disables IRQ and FIQ and saves the state of the thread. Returns the
312 * original CPSR.
313 */
314LOCAL_FUNC thread_save_state , :
315UNWIND(	.fnstart)
316UNWIND(	.cantunwind)
317	push	{r12, lr}
318	/*
319	 * Uses stack for temporary storage, while storing needed
320	 * context in the thread context struct.
321	 */
322
323	mrs	r12, cpsr
324
325	cpsid	aif			/* Disable Async abort, IRQ and FIQ */
326
327	push	{r4-r7}
328	push	{r0-r3}
329
330	mov	r5, r12			/* Save CPSR in a preserved register */
331	mrs	r6, cpsr		/* Save current CPSR */
332
333	bl	thread_get_ctx_regs
334
335	pop	{r1-r4}			/* r0-r3 pushed above */
336	stm	r0!, {r1-r4}
337	pop	{r1-r4}			/* r4-r7 pushed above */
338	stm	r0!, {r1-r4}
339	stm	r0!, {r8-r11}
340
341	pop	{r12, lr}
342	stm	r0!, {r12}
343
344	cps	#CPSR_MODE_SYS
345	str	sp, [r0], #4
346	str	lr, [r0], #4
347
348	cps	#CPSR_MODE_SVC
349	mrs	r1, spsr
350	str	r1, [r0], #4
351	str	sp, [r0], #4
352	str	lr, [r0], #4
353
354	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
355	msr	cpsr, r6		/* Restore mode */
356
357	mov	r0, r5			/* Return original CPSR */
358	bx	lr
359UNWIND(	.fnend)
360END_FUNC thread_save_state
361
362FUNC thread_std_smc_entry , :
363UNWIND(	.fnstart)
364UNWIND(	.cantunwind)
365	/* Pass r0-r7 in a struct thread_smc_args */
366	push	{r0-r7}
367	mov	r0, sp
368	bl	__thread_std_smc_entry
369	/*
370	 * Load the returned r0-r3 into preserved registers and skip the
371	 * "returned" r4-r7 since they will not be returned to normal
372	 * world.
373	 */
374	pop	{r4-r7}
375	add	sp, #(4 * 4)
376
377	/* Disable interrupts before switching to temporary stack */
378	cpsid	aif
379	bl	thread_get_tmp_sp
380	mov	sp, r0
381
382	bl	thread_state_free
383
384	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
385	mov	r1, r4
386	mov	r2, r5
387	mov	r3, r6
388	mov	r4, r7
389	smc	#0
390	b	.	/* SMC should not return */
391UNWIND(	.fnend)
392END_FUNC thread_std_smc_entry
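/*
 * The "push {r0-r7}; mov r0, sp" pattern above (also used by the
 * vector_*_smc_entry functions) builds a struct thread_smc_args on
 * the stack. A sketch of the implied layout, with field names assumed
 * for illustration (the real definition lives in the C headers):
 *
 * struct thread_smc_args {
 *	uint32_t a0;	// mirrors r0, the SMC function ID on entry
 *	uint32_t a1;	// mirrors r1
 *	uint32_t a2;	// mirrors r2
 *	uint32_t a3;	// mirrors r3
 *	uint32_t a4;	// mirrors r4
 *	uint32_t a5;	// mirrors r5
 *	uint32_t a6;	// mirrors r6
 *	uint32_t a7;	// mirrors r7
 * };
 */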
393
394
395/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
396FUNC thread_rpc , :
397/*
398 * r0-r2 are used to pass parameters to normal world
399 * r0-r5 are used to pass the return value back from normal world
400 *
401 * Note that r3 is used to pass "resume information", that is, which
402 * thread it is that should resume.
403 *
404 * Since this function follows the AAPCS we need to preserve r4-r5,
405 * which are otherwise clobbered when returning from normal world.
406 */
407UNWIND(	.fnstart)
408	push	{r4-r5, lr}
409UNWIND(	.save	{r4-r5, lr})
410	push	{r0}
411UNWIND(	.save	{r0})
412
413	bl	thread_save_state
414	mov	r4, r0			/* Save original CPSR */
415
416	/*
417	 * Switch to temporary stack and SVC mode. Save CPSR to resume into.
418	 */
419	bl	thread_get_tmp_sp
420	ldr	r5, [sp]		/* Get pointer to rv[] */
421	cps	#CPSR_MODE_SVC		/* Change to SVC mode */
422	mov	sp, r0			/* Switch to tmp stack */
423
424	mov	r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
425	mov	r1, r4			/* CPSR to restore */
426	ldr	r2, =.thread_rpc_return
427	bl	thread_state_suspend
428	mov	r4, r0			/* Supply thread index */
429	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
430	ldm	r5, {r1-r3}		/* Load rv[] into r1-r3 (r0-r2 in normal world) */
431	smc	#0
432	b	.	/* SMC should not return */
433
434.thread_rpc_return:
435	/*
436	 * At this point the stack pointer has been restored to the value
437	 * it had when thread_save_state() was called above.
438	 *
439	 * We get here from thread_resume() above when the RPC has returned.
440	 * The IRQ and FIQ bits are restored to what they were when this
441	 * function was originally entered.
442	 */
443	pop	{r12}			/* Get pointer to rv[] */
444	stm	r12, {r0-r5}		/* Store r0-r5 into rv[] */
445	pop	{r4-r5, pc}
446UNWIND(	.fnend)
447END_FUNC thread_rpc
448KEEP_PAGER thread_rpc
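/*
 * A minimal usage sketch from C (the RPC code and payload shown are
 * assumptions for illustration; THREAD_RPC_NUM_ARGS and the
 * OPTEE_SMC_RETURN_RPC_* codes are defined in the C headers):
 *
 *	uint32_t rv[THREAD_RPC_NUM_ARGS] = { 0 };
 *
 *	rv[0] = OPTEE_SMC_RETURN_RPC_CMD;	// or another RPC code
 *	rv[1] = 0;	// payload, meaning depends on the RPC code
 *	rv[2] = 0;
 *	thread_rpc(rv);
 *	// On return rv[] holds the r0-r5 values passed back from
 *	// normal world, see the "stm r12, {r0-r5}" above.
 */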
449
450/*
451 * unsigned long thread_smc(unsigned long func_id, unsigned long a1,
452 *			    unsigned long a2, unsigned long a3)
453 */
454FUNC thread_smc , :
455UNWIND(	.fnstart)
456	smc	#0
457	bx	lr
458UNWIND(	.fnend)
459END_FUNC thread_smc
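/*
 * Usage sketch from C; the function ID below is hypothetical and only
 * illustrates the calling convention (arguments in r0-r3, result in
 * r0):
 *
 *	unsigned long res;
 *
 *	res = thread_smc(HYPOTHETICAL_SMC_FUNCID, 0, 0, 0);
 *	// res is the r0 value returned by the secure monitor/firmware
 */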
460
461FUNC thread_init_vbar , :
462UNWIND(	.fnstart)
463	/* Set vector (VBAR) */
464	write_vbar r0
465	bx	lr
466UNWIND(	.fnend)
467END_FUNC thread_init_vbar
468KEEP_PAGER thread_init_vbar
469
470/*
471 * Below are low level routines handling entry and return from user mode.
472 *
473 * thread_enter_user_mode() saves all registers that user mode can
474 * change so kernel mode can restore the needed registers when
475 * resuming execution after the call to thread_enter_user_mode() has
476 * returned. thread_enter_user_mode() doesn't return directly since it
477 * enters user mode instead; it's thread_unwind_user_mode() that does
478 * the returning by restoring the registers saved by
479 * thread_enter_user_mode().
480 *
481 * There are three ways for thread_enter_user_mode() to return to the
482 * caller: the user TA calls utee_return or utee_panic, or an abort occurs.
483 *
484 * Calls to utee_return or utee_panic are handled as:
485 * thread_svc_handler() -> tee_svc_handler() -> tee_svc_do_call()
486 * which calls syscall_return() or syscall_panic().
487 *
488 * These function calls return normally, except thread_svc_handler(),
489 * which is an exception handling routine and so reads the return
490 * address and SPSR to restore from the stack. syscall_return() and
491 * syscall_panic() change the return address and SPSR used by
492 * thread_svc_handler() so that, instead of returning into user mode
493 * as with other syscalls, it returns into thread_unwind_user_mode()
494 * in kernel mode. When thread_svc_handler() returns, the stack
495 * pointer is at the point where thread_enter_user_mode() left it, so
496 * this is where thread_unwind_user_mode() can operate.
497 *
498 * Aborts are handled in a similar way, but by thread_abort_handler():
499 * when the pager sees an abort from user mode that can't be handled,
500 * it updates the SPSR and return address used by
501 * thread_abort_handler() to return into thread_unwind_user_mode().
502 */
503
504/*
505 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
506 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
507 *               unsigned long user_func, unsigned long spsr,
508 *               uint32_t *exit_status0, uint32_t *exit_status1)
509 *
510 */
511FUNC __thread_enter_user_mode , :
512UNWIND(	.fnstart)
513UNWIND(	.cantunwind)
514	/*
515	 * Save all registers to allow syscall_return() to resume execution
516	 * as if this function had returned. This is also used in
517	 * syscall_panic().
518	 *
519	 * If the stack usage of this function is changed,
520	 * thread_unwind_user_mode() has to be updated.
521	 */
522	push    {r4-r12,lr}
523
524	ldr     r4, [sp, #(10 * 0x4)]   /* user stack pointer */
525	ldr     r5, [sp, #(11 * 0x4)]   /* user function */
526	ldr     r6, [sp, #(12 * 0x4)]   /* spsr */
527
528	/*
529	 * Save old user sp and set new user sp.
530	 */
531	cps	#CPSR_MODE_SYS
532	mov	r7, sp
533	mov     sp, r4
534	cps	#CPSR_MODE_SVC
535	push	{r7,r8}
536
537	/* Prepare user mode entry via eret_to_user_mode */
538	cpsid	aif
539	msr     spsr_fsxc, r6
540	mov	lr, r5
541
542	b	eret_to_user_mode
543UNWIND(	.fnend)
544END_FUNC __thread_enter_user_mode
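/*
 * A sketch of how a C caller might use this function (illustrative
 * only; the real wrapper lives in the C part of the thread code and
 * may differ):
 *
 *	uint32_t exit_status0 = 0;	// written by thread_unwind_user_mode()
 *	uint32_t exit_status1 = 0;	// written by thread_unwind_user_mode()
 *	uint32_t ret;
 *
 *	ret = __thread_enter_user_mode(a0, a1, a2, a3, user_sp,
 *				       user_func, spsr,
 *				       &exit_status0, &exit_status1);
 *	// ret is the value thread_unwind_user_mode() was passed in r0
 */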
545
546/*
547 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
548 *              uint32_t exit_status1);
549 * See description in thread.h
550 */
551FUNC thread_unwind_user_mode , :
552UNWIND(	.fnstart)
553UNWIND(	.cantunwind)
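	/*
	 * Stack layout at this point, as set up by
	 * __thread_enter_user_mode(): 10 words from "push {r4-r12,lr}"
	 * plus 2 words from "push {r7,r8}" sit below the stacked
	 * arguments, so stacked argument n (with user_sp as n = 0) is
	 * found at sp + (12 + n) * 4. exit_status0 is stacked argument
	 * 3 and exit_status1 is stacked argument 4, hence the offsets
	 * 15 * 4 and 16 * 4 below.
	 */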
554	ldr     ip, [sp, #(15 * 0x4)]   /* &ctx->panicked */
555	str	r1, [ip]
556	ldr     ip, [sp, #(16 * 0x4)]   /* &ctx->panic_code */
557	str	r2, [ip]
558
559	/* Restore old user sp */
560	pop	{r4,r7}
561	cps	#CPSR_MODE_SYS
562	mov	sp, r4
563	cps	#CPSR_MODE_SVC
564
565	pop     {r4-r12,pc}	/* Match the push in thread_enter_user_mode()*/
566UNWIND(	.fnend)
567END_FUNC thread_unwind_user_mode
568
569	.macro maybe_restore_mapping
570		/*
571		 * This macro is a bit hard to read due to all the ifdefs:
572		 * we're testing for two different configs, which makes four
573		 * different combinations.
574		 *
575		 * - With LPAE, and then some extra code if with
576		 *   CFG_CORE_UNMAP_CORE_AT_EL0
577		 * - Without LPAE, and then some extra code if with
578		 *   CFG_CORE_UNMAP_CORE_AT_EL0
579		 */
580
581		/*
582		 * At this point we can't rely on any memory being writable
583		 * yet, so we're using TPIDRPRW to store r0, and (with LPAE or
584		 * CFG_CORE_UNMAP_CORE_AT_EL0) TPIDRURO to store r1 too.
585		 */
586		write_tpidrprw r0
587#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
588		write_tpidruro r1
589#endif
590
591#ifdef CFG_WITH_LPAE
592		read_ttbr0_64bit r0, r1
593		tst	r1, #BIT(TTBR_ASID_SHIFT - 32)
594		beq	11f
595
596#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
597		/*
598		 * Update the mapping to use the full kernel mode mapping.
599		 * Since the translation table could reside above 4GB we'll
600		 * have to use 64-bit arithmetics.
601		 */
602		subs	r0, r0, #CORE_MMU_L1_TBL_OFFSET
603		sbc	r1, r1, #0
604#endif
605		bic	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
606		write_ttbr0_64bit r0, r1
607		isb
608
609#else /*!CFG_WITH_LPAE*/
610		read_contextidr r0
611		tst	r0, #1
612		beq	11f
613
614		/* Update the mapping to use the full kernel mode mapping. */
615		bic	r0, r0, #1
616		write_contextidr r0
617		isb
618#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
619		read_ttbr1 r0
620		sub	r0, r0, #CORE_MMU_L1_TBL_OFFSET
621		write_ttbr1 r0
622		isb
623#endif
624
625#endif /*!CFG_WITH_LPAE*/
626
627#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
628		ldr	r0, =thread_user_kcode_offset
629		ldr	r0, [r0]
630		read_vbar r1
631		add	r1, r1, r0
632		write_vbar r1
633		isb
634
635	11:	/*
636		 * The PC is adjusted unconditionally to guard against the
637		 * case where an FIQ occurred just before we did the "cpsid aif".
638		 */
639		ldr	r0, =22f
640		bx	r0
641	22:
642#else
643	11:
644#endif
645		read_tpidrprw r0
646#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
647		read_tpidruro r1
648#endif
649	.endm
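/*
 * Summary of the state the macro above inspects and restores (a
 * sketch; the macro itself is authoritative):
 *
 * - With LPAE: the "user mapping active" flag is the ASID bit
 *   BIT(TTBR_ASID_SHIFT - 32) in the high word of TTBR0; clearing it
 *   selects the kernel ASID.
 * - Without LPAE: the flag is bit 0 of CONTEXTIDR; clearing it
 *   selects the kernel ASID.
 * - With CFG_CORE_UNMAP_CORE_AT_EL0: the user mapping additionally
 *   uses translation tables located CORE_MMU_L1_TBL_OFFSET bytes
 *   above the kernel ones, and VBAR points at the alias of this
 *   vector in the reduced mapping, so both are moved back to the
 *   kernel versions here. eret_to_user_mode below does the reverse.
 */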
650
651/* The handler of native interrupt. */
652.macro	native_intr_handler mode:req
653	cpsid	aif
654	maybe_restore_mapping
655
656	/*
657	 * FIQ and IRQ have a +4 offset for lr compared to preferred return
658	 * address
659	 */
660	sub     lr, lr, #4
661
662	/*
663	 * We're always saving {r0-r3}. In IRQ mode we're saving r12 also.
664	 * In FIQ mode we're saving the banked FIQ registers {r8-r12}
665	 * because the secure monitor doesn't save those. The treatment of
666	 * the banked fiq registers is somewhat analogous to the lazy save
667	 * of VFP registers.
668	 */
669	.ifc	\mode\(),fiq
670	push	{r0-r3, r8-r12, lr}
671	.else
672	push	{r0-r3, r12, lr}
673	.endif
674
675	bl	thread_check_canaries
676	ldr	lr, =thread_nintr_handler_ptr
677	ldr	lr, [lr]
678	blx	lr
679
680	mrs	r0, spsr
681	cmp_spsr_user_mode r0
682
683	.ifc	\mode\(),fiq
684	pop	{r0-r3, r8-r12, lr}
685	.else
686	pop	{r0-r3, r12, lr}
687	.endif
688
689	movsne	pc, lr
690	b	eret_to_user_mode
691.endm
692
693/* The handler of foreign interrupt. */
694.macro foreign_intr_handler mode:req
695	cpsid	aif
696	maybe_restore_mapping
697
698	sub	lr, lr, #4
699	push	{r12}
700
701	.ifc	\mode\(),fiq
702	/*
703	 * If a foreign (non-secure) interrupt is received as a FIQ we need
704	 * to check whether we're in a saveable state or whether we need
705	 * to mask the interrupt to be handled later.
706	 *
707	 * The window when this is needed is quite narrow, it's between
708	 * entering the exception vector and until the "cpsid" instruction
709	 * of the handler has been executed.
710	 *
711	 * Currently we can save the state properly if the FIQ is received
712	 * while in user or svc (kernel) mode.
713	 *
714	 * If we're returning to abort, undef or irq mode we're returning
715	 * with the full kernel mapping restored. This is OK since, before
716	 * the handler we're returning to eventually returns to user mode,
717	 * the reduced mapping will be restored again.
718	 */
719	mrs	r12, spsr
720	and	r12, r12, #ARM32_CPSR_MODE_MASK
721	cmp	r12, #ARM32_CPSR_MODE_USR
722	cmpne	r12, #ARM32_CPSR_MODE_SVC
723	beq	1f
724	mrs	r12, spsr
725	orr	r12, r12, #ARM32_CPSR_F
726	msr	spsr_fsxc, r12
727	pop	{r12}
728	movs	pc, lr
7291:
730	.endif
731
732	push	{lr}
733
734	.ifc	\mode\(),fiq
735	bl	thread_save_state_fiq
736	.else
737	bl	thread_save_state
738	.endif
739
740	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
741	mrs	r1, spsr
742	pop	{r2}
743	pop	{r12}
744	blx	thread_state_suspend
745	mov	r4, r0		/* Supply thread index */
746
747	/*
748	 * Switch to SVC mode and copy current stack pointer as it already
749	 * is the tmp stack.
750	 */
751	mov	r0, sp
752	cps	#CPSR_MODE_SVC
753	mov	sp, r0
754
755#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
756	/*
757	 * Prevent leaking information about which cache entries have been
758	 * used. We're relying on the secure monitor/dispatcher to take
759	 * care of the BTB.
760	 */
761	mov	r0, #DCACHE_OP_CLEAN_INV
762	bl	dcache_op_louis
763	write_iciallu
764#endif
765
766	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
767	ldr	r1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
768	mov	r2, #0
769	mov	r3, #0
770	/* r4 is already filled in above */
771	smc	#0
772	b	.	/* SMC should not return */
773.endm
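/*
 * The SMC issued at the end of foreign_intr_handler reports
 * OPTEE_SMC_RETURN_RPC_FOREIGN_INTR to normal world with the
 * suspended thread's index (returned by thread_state_suspend()) in
 * r4. Normal world is expected to handle the interrupt and later
 * resume the thread using that index, at which point execution
 * continues at the address that was saved by thread_state_suspend().
 */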
774
775	.section .text.thread_excp_vect
776        .align	5
777FUNC thread_excp_vect , :
778UNWIND(	.fnstart)
779UNWIND(	.cantunwind)
780	b	.			/* Reset			*/
781	b	thread_und_handler	/* Undefined instruction	*/
782	b	thread_svc_handler	/* System call			*/
783	b	thread_pabort_handler	/* Prefetch abort		*/
784	b	thread_dabort_handler	/* Data abort			*/
785	b	.			/* Reserved			*/
786	b	thread_irq_handler	/* IRQ				*/
787	b	thread_fiq_handler	/* FIQ				*/
788#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
789	.macro vector_prologue_spectre
790		/*
791		 * This depends on SP being 8 byte aligned, that is, the
792		 * lowest three bits in SP are zero.
793		 *
794		 * To avoid unexpected speculation we need to invalidate
795		 * the branch predictor before we do the first branch. It
796		 * doesn't matter if it's a conditional or an unconditional
797		 * branch; speculation can still occur.
798		 *
799		 * The idea is to form a specific bit pattern in the lowest
800		 * three bits of SP depending on which entry in the vector
801		 * we enter via.  This is done by adding 1 to SP in each
802		 * entry but the last.
803		 */
804		add	sp, sp, #1	/* 7:111 Reset			*/
805		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
806		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
807		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
808		add	sp, sp, #1	/* 3:011 Data abort		*/
809		add	sp, sp, #1	/* 2:010 Reserved		*/
810		add	sp, sp, #1	/* 1:001 IRQ			*/
811		write_tpidrprw r0	/* 0:000 FIQ			*/
812	.endm
813
814        .align	5
815	.global thread_excp_vect_workaround_a15
816thread_excp_vect_workaround_a15:
817	vector_prologue_spectre
818	mrs	r0, spsr
819	cmp_spsr_user_mode r0
820	bne	1f
821	/*
822	 * Invalidate the branch predictor for the current processor.
823	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
824	 * effective.
825	 * Note that the BPIALL instruction is not effective in
826	 * invalidating the branch predictor on Cortex-A15. For that CPU,
827	 * set ACTLR[0] to 1 during early processor initialisation, and
828	 * invalidate the branch predictor by performing an ICIALLU
829	 * instruction. See also:
830	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
831	 */
832	write_iciallu
833	isb
834	b	1f
835
836        .align	5
837	.global thread_excp_vect_workaround
838thread_excp_vect_workaround:
839	vector_prologue_spectre
840	mrs	r0, spsr
841	cmp_spsr_user_mode r0
842	bne	1f
843	/* Invalidate the branch predictor for the current processor. */
844	write_bpiall
845	isb
846
8471:	and	r0, sp, #(BIT(0) | BIT(1) | BIT(2))
848	bic	sp, sp, #(BIT(0) | BIT(1) | BIT(2))
849	add	pc, pc, r0, LSL #3
850	nop
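	/*
	 * Worked example of the dispatch above: r0 holds the number of
	 * "add sp, sp, #1" instructions executed in the vector entry,
	 * 0 for FIQ up to 7 for Reset. Reading PC in ARM state yields
	 * the address of the "add pc, ..." instruction plus 8, which is
	 * the start of the table below, so the branch lands r0 * 8
	 * bytes into it. Each slot is 8 bytes (restore r0, then
	 * branch), so r0 = 0 selects the FIQ slot, r0 = 1 the IRQ slot
	 * and so on, mirroring the vector order in reverse.
	 */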
851
852	read_tpidrprw r0
853	b	thread_fiq_handler	/* FIQ				*/
854	read_tpidrprw r0
855	b	thread_irq_handler	/* IRQ				*/
856	read_tpidrprw r0
857	b	.			/* Reserved			*/
858	read_tpidrprw r0
859	b	thread_dabort_handler	/* Data abort			*/
860	read_tpidrprw r0
861	b	thread_pabort_handler	/* Prefetch abort		*/
862	read_tpidrprw r0
863	b	thread_svc_handler	/* System call			*/
864	read_tpidrprw r0
865	b	thread_und_handler	/* Undefined instruction	*/
866	read_tpidrprw r0
867	b	.			/* Reset			*/
868#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/
869
870thread_und_handler:
871	cpsid	aif
872	maybe_restore_mapping
873	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
874	mrs	r1, spsr
875	tst	r1, #CPSR_T
876	subne	lr, lr, #2
877	subeq	lr, lr, #4
878	mov	r0, #ABORT_TYPE_UNDEF
879	b	thread_abort_common
880
881thread_dabort_handler:
882	cpsid	aif
883	maybe_restore_mapping
884	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
885	sub	lr, lr, #8
886	mov	r0, #ABORT_TYPE_DATA
887	b	thread_abort_common
888
889thread_pabort_handler:
890	cpsid	aif
891	maybe_restore_mapping
892	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
893	sub	lr, lr, #4
894	mov	r0, #ABORT_TYPE_PREFETCH
895
896thread_abort_common:
897	/*
898	 * At this label:
899	 * cpsr is in mode undef or abort
900	 * sp is still pointing to struct thread_core_local belonging to
901	 * this core.
902	 * {r0, r1} are saved in struct thread_core_local pointed to by sp
903	 * {r2-r11, ip} are untouched.
904	 * r0 holds the first argument for abort_handler()
905	 */
906
907	/*
908	 * Update core local flags.
909	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
910	 */
911	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
912	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
913	orr	r1, r1, #THREAD_CLF_ABORT
914
915	/*
916	 * Select stack and update flags accordingly
917	 *
918	 * Normal case:
919	 * If the abort stack is unused select that.
920	 *
921	 * Fatal error handling:
922	 * If we're already using the abort stack as noted by bit
923	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
924	 * field we're selecting the temporary stack instead to be able to
925	 * make a stack trace of the abort in abort mode.
926	 *
927	 * r1 is initialized as a temporary stack pointer until we've
928	 * switched to system mode.
929	 */
930	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
931	orrne	r1, r1, #THREAD_CLF_TMP /* flags |= THREAD_CLF_TMP; */
932	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
933	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
934	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
935
936	/*
937	 * Store registers on the stack matching struct thread_abort_regs,
938	 * starting from the end of the struct: first {r2-r11, ip} are
939	 * pushed, then the content of the previously saved {r0, r1} is
940	 * loaded and pushed, followed by {pad, spsr, elr}.
941	 * After this only {usr_sp, usr_lr} are missing in the
942	 * struct.
943	 */
944	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
945	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
946	/* Push the original {r0-r1} on the selected stack */
947	stmdb	r1!, {r2-r3}
948	mrs	r3, spsr
949	/* Push {pad, spsr, elr} on the selected stack */
950	stmdb	r1!, {r2, r3, lr}
951
952	cps	#CPSR_MODE_SYS
953	str	lr, [r1, #-4]!
954	str	sp, [r1, #-4]!
955	mov	sp, r1
956
957	bl	abort_handler
958
959	mov	ip, sp
960	ldr	sp, [ip], #4
961	ldr	lr, [ip], #4
962
963	/*
964	 * Even if we entered via CPSR_MODE_UND, we are returning via
965	 * CPSR_MODE_ABT. It doesn't matter as lr and spsr are assigned
966	 * here.
967	 */
968	cps	#CPSR_MODE_ABT
969	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
970	msr	spsr_fsxc, r1
971
972	/* Update core local flags */
973	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
974	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
975	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
976
977	cmp_spsr_user_mode r1
978	ldm	ip, {r0-r11, ip}
979	movsne	pc, lr
980	b	eret_to_user_mode
981	/* end thread_abort_common */
982
983thread_svc_handler:
984	cpsid	aif
985
986	maybe_restore_mapping
987
988	push	{r0-r7, lr}
989	mrs	r0, spsr
990	push	{r0}
991	mov	r0, sp
992	bl	tee_svc_handler
993	cpsid	aif	/* In case something was unmasked */
994	pop	{r0}
995	msr	spsr_fsxc, r0
996	cmp_spsr_user_mode r0
997	pop	{r0-r7, lr}
998	movsne	pc, lr
999	b	eret_to_user_mode
1000	/* end thread_svc_handler */
1001
1002thread_fiq_handler:
1003#if defined(CFG_ARM_GICV3)
1004	foreign_intr_handler	fiq
1005#else
1006	native_intr_handler	fiq
1007#endif
1008	/* end thread_fiq_handler */
1009
1010thread_irq_handler:
1011#if defined(CFG_ARM_GICV3)
1012	native_intr_handler	irq
1013#else
1014	foreign_intr_handler	irq
1015#endif
1016	/* end thread_irq_handler */
1017
1018	/*
1019	 * Returns to user mode.
1020	 * Expects to be jumped to with lr pointing to the user space
1021	 * address to jump to and spsr holding the desired cpsr. Async
1022	 * abort, irq and fiq should be masked.
1023	 */
1024eret_to_user_mode:
1025	write_tpidrprw r0
1026#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
1027	write_tpidruro r1
1028#endif
1029
1030#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
1031	ldr	r0, =thread_user_kcode_offset
1032	ldr	r0, [r0]
1033	read_vbar r1
1034	sub	r1, r1, r0
1035	write_vbar r1
1036	isb
1037
1038	/* Jump into the reduced mapping before the full mapping is removed */
1039	ldr	r1, =1f
1040	sub	r1, r1, r0
1041	bx	r1
10421:
1043#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
1044
1045#ifdef CFG_WITH_LPAE
1046	read_ttbr0_64bit r0, r1
1047#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
1048	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
1049#endif
1050	/* switch to user ASID */
1051	orr	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
1052	write_ttbr0_64bit r0, r1
1053	isb
1054#else /*!CFG_WITH_LPAE*/
1055#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
1056	read_ttbr1 r0
1057	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
1058	write_ttbr1 r0
1059	isb
1060#endif
1061	read_contextidr r0
1062	orr	r0, r0, #BIT(0)
1063	write_contextidr r0
1064	isb
1065#endif /*!CFG_WITH_LPAE*/
1066
1067	read_tpidrprw r0
1068#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
1069	read_tpidruro r1
1070#endif
1071
1072	movs	pc, lr
1073
1074	/*
1075	 * void icache_inv_user_range(void *addr, size_t size);
1076	 *
1077	 * This function has to execute with the user space ASID active;
1078	 * this means executing with the reduced mapping, and the code
1079	 * needs to be located here together with the vector.
1080	 */
1081	.global icache_inv_user_range
1082	.type icache_inv_user_range , %function
1083icache_inv_user_range:
1084	push	{r4-r7}
1085
1086	/* Mask all exceptions */
1087	mrs	r4, cpsr	/* This register must be preserved */
1088	cpsid	aif
1089
1090#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
1091	ldr	r2, =thread_user_kcode_offset
1092	ldr	r2, [r2]
1093	read_vbar r5		/* This register must be preserved */
1094	sub	r3, r5, r2
1095	write_vbar r3
1096	isb
1097
1098	/* Jump into the reduced mapping before the full mapping is removed */
1099	ldr	r3, =1f
1100	sub	r3, r3, r2
1101	bx	r3
11021:
1103#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
1104
1105#ifdef CFG_WITH_LPAE
1106	read_ttbr0_64bit r6, r7	/* These registers must be preserved */
1107#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
1108	add	r2, r6, #CORE_MMU_L1_TBL_OFFSET
1109#endif
1110	/* switch to user ASID */
1111	orr	r3, r7, #BIT(TTBR_ASID_SHIFT - 32)
1112	write_ttbr0_64bit r2, r3
1113	isb
1114#else /*!CFG_WITH_LPAE*/
1115#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
1116	read_ttbr1 r6		/* This register must be preserved */
1117	add	r2, r6, #CORE_MMU_L1_TBL_OFFSET
1118	write_ttbr1 r2
1119	isb
1120#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
1121	read_contextidr r7	/* This register must be preserved */
1122	orr	r2, r7, #BIT(0)
1123	write_contextidr r2
1124	isb
1125#endif /*!CFG_WITH_LPAE*/
1126
1127	/*
1128	 * Do the actual icache invalidation
1129	 */
1130
1131	/* Calculate minimum icache line size, result in r2 */
1132	read_ctr r3
1133	and     r3, r3, #CTR_IMINLINE_MASK
1134	mov     r2, #CTR_WORD_SIZE
1135	lsl     r2, r2, r3
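	/*
	 * Worked example: CTR.IminLine is the log2 of the number of
	 * words in the smallest instruction cache line, and
	 * CTR_WORD_SIZE is the size of a word in bytes. With an assumed
	 * IminLine of 4 the computation above gives 4 << 4 = 64 bytes
	 * per line.
	 */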
1136
1137	add	r1, r0, r1
1138	sub	r3, r2, #1
1139	bic	r0, r0, r3
11401:
1141	write_icimvau r0
1142	add	r0, r0, r2
1143	cmp	r0, r1
1144	blo	1b
1145
1146	/* Invalidate entire branch predictor array inner shareable */
1147	write_bpiallis
1148
1149	dsb	ishst
1150	isb
1151
1152#ifdef CFG_WITH_LPAE
1153	write_ttbr0_64bit r6, r7
1154	isb
1155#else /*!CFG_WITH_LPAE*/
1156	write_contextidr r7
1157	isb
1158#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
1159	write_ttbr1 r6
1160	isb
1161#endif
1162#endif /*!CFG_WITH_LPAE*/
1163
1164#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
1165	write_vbar r5
1166	isb
1167	/*
1168	 * The PC is adjusted unconditionally to guard against the
1169	 * case where an FIQ occurred just before we did the "cpsid aif".
1170	 */
1171	ldr	r0, =1f
1172	bx	r0
11731:
1174#endif
1175
1176	msr	cpsr_fsxc, r4	/* Restore exceptions */
1177	pop	{r4-r7}
1178	bx	lr		/* End of icache_inv_user_range() */
1179
1180	/*
1181	 * Make sure that literals are placed before the
1182	 * thread_excp_vect_end label.
1183	 */
1184	.pool
1185UNWIND(	.fnend)
1186	.global thread_excp_vect_end
1187thread_excp_vect_end:
1188END_FUNC thread_excp_vect
1189