xref: /optee_os/core/arch/arm/kernel/thread_a32.S (revision b8bb0afa738e6038bbd92b57742aa2526df9f20a)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2016-2017, Linaro Limited
4 * Copyright (c) 2014, STMicroelectronics International N.V.
5 */
6
7#include <arm32_macros.S>
8#include <arm.h>
9#include <asm.S>
10#include <generated/asm-defines.h>
11#include <keep.h>
12#include <kernel/abort.h>
13#include <kernel/thread_defs.h>
14#include <kernel/unwind.h>
15#include <mm/core_mmu.h>
16#include <sm/optee_smc.h>
17#include <sm/teesmc_opteed.h>
18#include <sm/teesmc_opteed_macros.h>
19
20#include "thread_private.h"
21
22	.arch_extension sec
23
24	.macro cmp_spsr_user_mode reg:req
25		/*
26		 * We're only testing the lower 4 bits as bit 4 (0x10)
27		 * is always set.
28		 */
29		tst	\reg, #0x0f
30	.endm
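	/*
	 * Typical usage later in this file (a sketch of the existing
	 * pattern, not new functionality): test the SPSR we are about to
	 * return with and pick the return path accordingly.
	 *
	 *	cmp_spsr_user_mode r0
	 *	movnes	pc, lr			@ return to kernel mode
	 *	b	eret_to_user_mode	@ return to user mode
	 */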
31
32LOCAL_FUNC vector_std_smc_entry , :
33UNWIND(	.fnstart)
34UNWIND(	.cantunwind)
35	push	{r0-r7}
36	mov	r0, sp
37	bl	thread_handle_std_smc
38	/*
39	 * Normally thread_handle_std_smc() should return via
40	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
41	 * hasn't switched stack (error detected) it will do a normal "C"
42	 * return.
43	 */
44	pop	{r1-r8}
45	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
46	smc	#0
47	b	.	/* SMC should not return */
48UNWIND(	.fnend)
49END_FUNC vector_std_smc_entry
50
51LOCAL_FUNC vector_fast_smc_entry , :
52UNWIND(	.fnstart)
53UNWIND(	.cantunwind)
54	push	{r0-r7}
55	mov	r0, sp
56	bl	thread_handle_fast_smc
57	pop	{r1-r8}
58	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
59	smc	#0
60	b	.	/* SMC should not return */
61UNWIND(	.fnend)
62END_FUNC vector_fast_smc_entry
63
64LOCAL_FUNC vector_fiq_entry , :
65UNWIND(	.fnstart)
66UNWIND(	.cantunwind)
67	/* Secure Monitor received a FIQ and passed control to us. */
68	bl	thread_check_canaries
69	ldr	lr, =thread_nintr_handler_ptr
70	ldr	lr, [lr]
71	blx	lr
72	mov	r1, r0
73	ldr	r0, =TEESMC_OPTEED_RETURN_FIQ_DONE
74	smc	#0
75	b	.	/* SMC should not return */
76UNWIND(	.fnend)
77END_FUNC vector_fiq_entry
78
79LOCAL_FUNC vector_cpu_on_entry , :
80UNWIND(	.fnstart)
81UNWIND(	.cantunwind)
82	ldr	lr, =thread_cpu_on_handler_ptr
83	ldr	lr, [lr]
84	blx	lr
85	mov	r1, r0
86	ldr	r0, =TEESMC_OPTEED_RETURN_ON_DONE
87	smc	#0
88	b	.	/* SMC should not return */
89UNWIND(	.fnend)
90END_FUNC vector_cpu_on_entry
91
92LOCAL_FUNC vector_cpu_off_entry , :
93UNWIND(	.fnstart)
94UNWIND(	.cantunwind)
95	ldr	lr, =thread_cpu_off_handler_ptr
96	ldr	lr, [lr]
97	blx	lr
98	mov	r1, r0
99	ldr	r0, =TEESMC_OPTEED_RETURN_OFF_DONE
100	smc	#0
101	b	.	/* SMC should not return */
102UNWIND(	.fnend)
103END_FUNC vector_cpu_off_entry
104
105LOCAL_FUNC vector_cpu_suspend_entry , :
106UNWIND(	.fnstart)
107UNWIND(	.cantunwind)
108	ldr	lr, =thread_cpu_suspend_handler_ptr
109	ldr	lr, [lr]
110	blx	lr
111	mov	r1, r0
112	ldr	r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
113	smc	#0
114	b	.	/* SMC should not return */
115UNWIND(	.fnend)
116END_FUNC vector_cpu_suspend_entry
117
118LOCAL_FUNC vector_cpu_resume_entry , :
119UNWIND(	.fnstart)
120UNWIND(	.cantunwind)
121	ldr	lr, =thread_cpu_resume_handler_ptr
122	ldr	lr, [lr]
123	blx	lr
124	mov	r1, r0
125	ldr	r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
126	smc	#0
127	b	.	/* SMC should not return */
128UNWIND(	.fnend)
129END_FUNC vector_cpu_resume_entry
130
131LOCAL_FUNC vector_system_off_entry , :
132UNWIND(	.fnstart)
133UNWIND(	.cantunwind)
134	ldr	lr, =thread_system_off_handler_ptr
135	ldr	lr, [lr]
136	blx	lr
137	mov	r1, r0
138	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
139	smc	#0
140	b	.	/* SMC should not return */
141UNWIND(	.fnend)
142END_FUNC vector_system_off_entry
143
144LOCAL_FUNC vector_system_reset_entry , :
145UNWIND(	.fnstart)
146UNWIND(	.cantunwind)
147	ldr	lr, =thread_system_reset_handler_ptr
148	ldr	lr, [lr]
149	blx	lr
150	mov	r1, r0
151	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
152	smc	#0
153	b	.	/* SMC should not return */
154UNWIND(	.fnend)
155END_FUNC vector_system_reset_entry
156
157/*
158 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
159 * initialization.  Also used when compiled with the internal monitor, but
160 * the cpu_*_entry and system_*_entry are not used then.
161 *
162 * Note that ARM-TF depends on the layout of this vector table; any
163 * change in layout has to be synced with ARM-TF.
164 */
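/*
 * For illustration only, a hypothetical C view of this table using the
 * local symbol names below (not ARM-TF's actual identifiers); each entry
 * is a single 4-byte branch instruction:
 *
 *	struct thread_vectors {
 *		void (*std_smc_entry)(void);
 *		void (*fast_smc_entry)(void);
 *		void (*cpu_on_entry)(void);
 *		void (*cpu_off_entry)(void);
 *		void (*cpu_resume_entry)(void);
 *		void (*cpu_suspend_entry)(void);
 *		void (*fiq_entry)(void);
 *		void (*system_off_entry)(void);
 *		void (*system_reset_entry)(void);
 *	};
 */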
165FUNC thread_vector_table , :
166UNWIND(	.fnstart)
167UNWIND(	.cantunwind)
168	b	vector_std_smc_entry
169	b	vector_fast_smc_entry
170	b	vector_cpu_on_entry
171	b	vector_cpu_off_entry
172	b	vector_cpu_resume_entry
173	b	vector_cpu_suspend_entry
174	b	vector_fiq_entry
175	b	vector_system_off_entry
176	b	vector_system_reset_entry
177UNWIND(	.fnend)
178END_FUNC thread_vector_table
179KEEP_PAGER thread_vector_table
180
181FUNC thread_set_abt_sp , :
182UNWIND(	.fnstart)
183UNWIND(	.cantunwind)
184	mrs	r1, cpsr
185	cps	#CPSR_MODE_ABT
186	mov	sp, r0
187	msr	cpsr, r1
188	bx	lr
189UNWIND(	.fnend)
190END_FUNC thread_set_abt_sp
191
192FUNC thread_set_und_sp , :
193UNWIND(	.fnstart)
194UNWIND(	.cantunwind)
195	mrs	r1, cpsr
196	cps	#CPSR_MODE_UND
197	mov	sp, r0
198	msr	cpsr, r1
199	bx	lr
200UNWIND(	.fnend)
201END_FUNC thread_set_und_sp
202
203FUNC thread_set_irq_sp , :
204UNWIND(	.fnstart)
205UNWIND(	.cantunwind)
206	mrs	r1, cpsr
207	cps	#CPSR_MODE_IRQ
208	mov	sp, r0
209	msr	cpsr, r1
210	bx	lr
211UNWIND(	.fnend)
212END_FUNC thread_set_irq_sp
213
214FUNC thread_set_fiq_sp , :
215UNWIND(	.fnstart)
216UNWIND(	.cantunwind)
217	mrs	r1, cpsr
218	cps	#CPSR_MODE_FIQ
219	mov	sp, r0
220	msr	cpsr, r1
221	bx	lr
222UNWIND(	.fnend)
223END_FUNC thread_set_fiq_sp
224
225/* void thread_resume(struct thread_ctx_regs *regs) */
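/*
 * A note on the layout assumed by the loads below: struct
 * thread_ctx_regs is expected to hold r0-r12 followed by usr_sp,
 * usr_lr, svc_spsr, svc_sp, svc_lr, pc and cpsr, in that order.
 */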
226FUNC thread_resume , :
227UNWIND(	.fnstart)
228UNWIND(	.cantunwind)
229	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */
230
231	cps	#CPSR_MODE_SYS
232	ldm	r12!, {sp, lr}
233
234	cps	#CPSR_MODE_SVC
235	ldm	r12!, {r1, sp, lr}
236	msr	spsr_fsxc, r1
237
238	ldm	r12, {r1, r2}
239
240	/*
241	 * Switch to a mode other than SVC since we need to set spsr in
242	 * order to return to the old state properly, and the mode we're
243	 * returning to may itself be SVC.
244	 */
245	cps	#CPSR_MODE_ABT
246	cmp_spsr_user_mode r2
247	mov	lr, r1
248	msr	spsr_fsxc, r2
249	ldm	r0, {r0-r12}
250	movnes	pc, lr
251	b	eret_to_user_mode
252UNWIND(	.fnend)
253END_FUNC thread_resume
254
255/*
256 * Disables IRQ and FIQ and saves the state of the thread from FIQ mode,
257 * which has banked r8-r12 registers. Returns the original CPSR.
258 */
259LOCAL_FUNC thread_save_state_fiq , :
260UNWIND(	.fnstart)
261UNWIND(	.cantunwind)
262	mov	r9, lr
263
264	/*
265	 * Uses stack for temporary storage, while storing needed
266	 * context in the thread context struct.
267	 */
268
269	mrs	r8, cpsr
270
271	cpsid	aif			/* Disable Async abort, IRQ and FIQ */
272
273	push	{r4-r7}
274	push	{r0-r3}
275
276	mrs	r6, cpsr		/* Save current CPSR */
277
278	bl	thread_get_ctx_regs
279
280	pop	{r1-r4}			/* r0-r3 pushed above */
281	stm	r0!, {r1-r4}
282	pop	{r1-r4}			/* r4-r7 pushed above */
283	stm	r0!, {r1-r4}
284
285	cps     #CPSR_MODE_SYS
286	stm	r0!, {r8-r12}
287	stm     r0!, {sp, lr}
288
289	cps     #CPSR_MODE_SVC
290	mrs     r1, spsr
291	stm     r0!, {r1, sp, lr}
292
293	/* back to fiq mode */
294	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
295	msr	cpsr, r6		/* Restore mode */
296
297	mov	r0, r8			/* Return original CPSR */
298	bx	r9
299UNWIND(	.fnend)
300END_FUNC thread_save_state_fiq
301
302/*
303 * Disables IRQ and FIQ and saves the state of the thread. Returns the
304 * original CPSR.
305 */
306LOCAL_FUNC thread_save_state , :
307UNWIND(	.fnstart)
308UNWIND(	.cantunwind)
309	push	{r12, lr}
310	/*
311	 * Uses stack for temporary storage, while storing needed
312	 * context in the thread context struct.
313	 */
314
315	mrs	r12, cpsr
316
317	cpsid	aif			/* Disable Async abort, IRQ and FIQ */
318
319	push	{r4-r7}
320	push	{r0-r3}
321
322	mov	r5, r12			/* Save CPSR in a preserved register */
323	mrs	r6, cpsr		/* Save current CPSR */
324
325	bl	thread_get_ctx_regs
326
327	pop	{r1-r4}			/* r0-r3 pushed above */
328	stm	r0!, {r1-r4}
329	pop	{r1-r4}			/* r4-r7 pushed above */
330	stm	r0!, {r1-r4}
331	stm	r0!, {r8-r11}
332
333	pop	{r12, lr}
334	stm	r0!, {r12}
335
336	cps	#CPSR_MODE_SYS
337	stm	r0!, {sp, lr}
338
339	cps	#CPSR_MODE_SVC
340	mrs	r1, spsr
341	stm	r0!, {r1, sp, lr}
342
343	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
344	msr	cpsr, r6		/* Restore mode */
345
346	mov	r0, r5			/* Return original CPSR */
347	bx	lr
348UNWIND(	.fnend)
349END_FUNC thread_save_state
350
351FUNC thread_std_smc_entry , :
352UNWIND(	.fnstart)
353UNWIND(	.cantunwind)
354	/* Pass r0-r7 in a struct thread_smc_args */
355	push	{r0-r7}
356	mov	r0, sp
357	bl	__thread_std_smc_entry
358	/*
359	 * Load the returned r0-r3 into preserved registers and skip the
360	 * "returned" r4-r7 since they will not be returned to normal
361	 * world.
362	 */
363	pop	{r4-r7}
364	add	sp, #(4 * 4)
365
366	/* Disable interrupts before switching to temporary stack */
367	cpsid	aif
368	bl	thread_get_tmp_sp
369	mov	sp, r0
370
371	bl	thread_state_free
372
373	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
374	mov	r1, r4
375	mov	r2, r5
376	mov	r3, r6
377	mov	r4, r7
378	smc	#0
379	b	.	/* SMC should not return */
380UNWIND(	.fnend)
381END_FUNC thread_std_smc_entry
382
383
384/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
385FUNC thread_rpc , :
386/*
387 * r0-r2 are used to pass parameters to normal world
388 * r0-r5 are used to pass the return value back from normal world
389 *
390 * Note that r3 is used to pass "resume information", that is, which
391 * thread should be resumed.
392 *
393 * Since this function follows the AAPCS we need to preserve r4-r5,
394 * which are otherwise clobbered when returning from normal world.
395 */
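/*
 * Concretely, as implemented below: rv[0..2] are loaded into r1-r3 of
 * the CALL_DONE SMC issued to the monitor, and when the thread is
 * resumed the six words r0-r5 delivered by normal world are stored
 * back into rv[0..5].
 */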
396UNWIND(	.fnstart)
397	push	{r4-r5, lr}
398UNWIND(	.save	{r4-r5, lr})
399	push	{r0}
400UNWIND(	.save	{r0})
401
402	bl	thread_save_state
403	mov	r4, r0			/* Save original CPSR */
404
405	/*
406	 * Switch to temporary stack and SVC mode. Save CPSR to resume into.
407	 */
408	bl	thread_get_tmp_sp
409	ldr	r5, [sp]		/* Get pointer to rv[] */
410	cps	#CPSR_MODE_SVC		/* Change to SVC mode */
411	mov	sp, r0			/* Switch to tmp stack */
412
413	mov	r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
414	mov	r1, r4			/* CPSR to restore */
415	ldr	r2, =.thread_rpc_return
416	bl	thread_state_suspend
417	mov	r4, r0			/* Supply thread index */
418	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
419	ldm	r5, {r1-r3}		/* Load rv[] into r0-r2 */
420	smc	#0
421	b	.	/* SMC should not return */
422
423.thread_rpc_return:
424	/*
425	 * At this point the stack pointer has been restored to the value
426	 * it had when thread_save_state() was called above.
427	 *
428	 * Execution jumps here from thread_resume above when the RPC has
429	 * returned. The IRQ and FIQ bits are restored to what they were
430	 * when this function was originally entered.
431	 */
432	pop	{r12}			/* Get pointer to rv[] */
433	stm	r12, {r0-r5}		/* Store r0-r5 into rv[] */
434	pop	{r4-r5, pc}
435UNWIND(	.fnend)
436END_FUNC thread_rpc
437KEEP_PAGER thread_rpc
438
439/*
440 * unsigned long thread_smc(unsigned long func_id, unsigned long a1,
441 *			    unsigned long a2, unsigned long a3)
442 */
443FUNC thread_smc , :
444UNWIND(	.fnstart)
445	smc	#0
446	bx	lr
447UNWIND(	.fnend)
448END_FUNC thread_smc
449
450FUNC thread_init_vbar , :
451UNWIND(	.fnstart)
452	/* Set vector (VBAR) */
453	write_vbar r0
454	bx	lr
455UNWIND(	.fnend)
456END_FUNC thread_init_vbar
457KEEP_PAGER thread_init_vbar
458
459/*
460 * Below are low level routines handling entry and return from user mode.
461 *
462 * thread_enter_user_mode() saves all the registers that user mode can
463 * change so that kernel mode can restore the needed registers when
464 * resuming after the call to thread_enter_user_mode() has returned.
465 * thread_enter_user_mode() doesn't return directly since it enters user
466 * mode instead; it's thread_unwind_user_mode() that does the returning
467 * by restoring the registers saved by thread_enter_user_mode().
468 *
469 * There are three ways for thread_enter_user_mode() to return to its
470 * caller: the user TA calls utee_return, the user TA calls utee_panic,
471 * or an abort occurs.
472 *
473 * Calls to utee_return or utee_panic are handled as:
474 * thread_svc_handler() -> tee_svc_handler() -> tee_svc_do_call() which
475 * calls syscall_return() or syscall_panic().
476 *
477 * These functions return normally, except thread_svc_handler() which
478 * is an exception handling routine and therefore reads the return
479 * address and SPSR to restore from the stack. syscall_return() and
480 * syscall_panic() change the return address and SPSR used by
481 * thread_svc_handler() so that, instead of returning into user mode as
482 * with other syscalls, it returns into thread_unwind_user_mode() in
483 * kernel mode. When thread_svc_handler() returns, the stack pointer is
484 * at the point where thread_enter_user_mode() left it, so this is
485 * where thread_unwind_user_mode() can operate.
486 *
487 * Aborts are handled in a similar way but by thread_abort_handler()
488 * instead: when the pager sees an abort from user mode that can't be
489 * handled, it updates the SPSR and return address so that
490 * thread_abort_handler() returns into thread_unwind_user_mode().
491 */
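/*
 * A rough sketch of the two return paths described above:
 *
 * syscall: user TA -> svc -> thread_svc_handler() -> tee_svc_handler()
 *          -> syscall_return()/syscall_panic(), after which
 *          thread_svc_handler()'s exception return lands in
 *          thread_unwind_user_mode()
 *
 * abort:   user TA -> abort -> thread_abort_handler(), whose exception
 *          return is redirected to thread_unwind_user_mode()
 */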
492
493/*
494 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
495 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
496 *               unsigned long user_func, unsigned long spsr,
497 *               uint32_t *exit_status0, uint32_t *exit_status1)
498 *
499 */
500FUNC __thread_enter_user_mode , :
501UNWIND(	.fnstart)
502UNWIND(	.cantunwind)
503	/*
504	 * Save all registers to allow syscall_return() to resume execution
505	 * as if this function had returned. This is also used in
506	 * syscall_panic().
507	 *
508	 * If stack usage of this function is changed
509	 * thread_unwind_user_mode() has to be updated.
510	 */
511	push    {r4-r12,lr}
512
513	ldr     r4, [sp, #(10 * 0x4)]   /* user stack pointer */
514	ldr     r5, [sp, #(11 * 0x4)]   /* user function */
515	ldr     r6, [sp, #(12 * 0x4)]   /* spsr */
516
517	/*
518	 * Save old user sp and set new user sp.
519	 */
520	cps	#CPSR_MODE_SYS
521	mov	r7, sp
522	mov     sp, r4
523	cps	#CPSR_MODE_SVC
524	push	{r7,r8}
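	/*
	 * r7 holds the saved user sp; r8 appears to be pushed only to
	 * keep the stack 8-byte aligned (thread_unwind_user_mode() pops
	 * the pair but only uses the first word).
	 */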
525
526	/* Prepare user mode entry via eret_to_user_mode */
527	cpsid	aif
528	msr     spsr_fsxc, r6
529	mov	lr, r5
530
531	b	eret_to_user_mode
532UNWIND(	.fnend)
533END_FUNC __thread_enter_user_mode
534
535/*
536 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
537 *              uint32_t exit_status1);
538 * See description in thread.h
539 */
540FUNC thread_unwind_user_mode , :
541UNWIND(	.fnstart)
542UNWIND(	.cantunwind)
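	/*
	 * sp still points at the frame set up by __thread_enter_user_mode():
	 * 12 words pushed ({r7,r8} on top of {r4-r12,lr}), so the caller's
	 * stacked arguments start at sp + 12 * 4. exit_status0 and
	 * exit_status1 are the 8th and 9th arguments, hence the offsets
	 * 15 * 4 and 16 * 4 below.
	 */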
543	ldr     ip, [sp, #(15 * 0x4)]   /* &ctx->panicked */
544	str	r1, [ip]
545	ldr     ip, [sp, #(16 * 0x4)]   /* &ctx->panic_code */
546	str	r2, [ip]
547
548	/* Restore old user sp */
549	pop	{r4,r7}
550	cps	#CPSR_MODE_SYS
551	mov	sp, r4
552	cps	#CPSR_MODE_SVC
553
554	pop     {r4-r12,pc}	/* Match the push in thread_enter_user_mode()*/
555UNWIND(	.fnend)
556END_FUNC thread_unwind_user_mode
557
558	.macro maybe_restore_mapping
559		/*
560		 * This macro is a bit hard to read due to all the ifdefs;
561		 * we're testing for two different configs, which makes four
562		 * different combinations.
563		 *
564		 * - With LPAE, and then some extra code if with
565		 *   CFG_CORE_UNMAP_CORE_AT_EL0
566		 * - Without LPAE, and then some extra code if with
567		 *   CFG_CORE_UNMAP_CORE_AT_EL0
568		 */
569
570		/*
571		 * At this point we can't rely on any memory being writable
572		 * yet, so we're using TPIDRPRW to store r0, and if with
573		 * LPAE TPIDRURO to store r1 too.
574		 */
575		write_tpidrprw r0
576#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
577		write_tpidruro r1
578#endif
579
580#ifdef CFG_WITH_LPAE
581		read_ttbr0_64bit r0, r1
582		tst	r1, #BIT(TTBR_ASID_SHIFT - 32)
583		beq	11f
584
585#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
586		/*
587		 * Update the mapping to use the full kernel mode mapping.
588		 * Since the translation table could reside above 4GB we'll
589		 * have to use 64-bit arithmetics.
590		 */
591		subs	r0, r0, #CORE_MMU_L1_TBL_OFFSET
592		sbc	r1, r1, #0
593#endif
594		bic	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
595		write_ttbr0_64bit r0, r1
596		isb
597
598#else /*!CFG_WITH_LPAE*/
599		read_contextidr r0
600		tst	r0, #1
601		beq	11f
602
603		/* Update the mapping to use the full kernel mode mapping. */
604		bic	r0, r0, #1
605		write_contextidr r0
606		isb
607#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
608		read_ttbr1 r0
609		sub	r0, r0, #CORE_MMU_L1_TBL_OFFSET
610		write_ttbr1 r0
611		isb
612#endif
613
614#endif /*!CFG_WITH_LPAE*/
615
616#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
617		ldr	r0, =thread_user_kcode_offset
618		ldr	r0, [r0]
619		read_vbar r1
620		add	r1, r1, r0
621		write_vbar r1
622		isb
623
624	11:	/*
625		 * The PC is adjusted unconditionally to guard against the
626		 * case there was an FIQ just before we did the "cpsid aif".
627		 */
628		ldr	r0, =22f
629		bx	r0
630	22:
631#else
632	11:
633#endif
634		read_tpidrprw r0
635#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
636		read_tpidruro r1
637#endif
638	.endm
639
640/* The handler of native interrupt. */
641.macro	native_intr_handler mode:req
642	cpsid	aif
643	maybe_restore_mapping
644
645	/*
646	 * FIQ and IRQ have a +4 offset for lr compared to preferred return
647	 * address
648	 */
649	sub     lr, lr, #4
650
651	/*
652	 * We're always saving {r0-r3}. In IRQ mode we're saving r12 also.
653	 * In FIQ mode we're saving the banked FIQ registers {r8-r12}
654	 * because the secure monitor doesn't save those. The treatment of
655	 * the banked fiq registers is somewhat analogous to the lazy save
656	 * of VFP registers.
657	 */
658	.ifc	\mode\(),fiq
659	push	{r0-r3, r8-r12, lr}
660	.else
661	push	{r0-r3, r12, lr}
662	.endif
663
664	bl	thread_check_canaries
665	ldr	lr, =thread_nintr_handler_ptr
666	ldr	lr, [lr]
667	blx	lr
668
669	mrs	r0, spsr
670	cmp_spsr_user_mode r0
671
672	.ifc	\mode\(),fiq
673	pop	{r0-r3, r8-r12, lr}
674	.else
675	pop	{r0-r3, r12, lr}
676	.endif
677
678	movnes	pc, lr
679	b	eret_to_user_mode
680.endm
681
682/* The handler of foreign interrupt. */
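/*
 * A foreign interrupt is handed over to normal world: the current thread
 * is suspended with thread_state_suspend() and an
 * OPTEE_SMC_RETURN_RPC_FOREIGN_INTR RPC is issued through the
 * TEESMC_OPTEED_RETURN_CALL_DONE SMC below. The suspended thread is
 * resumed (via thread_resume()) once normal world returns from the RPC.
 */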
683.macro foreign_intr_handler mode:req
684	cpsid	aif
685	maybe_restore_mapping
686
687	sub	lr, lr, #4
688	push	{lr}
689	push	{r12}
690
691	.ifc	\mode\(),fiq
692	bl	thread_save_state_fiq
693	.else
694	bl	thread_save_state
695	.endif
696
697	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
698	mrs	r1, spsr
699	pop	{r12}
700	pop	{r2}
701	blx	thread_state_suspend
702	mov	r4, r0		/* Supply thread index */
703
704	/*
705	 * Switch to SVC mode and copy current stack pointer as it already
706	 * is the tmp stack.
707	 */
708	mov	r0, sp
709	cps	#CPSR_MODE_SVC
710	mov	sp, r0
711
712	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
713	ldr	r1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
714	mov	r2, #0
715	mov	r3, #0
716	/* r4 is already filled in above */
717	smc	#0
718	b	.	/* SMC should not return */
719.endm
720
721	.section .text.thread_excp_vect
722        .align	5
723FUNC thread_excp_vect , :
724UNWIND(	.fnstart)
725UNWIND(	.cantunwind)
726	b	.			/* Reset			*/
727	b	thread_und_handler	/* Undefined instruction	*/
728	b	thread_svc_handler	/* System call			*/
729	b	thread_pabort_handler	/* Prefetch abort		*/
730	b	thread_dabort_handler	/* Data abort			*/
731	b	.			/* Reserved			*/
732	b	thread_irq_handler	/* IRQ				*/
733	b	thread_fiq_handler	/* FIQ				*/
734#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
735	.macro vector_prologue_spectre
736		/*
737		 * This depends on SP being 8 byte aligned, that is, the
738		 * lowest three bits in SP are zero.
739		 *
740		 * To avoid unexpected speculation we need to invalidate
741		 * the branch predictor before we do the first branch. It
742		 * doesn't matter if it's a conditional or an unconditional
743		 * branch; speculation can still occur.
744		 *
745		 * The idea is to form a specific bit pattern in the lowest
746		 * three bits of SP depending on which entry in the vector
747		 * we enter via.  This is done by adding 1 to SP in each
748		 * entry but the last.
749		 */
750		add	sp, sp, #1	/* 7:111 Reset			*/
751		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
752		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
753		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
754		add	sp, sp, #1	/* 3:011 Data abort		*/
755		add	sp, sp, #1	/* 2:010 Reserved		*/
756		add	sp, sp, #1	/* 1:001 IRQ			*/
757		write_tpidrprw r0	/* 0:000 FIQ			*/
758	.endm
759
760        .align	5
761	.global thread_excp_vect_workaround_a15
762thread_excp_vect_workaround_a15:
763	vector_prologue_spectre
764	mrs	r0, spsr
765	cmp_spsr_user_mode r0
766	bne	1f
767	/*
768	 * Invalidate the branch predictor for the current processor.
769	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
770	 * effective.
771	 * Note that the BPIALL instruction is not effective in
772	 * invalidating the branch predictor on Cortex-A15. For that CPU,
773	 * set ACTLR[0] to 1 during early processor initialisation, and
774	 * invalidate the branch predictor by performing an ICIALLU
775	 * instruction. See also:
776	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
777	 */
778	write_iciallu
779	isb
780	b	1f
781
782        .align	5
783	.global thread_excp_vect_workaround
784thread_excp_vect_workaround:
785	vector_prologue_spectre
786	mrs	r0, spsr
787	cmp_spsr_user_mode r0
788	bne	1f
789	/* Invalidate the branch predictor for the current processor. */
790	write_bpiall
791	isb
792
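	/*
	 * Dispatch on the entry number encoded in the low bits of SP by
	 * vector_prologue_spectre above. In ARM state reading PC yields
	 * the address of the current instruction + 8, so
	 * "add pc, pc, r0, LSL #3" jumps past the nop into the table of
	 * 8-byte (two instruction) entries below, FIQ (0) first and
	 * Reset (7) last.
	 */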
7931:	and	r0, sp, #(BIT(0) | BIT(1) | BIT(2))
794	bic	sp, sp, #(BIT(0) | BIT(1) | BIT(2))
795	add	pc, pc, r0, LSL #3
796	nop
797
798	read_tpidrprw r0
799	b	thread_fiq_handler	/* FIQ				*/
800	read_tpidrprw r0
801	b	thread_irq_handler	/* IRQ				*/
802	read_tpidrprw r0
803	b	.			/* Reserved			*/
804	read_tpidrprw r0
805	b	thread_dabort_handler	/* Data abort			*/
806	read_tpidrprw r0
807	b	thread_pabort_handler	/* Prefetch abort		*/
808	read_tpidrprw r0
809	b	thread_svc_handler	/* System call			*/
810	read_tpidrprw r0
811	b	thread_und_handler	/* Undefined instruction	*/
812	read_tpidrprw r0
813	b	.			/* Reset			*/
814#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/
815
816thread_und_handler:
817	cpsid	aif
818	maybe_restore_mapping
819	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
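	/*
	 * For an undefined instruction exception lr points at the
	 * instruction following the faulting one: +2 in Thumb state,
	 * +4 in ARM state. Adjust lr to point at the faulting instruction.
	 */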
820	mrs	r1, spsr
821	tst	r1, #CPSR_T
822	subne	lr, lr, #2
823	subeq	lr, lr, #4
824	mov	r0, #ABORT_TYPE_UNDEF
825	b	thread_abort_common
826
827thread_dabort_handler:
828	cpsid	aif
829	maybe_restore_mapping
830	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
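	/* For a data abort lr points 8 bytes past the faulting instruction */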
831	sub	lr, lr, #8
832	mov	r0, #ABORT_TYPE_DATA
833	b	thread_abort_common
834
835thread_pabort_handler:
836	cpsid	aif
837	maybe_restore_mapping
838	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
839	sub	lr, lr, #4
840	mov	r0, #ABORT_TYPE_PREFETCH
841
842thread_abort_common:
843	/*
844	 * At this label:
845	 * the CPU is in Undef or Abort mode
846	 * sp is still pointing to struct thread_core_local belonging to
847	 * this core.
848	 * {r0, r1} are saved in struct thread_core_local pointed to by sp
849	 * {r2-r11, ip} are untouched.
850	 * r0 holds the first argument for abort_handler()
851	 */
852
853	/*
854	 * Update core local flags.
855	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
856	 */
857	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
858	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
859	orr	r1, r1, #THREAD_CLF_ABORT
860
861	/*
862	 * Select stack and update flags accordingly
863	 *
864	 * Normal case:
865	 * If the abort stack is unused select that.
866	 *
867	 * Fatal error handling:
868	 * If we're already using the abort stack as noted by bit
869	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
870	 * field we're selecting the temporary stack instead to be able to
871	 * make a stack trace of the abort in abort mode.
872	 *
873	 * r1 is initialized as a temporary stack pointer until we've
874	 * switched to system mode.
875	 */
876	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
877	orrne	r1, r1, #THREAD_CLF_TMP /* flags |= THREAD_CLF_TMP; */
878	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
879	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
880	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
881
882	/*
883	 * Store registers on the stack, fitting struct thread_abort_regs,
884	 * starting from the end of the struct:
885	 * first {r2-r11, ip},
886	 * then the previously saved {r0-r1},
887	 * then {pad, spsr, elr}.
888	 * After this only {usr_sp, usr_lr} are missing in the struct.
889	 */
890	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
891	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
892	/* Push the original {r0-r1} on the selected stack */
893	stmdb	r1!, {r2-r3}
894	mrs	r3, spsr
895	/* Push {pad, spsr, elr} on the selected stack */
896	stmdb	r1!, {r2, r3, lr}
897
898	cps	#CPSR_MODE_SYS
899	str	lr, [r1, #-4]!
900	str	sp, [r1, #-4]!
901	mov	sp, r1
902
903	bl	abort_handler
904
905	mov	ip, sp
906	ldr	sp, [ip], #4
907	ldr	lr, [ip], #4
908
909	/*
910	 * Even if we entered via CPSR_MODE_UND, we are returning via
911	 * CPSR_MODE_ABT. It doesn't matter as lr and spsr are assigned
912	 * here.
913	 */
914	cps	#CPSR_MODE_ABT
915	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
916	msr	spsr_fsxc, r1
917
918	/* Update core local flags */
919	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
920	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
921	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
922
923	cmp_spsr_user_mode r1
924	ldm	ip, {r0-r11, ip}
925	movnes	pc, lr
926	b	eret_to_user_mode
927	/* end thread_abort_common */
928
929thread_svc_handler:
930	cpsid	aif
931
932	maybe_restore_mapping
933
934	push	{r0-r7, lr}
935	mrs	r0, spsr
936	push	{r0}
937	mov	r0, sp
938	bl	tee_svc_handler
939	cpsid	aif	/* In case something was unmasked */
940	pop	{r0}
941	msr	spsr_fsxc, r0
942	cmp_spsr_user_mode r0
943	pop	{r0-r7, lr}
944	movnes	pc, lr
945	b	eret_to_user_mode
946	/* end thread_svc_handler */
947
948thread_fiq_handler:
949#if defined(CFG_ARM_GICV3)
950	foreign_intr_handler	fiq
951#else
952	native_intr_handler	fiq
953#endif
954	/* end thread_fiq_handler */
955
956thread_irq_handler:
957#if defined(CFG_ARM_GICV3)
958	native_intr_handler	irq
959#else
960	foreign_intr_handler	irq
961#endif
962	/* end thread_irq_handler */
963
964	/*
965	 * Returns to user mode.
966	 * Expects to be jumped to with lr pointing to the user space
967	 * address to jump to and spsr holding the desired cpsr. Async
968	 * abort, irq and fiq should be masked.
969	 */
970eret_to_user_mode:
971	write_tpidrprw r0
972#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
973	write_tpidruro r1
974#endif
975
976#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
977	ldr	r0, =thread_user_kcode_offset
978	ldr	r0, [r0]
979	read_vbar r1
980	sub	r1, r1, r0
981	write_vbar r1
982	isb
983
984	/* Jump into the reduced mapping before the full mapping is removed */
985	ldr	r1, =1f
986	sub	r1, r1, r0
987	bx	r1
9881:
989#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
990
991#ifdef CFG_WITH_LPAE
992	read_ttbr0_64bit r0, r1
993#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
994	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
995#endif
996	/* switch to user ASID */
997	orr	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
998	write_ttbr0_64bit r0, r1
999	isb
1000#else /*!CFG_WITH_LPAE*/
1001#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
1002	read_ttbr1 r0
1003	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
1004	write_ttbr1 r0
1005	isb
1006#endif
1007	read_contextidr r0
1008	orr	r0, r0, #BIT(0)
1009	write_contextidr r0
1010	isb
1011#endif /*!CFG_WITH_LPAE*/
1012
1013	read_tpidrprw r0
1014#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
1015	read_tpidruro r1
1016#endif
1017
1018	movs	pc, lr
1019UNWIND(	.fnend)
1020	.global thread_excp_vect_end
1021thread_excp_vect_end:
1022END_FUNC thread_excp_vect
1023