/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/thread_defs.h>
#include <kernel/unwind.h>
#include <mm/core_mmu.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

#include "thread_private.h"

	.syntax unified
	.arch_extension sec

	.macro cmp_spsr_user_mode reg:req
		/*
		 * We're only testing the lower 4 bits as the 0x10 bit is
		 * always set.
		 */
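		/*
		 * For reference, the architectural mode encodings: user
		 * mode is 0x10, while e.g. SVC is 0x13, ABT 0x17 and SYS
		 * 0x1f, so the low nibble is zero only for user mode.
		 */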
		tst	\reg, #0x0f
	.endm

LOCAL_FUNC vector_std_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r0-r7}
	mov	r0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (an error was detected) it will do a
	 * normal "C" return.
	 */
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r0-r7}
	mov	r0, sp
	bl	thread_handle_fast_smc
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	ldr	lr, =thread_nintr_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_on_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_suspend_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_resume_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_system_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_system_reset_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.  Also used when compiled with the internal monitor, but
 * the cpu_*_entry and system_*_entry are not used then.
 *
 * Note that ARM-TF depends on the layout of this vector table; any change
 * in layout has to be synced with ARM-TF.
 */
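/*
 * As a rough illustration of that contract (the struct and field names
 * below are illustrative assumptions, not copied from ARM-TF headers),
 * the normal world side treats this table as an array of entry points:
 *
 *	struct optee_vectors {
 *		void (*std_smc_entry)(void);
 *		void (*fast_smc_entry)(void);
 *		void (*cpu_on_entry)(void);
 *		void (*cpu_off_entry)(void);
 *		void (*cpu_resume_entry)(void);
 *		void (*cpu_suspend_entry)(void);
 *		void (*fiq_entry)(void);
 *		void (*system_off_entry)(void);
 *		void (*system_reset_entry)(void);
 *	};
 *
 * that is, nine branch instructions at fixed 4-byte offsets, which is
 * why the order below must not change.
 */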
FUNC thread_vector_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
UNWIND(	.fnend)
END_FUNC thread_vector_table
KEEP_PAGER thread_vector_table

FUNC thread_set_abt_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_abt_sp

FUNC thread_set_und_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_UND
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_und_sp

FUNC thread_set_irq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_fiq_sp

/* void thread_resume(struct thread_ctx_regs *regs) */
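/*
 * The loads below assume a layout of struct thread_ctx_regs along the
 * lines of (a sketch inferred from the offsets used here, not a copy of
 * the real definition): r0-r12, usr_sp, usr_lr, svc_spsr, svc_sp,
 * svc_lr, pc, cpsr.
 */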
FUNC thread_resume , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4

	cps	#CPSR_MODE_SVC
	ldr	r1, [r12], #4
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4
	msr	spsr_fsxc, r1

	ldm	r12, {r1, r2}

	/*
	 * Switch to a mode other than SVC since we need to set spsr in
	 * order to return into the old state properly, and that old
	 * state may itself be SVC mode.
	 */
	cps	#CPSR_MODE_ABT
	cmp_spsr_user_mode r2
	mov	lr, r1
	msr	spsr_fsxc, r2
	ldm	r0, {r0-r12}
	movsne	pc, lr
	b	eret_to_user_mode
UNWIND(	.fnend)
END_FUNC thread_resume

/*
 * Disables async aborts, IRQ and FIQ and saves the state of the thread
 * in FIQ mode, which has banked r8-r12 registers. Returns the original
 * CPSR.
 */
LOCAL_FUNC thread_save_state_fiq , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mov	r9, lr

	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r8, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}

	cps	#CPSR_MODE_SYS
	stm	r0!, {r8-r12}
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	/* back to fiq mode */
	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r8			/* Return original CPSR */
	bx	r9
UNWIND(	.fnend)
END_FUNC thread_save_state_fiq

/*
 * Disables async aborts, IRQ and FIQ and saves the state of the thread.
 * Returns the original CPSR.
 */
LOCAL_FUNC thread_save_state , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r12, lr}
	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps	#CPSR_MODE_SYS
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_save_state

FUNC thread_std_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* Pass r0-r7 in a struct thread_smc_args */
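	/*
	 * The stacked r0-r7 are read by __thread_std_smc_entry() as a
	 * struct thread_smc_args; assuming its conventional layout of
	 * eight 32-bit words, r0 maps to the first word and r7 to the
	 * last.
	 */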
	push	{r0-r7}
	mov	r0, sp
	bl	__thread_std_smc_entry
	/*
	 * Load the returned r0-r3 into preserved registers and skip the
	 * "returned" r4-r7 since they will not be returned to normal
	 * world.
	 */
	pop	{r4-r7}
	add	sp, #(4 * 4)

	/* Disable interrupts before switching to temporary stack */
	cpsid	aif
	bl	thread_get_tmp_sp
	mov	sp, r0

	bl	thread_state_free

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	r1, r4
	mov	r2, r5
	mov	r3, r6
	mov	r4, r7
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC thread_std_smc_entry


/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
/*
 * r0-r2 are used to pass parameters to normal world.
 * r0-r5 are used to pass the return value back from normal world.
 *
 * Note that r3 is used to pass "resume information", that is, which
 * thread it is that should resume.
 *
 * Since this function follows the AAPCS we need to preserve r4-r5,
 * which are otherwise modified when returning back from normal world.
 */
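/*
 * A sketch of a hypothetical caller (the RPC code name below is
 * illustrative, the real values are defined by the C parts of the
 * thread machinery):
 *
 *	uint32_t rv[THREAD_RPC_NUM_ARGS] = {
 *		OPTEE_SMC_RETURN_RPC_FOO, arg1, arg2,
 *	};
 *
 *	thread_rpc(rv);
 *	// On return rv[0..5] hold what normal world passed back.
 */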
UNWIND(	.fnstart)
	push	{r4-r5, lr}
UNWIND(	.save	{r4-r5, lr})
	push	{r0}
UNWIND(	.save	{r0})

	bl	thread_save_state
	mov	r4, r0			/* Save original CPSR */

	/*
	 * Switch to temporary stack and SVC mode. Save CPSR to resume into.
	 */
	bl	thread_get_tmp_sp
	ldr	r5, [sp]		/* Get pointer to rv[] */
	cps	#CPSR_MODE_SVC		/* Change to SVC mode */
	mov	sp, r0			/* Switch to tmp stack */

	mov	r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	mov	r1, r4			/* CPSR to restore */
	ldr	r2, =.thread_rpc_return
	bl	thread_state_suspend
	mov	r4, r0			/* Supply thread index */
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldm	r5, {r1-r3}		/* Load rv[0-2] into r1-r3 (r0-r2 in normal world) */
	smc	#0
	b	.	/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * it had when thread_save_state() was called above.
	 *
	 * We jump here from thread_resume() above when the RPC has
	 * returned. The IRQ and FIQ bits are restored to what they were
	 * when this function was originally entered.
	 */
	pop	{r12}			/* Get pointer to rv[] */
	stm	r12, {r0-r5}		/* Store r0-r5 into rv[] */
	pop	{r4-r5, pc}
UNWIND(	.fnend)
END_FUNC thread_rpc
KEEP_PAGER thread_rpc

/*
 * unsigned long thread_smc(unsigned long func_id, unsigned long a1,
 *			    unsigned long a2, unsigned long a3)
 */
FUNC thread_smc , :
UNWIND(	.fnstart)
	smc	#0
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_smc

FUNC thread_init_vbar , :
UNWIND(	.fnstart)
	/* Set vector (VBAR) */
	write_vbar r0
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_init_vbar
KEEP_PAGER thread_init_vbar

/*
 * Below are low level routines handling entry to and return from user
 * mode.
 *
 * thread_enter_user_mode() saves all registers that user mode can change
 * so that kernel mode can restore the needed registers when resuming
 * execution after the call to thread_enter_user_mode() has returned.
 * thread_enter_user_mode() doesn't return directly since it enters user
 * mode instead; it's thread_unwind_user_mode() that does the returning
 * by restoring the registers saved by thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to its
 * caller: the user TA calls utee_return, the user TA calls utee_panic,
 * or through an abort.
 *
 * Calls to utee_return or utee_panic are handled as:
 * thread_svc_handler() -> tee_svc_handler() -> tee_svc_do_call() which
 * calls syscall_return() or syscall_panic().
 *
 * These function calls return normally, except thread_svc_handler()
 * which is an exception handling routine and therefore reads the return
 * address and SPSR to restore from the stack. syscall_return() and
 * syscall_panic() change the return address and SPSR used by
 * thread_svc_handler() so that, instead of returning into user mode as
 * with other syscalls, it returns into thread_unwind_user_mode() in
 * kernel mode. When thread_svc_handler() returns, the stack pointer is
 * at the point where thread_enter_user_mode() left it, so this is where
 * thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way but by thread_abort_handler()
 * instead: when the pager sees that it's an abort from user mode that
 * can't be handled, it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode()
 * instead.
 */
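/*
 * A condensed sketch of the syscall return path described above
 * (control flow simplified, function names as referenced above):
 *
 *	thread_enter_user_mode()
 *	  -> eret_to_user_mode, TA runs in user mode
 *	TA issues utee_return/utee_panic (svc)
 *	  -> thread_svc_handler() -> tee_svc_handler() -> tee_svc_do_call()
 *	       -> syscall_return()/syscall_panic() patch the saved return
 *	          address and SPSR
 *	  -> thread_svc_handler() "returns" into thread_unwind_user_mode()
 *	       which pops the registers pushed by __thread_enter_user_mode()
 *	       and returns to the original caller.
 */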

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
 *               unsigned long user_func, unsigned long spsr,
 *               uint32_t *exit_status0, uint32_t *exit_status1)
 *
 */
FUNC __thread_enter_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/*
	 * Save all registers to allow syscall_return() to resume execution
	 * as if this function had returned. This is also used in
	 * syscall_panic().
	 *
	 * If the stack usage of this function is changed
	 * thread_unwind_user_mode() has to be updated.
	 */
	push	{r4-r12,lr}

	ldr	r4, [sp, #(10 * 0x4)]	/* user stack pointer */
	ldr	r5, [sp, #(11 * 0x4)]	/* user function */
	ldr	r6, [sp, #(12 * 0x4)]	/* spsr */

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r7, sp
	mov	sp, r4
	cps	#CPSR_MODE_SVC
	push	{r7,r8}

	/* Prepare user mode entry via eret_to_user_mode */
	cpsid	aif
	msr	spsr_fsxc, r6
	mov	lr, r5

	b	eret_to_user_mode
UNWIND(	.fnend)
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *              uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	ip, [sp, #(15 * 0x4)]	/* &ctx->panicked */
	str	r1, [ip]
	ldr	ip, [sp, #(16 * 0x4)]	/* &ctx->panic_code */
	str	r2, [ip]

	/* Restore old user sp */
	pop	{r4,r7}
	cps	#CPSR_MODE_SYS
	mov	sp, r4
	cps	#CPSR_MODE_SVC

	pop	{r4-r12,pc}	/* Match the push in __thread_enter_user_mode() */
UNWIND(	.fnend)
END_FUNC thread_unwind_user_mode

	.macro maybe_restore_mapping
		/*
		 * This macro is a bit hard to read due to all the ifdefs:
		 * we're testing for two different configs, which makes
		 * four different combinations.
		 *
		 * - With LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 * - Without LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 */
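		/*
		 * Summarized (derived from the code below):
		 * - LPAE: the reduced (user) mapping is selected by the
		 *   ASID bit in TTBR0; clear it to get back to the kernel
		 *   ASID.
		 * - Non-LPAE: bit 0 of CONTEXTIDR tracks the same thing;
		 *   clear it to get back to the kernel mapping.
		 * - With CFG_CORE_UNMAP_CORE_AT_EL0, additionally move the
		 *   translation table base back by CORE_MMU_L1_TBL_OFFSET
		 *   and restore VBAR to the full-mapping vector.
		 */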

		/*
		 * At this point we can't rely on any memory being writable
		 * yet, so we're using TPIDRPRW to store r0 and, with LPAE
		 * or CFG_CORE_UNMAP_CORE_AT_EL0, TPIDRURO to store r1 too.
		 */
		write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		write_tpidruro r1
#endif

#ifdef CFG_WITH_LPAE
		read_ttbr0_64bit r0, r1
		tst	r1, #BIT(TTBR_ASID_SHIFT - 32)
		beq	11f

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/*
		 * Update the mapping to use the full kernel mode mapping.
		 * Since the translation table could reside above 4GB we'll
		 * have to use 64-bit arithmetic.
		 */
		subs	r0, r0, #CORE_MMU_L1_TBL_OFFSET
		sbc	r1, r1, #0
#endif
		bic	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
		write_ttbr0_64bit r0, r1
		isb

#else /*!CFG_WITH_LPAE*/
		read_contextidr r0
		tst	r0, #1
		beq	11f

		/* Update the mapping to use the full kernel mode mapping. */
		bic	r0, r0, #1
		write_contextidr r0
		isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		read_ttbr1 r0
		sub	r0, r0, #CORE_MMU_L1_TBL_OFFSET
		write_ttbr1 r0
		isb
#endif

#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		ldr	r0, =thread_user_kcode_offset
		ldr	r0, [r0]
		read_vbar r1
		add	r1, r1, r0
		write_vbar r1
		isb

	11:	/*
		 * The PC is adjusted unconditionally to guard against the
		 * case where there was an FIQ just before we did the
		 * "cpsid aif".
		 */
		ldr	r0, =22f
		bx	r0
	22:
#else
	11:
#endif
		read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		read_tpidruro r1
#endif
	.endm

/* The handler of native interrupts. */
.macro	native_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	/*
	 * FIQ and IRQ have a +4 offset for lr compared to the preferred
	 * return address.
	 */
	sub	lr, lr, #4

	/*
	 * We're always saving {r0-r3}. In IRQ mode we're saving r12 also.
	 * In FIQ mode we're also saving the banked FIQ registers {r8-r12}
	 * because the secure monitor doesn't save those. The treatment of
	 * the banked FIQ registers is somewhat analogous to the lazy save
	 * of VFP registers.
	 */
	.ifc	\mode\(),fiq
	push	{r0-r3, r8-r12, lr}
	.else
	push	{r0-r3, r12, lr}
	.endif

	bl	thread_check_canaries
	ldr	lr, =thread_nintr_handler_ptr
	ldr	lr, [lr]
	blx	lr

	mrs	r0, spsr
	cmp_spsr_user_mode r0

	.ifc	\mode\(),fiq
	pop	{r0-r3, r8-r12, lr}
	.else
	pop	{r0-r3, r12, lr}
	.endif

	movsne	pc, lr
	b	eret_to_user_mode
.endm

/* The handler of foreign interrupts. */
.macro foreign_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	sub	lr, lr, #4
	push	{r12}

	.ifc	\mode\(),fiq
	/*
	 * If a foreign (non-secure) interrupt is received as a FIQ we
	 * need to check whether we're in a state where it can be saved,
	 * or if we need to mask the interrupt to be handled later.
	 *
	 * The window when this is needed is quite narrow: it spans from
	 * entering the exception vector until the "cpsid" instruction of
	 * the handler has been executed.
	 *
	 * Currently we can save the state properly if the FIQ is received
	 * while in user or svc (kernel) mode.
	 *
	 * If we're returning to abort, undef or irq mode we're returning
	 * with the mapping restored. This is OK since, before the handler
	 * we're returning to eventually returns to user mode, the reduced
	 * mapping will be restored.
	 */
	mrs	r12, spsr
	and	r12, r12, #ARM32_CPSR_MODE_MASK
	cmp	r12, #ARM32_CPSR_MODE_USR
	cmpne	r12, #ARM32_CPSR_MODE_SVC
	beq	1f
	mrs	r12, spsr
	orr	r12, r12, #ARM32_CPSR_F
	msr	spsr_fsxc, r12
	pop	{r12}
	movs	pc, lr
1:
	.endif

	push	{lr}

	.ifc	\mode\(),fiq
	bl	thread_save_state_fiq
	.else
	bl	thread_save_state
	.endif

	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	r1, spsr
	pop	{r2}
	pop	{r12}
	blx	thread_state_suspend
	mov	r4, r0		/* Supply thread index */

	/*
	 * Switch to SVC mode and copy current stack pointer as it already
	 * is the tmp stack.
	 */
	mov	r0, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r0

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	r1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
	mov	r2, #0
	mov	r3, #0
	/* r4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
.endm

	.section .text.thread_excp_vect
	.align	5
FUNC thread_excp_vect , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.			/* Reset			*/
	b	thread_und_handler	/* Undefined instruction	*/
	b	thread_svc_handler	/* System call			*/
	b	thread_pabort_handler	/* Prefetch abort		*/
	b	thread_dabort_handler	/* Data abort			*/
	b	.			/* Reserved			*/
	b	thread_irq_handler	/* IRQ				*/
	b	thread_fiq_handler	/* FIQ				*/
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8-byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * To avoid unexpected speculation we need to invalidate
		 * the branch predictor before we do the first branch. It
		 * doesn't matter whether it's a conditional or an
		 * unconditional branch, speculation can still occur.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * we enter via.  This is done by adding 1 to SP in each
		 * entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset			*/
		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
		add	sp, sp, #1	/* 3:011 Data abort		*/
		add	sp, sp, #1	/* 2:010 Reserved		*/
		add	sp, sp, #1	/* 1:001 IRQ			*/
		write_tpidrprw r0	/* 0:000 FIQ			*/
	.endm

	.align	5
	.global thread_excp_vect_workaround_a15
thread_excp_vect_workaround_a15:
	vector_prologue_spectre
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.align	5
	.global thread_excp_vect_workaround
thread_excp_vect_workaround:
	vector_prologue_spectre
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:	and	r0, sp, #(BIT(0) | BIT(1) | BIT(2))
	bic	sp, sp, #(BIT(0) | BIT(1) | BIT(2))
	add	pc, pc, r0, LSL #3
	nop
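	/*
	 * Dispatch on the bit pattern collected in SP above: when the
	 * "add pc, pc, r0, LSL #3" executes, PC reads as the address of
	 * the first entry below (two instructions ahead), and each entry
	 * is two instructions (8 bytes), so e.g. r0 = 1 (IRQ) lands on
	 * the second entry.
	 */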

	read_tpidrprw r0
	b	thread_fiq_handler	/* FIQ				*/
	read_tpidrprw r0
	b	thread_irq_handler	/* IRQ				*/
	read_tpidrprw r0
	b	.			/* Reserved			*/
	read_tpidrprw r0
	b	thread_dabort_handler	/* Data abort			*/
	read_tpidrprw r0
	b	thread_pabort_handler	/* Prefetch abort		*/
	read_tpidrprw r0
	b	thread_svc_handler	/* System call			*/
	read_tpidrprw r0
	b	thread_und_handler	/* Undefined instruction	*/
	read_tpidrprw r0
	b	.			/* Reset			*/
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

thread_und_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	mrs	r1, spsr
	tst	r1, #CPSR_T
	subne	lr, lr, #2
	subeq	lr, lr, #4
	mov	r0, #ABORT_TYPE_UNDEF
	b	thread_abort_common

thread_dabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #8
	mov	r0, #ABORT_TYPE_DATA
	b	thread_abort_common

thread_pabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #4
	mov	r0, #ABORT_TYPE_PREFETCH

thread_abort_common:
	/*
	 * At this label:
	 * cpsr is in mode undef or abort
	 * sp is still pointing to struct thread_core_local belonging to
	 * this core.
	 * {r0, r1} are saved in struct thread_core_local pointed to by sp
	 * {r2-r11, ip} are untouched.
	 * r0 holds the first argument for abort_handler()
	 */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_ABORT

	/*
	 * Select stack and update flags accordingly
	 *
	 * Normal case:
	 * If the abort stack is unused select that.
	 *
	 * Fatal error handling:
	 * If we're already using the abort stack as noted by bit
	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
	 * field we're selecting the temporary stack instead to be able to
	 * make a stack trace of the abort in abort mode.
	 *
	 * r1 is initialized as a temporary stack pointer until we've
	 * switched to system mode.
	 */
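	/*
	 * In pseudo C (field names inferred from the offset macros used
	 * below, so only illustrative):
	 *
	 *	if (flags & (THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)) {
	 *		flags |= THREAD_CLF_TMP;
	 *		stack = tmp_stack_va_end;
	 *	} else {
	 *		stack = abt_stack_va_end;
	 *	}
	 */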
	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	orrne	r1, r1, #THREAD_CLF_TMP	/* flags |= THREAD_CLF_TMP; */
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]

	/*
	 * Store registers on the selected stack, laid out as a struct
	 * thread_abort_regs and filled in from the end of the struct:
	 * push {r2-r11, ip}, then load the previously saved {r0-r1} and
	 * store them together with {pad, spsr, elr}.
	 * After this only {usr_sp, usr_lr} are missing in the struct.
	 */
	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
	/* Push the original {r0-r1} on the selected stack */
	stmdb	r1!, {r2-r3}
	mrs	r3, spsr
	/* Push {pad, spsr, elr} on the selected stack */
	stmdb	r1!, {r2, r3, lr}

	cps	#CPSR_MODE_SYS
	str	lr, [r1, #-4]!
	str	sp, [r1, #-4]!
	mov	sp, r1

	bl	abort_handler

	mov	ip, sp
	ldr	sp, [ip], #4
	ldr	lr, [ip], #4

	/*
	 * Even if we entered via CPSR_MODE_UND, we are returning via
	 * CPSR_MODE_ABT. It doesn't matter as lr and spsr are assigned
	 * here.
	 */
	cps	#CPSR_MODE_ABT
	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
	msr	spsr_fsxc, r1

	/* Update core local flags */
	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	cmp_spsr_user_mode r1
	ldm	ip, {r0-r11, ip}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_abort_common */

thread_svc_handler:
	cpsid	aif

	maybe_restore_mapping

	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
	mov	r0, sp
	bl	tee_svc_handler
	cpsid	aif	/* In case something was unmasked */
	pop	{r0}
	msr	spsr_fsxc, r0
	cmp_spsr_user_mode r0
	pop	{r0-r7, lr}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_svc_handler */

thread_fiq_handler:
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
	/* end thread_fiq_handler */

thread_irq_handler:
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
	/* end thread_irq_handler */

	/*
	 * Returns to user mode.
	 * Expects to be jumped to with lr pointing to the user space
	 * address to jump to and spsr holding the desired cpsr. Async
	 * abort, irq and fiq should be masked.
	 */
eret_to_user_mode:
	write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	write_tpidruro r1
#endif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r0, =thread_user_kcode_offset
	ldr	r0, [r0]
	read_vbar r1
	sub	r1, r1, r0
	write_vbar r1
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r1, =1f
	sub	r1, r1, r0
	bx	r1
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r0, r1
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
#endif
	/* switch to user ASID */
	orr	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r0, r1
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbr1 r0
	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
	write_ttbr1 r0
	isb
#endif
	read_contextidr r0
	orr	r0, r0, #BIT(0)
	write_contextidr r0
	isb
#endif /*!CFG_WITH_LPAE*/

	read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	read_tpidruro r1
#endif

	movs	pc, lr

	/*
	 * void icache_inv_user_range(void *addr, size_t size);
	 *
	 * This function has to execute with the user space ASID active,
	 * which means executing with the reduced mapping, and the code
	 * needs to be located here together with the vector.
	 */
	.global icache_inv_user_range
	.type icache_inv_user_range , %function
icache_inv_user_range:
	push	{r4-r7}

	/* Mask all exceptions */
	mrs	r4, cpsr	/* This register must be preserved */
	cpsid	aif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r2, =thread_user_kcode_offset
	ldr	r2, [r2]
	read_vbar r5		/* This register must be preserved */
	sub	r3, r5, r2
	write_vbar r3
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r3, =1f
	sub	r3, r3, r2
	bx	r3
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r6, r7	/* These registers must be preserved */
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r2, r6, #CORE_MMU_L1_TBL_OFFSET
#endif
	/* switch to user ASID */
	orr	r3, r7, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r2, r3
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbr1 r6		/* This register must be preserved */
	add	r2, r6, #CORE_MMU_L1_TBL_OFFSET
	write_ttbr1 r2
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	read_contextidr r7	/* This register must be preserved */
	orr	r2, r7, #BIT(0)
	write_contextidr r2
	isb
#endif /*!CFG_WITH_LPAE*/

	/*
	 * Do the actual icache invalidation
	 */

	/* Calculate minimum icache line size, result in r2 */
	read_ctr r3
	and	r3, r3, #CTR_IMINLINE_MASK
	mov	r2, #CTR_WORD_SIZE
	lsl	r2, r2, r3
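	/*
	 * CTR.IminLine is the log2 of the number of words in the
	 * smallest icache line, so e.g. IminLine = 4 gives
	 * 4 << 4 = 64-byte lines in r2.
	 */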

	add	r1, r0, r1
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	write_icimvau r0
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b

	/* Invalidate entire branch predictor array inner shareable */
	write_bpiallis

	dsb	ishst
	isb

#ifdef CFG_WITH_LPAE
	write_ttbr0_64bit r6, r7
	isb
#else /*!CFG_WITH_LPAE*/
	write_contextidr r7
	isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_ttbr1 r6
	isb
#endif
#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_vbar r5
	isb
	/*
	 * The PC is adjusted unconditionally to guard against the case
	 * where there was an FIQ just before we did the "cpsid aif".
	 */
	ldr	r0, =1f
	bx	r0
1:
#endif

	msr	cpsr_fsxc, r4	/* Restore exceptions */
	pop	{r4-r7}
	bx	lr		/* End of icache_inv_user_range() */

	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
UNWIND(	.fnend)
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect
