xref: /optee_os/core/arch/arm/kernel/thread_a32.S (revision baa999cd61495093ce1e9c43251e655b3a14da67)
1/* SPDX-License-Identifier: BSD-2-Clause */
2/*
3 * Copyright (c) 2016-2017, Linaro Limited
4 * Copyright (c) 2014, STMicroelectronics International N.V.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright notice,
14 * this list of conditions and the following disclaimer in the documentation
15 * and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
21 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
28 */
29
30#include <arm32_macros.S>
31#include <arm.h>
32#include <asm-defines.h>
33#include <asm.S>
34#include <keep.h>
35#include <kernel/abort.h>
36#include <kernel/thread_defs.h>
37#include <kernel/unwind.h>
38#include <mm/core_mmu.h>
39#include <sm/optee_smc.h>
40#include <sm/teesmc_opteed.h>
41#include <sm/teesmc_opteed_macros.h>
42
43#include "thread_private.h"
44
45	.macro cmp_spsr_user_mode reg:req
46		/*
47		 * We're only testing the lower 4 bits of the mode field as
48		 * bit 4 (0x10) is always set (Z set means user mode).
49		 */
50		tst	\reg, #0x0f
51	.endm
52
53LOCAL_FUNC vector_std_smc_entry , :
54UNWIND(	.fnstart)
55UNWIND(	.cantunwind)
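	/*
	 * Pass the SMC arguments r0-r7 to thread_handle_std_smc() as a
	 * struct thread_smc_args on the stack, same convention as in
	 * thread_std_smc_entry below.
	 */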
56	push	{r0-r7}
57	mov	r0, sp
58	bl	thread_handle_std_smc
59	/*
60	 * Normally thread_handle_std_smc() should return via
61	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
62	 * hasn't switched stack (error detected) it will do a normal "C"
63	 * return.
64	 */
65	pop	{r1-r8}
66	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
67	smc	#0
68	b	.	/* SMC should not return */
69UNWIND(	.fnend)
70END_FUNC vector_std_smc_entry
71
72LOCAL_FUNC vector_fast_smc_entry , :
73UNWIND(	.fnstart)
74UNWIND(	.cantunwind)
75	push	{r0-r7}
76	mov	r0, sp
77	bl	thread_handle_fast_smc
78	pop	{r1-r8}
79	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
80	smc	#0
81	b	.	/* SMC should not return */
82UNWIND(	.fnend)
83END_FUNC vector_fast_smc_entry
84
85LOCAL_FUNC vector_fiq_entry , :
86UNWIND(	.fnstart)
87UNWIND(	.cantunwind)
88	/* Secure Monitor received a FIQ and passed control to us. */
89	bl	thread_check_canaries
90	ldr	lr, =thread_nintr_handler_ptr
91	ldr	lr, [lr]
92	blx	lr
93	mov	r1, r0
94	ldr	r0, =TEESMC_OPTEED_RETURN_FIQ_DONE
95	smc	#0
96	b	.	/* SMC should not return */
97UNWIND(	.fnend)
98END_FUNC vector_fiq_entry
99
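/*
 * Each of the cpu_*_entry and system_*_entry helpers below calls its
 * registered handler through the corresponding function pointer and
 * reports the handler's return value back to the secure monitor with an
 * SMC.
 */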
100LOCAL_FUNC vector_cpu_on_entry , :
101UNWIND(	.fnstart)
102UNWIND(	.cantunwind)
103	ldr	lr, =thread_cpu_on_handler_ptr
104	ldr	lr, [lr]
105	blx	lr
106	mov	r1, r0
107	ldr	r0, =TEESMC_OPTEED_RETURN_ON_DONE
108	smc	#0
109	b	.	/* SMC should not return */
110UNWIND(	.fnend)
111END_FUNC vector_cpu_on_entry
112
113LOCAL_FUNC vector_cpu_off_entry , :
114UNWIND(	.fnstart)
115UNWIND(	.cantunwind)
116	ldr	lr, =thread_cpu_off_handler_ptr
117	ldr	lr, [lr]
118	blx	lr
119	mov	r1, r0
120	ldr	r0, =TEESMC_OPTEED_RETURN_OFF_DONE
121	smc	#0
122	b	.	/* SMC should not return */
123UNWIND(	.fnend)
124END_FUNC vector_cpu_off_entry
125
126LOCAL_FUNC vector_cpu_suspend_entry , :
127UNWIND(	.fnstart)
128UNWIND(	.cantunwind)
129	ldr	lr, =thread_cpu_suspend_handler_ptr
130	ldr	lr, [lr]
131	blx	lr
132	mov	r1, r0
133	ldr	r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
134	smc	#0
135	b	.	/* SMC should not return */
136UNWIND(	.fnend)
137END_FUNC vector_cpu_suspend_entry
138
139LOCAL_FUNC vector_cpu_resume_entry , :
140UNWIND(	.fnstart)
141UNWIND(	.cantunwind)
142	ldr	lr, =thread_cpu_resume_handler_ptr
143	ldr	lr, [lr]
144	blx	lr
145	mov	r1, r0
146	ldr	r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
147	smc	#0
148	b	.	/* SMC should not return */
149UNWIND(	.fnend)
150END_FUNC vector_cpu_resume_entry
151
152LOCAL_FUNC vector_system_off_entry , :
153UNWIND(	.fnstart)
154UNWIND(	.cantunwind)
155	ldr	lr, =thread_system_off_handler_ptr
156	ldr	lr, [lr]
157	blx	lr
158	mov	r1, r0
159	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
160	smc	#0
161	b	.	/* SMC should not return */
162UNWIND(	.fnend)
163END_FUNC vector_system_off_entry
164
165LOCAL_FUNC vector_system_reset_entry , :
166UNWIND(	.fnstart)
167UNWIND(	.cantunwind)
168	ldr	lr, =thread_system_reset_handler_ptr
169	ldr	lr, [lr]
170	blx	lr
171	mov	r1, r0
172	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
173	smc	#0
174	b	.	/* SMC should not return */
175UNWIND(	.fnend)
176END_FUNC vector_system_reset_entry
177
178/*
179 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
180 * initialization.  Also used when compiled with the internal monitor, but
181 * the cpu_*_entry and system_*_entry are not used then.
182 *
183 * Note that ARM-TF depends on the layout of this vector table, any change
184 * in layout has to be synced with ARM-TF.
185 */
186FUNC thread_vector_table , :
187UNWIND(	.fnstart)
188UNWIND(	.cantunwind)
189	b	vector_std_smc_entry
190	b	vector_fast_smc_entry
191	b	vector_cpu_on_entry
192	b	vector_cpu_off_entry
193	b	vector_cpu_resume_entry
194	b	vector_cpu_suspend_entry
195	b	vector_fiq_entry
196	b	vector_system_off_entry
197	b	vector_system_reset_entry
198UNWIND(	.fnend)
199END_FUNC thread_vector_table
200KEEP_PAGER thread_vector_table
201
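/*
 * The thread_set_{abt,und,irq,fiq}_sp() functions below set the banked
 * stack pointer of the corresponding exception mode: save the current
 * CPSR, switch mode, assign SP and switch back.
 */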
202FUNC thread_set_abt_sp , :
203UNWIND(	.fnstart)
204UNWIND(	.cantunwind)
205	mrs	r1, cpsr
206	cps	#CPSR_MODE_ABT
207	mov	sp, r0
208	msr	cpsr, r1
209	bx	lr
210UNWIND(	.fnend)
211END_FUNC thread_set_abt_sp
212
213FUNC thread_set_und_sp , :
214UNWIND(	.fnstart)
215UNWIND(	.cantunwind)
216	mrs	r1, cpsr
217	cps	#CPSR_MODE_UND
218	mov	sp, r0
219	msr	cpsr, r1
220	bx	lr
221UNWIND(	.fnend)
222END_FUNC thread_set_und_sp
223
224FUNC thread_set_irq_sp , :
225UNWIND(	.fnstart)
226UNWIND(	.cantunwind)
227	mrs	r1, cpsr
228	cps	#CPSR_MODE_IRQ
229	mov	sp, r0
230	msr	cpsr, r1
231	bx	lr
232UNWIND(	.fnend)
233END_FUNC thread_set_irq_sp
234
235FUNC thread_set_fiq_sp , :
236UNWIND(	.fnstart)
237UNWIND(	.cantunwind)
238	mrs	r1, cpsr
239	cps	#CPSR_MODE_FIQ
240	mov	sp, r0
241	msr	cpsr, r1
242	bx	lr
243UNWIND(	.fnend)
244END_FUNC thread_set_fiq_sp
245
246/* void thread_resume(struct thread_ctx_regs *regs) */
247FUNC thread_resume , :
248UNWIND(	.fnstart)
249UNWIND(	.cantunwind)
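	/*
	 * regs is expected to be laid out as: r0-r12, user sp, user lr,
	 * svc spsr, svc sp, svc lr, pc, cpsr (in that order), matching
	 * how thread_save_state() stores the context.
	 */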
250	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */
251
252	cps	#CPSR_MODE_SYS
253	ldm	r12!, {sp, lr}
254
255	cps	#CPSR_MODE_SVC
256	ldm	r12!, {r1, sp, lr}
257	msr	spsr_fsxc, r1
258
259	ldm	r12, {r1, r2}
260
261	/*
262	 * Switch to a mode other than SVC since we need to set the SPSR
263	 * for the return, and the state we're returning to may itself be
264	 * SVC mode.
265	 */
266	cps	#CPSR_MODE_ABT
267	cmp_spsr_user_mode r2
268	mov	lr, r1
269	msr	spsr_fsxc, r2
270	ldm	r0, {r0-r12}
271	movnes	pc, lr
272	b	eret_to_user_mode
273UNWIND(	.fnend)
274END_FUNC thread_resume
275
276/*
277 * Disables async abort, IRQ and FIQ and saves the state of the thread
278 * from FIQ mode, which has banked r8-r12 registers. Returns the original CPSR.
279 */
280LOCAL_FUNC thread_save_state_fiq , :
281UNWIND(	.fnstart)
282UNWIND(	.cantunwind)
283	mov	r9, lr
284
285	/*
286	 * Uses stack for temporary storage, while storing needed
287	 * context in the thread context struct.
288	 */
289
290	mrs	r8, cpsr
291
292	cpsid	aif			/* Disable Async abort, IRQ and FIQ */
293
294	push	{r4-r7}
295	push	{r0-r3}
296
297	mrs	r6, cpsr		/* Save current CPSR */
298
299	bl	thread_get_ctx_regs
300
301	pop	{r1-r4}			/* r0-r3 pushed above */
302	stm	r0!, {r1-r4}
303	pop	{r1-r4}			/* r4-r7 pushed above */
304	stm	r0!, {r1-r4}
305
306	cps	#CPSR_MODE_SYS
307	stm	r0!, {r8-r12}
308	stm	r0!, {sp, lr}
309
310	cps	#CPSR_MODE_SVC
311	mrs	r1, spsr
312	stm	r0!, {r1, sp, lr}
313
314	/* back to fiq mode */
315	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
316	msr	cpsr, r6		/* Restore mode */
317
318	mov	r0, r8			/* Return original CPSR */
319	bx	r9
320UNWIND(	.fnend)
321END_FUNC thread_save_state_fiq
322
323/*
324 * Disables async abort, IRQ and FIQ and saves the state of the thread,
325 * returns the original CPSR.
326 */
327LOCAL_FUNC thread_save_state , :
328UNWIND(	.fnstart)
329UNWIND(	.cantunwind)
330	push	{r12, lr}
331	/*
332	 * Uses stack for temporary storage, while storing needed
333	 * context in the thread context struct.
334	 */
335
336	mrs	r12, cpsr
337
338	cpsid	aif			/* Disable Async abort, IRQ and FIQ */
339
340	push	{r4-r7}
341	push	{r0-r3}
342
343	mov	r5, r12			/* Save CPSR in a preserved register */
344	mrs	r6, cpsr		/* Save current CPSR */
345
346	bl	thread_get_ctx_regs
347
348	pop	{r1-r4}			/* r0-r3 pushed above */
349	stm	r0!, {r1-r4}
350	pop	{r1-r4}			/* r4-r7 pushed above */
351	stm	r0!, {r1-r4}
352	stm	r0!, {r8-r11}
353
354	pop	{r12, lr}
355	stm	r0!, {r12}
356
357	cps	#CPSR_MODE_SYS
358	stm	r0!, {sp, lr}
359
360	cps	#CPSR_MODE_SVC
361	mrs	r1, spsr
362	stm	r0!, {r1, sp, lr}
363
364	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
365	msr	cpsr, r6		/* Restore mode */
366
367	mov	r0, r5			/* Return original CPSR */
368	bx	lr
369UNWIND(	.fnend)
370END_FUNC thread_save_state
371
372FUNC thread_std_smc_entry , :
373UNWIND(	.fnstart)
374UNWIND(	.cantunwind)
375	/* Pass r0-r7 in a struct thread_smc_args */
376	push	{r0-r7}
377	mov	r0, sp
378	bl	__thread_std_smc_entry
379	/*
380	 * Load the returned r0-r3 into preserved registers and skip the
381	 * "returned" r4-r7 since they will not be returned to normal
382	 * world.
383	 */
384	pop	{r4-r7}
385	add	sp, #(4 * 4)
386
387	/* Disable interrupts before switching to temporary stack */
388	cpsid	aif
389	bl	thread_get_tmp_sp
390	mov	sp, r0
391
392	bl	thread_state_free
393
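	/* The results saved in r4-r7 above are passed to the monitor in r1-r4 */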
394	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
395	mov	r1, r4
396	mov	r2, r5
397	mov	r3, r6
398	mov	r4, r7
399	smc	#0
400	b	.	/* SMC should not return */
401UNWIND(	.fnend)
402END_FUNC thread_std_smc_entry
403
404
405/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
406FUNC thread_rpc , :
407/*
408 * r0-r2 are used to pass parameters to normal world
409 * r0-r5 are used to pass the return values back from normal world
410 *
411 * Note that r3 is used to pass "resume information", that is, which
412 * thread it is that should resume.
413 *
414 * Since this function follows the AAPCS we need to preserve r4-r5,
415 * which are otherwise modified when returning back from normal world.
416 */
417UNWIND(	.fnstart)
418	push	{r4-r5, lr}
419UNWIND(	.save	{r4-r5, lr})
420	push	{r0}
421UNWIND(	.save	{r0})
422
423	bl	thread_save_state
424	mov	r4, r0			/* Save original CPSR */
425
426	/*
427	 * Switch to temporary stack and SVC mode. Save CPSR to resume into.
428	 */
429	bl	thread_get_tmp_sp
430	ldr	r5, [sp]		/* Get pointer to rv[] */
431	cps	#CPSR_MODE_SVC		/* Change to SVC mode */
432	mov	sp, r0			/* Switch to tmp stack */
433
434	mov	r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
435	mov	r1, r4			/* CPSR to restore */
436	ldr	r2, =.thread_rpc_return
437	bl	thread_state_suspend
438	mov	r4, r0			/* Supply thread index */
439	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
440	ldm	r5, {r1-r3}		/* Load rv[] into r1-r3 */
441	smc	#0
442	b	.	/* SMC should not return */
443
444.thread_rpc_return:
445	/*
446	 * At this point the stack pointer has been restored to the value
447	 * it had when thread_save_state() was called above.
448	 *
449	 * We get here from thread_resume() above when the RPC has
450	 * returned. The IRQ and FIQ bits are restored to what they were
451	 * when this function was originally entered.
452	 */
453	pop	{r12}			/* Get pointer to rv[] */
454	stm	r12, {r0-r5}		/* Store r0-r5 into rv[] */
455	pop	{r4-r5, pc}
456UNWIND(	.fnend)
457END_FUNC thread_rpc
458KEEP_PAGER thread_rpc
459
460FUNC thread_init_vbar , :
461UNWIND(	.fnstart)
462	/* Set vector (VBAR) */
463#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
464	/*
465	 * For unrecognized CPUs we fall back to the vector used for
466	 * unaffected CPUs. Cortex-A15 has special treatment compared to
467	 * the other affected Cortex CPUs.
468	 */
469	read_midr r1
470	ubfx	r2, r1, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
471	cmp	r2, #MIDR_IMPLEMENTER_ARM
472	bne	1f
473
474	ubfx	r2, r1, #MIDR_PRIMARY_PART_NUM_SHIFT, \
475			#MIDR_PRIMARY_PART_NUM_WIDTH
476
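	/*
	 * Compare the part number against each affected Cortex CPU in
	 * turn. The cmpne/movwne chain only keeps testing as long as no
	 * earlier comparison has matched.
	 */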
477	movw	r3, #CORTEX_A8_PART_NUM
478	cmp	r2, r3
479	movwne	r3, #CORTEX_A9_PART_NUM
480	cmpne	r2, r3
481	movwne	r3, #CORTEX_A17_PART_NUM
482	cmpne	r2, r3
483	movwne	r3, #CORTEX_A57_PART_NUM
484	cmpne	r2, r3
485	movwne	r3, #CORTEX_A72_PART_NUM
486	cmpne	r2, r3
487	movwne	r3, #CORTEX_A73_PART_NUM
488	cmpne	r2, r3
489	movwne	r3, #CORTEX_A75_PART_NUM
490	cmpne	r2, r3
491	ldreq	r0, =exception_vector_bpiall
492	beq	2f
493
494	movw	r3, #CORTEX_A15_PART_NUM
495	cmp	r2, r3
496	ldreq	r0, =exception_vector_a15
497	beq	2f
498#endif
4991:	ldr	r0, =thread_vect_table
5002:	write_vbar r0
501	bx	lr
502UNWIND(	.fnend)
503END_FUNC thread_init_vbar
504KEEP_PAGER thread_init_vbar
505
506/*
507 * Below are low level routines handling entry and return from user mode.
508 *
509 * thread_enter_user_mode() saves all registers that user mode can change
510 * so kernel mode can restore needed registers when resuming execution
511 * after the call to thread_enter_user_mode() has returned.
512 * thread_enter_user_mode() doesn't return directly since it enters user
513 * mode instead; it's thread_unwind_user_mode() that does the returning
514 * by restoring the registers saved by thread_enter_user_mode().
515 *
516 * There are three ways for thread_enter_user_mode() to return to the caller:
517 * the user TA calls utee_return, the user TA calls utee_panic, or an abort occurs.
518 *
519 * Calls to utee_return or utee_panic are handled as:
520 * thread_svc_handler() -> tee_svc_handler() ->	tee_svc_do_call() which
521 * calls syscall_return() or syscall_panic().
522 *
523 * These function calls return normally, except thread_svc_handler()
524 * which is an exception handling routine and therefore reads the return
525 * address and SPSR to restore from the stack. syscall_return() and
526 * syscall_panic() change the return address and SPSR used by
527 * thread_svc_handler() so that, instead of returning into user mode as
528 * with other syscalls, it returns into thread_unwind_user_mode() in
529 * kernel mode. When thread_svc_handler() returns, the stack pointer is
530 * at the point where thread_enter_user_mode() left it, so this is where
531 * thread_unwind_user_mode() can operate.
532 *
533 * Aborts are handled in a similar way but by thread_abort_handler()
534 * instead: when the pager sees that it's an abort from user mode that
535 * can't be handled, it updates the SPSR and return address used by
536 * thread_abort_handler() to return into thread_unwind_user_mode()
537 * instead.
538 */
539
540/*
541 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
542 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
543 *               unsigned long user_func, unsigned long spsr,
544 *               uint32_t *exit_status0, uint32_t *exit_status1)
545 *
546 */
547FUNC __thread_enter_user_mode , :
548UNWIND(	.fnstart)
549UNWIND(	.cantunwind)
550	/*
551	 * Save all registers to allow syscall_return() to resume execution
552	 * as if this function would have returned. This is also used in
553	 * syscall_panic().
554	 *
555	 * If stack usage of this function is changed
556	 * thread_unwind_user_mode() has to be updated.
557	 */
558	push    {r4-r12,lr}
559
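	/*
	 * 10 registers were pushed above, so the stack arguments
	 * (user_sp, user_func, spsr, ...) start at sp + 10 * 4.
	 */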
560	ldr     r4, [sp, #(10 * 0x4)]   /* user stack pointer */
561	ldr     r5, [sp, #(11 * 0x4)]   /* user function */
562	ldr     r6, [sp, #(12 * 0x4)]   /* spsr */
563
564	/*
565	 * Save old user sp and set new user sp.
566	 */
567	cps	#CPSR_MODE_SYS
568	mov	r7, sp
569	mov     sp, r4
570	cps	#CPSR_MODE_SVC
571	push	{r7,r8}
572
573	/* Prepare user mode entry via eret_to_user_mode */
574	cpsid	aif
575	msr     spsr_fsxc, r6
576	mov	lr, r5
577
578	b	eret_to_user_mode
579UNWIND(	.fnend)
580END_FUNC __thread_enter_user_mode
581
582/*
583 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
584 *              uint32_t exit_status1);
585 * See description in thread.h
586 */
587FUNC thread_unwind_user_mode , :
588UNWIND(	.fnstart)
589UNWIND(	.cantunwind)
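	/*
	 * The stack layout is the one left by __thread_enter_user_mode():
	 * 10 registers from "push {r4-r12, lr}" plus 2 from "push {r7, r8}",
	 * so the caller's stack arguments exit_status0 and exit_status1 are
	 * found at offsets 15 * 4 and 16 * 4.
	 */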
590	ldr     ip, [sp, #(15 * 0x4)]   /* &ctx->panicked */
591	str	r1, [ip]
592	ldr     ip, [sp, #(16 * 0x4)]   /* &ctx->panic_code */
593	str	r2, [ip]
594
595	/* Restore old user sp */
596	pop	{r4,r7}
597	cps	#CPSR_MODE_SYS
598	mov	sp, r4
599	cps	#CPSR_MODE_SVC
600
601	pop	{r4-r12,pc}	/* Match the push in __thread_enter_user_mode() */
602UNWIND(	.fnend)
603END_FUNC thread_unwind_user_mode
604
605	.macro maybe_restore_mapping
606		/*
607		 * This macro is a bit hard to read due to all the ifdefs:
608		 * we're testing for two different configs, which makes four
609		 * different combinations.
610		 *
611		 * - With LPAE, and then some extra code if with
612		 *   CFG_CORE_UNMAP_CORE_AT_EL0
613		 * - Without LPAE, and then some extra code if with
614		 *   CFG_CORE_UNMAP_CORE_AT_EL0
615		 */
616
617		/*
618		 * At this point we can't rely on any memory being writable
619		 * yet, so we're using TPIDRPRW to store r0, and if with
620		 * LPAE TPIDRURO to store r1 too.
621		 */
622		write_tpidrprw r0
623#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
624		write_tpidruro r1
625#endif
626
627#ifdef CFG_WITH_LPAE
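		/*
		 * The lowest ASID bit is only set while the user mapping
		 * is active (see eret_to_user_mode), so if it's clear the
		 * kernel mapping is already in place and the switch can be
		 * skipped.
		 */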
628		read_ttbr0_64bit r0, r1
629		tst	r1, #BIT(TTBR_ASID_SHIFT - 32)
630		beq	11f
631
632#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
633		/*
634		 * Update the mapping to use the full kernel mode mapping.
635		 * Since the translation table could reside above 4GB we'll
636		 * have to use 64-bit arithmetics.
637		 */
638		subs	r0, r0, #CORE_MMU_L1_TBL_OFFSET
639		sbc	r1, r1, #0
640#endif
641		bic	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
642		write_ttbr0_64bit r0, r1
643		isb
644
645#else /*!CFG_WITH_LPAE*/
646		read_contextidr r0
647		tst	r0, #1
648		beq	11f
649
650		/* Update the mapping to use the full kernel mode mapping. */
651		bic	r0, r0, #1
652		write_contextidr r0
653		isb
654#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
655		read_ttbr1 r0
656		sub	r0, r0, #CORE_MMU_L1_TBL_OFFSET
657		write_ttbr1 r0
658		isb
659#endif
660
661#endif /*!CFG_WITH_LPAE*/
662
663#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
664		ldr	r0, =thread_user_kcode_offset
665		ldr	r0, [r0]
666		read_vbar r1
667		add	r1, r1, r0
668		write_vbar r1
669		isb
670
671	11:	/*
672		 * The PC is adjusted unconditionally to guard against the
673		 * case where an FIQ arrived just before we did the "cpsid aif".
674		 */
675		ldr	r0, =22f
676		bx	r0
677	22:
678#else
679	11:
680#endif
681		read_tpidrprw r0
682#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
683		read_tpidruro r1
684#endif
685	.endm
686
687/* The handler of native interrupt. */
688.macro	native_intr_handler mode:req
689	cpsid	aif
690	maybe_restore_mapping
691
692	/*
693	 * FIQ and IRQ have a +4 offset for lr compared to preferred return
694	 * address
695	 */
696	sub     lr, lr, #4
697
698	/*
699	 * We're always saving {r0-r3}. In IRQ mode we're saving r12 also.
700	 * In FIQ mode we're saving the banked FIQ registers {r8-r12}
701	 * because the secure monitor doesn't save those. The treatment of
702	 * the banked fiq registers is somewhat analogous to the lazy save
703	 * of VFP registers.
704	 */
705	.ifc	\mode\(),fiq
706	push	{r0-r3, r8-r12, lr}
707	.else
708	push	{r0-r3, r12, lr}
709	.endif
710
711	bl	thread_check_canaries
712	ldr	lr, =thread_nintr_handler_ptr
713	ldr	lr, [lr]
714	blx	lr
715
716	mrs	r0, spsr
717	cmp_spsr_user_mode r0
718
719	.ifc	\mode\(),fiq
720	pop	{r0-r3, r8-r12, lr}
721	.else
722	pop	{r0-r3, r12, lr}
723	.endif
724
725	movnes	pc, lr
726	b	eret_to_user_mode
727.endm
728
729/* The handler of foreign interrupt. */
730.macro foreign_intr_handler mode:req
731	cpsid	aif
732	maybe_restore_mapping
733
734	sub	lr, lr, #4
735	push	{lr}
736	push	{r12}
737
738	.ifc	\mode\(),fiq
739	bl	thread_save_state_fiq
740	.else
741	bl	thread_save_state
742	.endif
743
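	/*
	 * Call thread_state_suspend() with r0 = flags, r1 = the
	 * interrupted CPSR and r2 = the adjusted return address popped
	 * from the stack, to resume at later.
	 */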
744	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
745	mrs	r1, spsr
746	pop	{r12}
747	pop	{r2}
748	blx	thread_state_suspend
749	mov	r4, r0		/* Supply thread index */
750
751	/*
752	 * Switch to SVC mode and copy current stack pointer as it already
753	 * is the tmp stack.
754	 */
755	mov	r0, sp
756	cps	#CPSR_MODE_SVC
757	mov	sp, r0
758
759	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
760	ldr	r1, =OPTEE_SMC_RETURN_RPC_FOREIGN_INTR
761	mov	r2, #0
762	mov	r3, #0
763	/* r4 is already filled in above */
764	smc	#0
765	b	.	/* SMC should not return */
766.endm
767
768	.section .text.thread_vect_table
769        .align	5
770FUNC thread_vect_table , :
771UNWIND(	.fnstart)
772UNWIND(	.cantunwind)
773	b	.			/* Reset			*/
774	b	thread_und_handler	/* Undefined instruction	*/
775	b	thread_svc_handler	/* System call			*/
776	b	thread_pabort_handler	/* Prefetch abort		*/
777	b	thread_dabort_handler	/* Data abort			*/
778	b	.			/* Reserved			*/
779	b	thread_irq_handler	/* IRQ				*/
780	b	thread_fiq_handler	/* FIQ				*/
781#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
782	.macro vector_prologue_spectre
783		/*
784		 * This depends on SP being 8 byte aligned, that is, the
785		 * lowest three bits in SP are zero.
786		 *
787		 * To avoid unexpected speculation we need to invalidate
788		 * the branch predictor before we do the first branch. It
789		 * doesn't matter if it's a conditional or an unconditional
790		 * branch; speculation can still occur.
791		 *
792		 * The idea is to form a specific bit pattern in the lowest
793		 * three bits of SP depending on which entry in the vector
794		 * we enter via.  This is done by adding 1 to SP in each
795		 * entry but the last.
796		 */
797		add	sp, sp, #1	/* 7:111 Reset			*/
798		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
799		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
800		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
801		add	sp, sp, #1	/* 3:011 Data abort		*/
802		add	sp, sp, #1	/* 2:010 Reserved		*/
803		add	sp, sp, #1	/* 1:001 IRQ			*/
804		write_tpidrprw r0	/* 0:000 FIQ			*/
805	.endm
806
807        .align	5
808exception_vector_a15:
809	vector_prologue_spectre
810	mrs	r0, spsr
811	cmp_spsr_user_mode r0
812	bne	1f
813	/*
814	 * Invalidate the branch predictor for the current processor.
815	 * Note that the BPIALL instruction is not effective in
816	 * invalidating the branch predictor on Cortex-A15. For that CPU,
817	 * set ACTLR[0] to 1 during early processor initialisation, and
818	 * invalidate the branch predictor by performing an ICIALLU
819	 * instruction. See also:
820	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
821	 */
822	write_iciallu
823	isb
824	b	1f
825
826        .align	5
827exception_vector_bpiall:
828	vector_prologue_spectre
829	mrs	r0, spsr
830	cmp_spsr_user_mode r0
831	bne	1f
832	/* Invalidate the branch predictor for the current processor. */
833	write_bpiall
834	isb
835
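	/*
	 * r0 now holds the entry pattern (0 = FIQ ... 7 = Reset) encoded
	 * in the low bits of SP by vector_prologue_spectre. Each entry in
	 * the table below is two instructions (8 bytes), hence "LSL #3";
	 * reading PC yields the address of the add + 8, which together
	 * with the nop lands on the first (FIQ) entry for pattern 0.
	 */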
8361:	and	r0, sp, #(BIT(0) | BIT(1) | BIT(2))
837	bic	sp, sp, #(BIT(0) | BIT(1) | BIT(2))
838	add	pc, pc, r0, LSL #3
839	nop
840
841	read_tpidrprw r0
842	b	thread_fiq_handler	/* FIQ				*/
843	read_tpidrprw r0
844	b	thread_irq_handler	/* IRQ				*/
845	read_tpidrprw r0
846	b	.			/* Reserved			*/
847	read_tpidrprw r0
848	b	thread_dabort_handler	/* Data abort			*/
849	read_tpidrprw r0
850	b	thread_pabort_handler	/* Prefetch abort		*/
851	read_tpidrprw r0
852	b	thread_svc_handler	/* System call			*/
853	read_tpidrprw r0
854	b	thread_und_handler	/* Undefined instruction	*/
855	read_tpidrprw r0
856	b	.			/* Reset			*/
857#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/
858
859thread_und_handler:
860	cpsid	aif
861	maybe_restore_mapping
862	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
863	mrs	r1, spsr
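	/* The undefined instruction is at lr - 2 in Thumb state, lr - 4 in ARM state */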
864	tst	r1, #CPSR_T
865	subne	lr, lr, #2
866	subeq	lr, lr, #4
867	mov	r0, #ABORT_TYPE_UNDEF
868	b	thread_abort_common
869
870thread_dabort_handler:
871	cpsid	aif
872	maybe_restore_mapping
873	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
874	sub	lr, lr, #8
875	mov	r0, #ABORT_TYPE_DATA
876	b	thread_abort_common
877
878thread_pabort_handler:
879	cpsid	aif
880	maybe_restore_mapping
881	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
882	sub	lr, lr, #4
883	mov	r0, #ABORT_TYPE_PREFETCH
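	/* Fall through to thread_abort_common */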
884
885thread_abort_common:
886	/*
887	 * At this label:
888	 * the CPU is in Undef or Abort mode
889	 * sp is still pointing to struct thread_core_local belonging to
890	 * this core.
891	 * {r0, r1} are saved in struct thread_core_local pointed to by sp
892	 * {r2-r11, ip} are untouched.
893	 * r0 holds the first argument for abort_handler()
894	 */
895
896	/*
897	 * Update core local flags.
898	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
899	 */
900	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
901	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
902	orr	r1, r1, #THREAD_CLF_ABORT
903
904	/*
905	 * Select stack and update flags accordingly
906	 *
907	 * Normal case:
908	 * If the abort stack is unused select that.
909	 *
910	 * Fatal error handling:
911	 * If we're already using the abort stack as noted by bit
912	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
913	 * field we're selecting the temporary stack instead to be able to
914	 * make a stack trace of the abort in abort mode.
915	 *
916	 * r1 is initialized as a temporary stack pointer until we've
917	 * switched to system mode.
918	 */
919	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
920	orrne	r1, r1, #THREAD_CLF_TMP /* flags |= THREAD_CLF_TMP; */
921	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
922	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
923	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]
924
925	/*
926	 * Store registers on the stack matching struct thread_abort_regs,
927	 * starting from the end of the struct:
928	 * first {r2-r11, ip},
929	 * then the content of the previously saved {r0-r1} and the fields
930	 * down to and including the pad field.
931	 * After this only {usr_sp, usr_lr} are missing from the struct.
932	 */
933	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
934	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
935	/* Push the original {r0-r1} on the selected stack */
936	stmdb	r1!, {r2-r3}
937	mrs	r3, spsr
938	/* Push {pad, spsr, elr} on the selected stack */
939	stmdb	r1!, {r2, r3, lr}
940
941	cps	#CPSR_MODE_SYS
942	str	lr, [r1, #-4]!
943	str	sp, [r1, #-4]!
944	mov	sp, r1
945
946	bl	abort_handler
947
948	mov	ip, sp
949	ldr	sp, [ip], #4
950	ldr	lr, [ip], #4
951
952	/*
953	 * Even if we entered via CPSR_MODE_UND, we are returning via
954	 * CPSR_MODE_ABT. It doesn't matter as lr and spsr are assigned
955	 * here.
956	 */
957	cps	#CPSR_MODE_ABT
958	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
959	msr	spsr_fsxc, r1
960
961	/* Update core local flags */
962	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
963	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
964	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
965
966	cmp_spsr_user_mode r1
967	ldm	ip, {r0-r11, ip}
968	movnes	pc, lr
969	b	eret_to_user_mode
970	/* end thread_abort_common */
971
972thread_svc_handler:
973	cpsid	aif
974
975	maybe_restore_mapping
976
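	/*
	 * Save the caller's r0-r7, lr and SPSR on the stack and pass the
	 * address of this area in r0 as the argument to tee_svc_handler().
	 */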
977	push	{r0-r7, lr}
978	mrs	r0, spsr
979	push	{r0}
980	mov	r0, sp
981	bl	tee_svc_handler
982	cpsid	aif	/* In case something was unmasked */
983	pop	{r0}
984	msr	spsr_fsxc, r0
985	cmp_spsr_user_mode r0
986	pop	{r0-r7, lr}
987	movnes	pc, lr
988	b	eret_to_user_mode
989	/* end thread_svc_handler */
990
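/*
 * With CFG_ARM_GICV3 native (secure) interrupts are delivered as IRQ and
 * foreign (non-secure) interrupts as FIQ; without it the roles are
 * reversed.
 */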
991thread_fiq_handler:
992#if defined(CFG_ARM_GICV3)
993	foreign_intr_handler	fiq
994#else
995	native_intr_handler	fiq
996#endif
997	/* end thread_fiq_handler */
998
999thread_irq_handler:
1000#if defined(CFG_ARM_GICV3)
1001	native_intr_handler	irq
1002#else
1003	foreign_intr_handler	irq
1004#endif
1005	/* end thread_irq_handler */
1006
1007	/*
1008	 * Returns to user mode.
1009	 * Expects to be jumped to with lr pointing to the user space
1010	 * address to jump to and spsr holding the desired cpsr. Async
1011	 * abort, irq and fiq should be masked.
1012	 */
1013eret_to_user_mode:
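	/*
	 * r0 (and r1 when needed) are used as scratch while switching
	 * back to the user mapping below, so stash them in TPIDRPRW
	 * (and TPIDRURO) and restore them just before the return.
	 */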
1014	write_tpidrprw r0
1015#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
1016	write_tpidruro r1
1017#endif
1018
1019#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
1020	ldr	r0, =thread_user_kcode_offset
1021	ldr	r0, [r0]
1022	read_vbar r1
1023	sub	r1, r1, r0
1024	write_vbar r1
1025	isb
1026
1027	/* Jump into the reduced mapping before the full mapping is removed */
1028	ldr	r1, =1f
1029	sub	r1, r1, r0
1030	bx	r1
10311:
1032#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
1033
1034#ifdef CFG_WITH_LPAE
1035	read_ttbr0_64bit r0, r1
1036#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
1037	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
1038#endif
1039	/* switch to user ASID */
1040	orr	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
1041	write_ttbr0_64bit r0, r1
1042	isb
1043#else /*!CFG_WITH_LPAE*/
1044#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
1045	read_ttbr1 r0
1046	add	r0, r0, #CORE_MMU_L1_TBL_OFFSET
1047	write_ttbr1 r0
1048	isb
1049#endif
1050	read_contextidr r0
1051	orr	r0, r0, #BIT(0)
1052	write_contextidr r0
1053	isb
1054#endif /*!CFG_WITH_LPAE*/
1055
1056	read_tpidrprw r0
1057#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
1058	read_tpidruro r1
1059#endif
1060
1061	movs	pc, lr
1062UNWIND(	.fnend)
1063END_FUNC thread_vect_table
1064