/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016-2020, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>

	.syntax unified
	.arch_extension sec

	.macro cmp_spsr_user_mode reg:req
		/*
		 * We're only testing the lower 4 bits as the mode bit
		 * 0x10 is always set.
		 */
		tst	\reg, #0x0f
	.endm
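
	/*
	 * Illustrative note (not part of the original file): the AArch32
	 * mode field is M[4:0] with usr = 0x10, fiq = 0x11, irq = 0x12,
	 * svc = 0x13, abt = 0x17, und = 0x1b and sys = 0x1f. Since 0x10
	 * is set in all of them, "tst #0x0f" sets the Z flag only for
	 * user mode. A minimal C sketch of the same predicate, assuming
	 * <stdint.h>/<stdbool.h> (the helper name is hypothetical):
	 *
	 *	static inline bool spsr_is_user_mode(uint32_t spsr)
	 *	{
	 *		return (spsr & 0x0f) == 0;
	 *	}
	 */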

FUNC thread_set_abt_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_abt_sp

FUNC thread_set_und_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_UND
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_und_sp

FUNC thread_set_irq_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_fiq_sp

FUNC thread_get_usr_sp , :
	mrs	r1, cpsr
	cpsid	aif
	cps	#CPSR_MODE_SYS
	mov	r0, sp
	msr	cpsr, r1
	bx	lr
END_FUNC thread_get_usr_sp

FUNC thread_get_usr_lr , :
	mrs	r1, cpsr
	cpsid	aif
	cps	#CPSR_MODE_SYS
	mov	r0, lr
	msr	cpsr, r1
	bx	lr
END_FUNC thread_get_usr_lr

FUNC thread_set_usr_lr , :
	mrs	r1, cpsr
	cpsid	aif
	cps	#CPSR_MODE_SYS
	mov	lr, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_usr_lr
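
/*
 * Illustrative note (not part of the original file): each helper above
 * briefly switches processor mode to reach that mode's banked SP or LR
 * and then restores the original CPSR. A hedged sketch of how the stack
 * setters are typically used from C during per-CPU init; the prototypes
 * and stack symbols are assumptions for the example only:
 *
 *	extern uint8_t abt_stack_top[];	// made-up symbol
 *	extern uint8_t und_stack_top[];	// made-up symbol
 *
 *	static void example_init_banked_stacks(void)
 *	{
 *		thread_set_abt_sp((unsigned long)abt_stack_top);
 *		thread_set_und_sp((unsigned long)und_stack_top);
 *	}
 */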

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
UNWIND(	.cantunwind)
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4

	cps	#CPSR_MODE_SVC
	ldr	r1, [r12], #4
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4
	msr	spsr_fsxc, r1

	ldm	r12, {r1, r2}

	/*
	 * Switch to a mode other than SVC since we need to set SPSR in
	 * order to return into the old state properly, and the mode
	 * we're returning to may be SVC.
	 */
	cps	#CPSR_MODE_ABT
	cmp_spsr_user_mode r2
	mov	lr, r1
	msr	spsr_fsxc, r2
	ldm	r0, {r0-r12}
	movsne	pc, lr
	b	eret_to_user_mode
END_FUNC thread_resume
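
/*
 * Illustrative note (not part of the original file): the offsets used in
 * thread_resume() imply a save layout of 13 general purpose registers
 * followed by the banked user and svc state and finally pc/cpsr. A
 * hedged C sketch of such a layout, assuming <stdint.h> (the field and
 * struct names are assumptions; the authoritative struct thread_ctx_regs
 * and THREAD_CTX_REGS_* offsets come from the C headers and the
 * generated asm-defines):
 *
 *	struct thread_ctx_regs_sketch {
 *		uint32_t r[13];		// r0-r12, restored last by ldm
 *		uint32_t usr_sp;	// loaded in CPSR_MODE_SYS
 *		uint32_t usr_lr;
 *		uint32_t svc_spsr;	// loaded in CPSR_MODE_SVC
 *		uint32_t svc_sp;
 *		uint32_t svc_lr;
 *		uint32_t pc;		// becomes lr before the return
 *		uint32_t cpsr;		// written to SPSR
 *	};
 */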

/*
 * Disables IRQ and FIQ and saves the state of a thread that is in FIQ
 * mode, which has banked r8-r12 registers. Returns the original CPSR.
 */
LOCAL_FUNC thread_save_state_fiq , :
UNWIND(	.cantunwind)
	mov	r9, lr

	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r8, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}

	cps	#CPSR_MODE_SYS
	stm	r0!, {r8-r12}
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	/* back to fiq mode */
	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r8			/* Return original CPSR */
	bx	r9
END_FUNC thread_save_state_fiq

/*
 * Disables IRQ and FIQ, saves the state of the thread and returns the
 * original CPSR.
 */
FUNC thread_save_state , :
UNWIND(	.cantunwind)
	push	{r12, lr}
	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps	#CPSR_MODE_SYS
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
END_FUNC thread_save_state

#ifdef CFG_CORE_SEL2_SPMC
/*
 * unsigned long thread_hvc(unsigned long func_id, unsigned long a1,
 *			    unsigned long a2, unsigned long a3)
 */
FUNC thread_hvc , :
	push	{r4-r7}
UNWIND(	.save	{r4-r7})
	hvc	#0
	pop	{r4-r7}
	bx	lr
END_FUNC thread_hvc
#endif /*CFG_CORE_SEL2_SPMC*/

/*
 * unsigned long thread_smc(unsigned long func_id, unsigned long a1,
 *			    unsigned long a2, unsigned long a3)
 */
FUNC thread_smc , :
	push	{r4-r7}
UNWIND(	.save	{r4-r7})
	smc	#0
	pop	{r4-r7}
	bx	lr
END_FUNC thread_smc

/* void thread_smccc(struct thread_smc_args *arg_res) */
FUNC thread_smccc , :
	push	{r4-r7}
	push	{r0, lr}
	ldm	r0, {r0-r7}
#ifdef CFG_CORE_SEL2_SPMC
	hvc	#0
#else
	smc	#0
#endif
	pop	{r12, lr}
	stm	r12, {r0-r7}
	pop	{r4-r7}
	bx	lr
END_FUNC thread_smccc
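
/*
 * Illustrative note (not part of the original file): thread_smc() and
 * thread_hvc() follow the 32-bit SMC Calling Convention: the function ID
 * and up to three arguments go in r0-r3, the primary result comes back
 * in r0, and r4-r7 are preserved by the wrappers above. A hedged usage
 * sketch; the function ID constant is made up for the example:
 *
 *	#define EXAMPLE_FAST_CALL_FID	0x82000001UL	// assumption
 *
 *	static unsigned long example_fast_call(unsigned long a1)
 *	{
 *		return thread_smc(EXAMPLE_FAST_CALL_FID, a1, 0, 0);
 *	}
 *
 * thread_smccc() instead exchanges the full r0-r7 register set through
 * the struct thread_smc_args it is passed.
 */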

FUNC thread_init_vbar , :
	/* Set vector (VBAR) */
	write_vbar r0
	bx	lr
END_FUNC thread_init_vbar
DECLARE_KEEP_PAGER thread_init_vbar

/*
 * Below are low level routines handling entry and return from user mode.
 *
 * thread_enter_user_mode() saves all the registers that user mode can
 * change, so that kernel mode can restore the needed registers when
 * resuming execution after the call to thread_enter_user_mode() has
 * returned. thread_enter_user_mode() doesn't return directly since it
 * enters user mode instead; it's thread_unwind_user_mode() that does
 * the returning, by restoring the registers saved by
 * thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to the
 * caller: the user TA calls _utee_return, the user TA calls _utee_panic,
 * or an abort occurs.
 *
 * Calls to _utee_return or _utee_panic are handled as:
 * __thread_svc_handler() -> thread_scall_handler() -> scall_do_call() which
 * calls syscall_return() or syscall_panic().
 *
 * These functions return normally, except thread_scall_handler() which
 * is an exception handling routine and therefore reads the return
 * address and SPSR to restore from the stack. syscall_return() and
 * syscall_panic() change the return address and SPSR used by
 * thread_scall_handler() so that, instead of returning into user mode
 * as with other syscalls, it returns into thread_unwind_user_mode() in
 * kernel mode. When thread_scall_handler() returns, the stack pointer
 * is at the point where thread_enter_user_mode() left it, so this is
 * where thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way, but by thread_abort_handler()
 * instead: when the pager sees an abort from user mode that can't be
 * handled, it updates the SPSR and return address used by
 * thread_abort_handler() so that it returns into
 * thread_unwind_user_mode() instead.
 */
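
/*
 * Illustrative note (not part of the original file): a hedged C-level
 * sketch of how the pair below is typically driven. The wrapper name is
 * an assumption; only the prototype and the fact that the call comes
 * back via thread_unwind_user_mode() are given by this file:
 *
 *	static uint32_t example_run_user_code(struct thread_ctx_regs *regs)
 *	{
 *		uint32_t exit_status0 = 0;
 *		uint32_t exit_status1 = 0;
 *
 *		// Exceptions are assumed to already be masked by the
 *		// caller. This only "returns" once syscall_return(),
 *		// syscall_panic() or the abort path has redirected
 *		// execution into thread_unwind_user_mode().
 *		return __thread_enter_user_mode(regs, &exit_status0,
 *						&exit_status1);
 *	}
 */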

/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 *
 * This function depends on being called with exceptions masked.
 */
FUNC __thread_enter_user_mode , :
UNWIND(	.cantunwind)
	/*
	 * Save all registers to allow syscall_return() to resume execution
	 * as if this function had returned. This is also used in
	 * syscall_panic().
	 *
	 * If the stack usage of this function is changed
	 * thread_unwind_user_mode() has to be updated.
	 */
	push	{r4-r12,lr}

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r4, sp
	ldr	sp, [r0, #THREAD_CTX_REGS_USR_SP]
	cps	#CPSR_MODE_SVC

	push	{r1, r2, r4, r5}

	/* Prepare user mode entry via eret_to_user_mode */
	ldr	lr, [r0, #THREAD_CTX_REGS_PC]
	ldr	r4, [r0, #THREAD_CTX_REGS_CPSR]
	msr	spsr_fsxc, r4

	ldm	r0, {r0-r12}

	b	eret_to_user_mode
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *              uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
UNWIND(	.cantunwind)
	/* Match push {r1, r2, r4, r5} in __thread_enter_user_mode() */
	pop	{r4-r7}
	str	r1, [r4]
	str	r2, [r5]

	/* Restore old user sp */
	cps	#CPSR_MODE_SYS
	mov	sp, r6
	cps	#CPSR_MODE_SVC

	/* Match push {r4-r12,lr} in __thread_enter_user_mode() */
	pop	{r4-r12,pc}
END_FUNC thread_unwind_user_mode

	.macro maybe_restore_mapping
		/*
		 * This macro is a bit hard to read due to all the ifdefs:
		 * we're testing for two different configs, which makes
		 * four different combinations.
		 *
		 * - With LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 * - Without LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 */

		/*
		 * At this point we can't rely on any memory being writable
		 * yet, so we're using TPIDRPRW to store r0 and, with LPAE
		 * or CFG_CORE_UNMAP_CORE_AT_EL0, TPIDRURO to store r1 too.
		 */
		write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		write_tpidruro r1
#endif

#ifdef CFG_WITH_LPAE
		read_ttbr0_64bit r0, r1
		tst	r1, #BIT(TTBR_ASID_SHIFT - 32)
		beq	11f

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/*
		 * Update the mapping to use the full kernel mode mapping.
		 * Since the translation table could reside above 4GB we'll
		 * have to use 64-bit arithmetic.
		 */
		subs	r0, r0, #CORE_MMU_BASE_TABLE_OFFSET
		sbc	r1, r1, #0
#endif
		bic	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
		write_ttbr0_64bit r0, r1
		isb

#else /*!CFG_WITH_LPAE*/
		read_contextidr r0
		tst	r0, #1
		beq	11f

		/* Update the mapping to use the full kernel mode mapping. */
		bic	r0, r0, #1
		write_contextidr r0
		isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		read_ttbcr r0
		bic	r0, r0, #TTBCR_PD1
		write_ttbcr r0
		isb
#endif

#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		ldr	r0, =thread_user_kcode_offset
		ldr	r0, [r0]
		read_vbar r1
		add	r1, r1, r0
		write_vbar r1
		isb

	11:	/*
		 * The PC is adjusted unconditionally to guard against the
		 * case where there was an FIQ just before we did the
		 * "cpsid aif".
		 */
		ldr	r0, =22f
		bx	r0
	22:
#else
	11:
#endif
		read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		read_tpidruro r1
#endif
	.endm
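
	/*
	 * Illustrative note (not part of the original file): the macro
	 * above checks whether the exception was taken with the reduced
	 * user mapping active and, if so, switches back to the full
	 * kernel mapping. With LPAE the distinction is carried by the
	 * ASID bit in the upper word of TTBR0; without LPAE it's bit 0
	 * of CONTEXTIDR. A hedged C sketch of just the test (the
	 * register accessors are placeholders, not real functions):
	 *
	 *	static bool entered_with_user_mapping(void)
	 *	{
	 *	#ifdef CFG_WITH_LPAE
	 *		uint32_t ttbr0_hi = example_read_ttbr0_hi();
	 *
	 *		return ttbr0_hi & BIT(TTBR_ASID_SHIFT - 32);
	 *	#else
	 *		return example_read_contextidr() & BIT(0);
	 *	#endif
	 *	}
	 */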

/* The handler of native interrupts. */
.macro	native_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	/*
	 * FIQ and IRQ have a +4 offset for lr compared to the preferred
	 * return address.
	 */
	sub	lr, lr, #4

	/*
	 * We're always saving {r0-r3}. In IRQ mode we're saving r12 also.
	 * In FIQ mode we're also saving the banked FIQ registers {r8-r12}
	 * because the secure monitor doesn't save those. The treatment of
	 * the banked FIQ registers is somewhat analogous to the lazy save
	 * of VFP registers.
	 */
	.ifc	\mode\(),fiq
	push	{r0-r3, r8-r12, lr}
	.else
	push	{r0-r3, r12, lr}
	.endif

	/*
	 * Use SP_abt to update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_TMP |
	 *         THREAD_CLF_{FIQ|IRQ};
	 */
	cps	#CPSR_MODE_ABT
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	.ifc	\mode\(),fiq
	orr	r1, r1, #(THREAD_CLF_TMP | THREAD_CLF_FIQ)
	.else
	orr	r1, r1, #(THREAD_CLF_TMP | THREAD_CLF_IRQ)
	.endif
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	.ifc	\mode\(),fiq
	cps	#CPSR_MODE_FIQ
	.else
	cps	#CPSR_MODE_IRQ
	.endif

	bl	thread_check_canaries
	bl	interrupt_main_handler

	/*
	 * Use SP_abt to update core local flags.
	 * flags >>= THREAD_CLF_SAVED_SHIFT;
	 */
	cps	#CPSR_MODE_ABT
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	r1, r1, #THREAD_CLF_SAVED_SHIFT
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	.ifc	\mode\(),fiq
	cps	#CPSR_MODE_FIQ
	.else
	cps	#CPSR_MODE_IRQ
	.endif

	mrs	r0, spsr
	cmp_spsr_user_mode r0

	.ifc	\mode\(),fiq
	pop	{r0-r3, r8-r12, lr}
	.else
	pop	{r0-r3, r12, lr}
	.endif

	movsne	pc, lr
	b	eret_to_user_mode
.endm
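
/*
 * Illustrative note (not part of the original file): the core local
 * flags word is used as a small stack of per-CPU states. Entering a
 * handler shifts the current flags up and ORs in the new state, and
 * leaving shifts them back down. A hedged C sketch of that push/pop
 * idea, mirroring the constants used above (the helper names are
 * hypothetical):
 *
 *	static uint32_t clf_push(uint32_t flags, uint32_t new_state)
 *	{
 *		return (flags << THREAD_CLF_SAVED_SHIFT) | new_state;
 *	}
 *
 *	static uint32_t clf_pop(uint32_t flags)
 *	{
 *		return flags >> THREAD_CLF_SAVED_SHIFT;
 *	}
 *
 * e.g. clf_push(flags, THREAD_CLF_TMP | THREAD_CLF_IRQ) on entry and
 * clf_pop(flags) on exit, done above via SP_abt in abort mode.
 */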

/* The handler of foreign interrupts. */
.macro foreign_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	sub	lr, lr, #4
	push	{r12}

	.ifc	\mode\(),fiq
	/*
	 * If a foreign (non-secure) interrupt is received as a FIQ we need
	 * to check that we're in a saveable state, or whether we need to
	 * mask the interrupt and handle it later.
	 *
	 * The window where this is needed is quite narrow: it's between
	 * entering the exception vector and the point where the "cpsid"
	 * instruction of the handler has been executed.
	 *
	 * Currently we can save the state properly if the FIQ is received
	 * while in user or svc (kernel) mode.
	 *
	 * If we're returning to abort, undef or irq mode we're returning
	 * with the mapping restored. This is OK since the reduced mapping
	 * will be restored before the handler we're returning to
	 * eventually returns to user mode.
	 */
	mrs	r12, spsr
	and	r12, r12, #ARM32_CPSR_MODE_MASK
	cmp	r12, #ARM32_CPSR_MODE_USR
	cmpne	r12, #ARM32_CPSR_MODE_SVC
	beq	1f
	mrs	r12, spsr
	orr	r12, r12, #ARM32_CPSR_F
	msr	spsr_fsxc, r12
	pop	{r12}
	movs	pc, lr
1:
	.endif

	push	{lr}

	.ifc	\mode\(),fiq
	bl	thread_save_state_fiq
	.else
	bl	thread_save_state
	.endif

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which cache entries have been
	 * used. We're relying on the secure monitor/dispatcher to take
	 * care of the BTB.
	 */
	mov	r0, #DCACHE_OP_CLEAN_INV
	bl	dcache_op_louis
	write_iciallu
#endif

	/*
	 * Use SP_abt to update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_TMP;
	 */
	cps	#CPSR_MODE_ABT
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_TMP
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	.ifc	\mode\(),fiq
	cps	#CPSR_MODE_FIQ
	.else
	cps	#CPSR_MODE_IRQ
	.endif

	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	r1, spsr
	pop	{r2}
	pop	{r12}
	blx	thread_state_suspend

	/*
	 * Switch to SVC mode and copy the current stack pointer, as it
	 * is already the tmp stack.
	 */
	mov	r1, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r1

	/* Passing thread index in r0 */
	b	thread_foreign_intr_exit
.endm

FUNC thread_excp_vect , :, align=32
UNWIND(	.cantunwind)
	b	.			/* Reset			*/
	b	__thread_und_handler	/* Undefined instruction	*/
	b	__thread_svc_handler	/* System call			*/
	b	__thread_pabort_handler	/* Prefetch abort		*/
	b	__thread_dabort_handler	/* Data abort			*/
	b	.			/* Reserved			*/
	b	__thread_irq_handler	/* IRQ				*/
	b	__thread_fiq_handler	/* FIQ				*/
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8-byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * To avoid unexpected speculation we need to invalidate
		 * the branch predictor before we do the first branch. It
		 * doesn't matter if it's a conditional or an unconditional
		 * branch, speculation can still occur.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * we enter via. This is done by adding 1 to SP in each
		 * entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset			*/
		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
		add	sp, sp, #1	/* 3:011 Data abort		*/
		add	sp, sp, #1	/* 2:010 Reserved		*/
		add	sp, sp, #1	/* 1:001 IRQ			*/
		cpsid	aif		/* 0:000 FIQ			*/
	.endm
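
	/*
	 * Illustrative note (not part of the original file): a worked
	 * example of the SP trick above. The CPU starts at the vector
	 * slot of the taken exception and falls through the remaining
	 * slots, so a data abort (the "3:011" slot) executes three
	 * "add sp, sp, #1" before reaching "cpsid aif", leaving
	 * SP & 7 == 3. The dispatch code below then executes
	 * "add pc, pc, r0, LSL #3" with r0 == 3, which lands three
	 * 8-byte stubs past the FIQ stub, i.e. on the data abort stub,
	 * while "bic sp, sp, ..." has already restored the aligned SP.
	 */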

	.balign	32
	.global thread_excp_vect_wa_a15_spectre_v2
thread_excp_vect_wa_a15_spectre_v2:
	vector_prologue_spectre
	write_tpidrprw r0
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.balign	32
	.global thread_excp_vect_wa_spectre_v2
thread_excp_vect_wa_spectre_v2:
	vector_prologue_spectre
	write_tpidrprw r0
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:	and	r0, sp, #(BIT(0) | BIT(1) | BIT(2))
	bic	sp, sp, #(BIT(0) | BIT(1) | BIT(2))
	add	pc, pc, r0, LSL #3
	nop

	read_tpidrprw r0
	b	__thread_fiq_handler	/* FIQ				*/
	read_tpidrprw r0
	b	__thread_irq_handler	/* IRQ				*/
	read_tpidrprw r0
	b	.			/* Reserved			*/
	read_tpidrprw r0
	b	__thread_dabort_handler	/* Data abort			*/
	read_tpidrprw r0
	b	__thread_pabort_handler	/* Prefetch abort		*/
	read_tpidrprw r0
	b	__thread_svc_handler	/* System call			*/
	read_tpidrprw r0
	b	__thread_und_handler	/* Undefined instruction	*/
	read_tpidrprw r0
	b	.			/* Reset			*/
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

__thread_und_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	mrs	r1, spsr
	tst	r1, #CPSR_T
	subne	lr, lr, #2
	subeq	lr, lr, #4
	mov	r0, #ABORT_TYPE_UNDEF
	b	__thread_abort_common

__thread_dabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #8
	mov	r0, #ABORT_TYPE_DATA
	b	__thread_abort_common

__thread_pabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #4
	mov	r0, #ABORT_TYPE_PREFETCH

__thread_abort_common:
	/*
	 * At this label:
	 * cpsr is in undef or abort mode.
	 * sp is still pointing to struct thread_core_local belonging to
	 * this core.
	 * {r0, r1} are saved in struct thread_core_local pointed to by sp.
	 * {r2-r11, ip} are untouched.
	 * r0 holds the first argument for abort_handler().
	 */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_ABORT

	/*
	 * Select stack and update flags accordingly.
	 *
	 * Normal case:
	 * If the abort stack is unused, select that.
	 *
	 * Fatal error handling:
	 * If we're already using the abort stack, as noted by bit
	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
	 * field, we select the temporary stack instead to be able to
	 * make a stack trace of the abort in abort mode.
	 *
	 * r1 is initialized as a temporary stack pointer until we've
	 * switched to system mode.
	 */
	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	orrne	r1, r1, #THREAD_CLF_TMP /* flags |= THREAD_CLF_TMP; */
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]

	/*
	 * Store registers on the stack, fitting struct thread_abort_regs,
	 * starting from the end of the struct:
	 * {r2-r11, ip}
	 * Then load the previously saved {r0-r1} and store them together
	 * with {pad, spsr, elr}.
	 * After this only {usr_sp, usr_lr} are missing in the struct.
	 */
	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
	/* Push the original {r0-r1} on the selected stack */
	stmdb	r1!, {r2-r3}
	mrs	r3, spsr
	/* Push {pad, spsr, elr} on the selected stack */
	stmdb	r1!, {r2, r3, lr}

	cps	#CPSR_MODE_SYS
	str	lr, [r1, #-4]!
	str	sp, [r1, #-4]!
	mov	sp, r1

	bl	abort_handler

	mov	ip, sp
	ldr	sp, [ip], #4
	ldr	lr, [ip], #4

	/*
	 * Even if we entered via CPSR_MODE_UND, we are returning via
	 * CPSR_MODE_ABT. It doesn't matter as lr and spsr are assigned
	 * here.
	 */
	cps	#CPSR_MODE_ABT
	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
	msr	spsr_fsxc, r1

	/* Update core local flags */
	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	cmp_spsr_user_mode r1
	ldm	ip, {r0-r11, ip}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_abort_common */
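
	/*
	 * Illustrative note (not part of the original file): the pushes
	 * in __thread_abort_common imply an abort frame that, from low
	 * to high addresses, holds usr_sp, usr_lr, pad, spsr, elr, r0-r11
	 * and ip. A hedged C sketch of such a frame, assuming <stdint.h>
	 * (field and struct names are assumptions; the authoritative
	 * struct thread_abort_regs is defined in the C headers):
	 *
	 *	struct thread_abort_regs_sketch {
	 *		uint32_t usr_sp;
	 *		uint32_t usr_lr;
	 *		uint32_t pad;
	 *		uint32_t spsr;
	 *		uint32_t elr;
	 *		uint32_t r0;
	 *		uint32_t r1;
	 *		uint32_t r2_to_r11[10];
	 *		uint32_t ip;
	 *	};
	 */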

__thread_svc_handler:
	cpsid	aif

	maybe_restore_mapping

	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
	mov	r0, sp
	bl	thread_scall_handler
	cpsid	aif	/* In case something was unmasked */
	pop	{r0}
	msr	spsr_fsxc, r0
	cmp_spsr_user_mode r0
	pop	{r0-r7, lr}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_svc_handler */

__thread_fiq_handler:
#if defined(CFG_CORE_IRQ_IS_NATIVE_INTR)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
	/* end thread_fiq_handler */

__thread_irq_handler:
#if defined(CFG_CORE_IRQ_IS_NATIVE_INTR)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
	/* end thread_irq_handler */

	/*
	 * Returns to user mode.
	 * Expects to be jumped to with lr pointing to the user space
	 * address to jump to and spsr holding the desired cpsr. Async
	 * abort, irq and fiq should be masked.
	 */
eret_to_user_mode:
	write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	write_tpidruro r1
#endif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r0, =thread_user_kcode_offset
	ldr	r0, [r0]
	read_vbar r1
	sub	r1, r1, r0
	write_vbar r1
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r1, =1f
	sub	r1, r1, r0
	bx	r1
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r0, r1
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r0, r0, #CORE_MMU_BASE_TABLE_OFFSET
#endif
	/* switch to user ASID */
	orr	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r0, r1
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbcr r0
	orr	r0, r0, #TTBCR_PD1
	write_ttbcr r0
	isb
#endif
	read_contextidr r0
	orr	r0, r0, #BIT(0)
	write_contextidr r0
	isb
#endif /*!CFG_WITH_LPAE*/

	read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	read_tpidruro r1
#endif

	movs	pc, lr

	/*
	 * void icache_inv_user_range(void *addr, size_t size);
	 *
	 * This function has to execute with the user space ASID active;
	 * this means executing with the reduced mapping, so the code
	 * needs to be located here together with the vector.
	 */
	.global icache_inv_user_range
	.type icache_inv_user_range , %function
icache_inv_user_range:
	push	{r4-r7}

	/* Mask all exceptions */
	mrs	r4, cpsr	/* This register must be preserved */
	cpsid	aif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r2, =thread_user_kcode_offset
	ldr	r2, [r2]
	read_vbar r5		/* This register must be preserved */
	sub	r3, r5, r2
	write_vbar r3
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r3, =1f
	sub	r3, r3, r2
	bx	r3
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r6, r7	/* These registers must be preserved */
	/* switch to user ASID */
	orr	r3, r7, #BIT(TTBR_ASID_SHIFT - 32)
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r2, r6, #CORE_MMU_BASE_TABLE_OFFSET
	write_ttbr0_64bit r2, r3
#else
	write_ttbr0_64bit r6, r3
#endif
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbcr r6	/* This register must be preserved */
	orr	r2, r6, #TTBCR_PD1
	write_ttbcr r2
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	read_contextidr r7	/* This register must be preserved */
	orr	r2, r7, #BIT(0)
	write_contextidr r2
	isb
#endif /*!CFG_WITH_LPAE*/

	/*
	 * Do the actual icache invalidation
	 */

	/* Calculate minimum icache line size, result in r2 */
	read_ctr r3
	and	r3, r3, #CTR_IMINLINE_MASK
	mov	r2, #CTR_WORD_SIZE
	lsl	r2, r2, r3

	add	r1, r0, r1
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	write_icimvau r0
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b

	/* Invalidate entire branch predictor array inner shareable */
	write_bpiallis

	dsb	ishst
	isb

#ifdef CFG_WITH_LPAE
	write_ttbr0_64bit r6, r7
	isb
#else /*!CFG_WITH_LPAE*/
	write_contextidr r7
	isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_ttbcr r6
	isb
#endif
#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_vbar r5
	isb
	/*
	 * The PC is adjusted unconditionally to guard against the
	 * case where there was an FIQ just before we did the
	 * "cpsid aif".
	 */
	ldr	r0, =1f
	bx	r0
1:
#endif

	msr	cpsr_fsxc, r4	/* Restore exceptions */
	pop	{r4-r7}
	bx	lr		/* End of icache_inv_user_range() */
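
	/*
	 * Illustrative note (not part of the original file): the line
	 * size computation above derives the minimum instruction cache
	 * line in bytes from CTR.IminLine, which counts words. A hedged
	 * C sketch of the same calculation (the register accessor is a
	 * placeholder, not a real function):
	 *
	 *	static uint32_t icache_min_line_size(void)
	 *	{
	 *		uint32_t ctr = example_read_ctr();
	 *
	 *		return CTR_WORD_SIZE << (ctr & CTR_IMINLINE_MASK);
	 *	}
	 */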

	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect