xref: /optee_os/core/arch/arm/sm/sm_a32.S (revision 39cb90b059b4be162f56d806df6224ab4956d791)
11bb92983SJerome Forissier/* SPDX-License-Identifier: BSD-2-Clause */
2abe38974SJens Wiklander/*
3fae8192bSJens Wiklander * Copyright (c) 2016-2020, Linaro Limited
4abe38974SJens Wiklander * Copyright (c) 2014, STMicroelectronics International N.V.
5abe38974SJens Wiklander */
6abe38974SJens Wiklander
7abe38974SJens Wiklander#include <arm32_macros.S>
80e7659caSJens Wiklander#include <arm.h>
90e7659caSJens Wiklander#include <asm.S>
1089fe7c3cSJerome Forissier#include <generated/asm-defines.h>
110e7659caSJens Wiklander#include <keep.h>
123f4d6849SJens Wiklander#include <sm/optee_smc.h>
1365363c52SEtienne Carriere#include <sm/sm.h>
143f4d6849SJens Wiklander#include <sm/teesmc_opteed.h>
153f4d6849SJens Wiklander#include <sm/teesmc_opteed_macros.h>
163bc90f3dSJens Wiklander#include <util.h>
17abe38974SJens Wiklander
18e72c941fSJens Wiklander#define SM_CTX_SEC_END	(SM_CTX_SEC + SM_CTX_SEC_SIZE)
19e72c941fSJens Wiklander
/*
 * save_regs \mode - store one CPU mode's banked SPSR, SP and LR.
 *
 * Switches to \mode, then stores SPSR, SP and LR (three words) at [r0],
 * post-incrementing r0 past them. Clobbers r2. Leaves the CPU in \mode;
 * the caller is expected to switch back (see cps #CPSR_MODE_MON below).
 */
209de8272eSJerome Forissier	.macro save_regs mode
219de8272eSJerome Forissier	cps	\mode
229de8272eSJerome Forissier	mrs	r2, spsr
239de8272eSJerome Forissier	str	r2, [r0], #4
249de8272eSJerome Forissier	str	sp, [r0], #4
259de8272eSJerome Forissier	str	lr, [r0], #4
269de8272eSJerome Forissier	.endm
279de8272eSJerome Forissier
/*
 * sm_save_unbanked_regs - save the mode-banked registers to memory.
 *
 * In:  r0 = destination buffer (filled in the fixed order below:
 *      usr SP/LR, then SPSR/SP/LR for IRQ, FIQ, SVC, ABT, UND,
 *      optionally PMCR and CNTKCTL depending on build config).
 * Out: r0 advanced past the saved words.
 * Clobbers r2. Must be called from Monitor mode; returns in Monitor mode.
 */
280160fec3SJerome ForissierFUNC sm_save_unbanked_regs , :
29923c1f34SJens WiklanderUNWIND(	.cantunwind)
30abe38974SJens Wiklander	/* User mode registers have to be saved from System mode */
31abe38974SJens Wiklander	cps	#CPSR_MODE_SYS
329de8272eSJerome Forissier	str	sp, [r0], #4
339de8272eSJerome Forissier	str	lr, [r0], #4
34abe38974SJens Wiklander
359de8272eSJerome Forissier	save_regs	#CPSR_MODE_IRQ
369de8272eSJerome Forissier	save_regs	#CPSR_MODE_FIQ
379de8272eSJerome Forissier	save_regs	#CPSR_MODE_SVC
389de8272eSJerome Forissier	save_regs	#CPSR_MODE_ABT
399de8272eSJerome Forissier	save_regs	#CPSR_MODE_UND
40abe38974SJens Wiklander
418267e19bSJerome Forissier#ifdef CFG_SM_NO_CYCLE_COUNTING
	/* Preserve PMCR so the world switch can't leak cycle counts */
428267e19bSJerome Forissier	read_pmcr r2
438267e19bSJerome Forissier	stm	r0!, {r2}
448267e19bSJerome Forissier#endif
45edaf8c38SSumit Garg
46099918f6SSumit Garg#ifdef CFG_FTRACE_SUPPORT
	/* Preserve CNTKCTL (timer access control) across the world switch */
47edaf8c38SSumit Garg	read_cntkctl r2
48edaf8c38SSumit Garg	stm	r0!, {r2}
49edaf8c38SSumit Garg#endif
50abe38974SJens Wiklander	cps	#CPSR_MODE_MON
51abe38974SJens Wiklander	bx	lr
520160fec3SJerome ForissierEND_FUNC sm_save_unbanked_regs
53abe38974SJens Wiklander
/*
 * restore_regs \mode - load one CPU mode's banked SPSR, SP and LR.
 *
 * Switches to \mode, then loads SPSR, SP and LR (three words) from [r0],
 * post-incrementing r0 past them. Clobbers r2. Leaves the CPU in \mode.
 * Mirror of save_regs above; the load order must match the save order.
 */
549de8272eSJerome Forissier	.macro restore_regs mode
559de8272eSJerome Forissier	cps	\mode
569de8272eSJerome Forissier	ldr	r2, [r0], #4
579de8272eSJerome Forissier	ldr	sp, [r0], #4
589de8272eSJerome Forissier	ldr	lr, [r0], #4
599de8272eSJerome Forissier	msr	spsr_fsxc, r2
609de8272eSJerome Forissier	.endm
619de8272eSJerome Forissier
62abe38974SJens Wiklander/* Restores the mode specific registers */
/*
 * sm_restore_unbanked_regs - inverse of sm_save_unbanked_regs.
 *
 * In:  r0 = buffer previously filled by sm_save_unbanked_regs
 *      (same layout and same build-config-dependent trailing words).
 * Out: r0 advanced past the restored words.
 * Clobbers r2. Must be called from Monitor mode; returns in Monitor mode.
 */
630160fec3SJerome ForissierFUNC sm_restore_unbanked_regs , :
64923c1f34SJens WiklanderUNWIND(	.cantunwind)
65abe38974SJens Wiklander	/* User mode registers have to be restored from System mode */
66abe38974SJens Wiklander	cps	#CPSR_MODE_SYS
679de8272eSJerome Forissier	ldr	sp, [r0], #4
689de8272eSJerome Forissier	ldr	lr, [r0], #4
69abe38974SJens Wiklander
709de8272eSJerome Forissier	restore_regs	#CPSR_MODE_IRQ
719de8272eSJerome Forissier	restore_regs	#CPSR_MODE_FIQ
729de8272eSJerome Forissier	restore_regs	#CPSR_MODE_SVC
739de8272eSJerome Forissier	restore_regs	#CPSR_MODE_ABT
749de8272eSJerome Forissier	restore_regs	#CPSR_MODE_UND
75abe38974SJens Wiklander
768267e19bSJerome Forissier#ifdef CFG_SM_NO_CYCLE_COUNTING
778267e19bSJerome Forissier	ldm	r0!, {r2}
788267e19bSJerome Forissier	write_pmcr r2
798267e19bSJerome Forissier#endif
80edaf8c38SSumit Garg
81099918f6SSumit Garg#ifdef CFG_FTRACE_SUPPORT
82edaf8c38SSumit Garg	ldm	r0!, {r2}
83edaf8c38SSumit Garg	write_cntkctl r2
84edaf8c38SSumit Garg#endif
85abe38974SJens Wiklander	cps	#CPSR_MODE_MON
86abe38974SJens Wiklander	bx	lr
870160fec3SJerome ForissierEND_FUNC sm_restore_unbanked_regs
88abe38974SJens Wiklander
893f4d6849SJens Wiklander/*
90c822f03fSJens Wiklander * stack_tmp is used as stack, the top of the stack is reserved to hold
91c822f03fSJens Wiklander * struct sm_ctx, everything below is for normal stack usage. As several
92c822f03fSJens Wiklander * different CPU modes use the same stack it's important that a switch
93c822f03fSJens Wiklander * of CPU mode isn't done until one mode is done. This means FIQ, IRQ and
94c822f03fSJens Wiklander * asynchronous aborts have to be masked while using stack_tmp.
953f4d6849SJens Wiklander */
/*
 * sm_smc_entry - Monitor vector target for SMC exceptions.
 *
 * Runs in Monitor mode with FIQ/IRQ/async aborts masked by the exception
 * entry. Saves the calling world's context into struct sm_ctx on the
 * monitor stack, switches the SCR.NS/SCR.FIQ bits, and resumes the other
 * world (or, for returns from secure world, hands results back to the
 * normal world) via exception return (rfefd).
 */
96abe38974SJens WiklanderLOCAL_FUNC sm_smc_entry , :
97923c1f34SJens WiklanderUNWIND(	.cantunwind)
	/* Push mon_lr and mon_spsr, then r0-r7, into the current world's ctx */
98abe38974SJens Wiklander	srsdb	sp!, #CPSR_MODE_MON
993f4d6849SJens Wiklander	push	{r0-r7}
100abe38974SJens Wiklander
1013f4d6849SJens Wiklander	clrex		/* Clear the exclusive monitor */
102abe38974SJens Wiklander
103abe38974SJens Wiklander	/* Find out if we're doing a secure or non-secure entry */
104abe38974SJens Wiklander	read_scr r1
105abe38974SJens Wiklander	tst	r1, #SCR_NS
1063f4d6849SJens Wiklander	bne	.smc_from_nsec
107abe38974SJens Wiklander
1083f4d6849SJens Wiklander	/*
1093f4d6849SJens Wiklander	 * As we're coming from secure world (NS bit cleared) the stack
1103f4d6849SJens Wiklander	 * pointer points to sm_ctx.sec.r0 at this stage. After the
1113f4d6849SJens Wiklander	 * instruction below the stack pointer points to sm_ctx.
1123f4d6849SJens Wiklander	 */
1133f4d6849SJens Wiklander	sub	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
1143f4d6849SJens Wiklander
115abe38974SJens Wiklander	/* Save secure context */
1163f4d6849SJens Wiklander	add	r0, sp, #SM_CTX_SEC
1170160fec3SJerome Forissier	bl	sm_save_unbanked_regs
118abe38974SJens Wiklander
1193f4d6849SJens Wiklander	/*
1203f4d6849SJens Wiklander	 * On FIQ exit we're restoring the non-secure context unchanged, on
1213f4d6849SJens Wiklander	 * all other exits we're shifting r1-r4 from secure context into
1223f4d6849SJens Wiklander	 * r0-r3 in non-secure context.
1233f4d6849SJens Wiklander	 */
1243f4d6849SJens Wiklander	add	r8, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
1253f4d6849SJens Wiklander	ldm	r8, {r0-r4}
1263f4d6849SJens Wiklander	mov_imm	r9, TEESMC_OPTEED_RETURN_FIQ_DONE
1273f4d6849SJens Wiklander	cmp	r0, r9
1283f4d6849SJens Wiklander	addne	r8, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
1293f4d6849SJens Wiklander	stmne	r8, {r1-r4}
130abe38974SJens Wiklander
131abe38974SJens Wiklander	/* Restore non-secure context */
1323f4d6849SJens Wiklander	add	r0, sp, #SM_CTX_NSEC
1330160fec3SJerome Forissier	bl	sm_restore_unbanked_regs
1343f4d6849SJens Wiklander
1353f4d6849SJens Wiklander.sm_ret_to_nsec:
1363f4d6849SJens Wiklander	/*
1373f4d6849SJens Wiklander	 * Return to non-secure world
1383f4d6849SJens Wiklander	 */
1393f4d6849SJens Wiklander	add     r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
1403f4d6849SJens Wiklander	ldm	r0, {r8-r12}
141abe38974SJens Wiklander
14214d6d42bSJens Wiklander#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
14314d6d42bSJens Wiklander	/*
14414d6d42bSJens Wiklander	 * Prevent leaking information about which code has been executed.
14514d6d42bSJens Wiklander	 * This is required to be used together with
14614d6d42bSJens Wiklander	 * CFG_CORE_WORKAROUND_SPECTRE_BP to protect Cortex A15 CPUs too.
14714d6d42bSJens Wiklander	 *
14814d6d42bSJens Wiklander	 * CFG_CORE_WORKAROUND_SPECTRE_BP also invalidates the branch
14914d6d42bSJens Wiklander	 * predictor on affected CPUs. In the cases where an alternative
15014d6d42bSJens Wiklander	 * vector has been installed the branch predictor is already
15114d6d42bSJens Wiklander	 * invalidated so invalidating here again would be redundant, but
15214d6d42bSJens Wiklander	 * testing for that is more trouble than it's worth.
15314d6d42bSJens Wiklander	 */
15414d6d42bSJens Wiklander	write_bpiall
15514d6d42bSJens Wiklander#endif
15614d6d42bSJens Wiklander
157abe38974SJens Wiklander	/* Update SCR */
158abe38974SJens Wiklander	read_scr r0
159abe38974SJens Wiklander	orr	r0, r0, #(SCR_NS | SCR_FIQ) /* Set NS and FIQ bit in SCR */
160abe38974SJens Wiklander	write_scr r0
161bda4804cSJens Wiklander	/*
162bda4804cSJens Wiklander	 * isb not needed since we're doing an exception return below
163bda4804cSJens Wiklander	 * without dependency to the changes in SCR before that.
164bda4804cSJens Wiklander	 */
165abe38974SJens Wiklander
	/* Point sp at sm_ctx.nsec.r0 so pop/rfefd below unwind nsec state */
1663f4d6849SJens Wiklander	add	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
1673f4d6849SJens Wiklander	b	.sm_exit
168abe38974SJens Wiklander
1693f4d6849SJens Wiklander.smc_from_nsec:
1703f4d6849SJens Wiklander	/*
1713f4d6849SJens Wiklander	 * As we're coming from non-secure world (NS bit set) the stack
1723f4d6849SJens Wiklander	 * pointer points to sm_ctx.nsec.r0 at this stage. After the
1733f4d6849SJens Wiklander	 * instruction below the stack pointer points to sm_ctx.
1743f4d6849SJens Wiklander	 */
1753f4d6849SJens Wiklander	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
1763f4d6849SJens Wiklander
177abe38974SJens Wiklander	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
178abe38974SJens Wiklander	write_scr r1
179bda4804cSJens Wiklander	isb
180abe38974SJens Wiklander
	/* r8-r12 aren't covered by srsdb/push above; stash them explicitly */
1813f4d6849SJens Wiklander	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
1823f4d6849SJens Wiklander	stm	r0, {r8-r12}
1833f4d6849SJens Wiklander
	/* Let C code decide: r0 = &sm_ctx, returns an SM_EXIT_TO_* code */
1843f4d6849SJens Wiklander	mov	r0, sp
1853f4d6849SJens Wiklander	bl	sm_from_nsec
18665363c52SEtienne Carriere	cmp	r0, #SM_EXIT_TO_NON_SECURE
1873f4d6849SJens Wiklander	beq	.sm_ret_to_nsec
188abe38974SJens Wiklander
189abe38974SJens Wiklander	/*
1903f4d6849SJens Wiklander	 * Continue into secure world
191abe38974SJens Wiklander	 */
1923f4d6849SJens Wiklander	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
193abe38974SJens Wiklander
1943f4d6849SJens Wiklander.sm_exit:
	/* Restore r0-r7 and do the exception return from mon_lr/mon_spsr */
1953f4d6849SJens Wiklander	pop	{r0-r7}
196abe38974SJens Wiklander	rfefd	sp!
197abe38974SJens WiklanderEND_FUNC sm_smc_entry
198abe38974SJens Wiklander
199abe38974SJens Wiklander/*
200abe38974SJens Wiklander * FIQ handling
201abe38974SJens Wiklander *
2023f4d6849SJens Wiklander * Saves CPU context in the same way as sm_smc_entry() above. The CPU
2033f4d6849SJens Wiklander * context will later be restored by sm_smc_entry() when handling a return
204abe38974SJens Wiklander * from FIQ.
205abe38974SJens Wiklander */
/*
 * sm_fiq_entry - Monitor vector target for FIQ taken from normal world.
 *
 * Saves the non-secure context into struct sm_ctx, clears SCR.NS/SCR.FIQ,
 * and enters secure world at vector_fiq_entry by exception return. The
 * saved non-secure context is later restored by sm_smc_entry when the
 * secure FIQ handler issues TEESMC_OPTEED_RETURN_FIQ_DONE.
 */
206abe38974SJens WiklanderLOCAL_FUNC sm_fiq_entry , :
207923c1f34SJens WiklanderUNWIND(	.cantunwind)
208abe38974SJens Wiklander	/* FIQ has a +4 offset for lr compared to preferred return address */
209abe38974SJens Wiklander	sub	lr, lr, #4
2103f4d6849SJens Wiklander	/* sp points just past struct sm_sec_ctx */
211abe38974SJens Wiklander	srsdb	sp!, #CPSR_MODE_MON
2123f4d6849SJens Wiklander	push	{r0-r7}
2133f4d6849SJens Wiklander
2143f4d6849SJens Wiklander	clrex		/* Clear the exclusive monitor */
2153f4d6849SJens Wiklander
2163f4d6849SJens Wiklander	/*
2173f4d6849SJens Wiklander	 * As we're coming from non-secure world the stack pointer points
2183f4d6849SJens Wiklander	 * to sm_ctx.nsec.r0 at this stage. After the instruction below the
2193f4d6849SJens Wiklander	 * stack pointer points to sm_ctx.
2203f4d6849SJens Wiklander	 */
2213f4d6849SJens Wiklander	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
222abe38974SJens Wiklander
223abe38974SJens Wiklander	/* Update SCR */
224abe38974SJens Wiklander	read_scr r1
225f2dec49bSJens Wiklander	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
226abe38974SJens Wiklander	write_scr r1
227bda4804cSJens Wiklander	isb
228abe38974SJens Wiklander
229abe38974SJens Wiklander	/* Save non-secure context */
2303f4d6849SJens Wiklander	add	r0, sp, #SM_CTX_NSEC
2310160fec3SJerome Forissier	bl	sm_save_unbanked_regs
	/* r8-r12 aren't covered by srsdb/push above; stash them explicitly */
232dd24684eSJens Wiklander	add     r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
2333f4d6849SJens Wiklander	stm	r0!, {r8-r12}
234abe38974SJens Wiklander
2353f4d6849SJens Wiklander	/* Set FIQ entry */
236651d7537SJens Wiklander	ldr	r0, =vector_fiq_entry
2373f4d6849SJens Wiklander	str	r0, [sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)]
238abe38974SJens Wiklander
239abe38974SJens Wiklander	/* Restore secure context */
2403f4d6849SJens Wiklander	add	r0, sp, #SM_CTX_SEC
2410160fec3SJerome Forissier	bl	sm_restore_unbanked_regs
242abe38974SJens Wiklander
	/* Point sp at the saved mon_lr/mon_spsr pair for the rfefd below */
2433f4d6849SJens Wiklander	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)
2443f4d6849SJens Wiklander
245abe38974SJens Wiklander	rfefd	sp!
246abe38974SJens WiklanderEND_FUNC sm_fiq_entry
247abe38974SJens Wiklander
/*
 * sm_vect_table - default monitor vector table (installed in MVBAR).
 *
 * Only SMC and FIQ are expected in monitor mode; every other entry
 * deliberately spins so the stuck exception is visible in a debugger.
 * 32-byte alignment is required for a vector table in MVBAR.
 */
248fae8192bSJens WiklanderLOCAL_FUNC sm_vect_table , :, align=32
249923c1f34SJens WiklanderUNWIND(	.cantunwind)
2502ac6322dSJens Wiklander	b	.		/* Reset			*/
2512ac6322dSJens Wiklander	b	.		/* Undefined instruction	*/
2522ac6322dSJens Wiklander	b	sm_smc_entry	/* Secure monitor call		*/
2532ac6322dSJens Wiklander	b	.		/* Prefetch abort		*/
2542ac6322dSJens Wiklander	b	.		/* Data abort			*/
2552ac6322dSJens Wiklander	b	.		/* Reserved			*/
2562ac6322dSJens Wiklander	b	.		/* IRQ				*/
2572ac6322dSJens Wiklander	b	sm_fiq_entry	/* FIQ				*/
258fae8192bSJens WiklanderEND_FUNC sm_vect_table
2592ac6322dSJens Wiklander
2603bc90f3dSJens Wiklander#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
/*
 * vector_prologue_spectre - encode the taken vector entry in SP's low bits.
 *
 * Execution falls through the remaining add instructions, so entry N ends
 * up adding (7 - N) to SP; the dispatch code after the branch-predictor
 * invalidation recovers which exception was taken from SP's low 3 bits
 * (FIQ = 000, SMC = 101) and then restores SP before use.
 */
2612ac6322dSJens Wiklander	.macro vector_prologue_spectre
2623bc90f3dSJens Wiklander		/*
2632ac6322dSJens Wiklander		 * This depends on SP being 8 byte aligned, that is, the
2642ac6322dSJens Wiklander		 * lowest three bits in SP are zero.
2653bc90f3dSJens Wiklander		 *
2662ac6322dSJens Wiklander		 * The idea is to form a specific bit pattern in the lowest
2672ac6322dSJens Wiklander		 * three bits of SP depending on which entry in the vector
2682ac6322dSJens Wiklander		 * we enter via.  This is done by adding 1 to SP in each
2692ac6322dSJens Wiklander		 * entry but the last.
2703bc90f3dSJens Wiklander		 */
2713bc90f3dSJens Wiklander		add	sp, sp, #1	/* 7:111 Reset			*/
2723bc90f3dSJens Wiklander		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
2733bc90f3dSJens Wiklander		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
2743bc90f3dSJens Wiklander		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
2753bc90f3dSJens Wiklander		add	sp, sp, #1	/* 3:011 Data abort		*/
2763bc90f3dSJens Wiklander		add	sp, sp, #1	/* 2:010 Reserved		*/
2773bc90f3dSJens Wiklander		add	sp, sp, #1	/* 1:001 IRQ			*/
2783bc90f3dSJens Wiklander		nop			/* 0:000 FIQ			*/
2792ac6322dSJens Wiklander	.endm
2803bc90f3dSJens Wiklander
/*
 * sm_vect_table_a15 - Spectre-BP monitor vector for Cortex-A15.
 * Invalidates the branch predictor with ICIALLU, then falls through to
 * the shared dispatch at label 1: in sm_vect_table_bpiall below.
 */
281fae8192bSJens WiklanderLOCAL_FUNC sm_vect_table_a15 , :, align=32
282*39cb90b0SJens WiklanderUNWIND(	.cantunwind)
2832ac6322dSJens Wiklander	vector_prologue_spectre
2842ac6322dSJens Wiklander	/*
2852ac6322dSJens Wiklander	 * Invalidate the branch predictor for the current processor.
286ae9208f1SJens Wiklander	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
287ae9208f1SJens Wiklander	 * effective.
2882ac6322dSJens Wiklander	 * Note that the BPIALL instruction is not effective in
2892ac6322dSJens Wiklander	 * invalidating the branch predictor on Cortex-A15. For that CPU,
2902ac6322dSJens Wiklander	 * set ACTLR[0] to 1 during early processor initialisation, and
2912ac6322dSJens Wiklander	 * invalidate the branch predictor by performing an ICIALLU
2922ac6322dSJens Wiklander	 * instruction. See also:
2932ac6322dSJens Wiklander	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
2942ac6322dSJens Wiklander	 */
2952ac6322dSJens Wiklander	write_iciallu
2962ac6322dSJens Wiklander	isb
2972ac6322dSJens Wiklander	b	1f
298fae8192bSJens WiklanderEND_FUNC sm_vect_table_a15
2992ac6322dSJens Wiklander
300fae8192bSJens Wiklander
/*
 * sm_vect_table_bpiall - Spectre-BP monitor vector for CPUs where BPIALL
 * works (Cortex-A8/A9/A17). Invalidates the branch predictor, then
 * dispatches on the entry pattern left in SP by vector_prologue_spectre.
 */
301fae8192bSJens WiklanderLOCAL_FUNC sm_vect_table_bpiall , :, align=32
302*39cb90b0SJens WiklanderUNWIND(	.cantunwind)
3032ac6322dSJens Wiklander	vector_prologue_spectre
3043bc90f3dSJens Wiklander	/* Invalidate the branch predictor for the current processor. */
3053bc90f3dSJens Wiklander	write_bpiall
3063bc90f3dSJens Wiklander	isb
3073bc90f3dSJens Wiklander
3082ac6322dSJens Wiklander1:
3093bc90f3dSJens Wiklander	/*
3103bc90f3dSJens Wiklander	 * Only two exceptions normally occur, smc and fiq. With all
3113bc90f3dSJens Wiklander	 * other exceptions it's good enough to just spin, the lowest bits
3123bc90f3dSJens Wiklander	 * still tell which exception we're stuck with when attaching a
3133bc90f3dSJens Wiklander	 * debugger.
3143bc90f3dSJens Wiklander	 */
3153bc90f3dSJens Wiklander
3163bc90f3dSJens Wiklander	/* Test for FIQ, all the lowest bits of SP are supposed to be 0 */
3173bc90f3dSJens Wiklander	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
3183bc90f3dSJens Wiklander	beq	sm_fiq_entry
3193bc90f3dSJens Wiklander
3203bc90f3dSJens Wiklander	/* Test for SMC, xor the lowest bits of SP to be 0 */
3213bc90f3dSJens Wiklander	eor	sp, sp, #(BIT(0) | BIT(2))
3223bc90f3dSJens Wiklander	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
3233bc90f3dSJens Wiklander	beq	sm_smc_entry
3243bc90f3dSJens Wiklander
3253bc90f3dSJens Wiklander	/* unhandled exception */
3263bc90f3dSJens Wiklander	b	.
327fae8192bSJens WiklanderEND_FUNC sm_vect_table_bpiall
3283bc90f3dSJens Wiklander#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP*/
329abe38974SJens Wiklander
330abe38974SJens Wiklander/* void sm_init(vaddr_t stack_pointer); */
/*
 * sm_init - set up the secure monitor on this CPU.
 *
 * In:  r0 = top of the stack area reserved for struct sm_ctx.
 * Sets the Monitor mode SP, optionally zeroes CNTVOFF and disables the
 * cycle counter in PMCR (build-config dependent), then selects and
 * installs the monitor vector table in MVBAR. r1 holds the caller's
 * CPSR so the original mode is restored before returning.
 * Clobbers r0, r2, r3.
 */
331abe38974SJens WiklanderFUNC sm_init , :
332abe38974SJens Wiklander	/* Set monitor stack */
333abe38974SJens Wiklander	mrs	r1, cpsr
334abe38974SJens Wiklander	cps	#CPSR_MODE_MON
3353f4d6849SJens Wiklander	/* Point just beyond sm_ctx.sec */
336e72c941fSJens Wiklander	sub	sp, r0, #(SM_CTX_SIZE - SM_CTX_SEC_END)
3375051b512SPeng Fan
3385051b512SPeng Fan#ifdef CFG_INIT_CNTVOFF
	/* CNTVOFF is only writable from Monitor mode while SCR.NS == 1 */
3395051b512SPeng Fan	read_scr r0
3405051b512SPeng Fan	orr	r0, r0, #SCR_NS /* Set NS bit in SCR */
3415051b512SPeng Fan	write_scr r0
3425051b512SPeng Fan	isb
3435051b512SPeng Fan
3445051b512SPeng Fan	/*
3455051b512SPeng Fan	 * Accessing CNTVOFF:
3465051b512SPeng Fan	 * If the implementation includes the Virtualization Extensions
3475051b512SPeng Fan	 * this is a RW register, accessible from Hyp mode, and
3485051b512SPeng Fan	 * from Monitor mode when SCR.NS is set to 1.
3495051b512SPeng Fan	 * If the implementation includes the Security Extensions
3505051b512SPeng Fan	 * but not the Virtualization Extensions, an MCRR or MRRC to
3515051b512SPeng Fan	 * the CNTVOFF encoding is UNPREDICTABLE if executed in Monitor
3525051b512SPeng Fan	 * mode, regardless of the value of SCR.NS.
3535051b512SPeng Fan	 */
	/* Skip the write unless ID_PFR1 reports both GenTimer and Virt ext. */
3546cea5715SJens Wiklander	read_id_pfr1 r2
3555051b512SPeng Fan	mov	r3, r2
3565051b512SPeng Fan	ands    r3, r3, #IDPFR1_GENTIMER_MASK
3575051b512SPeng Fan	beq	.no_gentimer
3585051b512SPeng Fan	ands    r2, r2, #IDPFR1_VIRT_MASK
3595051b512SPeng Fan	beq	.no_gentimer
3605051b512SPeng Fan	mov	r2, #0
3615051b512SPeng Fan	write_cntvoff r2, r2
3625051b512SPeng Fan
3635051b512SPeng Fan.no_gentimer:
3645051b512SPeng Fan	bic	r0, r0, #SCR_NS /* Clr NS bit in SCR */
3655051b512SPeng Fan	write_scr r0
3665051b512SPeng Fan	isb
3675051b512SPeng Fan#endif
3688267e19bSJerome Forissier#ifdef CFG_SM_NO_CYCLE_COUNTING
	/* PMCR.DP: disable the cycle counter when event counting is prohibited */
3698267e19bSJerome Forissier	read_pmcr r0
3708267e19bSJerome Forissier	orr	r0, #PMCR_DP
3718267e19bSJerome Forissier	write_pmcr r0
3728267e19bSJerome Forissier#endif
	/* Back to the caller's original mode */
373abe38974SJens Wiklander	msr	cpsr, r1
374abe38974SJens Wiklander
3752ac6322dSJens Wiklander#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
3762ac6322dSJens Wiklander	/*
3772ac6322dSJens Wiklander	 * For unrecognized CPUs we fall back to the vector used for
3782ac6322dSJens Wiklander	 * unaffected CPUs. Cortex A-15 has special treatment compared to
3792ac6322dSJens Wiklander	 * the other affected Cortex CPUs.
3802ac6322dSJens Wiklander	 */
3812ac6322dSJens Wiklander	read_midr r1
3822ac6322dSJens Wiklander	ubfx	r2, r1, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
3832ac6322dSJens Wiklander	cmp	r2, #MIDR_IMPLEMENTER_ARM
3842ac6322dSJens Wiklander	bne	1f
3852ac6322dSJens Wiklander
3862ac6322dSJens Wiklander	ubfx	r2, r1, #MIDR_PRIMARY_PART_NUM_SHIFT, \
3872ac6322dSJens Wiklander			#MIDR_PRIMARY_PART_NUM_WIDTH
3882ac6322dSJens Wiklander
	/* Cortex-A8/A9/A17: BPIALL is effective, use the bpiall vector */
389fcdfb7f2SJens Wiklander	movw	r3, #CORTEX_A8_PART_NUM
3902ac6322dSJens Wiklander	cmp	r2, r3
391fcdfb7f2SJens Wiklander	movwne	r3, #CORTEX_A9_PART_NUM
3922ac6322dSJens Wiklander	cmpne	r2, r3
393fcdfb7f2SJens Wiklander	movwne	r3, #CORTEX_A17_PART_NUM
3942ac6322dSJens Wiklander	cmpne	r2, r3
3952ac6322dSJens Wiklander	ldreq	r0, =sm_vect_table_bpiall
3962ac6322dSJens Wiklander	beq	2f
3972ac6322dSJens Wiklander
	/* Cortex-A15: needs ICIALLU instead of BPIALL */
398fcdfb7f2SJens Wiklander	movw	r3, #CORTEX_A15_PART_NUM
3992ac6322dSJens Wiklander	cmp	r2, r3
4002ac6322dSJens Wiklander	ldreq	r0, =sm_vect_table_a15
4012ac6322dSJens Wiklander	beq	2f
4022ac6322dSJens Wiklander#endif
403abe38974SJens Wiklander	/* Set monitor vector (MVBAR) */
4042ac6322dSJens Wiklander1:	ldr	r0, =sm_vect_table
4052ac6322dSJens Wiklander2:	write_mvbar r0
406abe38974SJens Wiklander
4073f4d6849SJens Wiklander	bx	lr
408abe38974SJens WiklanderEND_FUNC sm_init
4093639b55fSJerome ForissierDECLARE_KEEP_PAGER sm_init
4103f4d6849SJens Wiklander
4113f4d6849SJens Wiklander
4123f4d6849SJens Wiklander/* struct sm_nsec_ctx *sm_get_nsec_ctx(void); */
/*
 * Return a pointer to this CPU's saved non-secure context, derived from
 * the Monitor mode SP. Briefly switches to Monitor mode to read sp;
 * r1 preserves the caller's CPSR so the original mode is restored.
 */
4133f4d6849SJens WiklanderFUNC sm_get_nsec_ctx , :
4143f4d6849SJens Wiklander	mrs	r1, cpsr
4153f4d6849SJens Wiklander	cps	#CPSR_MODE_MON
416e72c941fSJens Wiklander	/*
417e72c941fSJens Wiklander	 * As we're in secure mode mon_sp points just beyond sm_ctx.sec,
418e72c941fSJens Wiklander	 * which allows us to calculate the address of sm_ctx.nsec.
419e72c941fSJens Wiklander	 */
420e72c941fSJens Wiklander	add	r0, sp, #(SM_CTX_NSEC - SM_CTX_SEC_END)
4213f4d6849SJens Wiklander	msr	cpsr, r1
4223f4d6849SJens Wiklander
4233f4d6849SJens Wiklander	bx	lr
4243f4d6849SJens WiklanderEND_FUNC sm_get_nsec_ctx
425