xref: /optee_os/core/arch/arm/sm/sm_a32.S (revision aaec75ec87470731e54ff9a1cbf5b72c0d6ee9bd)
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/unwind.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>
#include <util.h>

	.section .text.sm_asm

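/* Saves the mode specific registers */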
FUNC sm_save_modes_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be saved from system mode */
	cps	#CPSR_MODE_SYS
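	/* System mode shares sp and lr with user mode and has no spsr */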
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_IRQ
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_FIQ
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_ABT
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_UND
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_save_modes_regs

/* Restores the mode specific registers */
FUNC sm_restore_modes_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be restored from system mode */
	cps	#CPSR_MODE_SYS
	ldm	r0!, {sp, lr}

	cps	#CPSR_MODE_IRQ
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_FIQ
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_SVC
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_ABT
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_UND
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_restore_modes_regs

/*
 * stack_tmp is used as stack, the top of the stack is reserved to hold
 * struct sm_ctx, everything below is for normal stack usage. As several
 * different CPU modes use the same stack it's important that a switch
 * of CPU mode isn't done until one mode is done with the stack. This
 * means FIQ, IRQ and asynchronous aborts have to be masked while using
 * stack_tmp.
 */
LOCAL_FUNC sm_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
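	/* Save lr_mon, spsr_mon and r0-r7 on the monitor stack */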
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/* Find out if we're doing a secure or non-secure entry */
	read_scr r1
	tst	r1, #SCR_NS
	bne	.smc_from_nsec

	/*
	 * As we're coming from secure world (NS bit cleared) the stack
	 * pointer points to sm_ctx.sec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

	/* Save secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_save_modes_regs

	/*
	 * On FIQ exit we're restoring the non-secure context unchanged, on
	 * all other exits we're shifting r1-r4 from secure context into
	 * r0-r3 in non-secure context.
	 */
	add	r8, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
	ldm	r8, {r0-r4}
	mov_imm	r9, TEESMC_OPTEED_RETURN_FIQ_DONE
	cmp	r0, r9
	addne	r8, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	stmne	r8, {r1-r4}

	/* Restore non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_restore_modes_regs

.sm_ret_to_nsec:
	/*
	 * Return to non-secure world
	 */
	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	ldm	r0, {r8-r12}

	/* Update SCR */
	read_scr r0
	orr	r0, r0, #(SCR_NS | SCR_FIQ) /* Set NS and FIQ bit in SCR */
	write_scr r0
	/*
	 * isb not needed since we're doing an exception return below
	 * without dependency on the changes in SCR before that.
	 */

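	/* Point sp at sm_ctx.nsec.r0 for the pop and rfe in .sm_exit */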
	add	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	b	.sm_exit

.smc_from_nsec:
	/*
	 * As we're coming from non-secure world (NS bit set) the stack
	 * pointer points to sm_ctx.nsec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
	write_scr r1
	isb

	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	stm	r0, {r8-r12}

	mov	r0, sp
	bl	sm_from_nsec
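	/* A return value of zero means return directly to non-secure world */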
	cmp	r0, #0
	beq	.sm_ret_to_nsec

	/*
	 * Continue into secure world
	 */
	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
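	/* sp now points at sm_ctx.sec.r0 for the pop and rfe in .sm_exit */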

.sm_exit:
	pop	{r0-r7}
	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_smc_entry

/*
 * FIQ handling
 *
 * Saves CPU context in the same way as sm_smc_entry() above. The CPU
 * context will later be restored by sm_smc_entry() when handling a return
 * from FIQ.
 */
LOCAL_FUNC sm_fiq_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* FIQ has a +4 offset for lr compared to the preferred return address */
	sub	lr, lr, #4
	/* sp points just past struct sm_sec_ctx */
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/*
	 * As we're coming from non-secure world the stack pointer points
	 * to sm_ctx.nsec.r0 at this stage. After the instruction below the
	 * stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	/* Update SCR */
	read_scr r1
	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
	write_scr r1
	isb

	/* Save non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_save_modes_regs
	stm	r0!, {r8-r12}

	/* Set FIQ entry */
	ldr	r0, =(thread_vector_table + THREAD_VECTOR_TABLE_FIQ_ENTRY)
	str	r0, [sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)]

	/* Restore secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_restore_modes_regs

	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)
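	/* sp points at sm_ctx.sec.mon_lr, the rfe below enters the FIQ entry set above */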

	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_fiq_entry

	.section .text.sm_vect_table
	.align	5
LOCAL_FUNC sm_vect_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.		/* Reset			*/
	b	.		/* Undefined instruction	*/
	b	sm_smc_entry	/* Secure monitor call		*/
	b	.		/* Prefetch abort		*/
	b	.		/* Data abort			*/
	b	.		/* Reserved			*/
	b	.		/* IRQ				*/
	b	sm_fiq_entry	/* FIQ				*/

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8-byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * we enter via.  This is done by adding 1 to SP in each
		 * entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset			*/
		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
		add	sp, sp, #1	/* 3:011 Data abort		*/
		add	sp, sp, #1	/* 2:010 Reserved		*/
		add	sp, sp, #1	/* 1:001 IRQ			*/
		nop			/* 0:000 FIQ			*/
	.endm

	.align 5
sm_vect_table_a15:
	vector_prologue_spectre
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.align 5
sm_vect_table_bpiall:
	vector_prologue_spectre
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:
	/*
	 * Only two exceptions normally occur, SMC and FIQ. For all other
	 * exceptions it's good enough to just spin, the lowest bits of SP
	 * still tell which exception we're stuck in when attaching a
	 * debugger.
	 */

	/* Test for FIQ, for which all the lowest bits of SP are 0 */
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_fiq_entry

	/* Test for SMC, for which the lowest bits of SP are 0b101, XOR them to 0 */
	eor	sp, sp, #(BIT(0) | BIT(2))
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_smc_entry

	/* Unhandled exception */
	b	.
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP*/
UNWIND(	.fnend)
END_FUNC sm_vect_table

/* void sm_init(vaddr_t stack_pointer); */
FUNC sm_init , :
UNWIND(	.fnstart)
	/* Set monitor stack */
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	/* Point just beyond sm_ctx.sec */
	sub	sp, r0, #(SM_CTX_SIZE - SM_CTX_NSEC)

#ifdef CFG_INIT_CNTVOFF
	read_scr r0
	orr	r0, r0, #SCR_NS /* Set NS bit in SCR */
	write_scr r0
	isb

	/*
	 * Accessing CNTVOFF:
	 * If the implementation includes the Virtualization Extensions
	 * this is a RW register, accessible from Hyp mode, and
	 * from Monitor mode when SCR.NS is set to 1.
	 * If the implementation includes the Security Extensions
	 * but not the Virtualization Extensions, an MCRR or MRRC to
	 * the CNTVOFF encoding is UNPREDICTABLE if executed in Monitor
	 * mode, regardless of the value of SCR.NS.
	 */
	read_idpfr1 r2
	mov	r3, r2
	ands    r3, r3, #IDPFR1_GENTIMER_MASK
	beq	.no_gentimer
	ands    r2, r2, #IDPFR1_VIRT_MASK
	beq	.no_gentimer
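	/* CNTVOFF is a 64-bit register, the write below clears both halves */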
	mov	r2, #0
	write_cntvoff r2, r2

.no_gentimer:
	bic	r0, r0, #SCR_NS /* Clear NS bit in SCR */
	write_scr r0
	isb
#endif

	msr	cpsr, r1

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	/*
	 * For unrecognized CPUs we fall back to the vector used for
	 * unaffected CPUs. Cortex-A15 has special treatment compared to
	 * the other affected Cortex CPUs.
	 */
	read_midr r1
	ubfx	r2, r1, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
	cmp	r2, #MIDR_IMPLEMENTER_ARM
	bne	1f

	ubfx	r2, r1, #MIDR_PRIMARY_PART_NUM_SHIFT, \
			#MIDR_PRIMARY_PART_NUM_WIDTH

	movw	r3, #CORTEX_A8_PART_NUM
	cmp	r2, r3
	movwne	r3, #CORTEX_A9_PART_NUM
	cmpne	r2, r3
	movwne	r3, #CORTEX_A17_PART_NUM
	cmpne	r2, r3
	ldreq	r0, =sm_vect_table_bpiall
	beq	2f

	movw	r3, #CORTEX_A15_PART_NUM
	cmp	r2, r3
	ldreq	r0, =sm_vect_table_a15
	beq	2f
#endif
	/* Set monitor vector (MVBAR) */
1:	ldr	r0, =sm_vect_table
2:	write_mvbar r0

	bx	lr
UNWIND(	.fnend)
END_FUNC sm_init
KEEP_PAGER sm_init


/* struct sm_nsec_ctx *sm_get_nsec_ctx(void); */
FUNC sm_get_nsec_ctx , :
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	mov	r0, sp
	msr	cpsr, r1

	/*
	 * As we're in secure mode mon_sp points just beyond sm_ctx.sec,
	 * which is sm_ctx.nsec
	 */
	bx	lr
END_FUNC sm_get_nsec_ctx