/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm-defines.h>
#include <asm.S>
#include <keep.h>
#include <kernel/unwind.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>
#include <util.h>

	.section .text.sm_asm

FUNC sm_save_modes_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be saved from System mode */
	cps	#CPSR_MODE_SYS
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_IRQ
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_FIQ
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_ABT
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_UND
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_save_modes_regs
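
/*
 * A rough sketch (assumed to mirror struct sm_mode_regs in sm/sm.h, for
 * orientation only) of the buffer that sm_save_modes_regs() fills and
 * that sm_restore_modes_regs() below consumes:
 *
 *	struct sm_mode_regs {
 *		uint32_t usr_sp;
 *		uint32_t usr_lr;
 *		uint32_t irq_spsr;
 *		uint32_t irq_sp;
 *		uint32_t irq_lr;
 *		(the same spsr/sp/lr triplet for FIQ, SVC, ABT and UND)
 *	};
 */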

/* Restores the mode-specific registers */
FUNC sm_restore_modes_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be restored from System mode */
	cps	#CPSR_MODE_SYS
	ldm	r0!, {sp, lr}

	cps	#CPSR_MODE_IRQ
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_FIQ
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_SVC
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_ABT
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_UND
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_restore_modes_regs

/*
 * stack_tmp is used as the stack; the top of the stack is reserved to
 * hold struct sm_ctx, everything below is for normal stack usage. As
 * several different CPU modes are using the same stack it's important
 * that a switch of CPU mode isn't done until one mode is done with the
 * stack. This means FIQ, IRQ and asynchronous aborts have to be masked
 * while using stack_tmp.
 */
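
/*
 * A rough sketch of that layout, inferred from the offset constants
 * used below (not normative):
 *
 *	top of stack_tmp ->	+-----------------------+
 *				| sm_ctx.nsec		| <- SM_CTX_NSEC
 *				+-----------------------+
 *				| sm_ctx.sec		| <- SM_CTX_SEC
 *	struct sm_ctx ->	+-----------------------+
 *				| normal stack usage	|
 */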
LOCAL_FUNC sm_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/* Find out if we're doing a secure or non-secure entry */
	read_scr r1
	tst	r1, #SCR_NS
	bne	.smc_from_nsec

	/*
	 * As we're coming from secure world (NS bit cleared) the stack
	 * pointer points to sm_ctx.sec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

	/* Save secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_save_modes_regs

	/*
	 * On FIQ exit we're restoring the non-secure context unchanged, on
	 * all other exits we're shifting r1-r4 from secure context into
	 * r0-r3 in non-secure context.
	 */
	add	r8, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
	ldm	r8, {r0-r4}
	mov_imm	r9, TEESMC_OPTEED_RETURN_FIQ_DONE
	cmp	r0, r9
	addne	r8, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	stmne	r8, {r1-r4}

	/* Restore non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_restore_modes_regs

.sm_ret_to_nsec:
	/*
	 * Return to non-secure world
	 */
	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	ldm	r0, {r8-r12}

	/* Update SCR */
	read_scr r0
	orr	r0, r0, #(SCR_NS | SCR_FIQ) /* Set NS and FIQ bit in SCR */
	write_scr r0

	add	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	b	.sm_exit

.smc_from_nsec:
	/*
	 * As we're coming from non-secure world (NS bit set) the stack
	 * pointer points to sm_ctx.nsec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
	write_scr r1

	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	stm	r0, {r8-r12}

	mov	r0, sp
	bl	sm_from_nsec
	cmp	r0, #0
	beq	.sm_ret_to_nsec

	/*
	 * Continue into secure world
	 */
	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

.sm_exit:
	pop	{r0-r7}
	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_smc_entry
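
/*
 * A sketch (illustrative, not part of the build) of the exit convention
 * handled above: when secure world is done it issues an SMC with a
 * TEESMC_OPTEED_RETURN_* value in r0 and the values destined for normal
 * world r0-r3 in r1-r4, roughly:
 *
 *	mov_imm	r0, TEESMC_OPTEED_RETURN_CALL_DONE
 *	@ r1-r4 hold the results to deliver in normal world r0-r3
 *	smc	#0
 *
 * TEESMC_OPTEED_RETURN_FIQ_DONE is the exception: for that return the
 * non-secure context is restored unchanged.
 */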

/*
 * FIQ handling
 *
 * Saves CPU context in the same way as sm_smc_entry() above. The CPU
 * context will later be restored by sm_smc_entry() when handling a return
 * from FIQ.
 */
LOCAL_FUNC sm_fiq_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* FIQ has a +4 offset for lr compared to preferred return address */
	sub	lr, lr, #4
	/* sp points just past sm_ctx.nsec, see the comment below */
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/*
	 * As we're coming from non-secure world the stack pointer points
	 * to sm_ctx.nsec.r0 at this stage. After the instruction below the
	 * stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	/* Update SCR */
	read_scr r1
	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
	write_scr r1

	/* Save non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_save_modes_regs
	stm	r0!, {r8-r12}

	/* Set FIQ entry */
	ldr	r0, =(thread_vector_table + THREAD_VECTOR_TABLE_FIQ_ENTRY)
	str	r0, [sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)]

	/* Restore secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_restore_modes_regs

	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)

	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_fiq_entry
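
/*
 * Note, summarizing the flow above: the monitor enters secure world at
 * thread_vector_table's FIQ entry. Once secure world has dealt with the
 * FIQ it issues an SMC with r0 set to TEESMC_OPTEED_RETURN_FIQ_DONE,
 * which sm_smc_entry() recognizes and then restores the saved
 * non-secure context unchanged.
 */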

	.section .text.sm_vect_table
	.align	5
LOCAL_FUNC sm_vect_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.		/* Reset			*/
	b	.		/* Undefined instruction	*/
	b	sm_smc_entry	/* Secure monitor call		*/
	b	.		/* Prefetch abort		*/
	b	.		/* Data abort			*/
	b	.		/* Reserved			*/
	b	.		/* IRQ				*/
	b	sm_fiq_entry	/* FIQ				*/

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8 byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * we enter via.  This is done by adding 1 to SP in each
		 * entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset			*/
		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
		add	sp, sp, #1	/* 3:011 Data abort		*/
		add	sp, sp, #1	/* 2:010 Reserved		*/
		add	sp, sp, #1	/* 1:001 IRQ			*/
		nop			/* 0:000 FIQ			*/
	.endm
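
/*
 * Worked example of the decode at label 1: below: an SMC enters at the
 * third vector entry and executes the five remaining adds, leaving
 * 0b101 in the lowest bits of SP, while an FIQ enters at the last entry
 * and leaves 0b000. The dispatch code takes 0b000 as FIQ directly and
 * flips bits 0 and 2 so that the SMC pattern also becomes 0b000, which
 * restores the original 8 byte aligned SP in both cases.
 */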

	.align	5
sm_vect_table_a15:
	vector_prologue_spectre
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.align	5
sm_vect_table_bpiall:
	vector_prologue_spectre
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:
	/*
	 * Only two exceptions, SMC and FIQ, normally occur. For all other
	 * exceptions it's good enough to just spin; the lowest bits of SP
	 * still tell which exception we're stuck with when attaching a
	 * debugger.
	 */

	/* Test for FIQ, all the lowest bits of SP are supposed to be 0 */
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_fiq_entry

	/* Test for SMC, flip the SMC pattern so the lowest bits of SP become 0 */
	eor	sp, sp, #(BIT(0) | BIT(2))
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_smc_entry

	/* Unhandled exception */
	b	.
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP*/
UNWIND(	.fnend)
END_FUNC sm_vect_table

/* void sm_init(vaddr_t stack_pointer); */
FUNC sm_init , :
UNWIND(	.fnstart)
	/* Set monitor stack */
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	/* Point just beyond sm_ctx.sec */
	sub	sp, r0, #(SM_CTX_SIZE - SM_CTX_NSEC)

#ifdef CFG_INIT_CNTVOFF
	read_scr r0
	orr	r0, r0, #SCR_NS /* Set NS bit in SCR */
	write_scr r0
	isb

	/*
	 * Accessing CNTVOFF:
	 * If the implementation includes the Virtualization Extensions
	 * this is a RW register, accessible from Hyp mode, and
	 * from Monitor mode when SCR.NS is set to 1.
	 * If the implementation includes the Security Extensions
	 * but not the Virtualization Extensions, an MCRR or MRRC to
	 * the CNTVOFF encoding is UNPREDICTABLE if executed in Monitor
	 * mode, regardless of the value of SCR.NS.
	 */
	read_idpfr1 r2
	mov	r3, r2
	ands	r3, r3, #IDPFR1_GENTIMER_MASK
	beq	.no_gentimer
	ands	r2, r2, #IDPFR1_VIRT_MASK
	beq	.no_gentimer
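	/*
	 * Zero CNTVOFF. write_cntvoff is assumed to expand to an MCRR,
	 * so the 64-bit offset gets both its low and high words from r2.
	 */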
	mov	r2, #0
	write_cntvoff r2, r2

.no_gentimer:
	bic	r0, r0, #SCR_NS /* Clear NS bit in SCR */
	write_scr r0
	isb
#endif

	msr	cpsr, r1

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	/*
	 * For unrecognized CPUs we fall back to the vector used for
	 * unaffected CPUs. Cortex-A15 has special treatment compared to
	 * the other affected Cortex CPUs.
	 */
	read_midr r1
	ubfx	r2, r1, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
	cmp	r2, #MIDR_IMPLEMENTER_ARM
	bne	1f

	ubfx	r2, r1, #MIDR_PRIMARY_PART_NUM_SHIFT, \
			#MIDR_PRIMARY_PART_NUM_WIDTH

	movw	r3, #CORTEX_A8_PART_NUM
	cmp	r2, r3
	movwne	r3, #CORTEX_A9_PART_NUM
	cmpne	r2, r3
	movwne	r3, #CORTEX_A17_PART_NUM
	cmpne	r2, r3
	ldreq	r0, =sm_vect_table_bpiall
	beq	2f

	movw	r3, #CORTEX_A15_PART_NUM
	cmp	r2, r3
	ldreq	r0, =sm_vect_table_a15
	beq	2f
#endif
	/* Set monitor vector (MVBAR) */
1:	ldr	r0, =sm_vect_table
2:	write_mvbar r0

	bx	lr
UNWIND(	.fnend)
END_FUNC sm_init
KEEP_PAGER sm_init
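
/*
 * A minimal usage sketch (hypothetical, for illustration only): each
 * CPU calls sm_init() early during boot, before the first SMC or FIQ
 * can arrive, with the top of the stack that is to hold its struct
 * sm_ctx:
 *
 *	vaddr_t stack_top = ...;	(per-CPU temporary stack)
 *	sm_init(stack_top);
 */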

/* struct sm_nsec_ctx *sm_get_nsec_ctx(void); */
FUNC sm_get_nsec_ctx , :
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	/*
	 * As we're in secure mode mon_sp points just beyond sm_ctx.sec,
	 * which is the start of sm_ctx.nsec.
	 */
	mov	r0, sp
	msr	cpsr, r1

	bx	lr
END_FUNC sm_get_nsec_ctx
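
/*
 * A usage sketch (illustrative; field and constant names assumed from
 * sm/sm.h and arm.h): boot code typically primes the returned context's
 * mon_lr and mon_spsr, which decide where and in which mode the first
 * return to normal world lands:
 *
 *	struct sm_nsec_ctx *nsec_ctx = sm_get_nsec_ctx();
 *
 *	nsec_ctx->mon_lr = nsec_entry_addr;
 *	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
 */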