/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/unwind.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>
#include <util.h>

#define SM_CTX_SEC_END	(SM_CTX_SEC + SM_CTX_SEC_SIZE)
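
/*
 * Rough sketch of the context layout assumed below; the authoritative
 * definition is struct sm_ctx (see sm/sm.h), whose field offsets are
 * exported via generated/asm-defines.h:
 *
 *	struct sm_ctx {
 *		struct sm_sec_ctx sec;		at offset SM_CTX_SEC
 *		struct sm_nsec_ctx nsec;	at offset SM_CTX_NSEC
 *	};
 *
 * Each world context ends with r0-r7 followed by mon_lr and mon_spsr
 * (the non-secure context also stores r8-r12 just before r0), which is
 * the frame that the srsdb/push pairs below create and the pop/rfefd
 * pairs consume.
 */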

	.section .text.sm_asm
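
/*
 * The registers saved and restored here (per-mode sp, lr and spsr) are
 * banked per CPU mode but shared ("unbanked") between the secure and
 * non-secure worlds, so the monitor has to switch into each mode in
 * turn and save or restore its copy on every world switch.
 */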

FUNC sm_save_unbanked_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be saved from system mode */
	cps	#CPSR_MODE_SYS
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_IRQ
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_FIQ
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_ABT
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_UND
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

#ifdef CFG_SM_NO_CYCLE_COUNTING
	read_pmcr r2
	stm	r0!, {r2}
#endif
	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_save_unbanked_regs

/* Restores the mode-specific registers */
FUNC sm_restore_unbanked_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be restored from system mode */
	cps	#CPSR_MODE_SYS
	ldm	r0!, {sp, lr}

	cps	#CPSR_MODE_IRQ
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_FIQ
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_SVC
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_ABT
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_UND
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

#ifdef CFG_SM_NO_CYCLE_COUNTING
	ldm	r0!, {r2}
	write_pmcr r2
#endif
	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_restore_unbanked_regs

/*
 * stack_tmp is used as the stack; the top of the stack is reserved to
 * hold struct sm_ctx, everything below is for normal stack usage. Since
 * several different CPU modes use the same stack it is important not to
 * switch CPU mode until the current mode is done with the stack. This
 * means FIQ, IRQ and asynchronous abort have to be masked while using
 * stack_tmp.
 */
LOCAL_FUNC sm_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}
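	/*
	 * srsdb stored the monitor lr and spsr, and the push above stored
	 * r0-r7 below them. Together they fill in the r0-r7, mon_lr and
	 * mon_spsr fields at the end of the sm_sec_ctx or sm_nsec_ctx of
	 * the world we came from.
	 */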

	clrex		/* Clear the exclusive monitor */

	/* Find out if we're doing a secure or non-secure entry */
	read_scr r1
	tst	r1, #SCR_NS
	bne	.smc_from_nsec

	/*
	 * As we're coming from secure world (NS bit cleared) the stack
	 * pointer points to sm_ctx.sec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

	/* Save secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_save_unbanked_regs

	/*
	 * On FIQ exit we're restoring the non-secure context unchanged,
	 * on all other exits we're shifting r1-r4 from the secure context
	 * into r0-r3 in the non-secure context.
	 */
	add	r8, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
	ldm	r8, {r0-r4}
	mov_imm	r9, TEESMC_OPTEED_RETURN_FIQ_DONE
	cmp	r0, r9
	addne	r8, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	stmne	r8, {r1-r4}
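
	/*
	 * For example: a secure exit with TEESMC_OPTEED_RETURN_CALL_DONE
	 * in r0 and the OPTEE_SMC results in r1-r4 resumes the normal
	 * world with those results in r0-r3.
	 */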

	/* Restore non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_restore_unbanked_regs

.sm_ret_to_nsec:
	/*
	 * Return to non-secure world
	 */
	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	ldm	r0, {r8-r12}

	/* Update SCR */
	read_scr r0
	orr	r0, r0, #(SCR_NS | SCR_FIQ) /* Set NS and FIQ bit in SCR */
	write_scr r0
	/*
	 * isb not needed since we're doing an exception return below
	 * without any dependency on the changes in SCR before that.
	 */

	add	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	b	.sm_exit

.smc_from_nsec:
	/*
	 * As we're coming from non-secure world (NS bit set) the stack
	 * pointer points to sm_ctx.nsec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
	write_scr r1
	isb

	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	stm	r0, {r8-r12}

	mov	r0, sp
	bl	sm_from_nsec
	cmp	r0, #0
	beq	.sm_ret_to_nsec

	/*
	 * Continue into secure world
	 */
	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

.sm_exit:
	pop	{r0-r7}
	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_smc_entry

/*
 * FIQ handling
 *
 * Saves CPU context in the same way as sm_smc_entry() above. The CPU
 * context will later be restored by sm_smc_entry() when handling a return
 * from FIQ.
 */
LOCAL_FUNC sm_fiq_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* FIQ has a +4 offset for lr compared to the preferred return address */
	sub	lr, lr, #4
	/* sp points just past sm_ctx.nsec, FIQ is only taken here from non-secure world */
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/*
	 * As we're coming from non-secure world the stack pointer points
	 * to sm_ctx.nsec.r0 at this stage. After the instruction below the
	 * stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	/* Update SCR */
	read_scr r1
	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
	write_scr r1
	isb

	/* Save non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_save_unbanked_regs
	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	stm	r0!, {r8-r12}

	/* Set FIQ entry */
	ldr	r0, =(thread_vector_table + THREAD_VECTOR_TABLE_FIQ_ENTRY)
	str	r0, [sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)]
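	/*
	 * mon_lr is the address the rfefd below returns to, so instead of
	 * resuming where the secure world last left off we enter it at
	 * the FIQ entry of thread_vector_table.
	 */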

	/* Restore secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_restore_unbanked_regs

	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)

	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_fiq_entry

	.section .text.sm_vect_table
	.align	5
LOCAL_FUNC sm_vect_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.		/* Reset			*/
	b	.		/* Undefined instruction	*/
	b	sm_smc_entry	/* Secure monitor call		*/
	b	.		/* Prefetch abort		*/
	b	.		/* Data abort			*/
	b	.		/* Reserved			*/
	b	.		/* IRQ				*/
	b	sm_fiq_entry	/* FIQ				*/

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8 byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * table we enter via. This is done by adding 1 to SP in
		 * each entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset			*/
		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
		add	sp, sp, #1	/* 3:011 Data abort		*/
		add	sp, sp, #1	/* 2:010 Reserved		*/
		add	sp, sp, #1	/* 1:001 IRQ			*/
		nop			/* 0:000 FIQ			*/
	.endm
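
	/*
	 * Worked example: an SMC enters at the third slot and executes
	 * five add instructions (its own slot plus the four below it) and
	 * the final nop, leaving SP incremented by 5 (0b101). An FIQ
	 * enters at the last slot and leaves SP unchanged (0b000).
	 */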

	.align	5
sm_vect_table_a15:
	vector_prologue_spectre
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.align	5
sm_vect_table_bpiall:
	vector_prologue_spectre
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:
	/*
	 * Only two exceptions normally occur, SMC and FIQ. For all other
	 * exceptions it's good enough to just spin, the lowest bits of SP
	 * still tell which exception we're stuck in when attaching a
	 * debugger.
	 */

	/* Test for FIQ, the lowest three bits of SP are 0 only for FIQ */
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_fiq_entry

	/* Test for SMC, XOR out the SMC pattern (0b101) from the lowest bits of SP */
	eor	sp, sp, #(BIT(0) | BIT(2))
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_smc_entry
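
	/*
	 * In the SMC case the eor above also restores SP: clearing the
	 * 0b101 pattern puts SP back at its original 8 byte aligned value
	 * before sm_smc_entry runs. In the FIQ case SP was never changed.
	 */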

	/* Unhandled exception, just spin */
	b	.
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP*/
UNWIND(	.fnend)
END_FUNC sm_vect_table

/* void sm_init(vaddr_t stack_pointer); */
FUNC sm_init , :
UNWIND(	.fnstart)
	/* Set monitor stack */
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	/* Point just beyond sm_ctx.sec */
	sub	sp, r0, #(SM_CTX_SIZE - SM_CTX_SEC_END)
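	/*
	 * stack_pointer is expected to point just past the struct sm_ctx
	 * reserved at the top of the stack (see the comment above
	 * sm_smc_entry), so monitor sp ends up where sm_smc_entry expects
	 * it when the secure world makes its first SMC.
	 */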

#ifdef CFG_INIT_CNTVOFF
	read_scr r0
	orr	r0, r0, #SCR_NS /* Set NS bit in SCR */
	write_scr r0
	isb

	/*
	 * Accessing CNTVOFF:
	 * If the implementation includes the Virtualization Extensions
	 * this is a RW register, accessible from Hyp mode, and
	 * from Monitor mode when SCR.NS is set to 1.
	 * If the implementation includes the Security Extensions
	 * but not the Virtualization Extensions, an MCRR or MRRC to
	 * the CNTVOFF encoding is UNPREDICTABLE if executed in Monitor
	 * mode, regardless of the value of SCR.NS.
	 */
	read_id_pfr1 r2
	mov	r3, r2
	ands	r3, r3, #IDPFR1_GENTIMER_MASK
	beq	.no_gentimer
	ands	r2, r2, #IDPFR1_VIRT_MASK
	beq	.no_gentimer
	mov	r2, #0
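	/* CNTVOFF is a 64-bit register, passing r2 twice zeroes both halves */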
	write_cntvoff r2, r2

.no_gentimer:
	bic	r0, r0, #SCR_NS /* Clear NS bit in SCR */
	write_scr r0
	isb
#endif
#ifdef CFG_SM_NO_CYCLE_COUNTING
	read_pmcr r0
	orr	r0, #PMCR_DP
	write_pmcr r0
#endif
	msr	cpsr, r1

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	/*
	 * For unrecognized CPUs we fall back to the vector used for
	 * unaffected CPUs. Cortex-A15 gets special treatment compared to
	 * the other affected Cortex CPUs.
	 */
	read_midr r1
	ubfx	r2, r1, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
	cmp	r2, #MIDR_IMPLEMENTER_ARM
	bne	1f

	ubfx	r2, r1, #MIDR_PRIMARY_PART_NUM_SHIFT, \
			#MIDR_PRIMARY_PART_NUM_WIDTH

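	/*
	 * The movwne/cmpne chain below compares the part number against
	 * Cortex-A8, A9 and A17 in turn, leaving EQ set if any of them
	 * matched.
	 */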
	movw	r3, #CORTEX_A8_PART_NUM
	cmp	r2, r3
	movwne	r3, #CORTEX_A9_PART_NUM
	cmpne	r2, r3
	movwne	r3, #CORTEX_A17_PART_NUM
	cmpne	r2, r3
	ldreq	r0, =sm_vect_table_bpiall
	beq	2f

	movw	r3, #CORTEX_A15_PART_NUM
	cmp	r2, r3
	ldreq	r0, =sm_vect_table_a15
	beq	2f
#endif
	/* Set monitor vector (MVBAR) */
1:	ldr	r0, =sm_vect_table
2:	write_mvbar r0

	bx	lr
END_FUNC sm_init
KEEP_PAGER sm_init

/* struct sm_nsec_ctx *sm_get_nsec_ctx(void); */
FUNC sm_get_nsec_ctx , :
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	/*
	 * As we're in secure mode mon_sp points just beyond sm_ctx.sec,
	 * which allows us to calculate the address of sm_ctx.nsec.
	 */
	add	r0, sp, #(SM_CTX_NSEC - SM_CTX_SEC_END)
	msr	cpsr, r1

	bx	lr
END_FUNC sm_get_nsec_ctx
422