xref: /optee_os/core/arch/arm/sm/sm_a32.S (revision 8bbd9b374a51a1b8617796aae8a70c271543357f)
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/unwind.h>
#include <sm/optee_smc.h>
#include <sm/sm.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>
#include <util.h>

#define SM_CTX_SEC_END	(SM_CTX_SEC + SM_CTX_SEC_SIZE)

	.section .text.sm_asm

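/*
 * Saves the banked registers of the non-monitor modes: sp and lr for
 * User/System mode and spsr, sp and lr for IRQ, FIQ, Supervisor, Abort
 * and Undefined mode. r0 points at the save area to fill (laid out to
 * match the unbanked register area of struct sm_ctx, see sm/sm.h) and is
 * advanced past the saved registers. Clobbers r2, returns in Monitor
 * mode.
 */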
FUNC sm_save_unbanked_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be saved from System mode */
	cps	#CPSR_MODE_SYS
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_IRQ
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_FIQ
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_ABT
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_UND
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

#ifdef CFG_SM_NO_CYCLE_COUNTING
	read_pmcr r2
	stm	r0!, {r2}
#endif
	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_save_unbanked_regs

/* Restores the mode-specific registers saved by sm_save_unbanked_regs */
FUNC sm_restore_unbanked_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be restored from System mode */
	cps	#CPSR_MODE_SYS
	ldm	r0!, {sp, lr}

	cps	#CPSR_MODE_IRQ
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_FIQ
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_SVC
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_ABT
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_UND
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

#ifdef CFG_SM_NO_CYCLE_COUNTING
	ldm	r0!, {r2}
	write_pmcr r2
#endif
	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_restore_unbanked_regs

/*
 * stack_tmp is used as the stack, with the top of the stack reserved to
 * hold struct sm_ctx; everything below is for normal stack usage. Since
 * several different CPU modes use the same stack it's important that a
 * switch of CPU mode isn't done until the current mode is done with the
 * stack. This means FIQ, IRQ and asynchronous abort have to be masked
 * while using stack_tmp.
 */
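/*
 * struct sm_ctx (see sm/sm.h) holds one secure (sm_ctx.sec) and one
 * non-secure (sm_ctx.nsec) register context; the SM_CTX_* and
 * SM_{SEC,NSEC}_CTX_* offsets used below are generated from those
 * structs via <generated/asm-defines.h>.
 */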
LOCAL_FUNC sm_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

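	/*
	 * Clearing the local exclusive monitor keeps an exclusive
	 * load/store pair that is split across this world switch from
	 * falsely succeeding on stale state.
	 */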
	clrex		/* Clear the exclusive monitor */

	/* Find out if we're doing a secure or non-secure entry */
	read_scr r1
	tst	r1, #SCR_NS
	bne	.smc_from_nsec

	/*
	 * As we're coming from secure world (NS bit cleared) the stack
	 * pointer points to sm_ctx.sec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

	/* Save secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_save_unbanked_regs

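	/*
	 * Per the convention in teesmc_opteed.h, secure world returns to
	 * the monitor with a TEESMC_OPTEED_RETURN_* code in r0 and the
	 * values destined for normal world r0-r3 in r1-r4.
	 */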
	/*
	 * On FIQ exit we're restoring the non-secure context unchanged, on
	 * all other exits we're shifting r1-r4 from secure context into
	 * r0-r3 in non-secure context.
	 */
	add	r8, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
	ldm	r8, {r0-r4}
	mov_imm	r9, TEESMC_OPTEED_RETURN_FIQ_DONE
	cmp	r0, r9
	addne	r8, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	stmne	r8, {r1-r4}

	/* Restore non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_restore_unbanked_regs

.sm_ret_to_nsec:
	/*
	 * Return to non-secure world
	 */
	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	ldm	r0, {r8-r12}

	/* Update SCR */
	read_scr r0
	orr	r0, r0, #(SCR_NS | SCR_FIQ) /* Set NS and FIQ bit in SCR */
	write_scr r0
	/*
	 * isb not needed since we're doing an exception return below
	 * without any dependency on the changes to SCR before that.
	 */

	add	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	b	.sm_exit

.smc_from_nsec:
	/*
	 * As we're coming from non-secure world (NS bit set) the stack
	 * pointer points to sm_ctx.nsec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
	write_scr r1
	isb

	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	stm	r0, {r8-r12}

	mov	r0, sp
	bl	sm_from_nsec
	cmp	r0, #SM_EXIT_TO_NON_SECURE
	beq	.sm_ret_to_nsec

	/*
	 * Continue into secure world
	 */
	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

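	/*
	 * Common exit: pop r0-r7 and do an exception return. rfefd pops
	 * the return address and SPSR stored by srsdb at entry, and the
	 * SCR.NS value set up above selects which world we resume in.
	 */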
.sm_exit:
	pop	{r0-r7}
	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_smc_entry

/*
 * FIQ handling
 *
 * Saves CPU context in the same way as sm_smc_entry() above. The CPU
 * context will later be restored by sm_smc_entry() when handling a return
 * from FIQ.
 */
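/*
 * FIQ is routed to the monitor only while normal world runs: SCR.FIQ is
 * set on exit to non-secure state and cleared again on entry to secure
 * state. The FIQ is handed over to secure world by letting the exception
 * return below land on the FIQ entry of thread_vector_table.
 */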
LOCAL_FUNC sm_fiq_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* FIQ has a +4 offset for lr compared to the preferred return address */
	sub	lr, lr, #4
	/* sp points just past sm_ctx.nsec (we arrive from normal world) */
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/*
	 * As we're coming from non-secure world the stack pointer points
	 * to sm_ctx.nsec.r0 at this stage. After the instruction below the
	 * stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	/* Update SCR */
	read_scr r1
	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
	write_scr r1
	isb

	/* Save non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_save_unbanked_regs
	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	stm	r0!, {r8-r12}

	/* Set FIQ entry */
	ldr	r0, =(thread_vector_table + THREAD_VECTOR_TABLE_FIQ_ENTRY)
	str	r0, [sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)]

	/* Restore secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_restore_unbanked_regs

	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)

	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_fiq_entry

	.section .text.sm_vect_table
	.align	5
LOCAL_FUNC sm_vect_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.		/* Reset			*/
	b	.		/* Undefined instruction	*/
	b	sm_smc_entry	/* Secure monitor call		*/
	b	.		/* Prefetch abort		*/
	b	.		/* Data abort			*/
	b	.		/* Reserved			*/
	b	.		/* IRQ				*/
	b	sm_fiq_entry	/* FIQ				*/

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8-byte aligned, that is, the
		 * lowest three bits of SP are zero.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * we enter via. This is done by adding 1 to SP in each
		 * entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset			*/
		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
		add	sp, sp, #1	/* 3:011 Data abort		*/
		add	sp, sp, #1	/* 2:010 Reserved		*/
		add	sp, sp, #1	/* 1:001 IRQ			*/
		nop			/* 0:000 FIQ			*/
	.endm
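	/*
	 * For example, an SMC enters at vector offset 0x08 and falls
	 * through five add instructions, leaving 0b101 in the low bits of
	 * SP, while an FIQ enters at the final nop and leaves them 0b000.
	 * The dispatch code after the branch predictor invalidation
	 * decodes and clears these bits before SP is used again.
	 */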

	.align 5
sm_vect_table_a15:
	vector_prologue_spectre
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.align 5
sm_vect_table_bpiall:
	vector_prologue_spectre
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:
	/*
	 * Only two exceptions normally occur, SMC and FIQ. For all other
	 * exceptions it's good enough to just spin; the lowest bits of SP
	 * still tell which exception we're stuck with when attaching a
	 * debugger.
	 */

	/* Test for FIQ, for which all the lowest bits of SP are 0 */
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_fiq_entry

	/* Test for SMC, flip bits 0 and 2 so the SMC pattern in SP becomes 0 */
	eor	sp, sp, #(BIT(0) | BIT(2))
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_smc_entry

	/* unhandled exception */
	b	.
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP*/
UNWIND(	.fnend)
END_FUNC sm_vect_table

/* void sm_init(vaddr_t stack_pointer); */
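/*
 * Presumably called once per CPU during early boot with the top of that
 * CPU's temporary stack: sets up the monitor stack pointer and installs
 * the monitor vector table in MVBAR.
 */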
FUNC sm_init , :
UNWIND(	.fnstart)
	/* Set monitor stack */
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	/* Point just beyond sm_ctx.sec */
	sub	sp, r0, #(SM_CTX_SIZE - SM_CTX_SEC_END)

#ifdef CFG_INIT_CNTVOFF
	read_scr r0
	orr	r0, r0, #SCR_NS /* Set NS bit in SCR */
	write_scr r0
	isb

	/*
	 * Accessing CNTVOFF:
	 * If the implementation includes the Virtualization Extensions
	 * this is a RW register, accessible from Hyp mode, and
	 * from Monitor mode when SCR.NS is set to 1.
	 * If the implementation includes the Security Extensions
	 * but not the Virtualization Extensions, an MCRR or MRRC to
	 * the CNTVOFF encoding is UNPREDICTABLE if executed in Monitor
	 * mode, regardless of the value of SCR.NS.
	 */
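	/*
	 * Hence CNTVOFF is only cleared when ID_PFR1 reports both a
	 * generic timer and the Virtualization Extensions.
	 */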
	read_id_pfr1 r2
	mov	r3, r2
	ands	r3, r3, #IDPFR1_GENTIMER_MASK
	beq	.no_gentimer
	ands	r2, r2, #IDPFR1_VIRT_MASK
	beq	.no_gentimer
	mov	r2, #0
	write_cntvoff r2, r2

.no_gentimer:
	bic	r0, r0, #SCR_NS /* Clr NS bit in SCR */
	write_scr r0
	isb
#endif
#ifdef CFG_SM_NO_CYCLE_COUNTING
	read_pmcr r0
	orr	r0, #PMCR_DP
	write_pmcr r0
#endif
	msr	cpsr, r1

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	/*
	 * For unrecognized CPUs we fall back to the vector used for
	 * unaffected CPUs. Cortex-A15 gets special treatment compared to
	 * the other affected Cortex CPUs.
	 */
	read_midr r1
	ubfx	r2, r1, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
	cmp	r2, #MIDR_IMPLEMENTER_ARM
	bne	1f

	ubfx	r2, r1, #MIDR_PRIMARY_PART_NUM_SHIFT, \
			#MIDR_PRIMARY_PART_NUM_WIDTH

	movw	r3, #CORTEX_A8_PART_NUM
	cmp	r2, r3
	movwne	r3, #CORTEX_A9_PART_NUM
	cmpne	r2, r3
	movwne	r3, #CORTEX_A17_PART_NUM
	cmpne	r2, r3
	ldreq	r0, =sm_vect_table_bpiall
	beq	2f

	movw	r3, #CORTEX_A15_PART_NUM
	cmp	r2, r3
	ldreq	r0, =sm_vect_table_a15
	beq	2f
#endif
	/* Set monitor vector (MVBAR) */
1:	ldr	r0, =sm_vect_table
2:	write_mvbar r0

	bx	lr
UNWIND(	.fnend)
END_FUNC sm_init
KEEP_PAGER sm_init


/* struct sm_nsec_ctx *sm_get_nsec_ctx(void); */
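/*
 * Returns a pointer to the non-secure context saved at the latest entry
 * from normal world, presumably so callers can inspect or set up the
 * normal world register state before the next return to it.
 */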
FUNC sm_get_nsec_ctx , :
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	/*
	 * As we're in secure mode mon_sp points just beyond sm_ctx.sec,
	 * which allows us to calculate the address of sm_ctx.nsec.
	 */
	add	r0, sp, #(SM_CTX_NSEC - SM_CTX_SEC_END)
	msr	cpsr, r1

	bx	lr
END_FUNC sm_get_nsec_ctx
