xref: /optee_os/core/arch/arm/sm/sm_a32.S (revision df24e6517b6454cf906c16979ea0e7546c5c99d5)
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/unwind.h>
#include <sm/optee_smc.h>
#include <sm/sm.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>
#include <util.h>

#define SM_CTX_SEC_END	(SM_CTX_SEC + SM_CTX_SEC_SIZE)

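/*
 * save_regs mode:
 * Switches to the given CPU mode and stores that mode's SPSR, SP and LR
 * at [r0], advancing r0 past the three words. Used by
 * sm_save_unbanked_regs below.
 */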
	.macro save_regs mode
	cps	\mode
	mrs	r2, spsr
	str	r2, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4
	.endm

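/*
 * Saves the registers that are not banked between the secure and
 * non-secure states: user mode SP/LR, then SPSR/SP/LR for IRQ, FIQ,
 * SVC, ABT and UND mode, optionally followed by PMCR and CNTKCTL.
 * r0 points at the area to save to; the store order has to match
 * sm_restore_unbanked_regs below and the corresponding context struct
 * in <sm/sm.h>.
 */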
FUNC sm_save_unbanked_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be saved from System mode */
	cps	#CPSR_MODE_SYS
	str	sp, [r0], #4
	str	lr, [r0], #4

	save_regs	#CPSR_MODE_IRQ
	save_regs	#CPSR_MODE_FIQ
	save_regs	#CPSR_MODE_SVC
	save_regs	#CPSR_MODE_ABT
	save_regs	#CPSR_MODE_UND

#ifdef CFG_SM_NO_CYCLE_COUNTING
	read_pmcr r2
	stm	r0!, {r2}
#endif

#ifdef CFG_FTRACE_SUPPORT
	read_cntkctl r2
	stm	r0!, {r2}
#endif
	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_save_unbanked_regs

	.macro restore_regs mode
	cps	\mode
	ldr	r2, [r0], #4
	ldr	sp, [r0], #4
	ldr	lr, [r0], #4
	msr	spsr_fsxc, r2
	.endm

/* Restores the mode specific registers */
FUNC sm_restore_unbanked_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be restored from System mode */
	cps	#CPSR_MODE_SYS
	ldr	sp, [r0], #4
	ldr	lr, [r0], #4

	restore_regs	#CPSR_MODE_IRQ
	restore_regs	#CPSR_MODE_FIQ
	restore_regs	#CPSR_MODE_SVC
	restore_regs	#CPSR_MODE_ABT
	restore_regs	#CPSR_MODE_UND

#ifdef CFG_SM_NO_CYCLE_COUNTING
	ldm	r0!, {r2}
	write_pmcr r2
#endif

#ifdef CFG_FTRACE_SUPPORT
	ldm	r0!, {r2}
	write_cntkctl r2
#endif
	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_restore_unbanked_regs

/*
 * stack_tmp is used as the stack. The top of the stack is reserved to
 * hold struct sm_ctx, everything below is for normal stack usage. As
 * several different CPU modes use the same stack it's important that a
 * switch of CPU mode isn't done until one mode is done with it. This
 * means FIQ, IRQ and asynchronous aborts have to be masked while using
 * stack_tmp.
 */
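/*
 * Illustrative sketch of the context kept at the top of stack_tmp
 * (member order, padding and some field names here are assumptions; the
 * authoritative layout is struct sm_ctx in <sm/sm.h> together with the
 * SM_CTX_*, SM_SEC_CTX_* and SM_NSEC_CTX_* offsets in
 * generated/asm-defines.h):
 *
 *	struct sm_ctx {
 *		struct sm_sec_ctx sec;    // unbanked regs, r0-r7,
 *					  // monitor return state
 *		struct sm_nsec_ctx nsec;  // unbanked regs, r8-r12, r0-r7,
 *					  // monitor return state
 *	};
 *
 * While in secure state the monitor stack pointer is kept pointing just
 * past sm_ctx.sec (SM_CTX_SEC_END), see sm_init() below.
 */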
LOCAL_FUNC sm_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/* Find out if we're doing a secure or non-secure entry */
	read_scr r1
	tst	r1, #SCR_NS
	bne	.smc_from_nsec

	/*
	 * As we're coming from secure world (NS bit cleared) the stack
	 * pointer points to sm_ctx.sec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

	/* Save secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_save_unbanked_regs

	/*
	 * On FIQ exit we're restoring the non-secure context unchanged, on
	 * all other exits we're shifting r1-r4 from the secure context into
	 * r0-r3 in the non-secure context.
	 */
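	/*
	 * For example, on a normal return the secure world issues an SMC
	 * with r0 = TEESMC_OPTEED_RETURN_CALL_DONE and the result words in
	 * r1-r4; those become r0-r3 as seen by the non-secure caller when
	 * it resumes after its SMC instruction.
	 */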
	add	r8, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
	ldm	r8, {r0-r4}
	mov_imm	r9, TEESMC_OPTEED_RETURN_FIQ_DONE
	cmp	r0, r9
	addne	r8, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	stmne	r8, {r1-r4}

	/* Restore non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_restore_unbanked_regs

.sm_ret_to_nsec:
	/*
	 * Return to non-secure world
	 */
	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	ldm	r0, {r8-r12}

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which code has been executed.
	 * This has to be used together with
	 * CFG_CORE_WORKAROUND_SPECTRE_BP to also protect Cortex-A15 CPUs.
	 *
	 * CFG_CORE_WORKAROUND_SPECTRE_BP also invalidates the branch
	 * predictor on affected CPUs. In the cases where an alternative
	 * vector has been installed the branch predictor is already
	 * invalidated so invalidating here again would be redundant, but
	 * testing for that is more trouble than it's worth.
	 */
	write_bpiall
#endif

	/* Update SCR */
	read_scr r0
	orr	r0, r0, #(SCR_NS | SCR_FIQ) /* Set NS and FIQ bits in SCR */
	write_scr r0
	/*
	 * isb not needed since we're doing an exception return below
	 * without any dependency on the changes to SCR before that.
	 */

	add	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	b	.sm_exit

.smc_from_nsec:
	/*
	 * As we're coming from non-secure world (NS bit set) the stack
	 * pointer points to sm_ctx.nsec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bits in SCR */
	write_scr r1
	isb

	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	stm	r0, {r8-r12}

	mov	r0, sp
	bl	sm_from_nsec
	cmp	r0, #SM_EXIT_TO_NON_SECURE
	beq	.sm_ret_to_nsec

	/*
	 * Continue into secure world
	 */
	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

.sm_exit:
	pop	{r0-r7}
	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_smc_entry

/*
 * FIQ handling
 *
 * Saves CPU context in the same way as sm_smc_entry() above. The CPU
 * context will later be restored by sm_smc_entry() when handling a return
 * from FIQ.
 */
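/*
 * In other words: the FIQ is delivered to the secure world through
 * thread_vector_table's FIQ entry (set up below). Once the secure world
 * has handled it, it issues an SMC with r0 = TEESMC_OPTEED_RETURN_FIQ_DONE
 * and sm_smc_entry restores the saved non-secure context unchanged,
 * returning to where the normal world was interrupted.
 */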
LOCAL_FUNC sm_fiq_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* FIQ has a +4 offset for lr compared to preferred return address */
	sub	lr, lr, #4
	/* sp points just past struct sm_sec_ctx */
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/*
	 * As we're coming from non-secure world the stack pointer points
	 * to sm_ctx.nsec.r0 at this stage. After the instruction below the
	 * stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	/* Update SCR */
	read_scr r1
	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bits in SCR */
	write_scr r1
	isb

	/* Save non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_save_unbanked_regs
	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	stm	r0!, {r8-r12}

	/* Set FIQ entry */
	ldr	r0, =(thread_vector_table + THREAD_VECTOR_TABLE_FIQ_ENTRY)
	str	r0, [sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)]

	/* Restore secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_restore_unbanked_regs

	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)

	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_fiq_entry

	.section .text.sm_vect_table
	.align	5
LOCAL_FUNC sm_vect_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.		/* Reset			*/
	b	.		/* Undefined instruction	*/
	b	sm_smc_entry	/* Secure monitor call		*/
	b	.		/* Prefetch abort		*/
	b	.		/* Data abort			*/
	b	.		/* Reserved			*/
	b	.		/* IRQ				*/
	b	sm_fiq_entry	/* FIQ				*/

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8 byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * table we enter via. This is done by adding 1 to SP in
		 * each entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset			*/
		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
		add	sp, sp, #1	/* 3:011 Data abort		*/
		add	sp, sp, #1	/* 2:010 Reserved		*/
		add	sp, sp, #1	/* 1:001 IRQ			*/
		nop			/* 0:000 FIQ			*/
	.endm

	.align 5
sm_vect_table_a15:
	vector_prologue_spectre
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.align 5
sm_vect_table_bpiall:
	vector_prologue_spectre
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:
	/*
	 * Normally only two exceptions occur here, SMC and FIQ. For all
	 * other exceptions it's good enough to just spin; the lowest bits
	 * of SP still tell which exception we're stuck in when attaching
	 * a debugger.
	 */
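	/*
	 * Example decoding with the encodings from vector_prologue_spectre:
	 * entry via the FIQ slot executes no add, so SP & 7 == 0 and we
	 * branch to sm_fiq_entry directly. Entry via the SMC slot executes
	 * five adds, so SP & 7 == 0b101; the eor below with
	 * (BIT(0) | BIT(2)) = 0b101 both clears those bits (so the second
	 * tst passes) and restores SP to its original, 8 byte aligned
	 * value before sm_smc_entry runs.
	 */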

	/* Test for FIQ, all the lowest bits of SP are supposed to be 0 */
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_fiq_entry

	/* Test for SMC, the eor clears the lowest bits of SP for an SMC */
	eor	sp, sp, #(BIT(0) | BIT(2))
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_smc_entry

	/* unhandled exception */
	b	.
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP*/
UNWIND(	.fnend)
END_FUNC sm_vect_table

/* void sm_init(vaddr_t stack_pointer); */
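/*
 * sm_init() is expected to be called once per CPU during early boot with
 * the top of that CPU's temporary stack in r0; the monitor stack pointer
 * is then placed so that struct sm_ctx occupies the top of that stack,
 * see the layout note above sm_smc_entry.
 */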
FUNC sm_init , :
UNWIND(	.fnstart)
	/* Set monitor stack */
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	/* Point just beyond sm_ctx.sec */
	sub	sp, r0, #(SM_CTX_SIZE - SM_CTX_SEC_END)

#ifdef CFG_INIT_CNTVOFF
	read_scr r0
	orr	r0, r0, #SCR_NS /* Set NS bit in SCR */
	write_scr r0
	isb

	/*
	 * Accessing CNTVOFF:
	 * If the implementation includes the Virtualization Extensions
	 * this is an RW register, accessible from Hyp mode, and
	 * from Monitor mode when SCR.NS is set to 1.
	 * If the implementation includes the Security Extensions
	 * but not the Virtualization Extensions, an MCRR or MRRC to
	 * the CNTVOFF encoding is UNPREDICTABLE if executed in Monitor
	 * mode, regardless of the value of SCR.NS.
	 */
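	/*
	 * The checks below read ID_PFR1: the Generic Timer field
	 * (IDPFR1_GENTIMER_MASK, bits [19:16]) and the Virtualization
	 * Extensions field (IDPFR1_VIRT_MASK, bits [15:12]). CNTVOFF is
	 * only cleared when both are non-zero, since writing it without
	 * the Virtualization Extensions would be UNPREDICTABLE here.
	 */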
	read_id_pfr1 r2
	mov	r3, r2
	ands	r3, r3, #IDPFR1_GENTIMER_MASK
	beq	.no_gentimer
	ands	r2, r2, #IDPFR1_VIRT_MASK
	beq	.no_gentimer
	mov	r2, #0
	write_cntvoff r2, r2

.no_gentimer:
	bic	r0, r0, #SCR_NS /* Clear NS bit in SCR */
	write_scr r0
	isb
#endif
#ifdef CFG_SM_NO_CYCLE_COUNTING
	read_pmcr r0
	orr	r0, #PMCR_DP
	write_pmcr r0
#endif
	msr	cpsr, r1

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	/*
	 * For unrecognized CPUs we fall back to the vector used for
	 * unaffected CPUs. Cortex-A15 has special treatment compared to
	 * the other affected Cortex CPUs.
	 */
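	/*
	 * In short: Cortex-A8, Cortex-A9 and Cortex-A17 get
	 * sm_vect_table_bpiall, Cortex-A15 gets sm_vect_table_a15 and
	 * everything else (including non-Arm implementers) gets the
	 * plain sm_vect_table.
	 */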
	read_midr r1
	ubfx	r2, r1, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
	cmp	r2, #MIDR_IMPLEMENTER_ARM
	bne	1f

	ubfx	r2, r1, #MIDR_PRIMARY_PART_NUM_SHIFT, \
			#MIDR_PRIMARY_PART_NUM_WIDTH

	movw	r3, #CORTEX_A8_PART_NUM
	cmp	r2, r3
	movwne	r3, #CORTEX_A9_PART_NUM
	cmpne	r2, r3
	movwne	r3, #CORTEX_A17_PART_NUM
	cmpne	r2, r3
	ldreq	r0, =sm_vect_table_bpiall
	beq	2f

	movw	r3, #CORTEX_A15_PART_NUM
	cmp	r2, r3
	ldreq	r0, =sm_vect_table_a15
	beq	2f
#endif
	/* Set monitor vector (MVBAR) */
1:	ldr	r0, =sm_vect_table
2:	write_mvbar r0

	bx	lr
END_FUNC sm_init
KEEP_PAGER sm_init


/* struct sm_nsec_ctx *sm_get_nsec_ctx(void); */
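/*
 * Illustrative use from C during per-CPU boot, a sketch only (the
 * nsec_entry value and the mon_lr/mon_spsr field names are assumptions,
 * not taken from this file):
 *
 *	struct sm_nsec_ctx *nsec_ctx = sm_get_nsec_ctx();
 *
 *	// The first return to the normal world then jumps to nsec_entry
 *	// in SVC mode with IRQ/FIQ masked, via the rfefd in sm_smc_entry.
 *	nsec_ctx->mon_lr = nsec_entry;
 *	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I | CPSR_F;
 */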
FUNC sm_get_nsec_ctx , :
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	/*
	 * As we're in secure mode mon_sp points just beyond sm_ctx.sec,
	 * which allows us to calculate the address of sm_ctx.nsec.
	 */
	add	r0, sp, #(SM_CTX_NSEC - SM_CTX_SEC_END)
	msr	cpsr, r1

	bx	lr
END_FUNC sm_get_nsec_ctx