xref: /optee_os/core/arch/arm/sm/sm_a32.S (revision 827be46c173f31c57006af70ca3a15a5b1a7fba3)
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <sm/optee_smc.h>
#include <sm/sm.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>
#include <util.h>

#define SM_CTX_SEC_END	(SM_CTX_SEC + SM_CTX_SEC_SIZE)

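/*
 * save_regs - save the banked spsr, sp and lr of the mode given in
 * \mode at [r0], advancing r0 past the stored words. Clobbers r2.
 */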
	.macro save_regs mode
	cps	\mode
	mrs	r2, spsr
	str	r2, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4
	.endm

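/*
 * sm_save_unbanked_regs - save the mode specific registers the monitor
 * has to preserve across a world switch.
 *
 * Stores usr/sys sp and lr followed by spsr, sp and lr of the IRQ, FIQ,
 * SVC, ABT and UND modes at [r0], advancing r0 past the saved data.
 * PMCR and CNTKCTL are saved as well when CFG_SM_NO_CYCLE_COUNTING
 * respectively CFG_FTRACE_SUPPORT is enabled. Called from and returns
 * in Monitor mode, clobbers r2.
 */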
FUNC sm_save_unbanked_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be saved from system mode */
	cps	#CPSR_MODE_SYS
	str	sp, [r0], #4
	str	lr, [r0], #4

	save_regs	#CPSR_MODE_IRQ
	save_regs	#CPSR_MODE_FIQ
	save_regs	#CPSR_MODE_SVC
	save_regs	#CPSR_MODE_ABT
	save_regs	#CPSR_MODE_UND

#ifdef CFG_SM_NO_CYCLE_COUNTING
	read_pmcr r2
	stm	r0!, {r2}
#endif

#ifdef CFG_FTRACE_SUPPORT
	read_cntkctl r2
	stm	r0!, {r2}
#endif
	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_save_unbanked_regs

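/*
 * restore_regs - restore the banked spsr, sp and lr of the mode given
 * in \mode from [r0], advancing r0 past the loaded words. Clobbers r2.
 */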
	.macro restore_regs mode
	cps	\mode
	ldr	r2, [r0], #4
	ldr	sp, [r0], #4
	ldr	lr, [r0], #4
	msr	spsr_fsxc, r2
	.endm

/* Restores the mode-specific registers */
FUNC sm_restore_unbanked_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be restored from system mode */
	cps	#CPSR_MODE_SYS
	ldr	sp, [r0], #4
	ldr	lr, [r0], #4

	restore_regs	#CPSR_MODE_IRQ
	restore_regs	#CPSR_MODE_FIQ
	restore_regs	#CPSR_MODE_SVC
	restore_regs	#CPSR_MODE_ABT
	restore_regs	#CPSR_MODE_UND

#ifdef CFG_SM_NO_CYCLE_COUNTING
	ldm	r0!, {r2}
	write_pmcr r2
#endif

#ifdef CFG_FTRACE_SUPPORT
	ldm	r0!, {r2}
	write_cntkctl r2
#endif
	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_restore_unbanked_regs

/*
 * stack_tmp is used as the stack, the top of the stack is reserved to
 * hold struct sm_ctx, everything below is for normal stack usage. As
 * several different CPU modes use the same stack it's important that a
 * switch of CPU mode isn't done until one mode is finished with the
 * stack. This means FIQ, IRQ and asynchronous aborts have to be masked
 * while using stack_tmp.
 */
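/*
 * Rough layout of stack_tmp as used here (a sketch derived from the
 * comment above, the field offsets come from <generated/asm-defines.h>):
 *
 *	stack top ->	+------------------------+
 *			| struct sm_ctx          | saved secure (.sec) and
 *			|                        | non-secure (.nsec) contexts
 *			+------------------------+
 *			| normal stack usage     | grows downwards
 *			|          ...           |
 *	stack bottom ->	+------------------------+
 *
 * The Monitor mode stack pointer set up by sm_init() points just past
 * sm_ctx.sec, see also sm_get_nsec_ctx().
 */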
LOCAL_FUNC sm_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/* Find out if we're doing a secure or non-secure entry */
	read_scr r1
	tst	r1, #SCR_NS
	bne	.smc_from_nsec

	/*
	 * As we're coming from secure world (NS bit cleared) the stack
	 * pointer points to sm_ctx.sec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

	/* Save secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_save_unbanked_regs

	/*
	 * On FIQ exit we're restoring the non-secure context unchanged;
	 * on all other exits we're shifting r1-r4 from the secure context
	 * into r0-r3 of the non-secure context.
	 */
	add	r8, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
	ldm	r8, {r0-r4}
	mov_imm	r9, TEESMC_OPTEED_RETURN_FIQ_DONE
	cmp	r0, r9
	addne	r8, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	stmne	r8, {r1-r4}

	/* Restore non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_restore_unbanked_regs

.sm_ret_to_nsec:
	/*
	 * Return to non-secure world
	 */
	add     r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	ldm	r0, {r8-r12}

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which code has been executed.
	 * This is required to be used together with
	 * CFG_CORE_WORKAROUND_SPECTRE_BP to protect Cortex-A15 CPUs too.
	 *
	 * CFG_CORE_WORKAROUND_SPECTRE_BP also invalidates the branch
	 * predictor on affected CPUs. In the cases where an alternative
	 * vector has been installed the branch predictor is already
	 * invalidated, so invalidating it here again would be redundant,
	 * but testing for that is more trouble than it's worth.
	 */
	write_bpiall
#endif

	/* Update SCR */
	read_scr r0
	orr	r0, r0, #(SCR_NS | SCR_FIQ) /* Set NS and FIQ bit in SCR */
	write_scr r0
	/*
	 * isb not needed since we're doing an exception return below
	 * without any dependency on the changes to SCR before that.
	 */

	add	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	b	.sm_exit

.smc_from_nsec:
	/*
	 * As we're coming from non-secure world (NS bit set) the stack
	 * pointer points to sm_ctx.nsec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
	write_scr r1
	isb

	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	stm	r0, {r8-r12}

	mov	r0, sp
	bl	sm_from_nsec
	cmp	r0, #SM_EXIT_TO_NON_SECURE
	beq	.sm_ret_to_nsec

	/*
	 * Continue into secure world
	 */
	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

.sm_exit:
	pop	{r0-r7}
	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_smc_entry

/*
 * FIQ handling
 *
 * Saves CPU context in the same way as sm_smc_entry() above. The CPU
 * context will later be restored by sm_smc_entry() when handling a return
 * from FIQ.
 */
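/*
 * Note that only the non-secure context is saved here; the secure
 * context is restored and execution resumes at vector_fiq_entry in the
 * secure world. The saved non-secure context is restored unchanged by
 * sm_smc_entry() when the secure world later reports
 * TEESMC_OPTEED_RETURN_FIQ_DONE.
 */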
LOCAL_FUNC sm_fiq_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* FIQ has a +4 offset for lr compared to preferred return address */
	sub	lr, lr, #4
	/* sp points just past struct sm_sec_ctx */
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/*
	 * As we're coming from non-secure world the stack pointer points
	 * to sm_ctx.nsec.r0 at this stage. After the instruction below the
	 * stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	/* Update SCR */
	read_scr r1
	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
	write_scr r1
	isb

	/* Save non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_save_unbanked_regs
	add     r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	stm	r0!, {r8-r12}

	/* Set FIQ entry */
	ldr	r0, =vector_fiq_entry
	str	r0, [sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)]

	/* Restore secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_restore_unbanked_regs

	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)

	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_fiq_entry

	.align	5
LOCAL_FUNC sm_vect_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.		/* Reset			*/
	b	.		/* Undefined instruction	*/
	b	sm_smc_entry	/* Secure monitor call		*/
	b	.		/* Prefetch abort		*/
	b	.		/* Data abort			*/
	b	.		/* Reserved			*/
	b	.		/* IRQ				*/
	b	sm_fiq_entry	/* FIQ				*/

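/*
 * With CFG_CORE_WORKAROUND_SPECTRE_BP two alternative vector tables are
 * provided below. They invalidate the branch predictor on every entry,
 * with ICIALLU on Cortex-A15 (where BPIALL is not effective) and with
 * BPIALL on the other affected CPUs, before dispatching to sm_smc_entry
 * or sm_fiq_entry. sm_init() selects which table to install in MVBAR
 * based on MIDR.
 */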
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8 byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * we enter via.  This is done by adding 1 to SP in each
		 * entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset			*/
		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
		add	sp, sp, #1	/* 3:011 Data abort		*/
		add	sp, sp, #1	/* 2:010 Reserved		*/
		add	sp, sp, #1	/* 1:001 IRQ			*/
		nop			/* 0:000 FIQ			*/
	.endm

	.align 5
sm_vect_table_a15:
	vector_prologue_spectre
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.align 5
sm_vect_table_bpiall:
	vector_prologue_spectre
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:
	/*
	 * Only two exceptions normally occur, SMC and FIQ. With all
	 * other exceptions it's good enough to just spin; the lowest bits
	 * of SP still tell which exception we're stuck in when attaching
	 * a debugger.
	 */

	/* Test for FIQ, all the lowest bits of SP are supposed to be 0 */
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_fiq_entry

	/* Test for SMC, XOR the lowest bits of SP (pattern 101) to 0 */
	eor	sp, sp, #(BIT(0) | BIT(2))
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_smc_entry

	/* unhandled exception */
	b	.
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP*/
UNWIND(	.fnend)
END_FUNC sm_vect_table

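/*
 * Per-CPU monitor initialization: points the Monitor mode stack just
 * past sm_ctx.sec in the supplied stack, optionally zeroes CNTVOFF
 * (CFG_INIT_CNTVOFF) and sets PMCR.DP (CFG_SM_NO_CYCLE_COUNTING), and
 * installs the monitor vector table in MVBAR, selecting a Spectre
 * workaround variant when CFG_CORE_WORKAROUND_SPECTRE_BP is enabled.
 */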
/* void sm_init(vaddr_t stack_pointer); */
FUNC sm_init , :
UNWIND(	.fnstart)
	/* Set monitor stack */
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	/* Point just beyond sm_ctx.sec */
	sub	sp, r0, #(SM_CTX_SIZE - SM_CTX_SEC_END)

#ifdef CFG_INIT_CNTVOFF
	read_scr r0
	orr	r0, r0, #SCR_NS /* Set NS bit in SCR */
	write_scr r0
	isb

	/*
	 * Accessing CNTVOFF:
	 * If the implementation includes the Virtualization Extensions
	 * this is a RW register, accessible from Hyp mode, and
	 * from Monitor mode when SCR.NS is set to 1.
	 * If the implementation includes the Security Extensions
	 * but not the Virtualization Extensions, an MCRR or MRRC to
	 * the CNTVOFF encoding is UNPREDICTABLE if executed in Monitor
	 * mode, regardless of the value of SCR.NS.
	 */
	read_id_pfr1 r2
	mov	r3, r2
	ands    r3, r3, #IDPFR1_GENTIMER_MASK
	beq	.no_gentimer
	ands    r2, r2, #IDPFR1_VIRT_MASK
	beq	.no_gentimer
	mov	r2, #0
	write_cntvoff r2, r2

.no_gentimer:
	bic	r0, r0, #SCR_NS /* Clear NS bit in SCR */
	write_scr r0
	isb
#endif
#ifdef CFG_SM_NO_CYCLE_COUNTING
	read_pmcr r0
	orr	r0, #PMCR_DP
	write_pmcr r0
#endif
	msr	cpsr, r1

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	/*
	 * For unrecognized CPUs we fall back to the vector used for
	 * unaffected CPUs. Cortex-A15 has special treatment compared to
	 * the other affected Cortex CPUs.
	 */
	read_midr r1
	ubfx	r2, r1, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
	cmp	r2, #MIDR_IMPLEMENTER_ARM
	bne	1f

	ubfx	r2, r1, #MIDR_PRIMARY_PART_NUM_SHIFT, \
			#MIDR_PRIMARY_PART_NUM_WIDTH

	movw	r3, #CORTEX_A8_PART_NUM
	cmp	r2, r3
	movwne	r3, #CORTEX_A9_PART_NUM
	cmpne	r2, r3
	movwne	r3, #CORTEX_A17_PART_NUM
	cmpne	r2, r3
	ldreq	r0, =sm_vect_table_bpiall
	beq	2f

	movw	r3, #CORTEX_A15_PART_NUM
	cmp	r2, r3
	ldreq	r0, =sm_vect_table_a15
	beq	2f
#endif
	/* Set monitor vector (MVBAR) */
1:	ldr	r0, =sm_vect_table
2:	write_mvbar r0

	bx	lr
UNWIND(	.fnend)
END_FUNC sm_init
DECLARE_KEEP_PAGER sm_init


/* struct sm_nsec_ctx *sm_get_nsec_ctx(void); */
FUNC sm_get_nsec_ctx , :
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	/*
	 * As we're in secure mode mon_sp points just beyond sm_ctx.sec,
	 * which allows us to calculate the address of sm_ctx.nsec.
	 */
	add	r0, sp, #(SM_CTX_NSEC - SM_CTX_SEC_END)
	msr	cpsr, r1

	bx	lr
END_FUNC sm_get_nsec_ctx
