xref: /optee_os/core/arch/arm/sm/sm_a32.S (revision 28481ff35588fe9c5168776779e92464050be785)
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/unwind.h>
#include <sm/optee_smc.h>
#include <sm/sm.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>
#include <util.h>

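/*
 * SM_CTX_SEC, SM_CTX_NSEC and the SM_*_CTX_* values used below are byte
 * offsets into struct sm_ctx, generated at build time into
 * <generated/asm-defines.h> (the struct itself is defined in <sm/sm.h>).
 * SM_CTX_SEC_END is the offset just past sm_ctx.sec, which is where the
 * monitor stack pointer is parked while executing in secure world (see
 * sm_init() below).
 */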
#define SM_CTX_SEC_END	(SM_CTX_SEC + SM_CTX_SEC_SIZE)

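/*
 * Switches to \mode and stores that mode's spsr, sp and lr at [r0],
 * advancing r0 past the three saved words. Clobbers r2.
 */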
	.macro save_regs mode
	cps	\mode
	mrs	r2, spsr
	str	r2, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4
	.endm

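/*
 * Saves the user mode sp and lr (accessed from System mode) plus the
 * spsr, sp and lr of the IRQ, FIQ, SVC, Abort and Undef modes (and PMCR
 * if CFG_SM_NO_CYCLE_COUNTING is enabled) to the buffer r0 points at.
 * Meant to be called from Monitor mode: it returns in Monitor mode with
 * r0 advanced past the saved registers and r2 clobbered.
 */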
FUNC sm_save_unbanked_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be saved from System mode */
	cps	#CPSR_MODE_SYS
	str	sp, [r0], #4
	str	lr, [r0], #4

	save_regs	#CPSR_MODE_IRQ
	save_regs	#CPSR_MODE_FIQ
	save_regs	#CPSR_MODE_SVC
	save_regs	#CPSR_MODE_ABT
	save_regs	#CPSR_MODE_UND

#ifdef CFG_SM_NO_CYCLE_COUNTING
	read_pmcr r2
	stm	r0!, {r2}
#endif
	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_save_unbanked_regs

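/*
 * Switches to \mode and loads that mode's spsr, sp and lr from [r0],
 * advancing r0 past the three restored words. Clobbers r2.
 */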
	.macro restore_regs mode
	cps	\mode
	ldr	r2, [r0], #4
	ldr	sp, [r0], #4
	ldr	lr, [r0], #4
	msr	spsr_fsxc, r2
	.endm

/* Restores the mode-specific registers */
FUNC sm_restore_unbanked_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be restored from System mode */
	cps	#CPSR_MODE_SYS
	ldr	sp, [r0], #4
	ldr	lr, [r0], #4

	restore_regs	#CPSR_MODE_IRQ
	restore_regs	#CPSR_MODE_FIQ
	restore_regs	#CPSR_MODE_SVC
	restore_regs	#CPSR_MODE_ABT
	restore_regs	#CPSR_MODE_UND

#ifdef CFG_SM_NO_CYCLE_COUNTING
	ldm	r0!, {r2}
	write_pmcr r2
#endif
	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_restore_unbanked_regs

/*
 * stack_tmp is used as the stack; the top of the stack is reserved to
 * hold struct sm_ctx, everything below it is for normal stack usage. As
 * several different CPU modes use the same stack it's important not to
 * switch CPU mode until the current mode is done with the stack. This
 * means FIQ, IRQ and asynchronous aborts have to be masked while using
 * stack_tmp.
 */
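/*
 * Monitor stack pointer convention (set up by sm_init() below): while
 * executing in normal world mon_sp points just past sm_ctx.nsec (the
 * end of struct sm_ctx) and while executing in secure world it points
 * just past sm_ctx.sec. The srsdb/push sequences in the entry functions
 * below thus store mon_lr, mon_spsr and r0-r7 directly into the
 * corresponding context before sp is rewound to the base of struct
 * sm_ctx.
 */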
LOCAL_FUNC sm_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/* Find out if we're doing a secure or non-secure entry */
	read_scr r1
	tst	r1, #SCR_NS
	bne	.smc_from_nsec

	/*
	 * As we're coming from secure world (NS bit cleared) the stack
	 * pointer points to sm_ctx.sec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

	/* Save secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_save_unbanked_regs

	/*
	 * On FIQ exit we're restoring the non-secure context unchanged, on
	 * all other exits we're shifting r1-r4 from the secure context into
	 * r0-r3 in the non-secure context.
	 */
	add	r8, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
	ldm	r8, {r0-r4}
	mov_imm	r9, TEESMC_OPTEED_RETURN_FIQ_DONE
	cmp	r0, r9
	addne	r8, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	stmne	r8, {r1-r4}

	/* Restore non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_restore_unbanked_regs

.sm_ret_to_nsec:
	/*
	 * Return to non-secure world
	 */
	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	ldm	r0, {r8-r12}

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which code has been executed.
	 * This is required to be used together with
	 * CFG_CORE_WORKAROUND_SPECTRE_BP to protect Cortex-A15 CPUs too.
	 *
	 * CFG_CORE_WORKAROUND_SPECTRE_BP also invalidates the branch
	 * predictor on affected CPUs. In the cases where an alternative
	 * vector has been installed the branch predictor is already
	 * invalidated so invalidating here again would be redundant, but
	 * testing for that is more trouble than it's worth.
	 */
	write_bpiall
#endif

	/* Update SCR */
	read_scr r0
	orr	r0, r0, #(SCR_NS | SCR_FIQ) /* Set NS and FIQ bits in SCR */
	write_scr r0
	/*
	 * An isb isn't needed since we're doing an exception return below
	 * and nothing before it depends on the changes to SCR.
	 */

	add	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	b	.sm_exit

.smc_from_nsec:
	/*
	 * As we're coming from non-secure world (NS bit set) the stack
	 * pointer points to sm_ctx.nsec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bits in SCR */
	write_scr r1
	isb

	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	stm	r0, {r8-r12}

	mov	r0, sp
	bl	sm_from_nsec
	cmp	r0, #SM_EXIT_TO_NON_SECURE
	beq	.sm_ret_to_nsec

	/*
	 * Continue into secure world
	 */
	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

.sm_exit:
	pop	{r0-r7}
	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_smc_entry

/*
 * FIQ handling
 *
 * Saves CPU context in the same way as sm_smc_entry() above. The CPU
 * context will later be restored by sm_smc_entry() when handling a return
 * from FIQ.
 */
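/*
 * FIQs raised while executing in normal world are trapped to Monitor
 * mode because SCR.FIQ is set on the return path to normal world (see
 * .sm_ret_to_nsec above); that is how execution ends up here.
 */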
LOCAL_FUNC sm_fiq_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* FIQ has a +4 offset for lr compared to the preferred return address */
	sub	lr, lr, #4
	/* sp points just past struct sm_sec_ctx */
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/*
	 * As we're coming from non-secure world the stack pointer points
	 * to sm_ctx.nsec.r0 at this stage. After the instruction below the
	 * stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	/* Update SCR */
	read_scr r1
	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bits in SCR */
	write_scr r1
	isb

	/* Save non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_save_unbanked_regs
	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	stm	r0!, {r8-r12}

	/* Set FIQ entry */
	ldr	r0, =(thread_vector_table + THREAD_VECTOR_TABLE_FIQ_ENTRY)
	str	r0, [sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)]
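	/*
	 * The mon_lr word of the secure context is what the rfefd below
	 * returns to, so once the secure context has been restored
	 * execution continues at the FIQ entry of thread_vector_table in
	 * secure world.
	 */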

	/* Restore secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_restore_unbanked_regs

	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)

	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_fiq_entry

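/*
 * Monitor vector table, installed into MVBAR by sm_init() below. Only
 * the Secure monitor call and FIQ entries are expected to be taken; the
 * other entries just spin.
 */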
	.section .text.sm_vect_table
	.align	5
LOCAL_FUNC sm_vect_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.		/* Reset			*/
	b	.		/* Undefined instruction	*/
	b	sm_smc_entry	/* Secure monitor call		*/
	b	.		/* Prefetch abort		*/
	b	.		/* Data abort			*/
	b	.		/* Reserved			*/
	b	.		/* IRQ				*/
	b	sm_fiq_entry	/* FIQ				*/

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8-byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * table we enter via. This is done by adding 1 to SP in
		 * each entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset			*/
		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
		add	sp, sp, #1	/* 3:011 Data abort		*/
		add	sp, sp, #1	/* 2:010 Reserved		*/
		add	sp, sp, #1	/* 1:001 IRQ			*/
		nop			/* 0:000 FIQ			*/
	.endm
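
/*
 * For example, entering via the Secure monitor call slot (the third
 * entry of the tables below) executes five of the adds, leaving 0b101
 * in the lowest bits of SP, while entering via the FIQ slot only
 * executes the nop and leaves SP unchanged. The dispatch code at label
 * 1 below uses these bits to tell FIQ and SMC entries apart.
 */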

	.align 5
sm_vect_table_a15:
	vector_prologue_spectre
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.align 5
sm_vect_table_bpiall:
	vector_prologue_spectre
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:
	/*
	 * Only two exceptions normally occur, SMC and FIQ. For all other
	 * exceptions it's good enough to just spin; the lowest bits of SP
	 * still tell which exception we're stuck in when attaching a
	 * debugger.
	 */

	/* Test for FIQ, where all the lowest bits of SP are supposed to be 0 */
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_fiq_entry

	/* Test for SMC, XOR the lowest bits of SP so an SMC entry gives 0 */
	eor	sp, sp, #(BIT(0) | BIT(2))
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_smc_entry
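	/*
	 * In the SMC case the eor above also restores SP to its original
	 * 8-byte aligned value, since the only bits set were the 0b101
	 * added by the vector prologue.
	 */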

	/* Unhandled exception */
	b	.
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP*/
UNWIND(	.fnend)
END_FUNC sm_vect_table

/* void sm_init(vaddr_t stack_pointer); */
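/*
 * stack_pointer is the top of the stack described above; the topmost
 * SM_CTX_SIZE bytes of it hold struct sm_ctx and the monitor stack
 * pointer is set to just past sm_ctx.sec within it.
 */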
FUNC sm_init , :
UNWIND(	.fnstart)
	/* Set monitor stack */
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	/* Point just beyond sm_ctx.sec */
	sub	sp, r0, #(SM_CTX_SIZE - SM_CTX_SEC_END)

#ifdef CFG_INIT_CNTVOFF
	read_scr r0
	orr	r0, r0, #SCR_NS /* Set NS bit in SCR */
	write_scr r0
	isb

	/*
	 * Accessing CNTVOFF:
	 * If the implementation includes the Virtualization Extensions
	 * this is a RW register, accessible from Hyp mode, and
	 * from Monitor mode when SCR.NS is set to 1.
	 * If the implementation includes the Security Extensions
	 * but not the Virtualization Extensions, an MCRR or MRRC to
	 * the CNTVOFF encoding is UNPREDICTABLE if executed in Monitor
	 * mode, regardless of the value of SCR.NS.
	 */
	read_id_pfr1 r2
	mov	r3, r2
	ands	r3, r3, #IDPFR1_GENTIMER_MASK
	beq	.no_gentimer
	ands	r2, r2, #IDPFR1_VIRT_MASK
	beq	.no_gentimer
	mov	r2, #0
	write_cntvoff r2, r2
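	/* CNTVOFF = 0 makes the virtual counter match the physical counter */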

.no_gentimer:
	bic	r0, r0, #SCR_NS /* Clear NS bit in SCR */
	write_scr r0
	isb
#endif
#ifdef CFG_SM_NO_CYCLE_COUNTING
	read_pmcr r0
	orr	r0, #PMCR_DP
	write_pmcr r0
#endif
	msr	cpsr, r1

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	/*
	 * For unrecognized CPUs we fall back to the vector used for
	 * unaffected CPUs. Cortex-A15 gets special treatment compared to
	 * the other affected Cortex CPUs.
	 */
	read_midr r1
	ubfx	r2, r1, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
	cmp	r2, #MIDR_IMPLEMENTER_ARM
	bne	1f

	ubfx	r2, r1, #MIDR_PRIMARY_PART_NUM_SHIFT, \
			#MIDR_PRIMARY_PART_NUM_WIDTH

	movw	r3, #CORTEX_A8_PART_NUM
	cmp	r2, r3
	movwne	r3, #CORTEX_A9_PART_NUM
	cmpne	r2, r3
	movwne	r3, #CORTEX_A17_PART_NUM
	cmpne	r2, r3
	ldreq	r0, =sm_vect_table_bpiall
	beq	2f

	movw	r3, #CORTEX_A15_PART_NUM
	cmp	r2, r3
	ldreq	r0, =sm_vect_table_a15
	beq	2f
#endif
	/* Set monitor vector (MVBAR) */
1:	ldr	r0, =sm_vect_table
2:	write_mvbar r0

	bx	lr
END_FUNC sm_init
KEEP_PAGER sm_init


/* struct sm_nsec_ctx *sm_get_nsec_ctx(void); */
FUNC sm_get_nsec_ctx , :
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	/*
	 * As we're in secure mode mon_sp points just beyond sm_ctx.sec,
	 * which allows us to calculate the address of sm_ctx.nsec.
	 */
	add	r0, sp, #(SM_CTX_NSEC - SM_CTX_SEC_END)
	msr	cpsr, r1

	bx	lr
END_FUNC sm_get_nsec_ctx
426