xref: /optee_os/core/arch/arm/sm/sm_a32.S (revision 0135712b7922f0258360ca27fc6266e95a9d6221)
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/unwind.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>
#include <util.h>
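
/*
 * Armv7-A secure monitor for OP-TEE. This file provides the monitor
 * vector table installed in MVBAR, the SMC and FIQ entry paths that
 * switch between secure and non-secure world by toggling SCR.NS, and
 * helpers that save and restore the per-mode registers (SP, LR and
 * SPSR of each CPU mode) which the hardware doesn't swap on a world
 * switch.
 */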

	.section .text.sm_asm
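
/*
 * sm_save_unbanked_regs() saves, and sm_restore_unbanked_regs() below
 * restores, the registers that aren't banked between the security
 * states. The store order matches the offsets from
 * <generated/asm-defines.h>; a rough sketch of the layout (field names
 * are assumptions for illustration, see struct sm_unbanked_regs in
 * sm/sm.h for the authoritative definition):
 *
 *	struct sm_unbanked_regs {
 *		uint32_t usr_sp, usr_lr;
 *		uint32_t irq_spsr, irq_sp, irq_lr;
 *		uint32_t fiq_spsr, fiq_sp, fiq_lr;
 *		uint32_t svc_spsr, svc_sp, svc_lr;
 *		uint32_t abt_spsr, abt_sp, abt_lr;
 *		uint32_t und_spsr, und_sp, und_lr;
 *	#ifdef CFG_SM_NO_CYCLE_COUNTING
 *		uint32_t pmcr;
 *	#endif
 *	};
 */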
FUNC sm_save_unbanked_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be saved from System mode */
	cps	#CPSR_MODE_SYS
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_IRQ
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_FIQ
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_ABT
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_UND
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

#ifdef CFG_SM_NO_CYCLE_COUNTING
	read_pmcr r2
	stm	r0!, {r2}
#endif
	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_save_unbanked_regs

/* Restores the mode-specific registers */
FUNC sm_restore_unbanked_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be restored from System mode */
	cps	#CPSR_MODE_SYS
	ldm	r0!, {sp, lr}

	cps	#CPSR_MODE_IRQ
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_FIQ
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_SVC
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_ABT
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_UND
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

#ifdef CFG_SM_NO_CYCLE_COUNTING
	ldm	r0!, {r2}
	write_pmcr r2
#endif
	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_restore_unbanked_regs

/*
 * stack_tmp is used as the stack; the top of the stack is reserved to
 * hold struct sm_ctx, everything below it is for normal stack usage. As
 * several different CPU modes use the same stack it's important not to
 * switch CPU mode until the current mode is done with the stack. This
 * means FIQ, IRQ and asynchronous aborts have to be masked while using
 * stack_tmp.
 */
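
/*
 * A rough sketch of the context at the top of stack_tmp; the exact
 * layout comes from <generated/asm-defines.h> (struct sm_ctx in
 * sm/sm.h), field names here are assumptions for illustration:
 *
 *	struct sm_sec_ctx {
 *		struct sm_unbanked_regs ub_regs;
 *		uint32_t r0, r1, r2, r3, r4, r5, r6, r7;
 *		uint32_t mon_lr;
 *		uint32_t mon_spsr;
 *	};
 *
 *	struct sm_nsec_ctx {
 *		struct sm_unbanked_regs ub_regs;
 *		uint32_t r8, r9, r10, r11, r12;
 *		uint32_t r0, r1, r2, r3, r4, r5, r6, r7;
 *		uint32_t mon_lr;
 *		uint32_t mon_spsr;
 *	};
 *
 *	struct sm_ctx {
 *		struct sm_sec_ctx sec;
 *		struct sm_nsec_ctx nsec;
 *	};
 *
 * The srsdb/push sequences in the entry paths below store mon_spsr,
 * mon_lr and r0-r7 going downwards from the end of the corresponding
 * sub-context, which is why sp points at sm_ctx.sec.r0 or
 * sm_ctx.nsec.r0 right after the prologue.
 */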
LOCAL_FUNC sm_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/* Find out if we're doing a secure or non-secure entry */
	read_scr r1
	tst	r1, #SCR_NS
	bne	.smc_from_nsec

	/*
	 * As we're coming from secure world (NS bit cleared) the stack
	 * pointer points to sm_ctx.sec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

	/* Save secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_save_unbanked_regs

	/*
	 * On FIQ exit we're restoring the non-secure context unchanged, on
	 * all other exits we're shifting r1-r4 from secure context into
	 * r0-r3 in non-secure context.
	 */
	add	r8, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
	ldm	r8, {r0-r4}
	mov_imm	r9, TEESMC_OPTEED_RETURN_FIQ_DONE
	cmp	r0, r9
	addne	r8, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	stmne	r8, {r1-r4}
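
	/*
	 * This implements the return convention from
	 * <sm/teesmc_opteed.h>: secure world exits to the monitor with
	 * r0 set to a TEESMC_OPTEED_RETURN_* code and, for all codes
	 * except FIQ_DONE, r1-r4 carrying the values normal world
	 * expects in r0-r3 as the result of its SMC.
	 */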

	/* Restore non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_restore_unbanked_regs

.sm_ret_to_nsec:
	/*
	 * Return to non-secure world
	 */
	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	ldm	r0, {r8-r12}

	/* Update SCR */
	read_scr r0
	orr	r0, r0, #(SCR_NS | SCR_FIQ) /* Set NS and FIQ bits in SCR */
	write_scr r0
	/*
	 * isb not needed since we're doing an exception return below
	 * without any dependency on the changes in SCR before that.
	 */

	add	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	b	.sm_exit

.smc_from_nsec:
	/*
	 * As we're coming from non-secure world (NS bit set) the stack
	 * pointer points to sm_ctx.nsec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bits in SCR */
	write_scr r1
	isb

	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	stm	r0, {r8-r12}

	mov	r0, sp
	bl	sm_from_nsec
	cmp	r0, #0
	beq	.sm_ret_to_nsec
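
	/*
	 * sm_from_nsec() is the C part of the monitor (see sm/sm.c). A
	 * return value of 0 means the request was handled entirely in
	 * the monitor and we return straight to non-secure world above;
	 * any other value means we continue into secure world with the
	 * secure context it has prepared.
	 */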

	/*
	 * Continue into secure world
	 */
	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

.sm_exit:
	pop	{r0-r7}
	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_smc_entry

/*
 * FIQ handling
 *
 * Saves CPU context in the same way as sm_smc_entry() above. The CPU
 * context will later be restored by sm_smc_entry() when handling a return
 * from FIQ.
 */
LOCAL_FUNC sm_fiq_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* FIQ has a +4 offset for lr compared to the preferred return address */
	sub	lr, lr, #4
	/* sp points just past struct sm_sec_ctx */
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/*
	 * As we're coming from non-secure world the stack pointer points
	 * to sm_ctx.nsec.r0 at this stage. After the instruction below the
	 * stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	/* Update SCR */
	read_scr r1
	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bits in SCR */
	write_scr r1
	isb

	/* Save non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_save_unbanked_regs
	stm	r0!, {r8-r12}

	/* Set FIQ entry */
	ldr	r0, =(thread_vector_table + THREAD_VECTOR_TABLE_FIQ_ENTRY)
	str	r0, [sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)]
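
	/*
	 * With the FIQ entry of thread_vector_table stored in
	 * sm_ctx.sec.mon_lr, the rfefd below "returns" into the secure
	 * world FIQ handler. Once the FIQ has been serviced, secure
	 * world comes back via an SMC with TEESMC_OPTEED_RETURN_FIQ_DONE
	 * and sm_smc_entry() above restores the saved non-secure context
	 * unchanged.
	 */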

	/* Restore secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_restore_unbanked_regs

	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)

	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_fiq_entry

	.section .text.sm_vect_table
	.align	5
LOCAL_FUNC sm_vect_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.		/* Reset			*/
	b	.		/* Undefined instruction	*/
	b	sm_smc_entry	/* Secure monitor call		*/
	b	.		/* Prefetch abort		*/
	b	.		/* Data abort			*/
	b	.		/* Reserved			*/
	b	.		/* IRQ				*/
	b	sm_fiq_entry	/* FIQ				*/

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8-byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * we enter via. This is done by adding 1 to SP in each
		 * entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset			*/
		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
		add	sp, sp, #1	/* 3:011 Data abort		*/
		add	sp, sp, #1	/* 2:010 Reserved		*/
		add	sp, sp, #1	/* 1:001 IRQ			*/
		nop			/* 0:000 FIQ			*/
	.endm
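
	/*
	 * Worked example: an SMC enters at vector offset 0x08 and
	 * executes the five remaining adds, leaving the low bits of SP
	 * at 0b101, while an FIQ enters at offset 0x1c, executes only
	 * the nop and leaves them at 0b000. The dispatch code after the
	 * branch predictor invalidation below recognizes exactly these
	 * two patterns; the eor with 0b101 also restores the original SP
	 * on the SMC path.
	 */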

	.align 5
sm_vect_table_a15:
	vector_prologue_spectre
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.align 5
sm_vect_table_bpiall:
	vector_prologue_spectre
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:
	/*
	 * Only two exceptions normally occur, SMC and FIQ. For all other
	 * exceptions it's good enough to just spin; the lowest bits of SP
	 * still tell which exception we're stuck with when attaching a
	 * debugger.
	 */

	/* Test for FIQ, all the lowest bits of SP are supposed to be 0 */
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_fiq_entry

	/* Test for SMC, XOR the SMC pattern so the lowest bits of SP become 0 */
	eor	sp, sp, #(BIT(0) | BIT(2))
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_smc_entry

	/* Unhandled exception */
	b	.
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP*/
UNWIND(	.fnend)
END_FUNC sm_vect_table

/* void sm_init(vaddr_t stack_pointer); */
FUNC sm_init , :
UNWIND(	.fnstart)
	/* Set monitor stack */
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	/* Point just beyond sm_ctx.sec */
	sub	sp, r0, #(SM_CTX_SIZE - SM_CTX_NSEC)

#ifdef CFG_INIT_CNTVOFF
	read_scr r0
	orr	r0, r0, #SCR_NS /* Set NS bit in SCR */
	write_scr r0
	isb

	/*
	 * Accessing CNTVOFF:
	 * If the implementation includes the Virtualization Extensions
	 * this is a RW register, accessible from Hyp mode, and
	 * from Monitor mode when SCR.NS is set to 1.
	 * If the implementation includes the Security Extensions
	 * but not the Virtualization Extensions, an MCRR or MRRC to
	 * the CNTVOFF encoding is UNPREDICTABLE if executed in Monitor
	 * mode, regardless of the value of SCR.NS.
	 */
	read_idpfr1 r2
	mov	r3, r2
	ands	r3, r3, #IDPFR1_GENTIMER_MASK
	beq	.no_gentimer
	ands	r2, r2, #IDPFR1_VIRT_MASK
	beq	.no_gentimer
	mov	r2, #0
	write_cntvoff r2, r2

.no_gentimer:
	bic	r0, r0, #SCR_NS /* Clear NS bit in SCR */
	write_scr r0
	isb
#endif
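	/*
	 * CFG_SM_NO_CYCLE_COUNTING: setting PMCR.DP disables the cycle
	 * counter in regions where event counting is prohibited (such as
	 * secure state), so normal world can't use PMCCNTR to measure
	 * secure world execution.
	 */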
#ifdef CFG_SM_NO_CYCLE_COUNTING
	read_pmcr r0
	orr	r0, #PMCR_DP
	write_pmcr r0
#endif
	msr	cpsr, r1

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	/*
	 * For unrecognized CPUs we fall back to the vector used for
	 * unaffected CPUs. Cortex-A15 has special treatment compared to
	 * the other affected Cortex CPUs.
	 */
	read_midr r1
	ubfx	r2, r1, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
	cmp	r2, #MIDR_IMPLEMENTER_ARM
	bne	1f

	ubfx	r2, r1, #MIDR_PRIMARY_PART_NUM_SHIFT, \
			#MIDR_PRIMARY_PART_NUM_WIDTH

	movw	r3, #CORTEX_A8_PART_NUM
	cmp	r2, r3
	movwne	r3, #CORTEX_A9_PART_NUM
	cmpne	r2, r3
	movwne	r3, #CORTEX_A17_PART_NUM
	cmpne	r2, r3
	ldreq	r0, =sm_vect_table_bpiall
	beq	2f

	movw	r3, #CORTEX_A15_PART_NUM
	cmp	r2, r3
	ldreq	r0, =sm_vect_table_a15
	beq	2f
#endif
	/* Set monitor vector (MVBAR) */
1:	ldr	r0, =sm_vect_table
2:	write_mvbar r0

	bx	lr
END_FUNC sm_init
KEEP_PAGER sm_init
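
/*
 * sm_init() is expected to be called once per CPU early during boot,
 * with r0 holding the top of that CPU's stack_tmp; the monitor then
 * owns the topmost SM_CTX_SIZE bytes of that stack as described above.
 */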

/* struct sm_nsec_ctx *sm_get_nsec_ctx(void); */
FUNC sm_get_nsec_ctx , :
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	mov	r0, sp
	msr	cpsr, r1

	/*
	 * As we're in secure mode mon_sp points just beyond sm_ctx.sec,
	 * which is sm_ctx.nsec
	 */
	bx	lr
END_FUNC sm_get_nsec_ctx
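
/*
 * A minimal usage sketch (hypothetical caller; the mon_lr/mon_spsr
 * fields are consumed by the rfefd in sm_smc_entry() when returning to
 * non-secure world):
 *
 *	struct sm_nsec_ctx *nsec_ctx = sm_get_nsec_ctx();
 *
 *	nsec_ctx->mon_lr = nsec_entry_address;
 *	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
 *
 * where nsec_entry_address is wherever normal world should resume.
 */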