/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/unwind.h>
#include <sm/optee_smc.h>
#include <sm/sm.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>
#include <util.h>

#define SM_CTX_SEC_END	(SM_CTX_SEC + SM_CTX_SEC_SIZE)

	.section .text.sm_asm

	.macro save_regs mode
	cps	\mode
	mrs	r2, spsr
	str	r2, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4
	.endm

FUNC sm_save_unbanked_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be saved from system mode */
	cps	#CPSR_MODE_SYS
	str	sp, [r0], #4
	str	lr, [r0], #4

	save_regs	#CPSR_MODE_IRQ
	save_regs	#CPSR_MODE_FIQ
	save_regs	#CPSR_MODE_SVC
	save_regs	#CPSR_MODE_ABT
	save_regs	#CPSR_MODE_UND

#ifdef CFG_SM_NO_CYCLE_COUNTING
	read_pmcr r2
	stm	r0!, {r2}
#endif
	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_save_unbanked_regs

	.macro restore_regs mode
	cps	\mode
	ldr	r2, [r0], #4
	ldr	sp, [r0], #4
	ldr	lr, [r0], #4
	msr	spsr_fsxc, r2
	.endm

/* Restores the mode-specific registers */
FUNC sm_restore_unbanked_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be restored from system mode */
	cps	#CPSR_MODE_SYS
	ldr	sp, [r0], #4
	ldr	lr, [r0], #4

	restore_regs	#CPSR_MODE_IRQ
	restore_regs	#CPSR_MODE_FIQ
	restore_regs	#CPSR_MODE_SVC
	restore_regs	#CPSR_MODE_ABT
	restore_regs	#CPSR_MODE_UND

#ifdef CFG_SM_NO_CYCLE_COUNTING
	ldm	r0!, {r2}
	write_pmcr r2
#endif
	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_restore_unbanked_regs

/*
 * stack_tmp is used as the stack; the top of the stack is reserved to
 * hold struct sm_ctx, everything below is for normal stack usage. As
 * several different CPU modes use the same stack it's important that a
 * switch of CPU mode isn't done until the current mode is done with the
 * stack. This means FIQ, IRQ and asynchronous aborts have to be masked
 * while using stack_tmp.
 */
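/*
 * Rough, illustrative sketch of the context layout assumed by the
 * offsets used below (the authoritative definition is struct sm_ctx in
 * <sm/sm.h>; the SM_CTX_*, SM_SEC_CTX_* and SM_NSEC_CTX_* offsets come
 * from <generated/asm-defines.h>):
 *
 *	struct sm_ctx {
 *		struct sm_sec_ctx sec;		// at offset SM_CTX_SEC
 *		struct sm_nsec_ctx nsec;	// at offset SM_CTX_NSEC
 *	};
 *
 * Each sub-context starts with the unbanked registers handled by
 * sm_save_unbanked_regs()/sm_restore_unbanked_regs() and also holds the
 * r0-r7, mon_lr and mon_spsr values pushed by srsdb/push on entry
 * (SM_SEC_CTX_R0/SM_NSEC_CTX_R0, SM_SEC_CTX_MON_LR). The non-secure
 * context additionally stores r8-r12 at SM_NSEC_CTX_R8.
 */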
LOCAL_FUNC sm_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/* Find out if we're doing a secure or non-secure entry */
	read_scr r1
	tst	r1, #SCR_NS
	bne	.smc_from_nsec

	/*
	 * As we're coming from secure world (NS bit cleared) the stack
	 * pointer points to sm_ctx.sec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

	/* Save secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_save_unbanked_regs

	/*
	 * On FIQ exit we restore the non-secure context unchanged; on all
	 * other exits we shift r1-r4 from the secure context into r0-r3 of
	 * the non-secure context.
	 */
	add	r8, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
	ldm	r8, {r0-r4}
	mov_imm	r9, TEESMC_OPTEED_RETURN_FIQ_DONE
	cmp	r0, r9
	addne	r8, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	stmne	r8, {r1-r4}

	/* Restore non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_restore_unbanked_regs

.sm_ret_to_nsec:
	/*
	 * Return to non-secure world
	 */
	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	ldm	r0, {r8-r12}

	/* Update SCR */
	read_scr r0
	orr	r0, r0, #(SCR_NS | SCR_FIQ) /* Set NS and FIQ bit in SCR */
	write_scr r0
	/*
	 * isb not needed since we're doing an exception return below
	 * without any dependency on the changes to SCR before that.
	 */

	add	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	b	.sm_exit

.smc_from_nsec:
	/*
	 * As we're coming from non-secure world (NS bit set) the stack
	 * pointer points to sm_ctx.nsec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
	write_scr r1
	isb

	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	stm	r0, {r8-r12}

	mov	r0, sp
	bl	sm_from_nsec
	cmp	r0, #SM_EXIT_TO_NON_SECURE
	beq	.sm_ret_to_nsec

	/*
	 * Continue into secure world
	 */
	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

.sm_exit:
	pop	{r0-r7}
	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_smc_entry

/*
 * FIQ handling
 *
 * Saves CPU context in the same way as sm_smc_entry() above. The CPU
 * context will later be restored by sm_smc_entry() when handling a return
 * from FIQ.
 */
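/*
 * Rough flow, as implemented below: the FIQ is taken from the normal
 * world to Monitor mode, the non-secure context is saved, mon_lr is
 * pointed at the FIQ entry of thread_vector_table and the secure
 * unbanked registers are restored before the rfefd enters the secure
 * world. Secure world later returns with an SMC carrying
 * TEESMC_OPTEED_RETURN_FIQ_DONE, which sm_smc_entry() recognizes and
 * then restores the saved non-secure context unchanged.
 */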
LOCAL_FUNC sm_fiq_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* FIQ has a +4 offset for lr compared to the preferred return address */
	sub	lr, lr, #4
	/* sp points just past struct sm_sec_ctx */
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/*
	 * As we're coming from non-secure world the stack pointer points
	 * to sm_ctx.nsec.r0 at this stage. After the instruction below the
	 * stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	/* Update SCR */
	read_scr r1
	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
	write_scr r1
	isb

	/* Save non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_save_unbanked_regs
	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	stm	r0!, {r8-r12}

	/* Set FIQ entry */
	ldr	r0, =(thread_vector_table + THREAD_VECTOR_TABLE_FIQ_ENTRY)
	str	r0, [sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)]

	/* Restore secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_restore_unbanked_regs

	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)

	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_fiq_entry

	.section .text.sm_vect_table
	.align	5
LOCAL_FUNC sm_vect_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.		/* Reset			*/
	b	.		/* Undefined instruction	*/
	b	sm_smc_entry	/* Secure monitor call		*/
	b	.		/* Prefetch abort		*/
	b	.		/* Data abort			*/
	b	.		/* Reserved			*/
	b	.		/* IRQ				*/
	b	sm_fiq_entry	/* FIQ				*/

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8 byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * we enter via.  This is done by adding 1 to SP in each
		 * entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset			*/
		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
		add	sp, sp, #1	/* 3:011 Data abort		*/
		add	sp, sp, #1	/* 2:010 Reserved		*/
		add	sp, sp, #1	/* 1:001 IRQ			*/
		nop			/* 0:000 FIQ			*/
	.endm
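
	/*
	 * Worked example (derived from the pattern above): an SMC enters
	 * the vector at offset 0x8, so the five remaining "add sp, sp, #1"
	 * instructions and the nop execute, leaving 0b101 in the low bits
	 * of SP. That matches the "5:101 Secure monitor call" pattern that
	 * the eor/tst sequence further down tests for.
	 */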

	.align 5
sm_vect_table_a15:
	vector_prologue_spectre
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

	.align 5
sm_vect_table_bpiall:
	vector_prologue_spectre
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:
	/*
	 * Only two exceptions normally occur, SMC and FIQ. For all other
	 * exceptions it's good enough to just spin; the lowest bits of SP
	 * still tell which exception we're stuck in when attaching a
	 * debugger.
	 */

	/* Test for FIQ, the three lowest bits of SP are supposed to be 0 */
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_fiq_entry

	/* Test for SMC, eor clears the lowest bits of SP for an SMC entry */
	eor	sp, sp, #(BIT(0) | BIT(2))
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_smc_entry

	/* unhandled exception */
	b	.
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP*/
UNWIND(	.fnend)
END_FUNC sm_vect_table

/* void sm_init(vaddr_t stack_pointer); */
FUNC sm_init , :
UNWIND(	.fnstart)
	/* Set monitor stack */
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	/* Point just beyond sm_ctx.sec */
	sub	sp, r0, #(SM_CTX_SIZE - SM_CTX_SEC_END)

#ifdef CFG_INIT_CNTVOFF
	read_scr r0
	orr	r0, r0, #SCR_NS /* Set NS bit in SCR */
	write_scr r0
	isb

	/*
	 * Accessing CNTVOFF:
	 * If the implementation includes the Virtualization Extensions
	 * this is an RW register, accessible from Hyp mode, and
	 * from Monitor mode when SCR.NS is set to 1.
	 * If the implementation includes the Security Extensions
	 * but not the Virtualization Extensions, an MCRR or MRRC to
	 * the CNTVOFF encoding is UNPREDICTABLE if executed in Monitor
	 * mode, regardless of the value of SCR.NS.
	 */
	read_id_pfr1 r2
	mov	r3, r2
	ands	r3, r3, #IDPFR1_GENTIMER_MASK
	beq	.no_gentimer
	ands	r2, r2, #IDPFR1_VIRT_MASK
	beq	.no_gentimer
	mov	r2, #0
	write_cntvoff r2, r2

.no_gentimer:
	bic	r0, r0, #SCR_NS /* Clr NS bit in SCR */
	write_scr r0
	isb
#endif
#ifdef CFG_SM_NO_CYCLE_COUNTING
	read_pmcr r0
	orr	r0, #PMCR_DP
	write_pmcr r0
#endif
	msr	cpsr, r1

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	/*
	 * For unrecognized CPUs we fall back to the vector used for
	 * unaffected CPUs. Cortex-A15 has special treatment compared to
	 * the other affected Cortex CPUs.
	 */
	read_midr r1
	ubfx	r2, r1, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
	cmp	r2, #MIDR_IMPLEMENTER_ARM
	bne	1f

	ubfx	r2, r1, #MIDR_PRIMARY_PART_NUM_SHIFT, \
			#MIDR_PRIMARY_PART_NUM_WIDTH

	movw	r3, #CORTEX_A8_PART_NUM
	cmp	r2, r3
	movwne	r3, #CORTEX_A9_PART_NUM
	cmpne	r2, r3
	movwne	r3, #CORTEX_A17_PART_NUM
	cmpne	r2, r3
	ldreq	r0, =sm_vect_table_bpiall
	beq	2f

	movw	r3, #CORTEX_A15_PART_NUM
	cmp	r2, r3
	ldreq	r0, =sm_vect_table_a15
	beq	2f
#endif
	/* Set monitor vector (MVBAR) */
1:	ldr	r0, =sm_vect_table
2:	write_mvbar r0

	bx	lr
END_FUNC sm_init
KEEP_PAGER sm_init


/* struct sm_nsec_ctx *sm_get_nsec_ctx(void); */
FUNC sm_get_nsec_ctx , :
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	/*
	 * As we're in secure mode, mon_sp points just beyond sm_ctx.sec,
	 * which allows us to calculate the address of sm_ctx.nsec.
	 */
	add	r0, sp, #(SM_CTX_NSEC - SM_CTX_SEC_END)
	msr	cpsr, r1

	bx	lr
END_FUNC sm_get_nsec_ctx