/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm-defines.h>
#include <asm.S>
#include <keep.h>
#include <kernel/unwind.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>
#include <util.h>

	.section .text.sm_asm

FUNC sm_save_modes_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be saved from system mode */
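	/*
	 * The words are stored at increasing addresses from r0 in the
	 * order below: usr_sp, usr_lr, then {spsr, sp, lr} for the irq,
	 * fiq, svc, abt and und modes. This is assumed to mirror the mode
	 * register part of struct sm_sec_ctx/sm_nsec_ctx in sm/sm.h.
	 */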
	cps	#CPSR_MODE_SYS
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_IRQ
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_FIQ
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_ABT
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_UND
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_save_modes_regs

/* Restores the mode-specific registers */
FUNC sm_restore_modes_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be restored from system mode */
	cps	#CPSR_MODE_SYS
	ldm	r0!, {sp, lr}

	cps	#CPSR_MODE_IRQ
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_FIQ
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_SVC
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_ABT
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_UND
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_restore_modes_regs

/*
 * stack_tmp is used as stack. The top of the stack is reserved to hold
 * struct sm_ctx, everything below is for normal stack usage. As several
 * different CPU modes are using the same stack it's important that a
 * switch of CPU mode isn't done until one mode is done with the stack.
 * This means FIQ, IRQ and asynchronous aborts have to be masked while
 * using stack_tmp.
 */
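/*
 * Rough sketch of the layout the offsets below assume (the authoritative
 * definition is struct sm_ctx in sm/sm.h together with the generated
 * asm-defines constants; field names here are approximate):
 *
 *	struct sm_ctx {
 *		sec:  mode registers (see sm_save_modes_regs()),
 *		      r0-r7, mon_lr, mon_spsr
 *		nsec: mode registers, r8-r12, r0-r7, mon_lr, mon_spsr
 *	};
 *
 * While the normal world is executing, the monitor stack pointer sits
 * just past the r0-r7/mon_lr/mon_spsr block of sm_ctx.nsec, so the
 * srsdb + push at the top of sm_smc_entry() land in exactly those
 * fields. Entries from the secure world work the same way against
 * sm_ctx.sec.
 */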
LOCAL_FUNC sm_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/* Find out if we're doing a secure or non-secure entry */
	read_scr r1
	tst	r1, #SCR_NS
	bne	.smc_from_nsec

	/*
	 * As we're coming from secure world (NS bit cleared) the stack
	 * pointer points to sm_ctx.sec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

	/* Save secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_save_modes_regs

	/*
	 * On FIQ exit we're restoring the non-secure context unchanged; on
	 * all other exits we're shifting r1-r4 from the secure context into
	 * r0-r3 of the non-secure context.
	 */
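	/*
	 * For example (assumed convention, see sm/teesmc_opteed.h and
	 * sm/optee_smc.h): on a TEESMC_OPTEED_RETURN_CALL_DONE exit the
	 * secure world passes the OPTEE_SMC result words in r1-r4, and the
	 * code below makes them appear as r0-r3 to the normal world SMC
	 * caller.
	 */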
	add	r8, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
	ldm	r8, {r0-r4}
	mov_imm	r9, TEESMC_OPTEED_RETURN_FIQ_DONE
	cmp	r0, r9
	addne	r8, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	stmne	r8, {r1-r4}

	/* Restore non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_restore_modes_regs

.sm_ret_to_nsec:
	/*
	 * Return to non-secure world
	 */
	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	ldm	r0, {r8-r12}

	/* Update SCR */
	read_scr r0
	orr	r0, r0, #(SCR_NS | SCR_FIQ) /* Set NS and FIQ bit in SCR */
	write_scr r0
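	/*
	 * With SCR.NS set the CPU uses the non-secure copies of the banked
	 * CP15 registers once we leave monitor mode. SCR.FIQ set routes
	 * FIQs taken in the normal world to monitor mode, that is, into
	 * sm_vect_table/sm_fiq_entry below.
	 */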

	add	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	b	.sm_exit

.smc_from_nsec:
	/*
	 * As we're coming from non-secure world (NS bit set) the stack
	 * pointer points to sm_ctx.nsec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
	write_scr r1

	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	stm	r0, {r8-r12}

	mov	r0, sp
	bl	sm_from_nsec
	cmp	r0, #0
	beq	.sm_ret_to_nsec
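	/*
	 * A zero return from sm_from_nsec() means the monitor handled the
	 * call itself (for instance a platform specific SMC, assuming such
	 * a handler is configured) and we return straight to the normal
	 * world; a non-zero return means we continue into the secure world.
	 */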

	/*
	 * Continue into secure world
	 */
	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

.sm_exit:
	pop	{r0-r7}
	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_smc_entry

/*
 * FIQ handling
 *
 * Saves CPU context in the same way as sm_smc_entry() above. The CPU
 * context will later be restored by sm_smc_entry() when handling a return
 * from FIQ.
 */
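/*
 * The monitor doesn't service the FIQ itself. It saves the non-secure
 * context and then performs an exception return into the secure world at
 * the FIQ entry of thread_vector_table (see the store to mon_lr below).
 * Once the secure world has handled the interrupt it issues an SMC with
 * TEESMC_OPTEED_RETURN_FIQ_DONE, which sm_smc_entry() above recognizes
 * and restores the saved non-secure context unchanged.
 */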
LOCAL_FUNC sm_fiq_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* FIQ has a +4 offset for lr compared to preferred return address */
	sub	lr, lr, #4
	/* sp points just past struct sm_sec_ctx */
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/*
	 * As we're coming from non-secure world the stack pointer points
	 * to sm_ctx.nsec.r0 at this stage. After the instruction below the
	 * stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	/* Update SCR */
	read_scr r1
	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
	write_scr r1

	/* Save non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_save_modes_regs
	stm	r0!, {r8-r12}

	/* Set FIQ entry */
	ldr	r0, =(thread_vector_table + THREAD_VECTOR_TABLE_FIQ_ENTRY)
	str	r0, [sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)]

	/* Restore secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_restore_modes_regs

	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)

	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_fiq_entry

	.section .text.sm_vect_table
	.align	5
LOCAL_FUNC sm_vect_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP
	/*
	 * This depends on SP being 8 byte aligned, that is, the lowest
	 * three bits in SP are zero.
	 *
	 * The idea is to form a specific bit pattern in the lowest three
	 * bits of SP depending on which entry in the vector we enter via.
	 * This is done by adding 1 to SP in each entry but the last.
	 */
	add	sp, sp, #1	/* 7:111 Reset			*/
	add	sp, sp, #1	/* 6:110 Undefined instruction	*/
	add	sp, sp, #1	/* 5:101 Secure monitor call	*/
	add	sp, sp, #1	/* 4:100 Prefetch abort		*/
	add	sp, sp, #1	/* 3:011 Data abort		*/
	add	sp, sp, #1	/* 2:010 Reserved		*/
	add	sp, sp, #1	/* 1:001 IRQ			*/
	nop			/* 0:000 FIQ			*/

	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

	/*
	 * Only two exceptions normally occur, smc and fiq. For all other
	 * exceptions it's good enough to just spin; the lowest bits still
	 * tell which exception we're stuck in when attaching a debugger.
	 */
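	/*
	 * For example, an SMC enters the table at offset 0x8 and falls
	 * through the five remaining adds, leaving 0b101 in the low bits
	 * of SP, while an FIQ enters at offset 0x1c and only executes the
	 * nop, leaving 0b000. The tests below decode the pattern, and the
	 * eor clears it again on the SMC path.
	 */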

	/* Test for FIQ, all the lowest bits of SP are supposed to be 0 */
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_fiq_entry

	/* Test for SMC, the eor turns the SMC pattern in SP back into 0 */
	eor	sp, sp, #(BIT(0) | BIT(2))
	tst	sp, #(BIT(0) | BIT(1) | BIT(2))
	beq	sm_smc_entry

	/* unhandled exception */
	b	.
#else /*!CFG_CORE_WORKAROUND_SPECTRE_BP*/
	b	.		/* Reset			*/
	b	.		/* Undefined instruction	*/
	b	sm_smc_entry	/* Secure monitor call		*/
	b	.		/* Prefetch abort		*/
	b	.		/* Data abort			*/
	b	.		/* Reserved			*/
	b	.		/* IRQ				*/
	b	sm_fiq_entry	/* FIQ				*/
#endif /*!CFG_CORE_WORKAROUND_SPECTRE_BP*/
UNWIND(	.fnend)
END_FUNC sm_vect_table

/* void sm_init(vaddr_t stack_pointer); */
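/*
 * Must run on each CPU before the first SMC or FIQ from the normal world
 * can be handled: it sets up the monitor stack holding struct sm_ctx and
 * installs sm_vect_table in MVBAR. It is assumed to be called from the
 * secure boot path with the top of this CPU's temporary stack in r0.
 */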
FUNC sm_init , :
UNWIND(	.fnstart)
	/* Set monitor stack */
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	/* Point just beyond sm_ctx.sec */
	sub	sp, r0, #(SM_CTX_SIZE - SM_CTX_NSEC)
	msr	cpsr, r1

	/* Set monitor vector (MVBAR) */
	ldr	r0, =sm_vect_table
	write_mvbar r0

	bx	lr
END_FUNC sm_init
KEEP_PAGER sm_init


/* struct sm_nsec_ctx *sm_get_nsec_ctx(void); */
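/*
 * Only meaningful while the CPU is in the secure state, since that is
 * when the monitor stack pointer equals &sm_ctx.nsec (see below). The
 * returned context is assumed to be used by boot/PSCI code to set up the
 * normal world registers before the next return to the normal world.
 */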
FUNC sm_get_nsec_ctx , :
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	mov	r0, sp
	msr	cpsr, r1

	/*
	 * As we're in the secure state mon_sp points just beyond
	 * sm_ctx.sec, which is sm_ctx.nsec.
	 */
	bx	lr
END_FUNC sm_get_nsec_ctx
