xref: /optee_os/core/arch/arm/sm/sm_a32.S (revision 3f4d68499f9e6e59f14b85fd9b5b8c80a73be252)
1/*
2 * Copyright (c) 2016, Linaro Limited
3 * Copyright (c) 2014, STMicroelectronics International N.V.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
11 *
12 * 2. Redistributions in binary form must reproduce the above copyright notice,
13 * this list of conditions and the following disclaimer in the documentation
14 * and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <asm.S>
30#include <arm.h>
31#include <arm32_macros.S>
32#include <kernel/unwind.h>
33#include <sm/optee_smc.h>
34#include <sm/teesmc_opteed.h>
35#include <sm/teesmc_opteed_macros.h>
36#include <asm-defines.h>
37
38	.section .text.sm_asm
39
/*
 * void sm_save_modes_regs(regs)
 *
 * r0 = pointer to a mode-register save area (layout must match the
 *      struct the callers pass: sm_ctx.sec / sm_ctx.nsec — see sm_smc_entry
 *      and sm_fiq_entry below).
 *
 * Stores the banked registers of every non-monitor mode by switching
 * into each mode in turn: {sp, lr} for SYS (banked set shared with USR),
 * then {spsr, sp, lr} for IRQ, FIQ, SVC, ABT and UND, in that order.
 * r0 is advanced past each stored group (stm r0!).
 * Must be entered in Monitor mode; returns in Monitor mode.
 * Clobbers: r0 (advanced), r2, current mode (restored to MON).
 */
40FUNC sm_save_modes_regs , :
41UNWIND(	.fnstart)
42UNWIND(	.cantunwind)
	/* User mode registers have to be saved via System mode (same bank) */
43	/* User mode registers has to be saved from system mode */
44	cps	#CPSR_MODE_SYS
45	stm	r0!, {sp, lr}

47	cps	#CPSR_MODE_IRQ
48	mrs	r2, spsr
49	stm	r0!, {r2, sp, lr}	/* IRQ: spsr, sp, lr */

51	cps	#CPSR_MODE_FIQ
52	mrs	r2, spsr
53	stm	r0!, {r2, sp, lr}	/* FIQ: spsr, sp, lr */

55	cps	#CPSR_MODE_SVC
56	mrs	r2, spsr
57	stm	r0!, {r2, sp, lr}	/* SVC: spsr, sp, lr */

59	cps	#CPSR_MODE_ABT
60	mrs	r2, spsr
61	stm	r0!, {r2, sp, lr}	/* ABT: spsr, sp, lr */

63	cps	#CPSR_MODE_UND
64	mrs	r2, spsr
65	stm	r0!, {r2, sp, lr}	/* UND: spsr, sp, lr */

	/* Back to Monitor mode before returning to the caller */
67	cps	#CPSR_MODE_MON
68	bx	lr
69UNWIND(	.fnend)
70END_FUNC sm_save_modes_regs
71
72/* Restores the mode specific registers */
/*
 * void sm_restore_modes_regs(regs)
 *
 * Exact inverse of sm_save_modes_regs above: r0 points at a save area
 * with the same layout, and each mode's banked {spsr, sp, lr} (just
 * {sp, lr} for SYS/USR) is reloaded, advancing r0 (ldm r0!).
 * spsr is written with the fsxc mask, i.e. all four fields (flags,
 * status, extension, control) are restored.
 * Must be entered in Monitor mode; returns in Monitor mode.
 * Clobbers: r0 (advanced), r2, current mode (restored to MON).
 */
73FUNC sm_restore_modes_regs , :
74UNWIND(	.fnstart)
75UNWIND(	.cantunwind)
	/* User mode registers have to be restored via System mode (same bank) */
76	/* User mode registers has to be saved from system mode */
77	cps	#CPSR_MODE_SYS
78	ldm	r0!, {sp, lr}

80	cps	#CPSR_MODE_IRQ
81	ldm	r0!, {r2, sp, lr}
82	msr	spsr_fsxc, r2

84	cps	#CPSR_MODE_FIQ
85	ldm	r0!, {r2, sp, lr}
86	msr	spsr_fsxc, r2

88	cps	#CPSR_MODE_SVC
89	ldm	r0!, {r2, sp, lr}
90	msr	spsr_fsxc, r2

92	cps	#CPSR_MODE_ABT
93	ldm	r0!, {r2, sp, lr}
94	msr	spsr_fsxc, r2

96	cps	#CPSR_MODE_UND
97	ldm	r0!, {r2, sp, lr}
98	msr	spsr_fsxc, r2

	/* Back to Monitor mode before returning to the caller */
100	cps	#CPSR_MODE_MON
101	bx	lr
102UNWIND(	.fnend)
103END_FUNC sm_restore_modes_regs
104
105/*
106 * Stack is utilized as:
107 * generic stack;
108 * struct sm_ctx;
109 */
/*
 * Monitor SMC handler, reached via sm_vect_table.
 *
 * Saves the calling world's context into struct sm_ctx on the monitor
 * stack, flips SCR.NS (and FIQ routing) and resumes the other world.
 * On entry (from the vector): monitor sp points just past the
 * sm_ctx.sec (secure entry) or sm_ctx.nsec (non-secure entry) r0 slot;
 * lr/spsr hold the interrupted pc/cpsr.
 */
110LOCAL_FUNC sm_smc_entry , :
111UNWIND(	.fnstart)
112UNWIND(	.cantunwind)
	/* Save return state (lr, spsr) then r0-r7 of the caller on mon stack */
113	srsdb	sp!, #CPSR_MODE_MON
114	push	{r0-r7}

116	clrex		/* Clear the exclusive monitor */

	/* Find out if we're doing a secure or non-secure entry */
118	/* Find out if we're doing an secure or non-secure entry */
119	read_scr r1
120	tst	r1, #SCR_NS
121	bne	.smc_from_nsec

123	/*
124	 * As we're coming from secure world (NS bit cleared) the stack
125	 * pointer points to sm_ctx.sec.r0 at this stage. After the
126	 * instruction below the stack pointer points to sm_ctx.
127	 */
128	sub	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

130	/* Save secure context */
131	add	r0, sp, #SM_CTX_SEC
132	bl	sm_save_modes_regs

134	/*
135	 * On FIQ exit we're restoring the non-secure context unchanged, on
136	 * all other exits we're shifting r1-r4 from secure context into
137	 * r0-r3 in non-secure context.
138	 */
139	add	r8, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
140	ldm	r8, {r0-r4}	/* r0 = return id, r1-r4 = SMC result values */
141	mov_imm	r9, TEESMC_OPTEED_RETURN_FIQ_DONE
142	cmp	r0, r9
143	addne	r8, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
144	stmne	r8, {r1-r4}	/* not FIQ done: r1-r4 become nsec r0-r3 */

146	/* Restore non-secure context */
147	add	r0, sp, #SM_CTX_NSEC
148	bl	sm_restore_modes_regs

150.sm_ret_to_nsec:
151	/*
152	 * Return to non-secure world
153	 */
154	add     r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
155	ldm	r0, {r8-r12}	/* reload nsec r8-r12 (not covered by pop below) */

157	/* Update SCR */
158	read_scr r0
159	orr	r0, r0, #(SCR_NS | SCR_FIQ) /* Set NS and FIQ bit in SCR */
160	write_scr r0

	/* Point sp at nsec r0-r7 + mon_lr/mon_spsr for the common exit */
162	add	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
163	b	.sm_exit

165.smc_from_nsec:
166	/*
167	 * As we're coming from non-secure world (NS bit set) the stack
168	 * pointer points to sm_ctx.nsec.r0 at this stage. After the
169	 * instruction below the stack pointer points to sm_ctx.
170	 */
171	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

173	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
174	write_scr r1

	/* Complete the non-secure context with r8-r12 */
176	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
177	stm	r0, {r8-r12}

	/* Let C code dispatch the SMC; r0 == 0 means "return to nsec" */
179	mov	r0, sp
180	bl	sm_from_nsec
181	cmp	r0, #0
182	beq	.sm_ret_to_nsec

184	/*
185	 * Continue into secure world
186	 */
187	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

189.sm_exit:
	/* Restore r0-r7 then return from exception via saved lr/spsr */
190	pop	{r0-r7}
191	rfefd	sp!
192UNWIND(	.fnend)
193END_FUNC sm_smc_entry
194
195/*
196 * FIQ handling
197 *
198 * Saves CPU context in the same way as sm_smc_entry() above. The CPU
199 * context will later be restored by sm_smc_entry() when handling a return
200 * from FIQ.
201 */
/*
 * Entered from sm_vect_table when a FIQ is taken while SCR.NS is set
 * (SCR.FIQ routes non-secure FIQs to Monitor mode, see .sm_ret_to_nsec).
 * Switches to secure world and enters the threading code at its FIQ
 * vector; the saved non-secure context is resumed later when secure
 * world issues TEESMC_OPTEED_RETURN_FIQ_DONE through sm_smc_entry().
 */
202LOCAL_FUNC sm_fiq_entry , :
203UNWIND(	.fnstart)
204UNWIND(	.cantunwind)
	/* FIQ has a +4 offset for lr compared to preferred return address */
205	/* FIQ has a +4 offset for lr compared to preferred return address */
206	sub	lr, lr, #4
207	/* sp points just past struct sm_sec_ctx */
208	srsdb	sp!, #CPSR_MODE_MON
209	push	{r0-r7}

211	clrex		/* Clear the exclusive monitor */

213	/*
214	 * As we're coming from non-secure world the stack pointer points
215	 * to sm_ctx.nsec.r0 at this stage. After the instruction below the
216	 * stack pointer points to sm_ctx.
217	 */
218	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

220	/* Update SCR */
221	read_scr r1
222	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
223	write_scr r1

225	/* Save non-secure context */
226	add	r0, sp, #SM_CTX_NSEC
227	bl	sm_save_modes_regs
	/* r0 now points at the r8-r12 slots (mode regs precede them) */
228	stm	r0!, {r8-r12}

	/* Set FIQ entry: secure world resumes at its thread FIQ vector */
230	/* Set FIQ entry */
231	ldr	r0, =(thread_vector_table + THREAD_VECTOR_TABLE_FIQ_ENTRY)
232	str	r0, [sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)]

234	/* Restore secure context */
235	add	r0, sp, #SM_CTX_SEC
236	bl	sm_restore_modes_regs

	/* sp -> saved mon_lr/mon_spsr pair, consumed by rfefd below */
238	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)

240	rfefd	sp!
241UNWIND(	.fnend)
242END_FUNC sm_fiq_entry
243
	/*
	 * Monitor vector table, installed into MVBAR by sm_init() below.
	 * MVBAR requires a 32-byte aligned base, hence .align 5 (2^5).
	 * In Monitor mode only SMC, FIQ (when SCR.FIQ routes it here) and
	 * external aborts can be taken; all other slots spin deliberately.
	 */
244        .align	5
245LOCAL_FUNC sm_vect_table , :
246UNWIND(	.fnstart)
247UNWIND(	.cantunwind)
248	b	.		/* Reset			*/
249	b	.		/* Undefined instruction	*/
250	b	sm_smc_entry	/* Secure monitor call		*/
251	b	.		/* Prefetch abort		*/
252	b	.		/* Data abort			*/
253	b	.		/* Reserved			*/
254	b	.		/* IRQ				*/
255	b	sm_fiq_entry	/* FIQ				*/
256UNWIND(	.fnend)
257END_FUNC sm_vect_table
258
259/* void sm_init(vaddr_t stack_pointer); */
/*
 * Initializes the secure monitor on the calling CPU:
 * r0 = top of the memory reserved for the monitor stack / struct sm_ctx.
 *
 * Sets the Monitor-mode sp so that sm_ctx.sec sits at the top of the
 * area (sp points just beyond it, i.e. at sm_ctx.nsec — matching the
 * entry-time sp expectations of sm_smc_entry/sm_fiq_entry and
 * sm_get_nsec_ctx below), then installs sm_vect_table as the monitor
 * vector base (MVBAR).  Preserves the caller's mode.  Clobbers r0, r1.
 */
260FUNC sm_init , :
261UNWIND(	.fnstart)
262	/* Set monitor stack */
263	mrs	r1, cpsr	/* remember caller's mode/state */
264	cps	#CPSR_MODE_MON
265	/* Point just beyond sm_ctx.sec */
266	sub	sp, r0, #(SM_CTX_SIZE - SM_CTX_NSEC)
267	msr	cpsr, r1	/* back to the caller's mode */

269	/* Set monitor vector (MVBAR) */
270	ldr	r0, =sm_vect_table
271	write_mvbar r0

273	bx	lr
/*
 * Fix: close the unwind region opened by .fnstart above.  Every other
 * UNWIND-annotated function in this file pairs .fnstart with .fnend;
 * an unterminated .fnstart is an assembler error when unwind tables
 * are enabled (CFG_CORE_UNWIND).
 */
UNWIND(	.fnend)
274END_FUNC sm_init
275
276
277/* struct sm_nsec_ctx *sm_get_nsec_ctx(void); */
/*
 * Returns (in r0) a pointer to the non-secure context on the monitor
 * stack.  Briefly switches to Monitor mode to read mon_sp, then
 * restores the caller's mode.  Clobbers r1.
 */
278FUNC sm_get_nsec_ctx , :
279	mrs	r1, cpsr	/* remember caller's mode/state */
280	cps	#CPSR_MODE_MON
281	mov	r0, sp
282	msr	cpsr, r1	/* back to the caller's mode */

284	/*
285	 * As we're in secure mode mon_sp points just beyond sm_ctx.sec
286	 * which is sm_ctx.nsec
287	 */
288	bx	lr
289END_FUNC sm_get_nsec_ctx
290