xref: /optee_os/core/arch/arm/sm/sm_a32.S (revision ef4bc451c262f007562867ea4e5f4ca9f26459fd)
1/*
2 * Copyright (c) 2016, Linaro Limited
3 * Copyright (c) 2014, STMicroelectronics International N.V.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
11 *
12 * 2. Redistributions in binary form must reproduce the above copyright notice,
13 * this list of conditions and the following disclaimer in the documentation
14 * and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <asm.S>
30#include <arm.h>
31#include <arm32_macros.S>
32#include <kernel/unwind.h>
33#include <sm/optee_smc.h>
34#include <sm/teesmc_opteed.h>
35#include <sm/teesmc_opteed_macros.h>
36#include <asm-defines.h>
37
38	.section .text.sm_asm
39
/*
 * Saves the banked registers of every non-monitor CPU mode to the
 * buffer at r0, in this order: sys {sp, lr}, then {spsr, sp, lr}
 * for each of IRQ, FIQ, SVC, ABT and UND.
 *
 * In:    r0 = pointer to the register save area
 * Out:   r0 = advanced just past the saved registers
 * Clobb: r2
 *
 * Expected to be called from monitor mode (all callers in this file
 * are): the return address lives in the banked lr_mon, which the
 * mode switches below do not touch. Returns in monitor mode.
 */
40FUNC sm_save_modes_regs , :
41UNWIND(	.fnstart)
42UNWIND(	.cantunwind)
43	/* User mode registers have to be saved from system mode */
44	cps	#CPSR_MODE_SYS
45	stm	r0!, {sp, lr}
46
47	cps	#CPSR_MODE_IRQ
48	mrs	r2, spsr
49	stm	r0!, {r2, sp, lr}
50
51	cps	#CPSR_MODE_FIQ
52	mrs	r2, spsr
53	stm	r0!, {r2, sp, lr}
54
55	cps	#CPSR_MODE_SVC
56	mrs	r2, spsr
57	stm	r0!, {r2, sp, lr}
58
59	cps	#CPSR_MODE_ABT
60	mrs	r2, spsr
61	stm	r0!, {r2, sp, lr}
62
63	cps	#CPSR_MODE_UND
64	mrs	r2, spsr
65	stm	r0!, {r2, sp, lr}
66
	/* Back to monitor mode; lr_mon holds the return address */
67	cps	#CPSR_MODE_MON
68	bx	lr
69UNWIND(	.fnend)
70END_FUNC sm_save_modes_regs
71
72/*
 * Restores the mode-specific (banked) registers previously saved by
 * sm_save_modes_regs(), reading the buffer at r0 with the same
 * layout: sys {sp, lr}, then {spsr, sp, lr} for IRQ, FIQ, SVC, ABT
 * and UND.
 *
 * In:    r0 = pointer to the register save area
 * Out:   r0 = advanced just past the restored registers
 * Clobb: r2
 *
 * Expected to be called from monitor mode (all callers in this file
 * are); returns in monitor mode.
 */
73FUNC sm_restore_modes_regs , :
74UNWIND(	.fnstart)
75UNWIND(	.cantunwind)
76	/* User mode registers have to be restored from system mode */
77	cps	#CPSR_MODE_SYS
78	ldm	r0!, {sp, lr}
79
80	cps	#CPSR_MODE_IRQ
81	ldm	r0!, {r2, sp, lr}
82	msr	spsr_fsxc, r2
83
84	cps	#CPSR_MODE_FIQ
85	ldm	r0!, {r2, sp, lr}
86	msr	spsr_fsxc, r2
87
88	cps	#CPSR_MODE_SVC
89	ldm	r0!, {r2, sp, lr}
90	msr	spsr_fsxc, r2
91
92	cps	#CPSR_MODE_ABT
93	ldm	r0!, {r2, sp, lr}
94	msr	spsr_fsxc, r2
95
96	cps	#CPSR_MODE_UND
97	ldm	r0!, {r2, sp, lr}
98	msr	spsr_fsxc, r2
99
	/* Back to monitor mode; lr_mon holds the return address */
100	cps	#CPSR_MODE_MON
101	bx	lr
102UNWIND(	.fnend)
103END_FUNC sm_restore_modes_regs
104
105/*
106 * stack_tmp is used as the stack; the top of the stack is reserved to
107 * hold struct sm_ctx, everything below is for normal stack usage. As
108 * several different CPU modes are using the same stack it's important
109 * that a switch of CPU mode isn't done until one mode is done with it.
110 * This means FIQ, IRQ and asynchronous aborts have to be masked while
 * using stack_tmp.
111 */
/*
 * Secure monitor SMC entry, reached via sm_vect_table.
 *
 * On entry (monitor mode) sp_mon points just past the r0 field of the
 * calling world's half of struct sm_ctx; SCR.NS tells which world
 * issued the SMC.
 *
 * From secure world: saves the secure mode registers, copies r1-r4
 * into the non-secure context's r0-r3 as the SMC result (except for
 * TEESMC_OPTEED_RETURN_FIQ_DONE, where the non-secure context is
 * resumed unchanged), restores the non-secure mode registers and
 * returns to non-secure world with NS and FIQ set in SCR.
 *
 * From non-secure world: clears NS and FIQ in SCR, saves r8-r12 and
 * calls sm_from_nsec(&sm_ctx); a zero result returns straight back
 * to non-secure world, otherwise execution continues into secure
 * world via the rfe at .sm_exit.
 */
112LOCAL_FUNC sm_smc_entry , :
113UNWIND(	.fnstart)
114UNWIND(	.cantunwind)
	/* Save the interrupted pc/cpsr pair and r0-r7 on the monitor stack */
115	srsdb	sp!, #CPSR_MODE_MON
116	push	{r0-r7}
117
118	clrex		/* Clear the exclusive monitor */
119
120	/* Find out if we're doing a secure or non-secure entry */
121	read_scr r1
122	tst	r1, #SCR_NS
123	bne	.smc_from_nsec
124
125	/*
126	 * As we're coming from secure world (NS bit cleared) the stack
127	 * pointer points to sm_ctx.sec.r0 at this stage. After the
128	 * instruction below the stack pointer points to sm_ctx.
129	 */
130	sub	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
131
132	/* Save secure context */
133	add	r0, sp, #SM_CTX_SEC
134	bl	sm_save_modes_regs
135
136	/*
137	 * On FIQ exit we're restoring the non-secure context unchanged, on
138	 * all other exits we're shifting r1-r4 from secure context into
139	 * r0-r3 in non-secure context.
140	 */
141	add	r8, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
142	ldm	r8, {r0-r4}
143	mov_imm	r9, TEESMC_OPTEED_RETURN_FIQ_DONE
144	cmp	r0, r9
145	addne	r8, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
146	stmne	r8, {r1-r4}
147
148	/* Restore non-secure context */
149	add	r0, sp, #SM_CTX_NSEC
150	bl	sm_restore_modes_regs
151
152.sm_ret_to_nsec:
153	/*
154	 * Return to non-secure world: reload r8-r12 saved at
	 * .smc_from_nsec below (they are not covered by
	 * sm_restore_modes_regs())
155	 */
156	add     r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
157	ldm	r0, {r8-r12}
158
159	/* Update SCR */
160	read_scr r0
161	orr	r0, r0, #(SCR_NS | SCR_FIQ) /* Set NS and FIQ bit in SCR */
162	write_scr r0
163
	/* Leave sp pointing at sm_ctx.nsec.r0 for the pop/rfe below */
164	add	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
165	b	.sm_exit
166
167.smc_from_nsec:
168	/*
169	 * As we're coming from non-secure world (NS bit set) the stack
170	 * pointer points to sm_ctx.nsec.r0 at this stage. After the
171	 * instruction below the stack pointer points to sm_ctx.
172	 */
173	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
174
175	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
176	write_scr r1
177
178	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
179	stm	r0, {r8-r12}
180
	/* sm_from_nsec(&sm_ctx): 0 means return to normal world directly */
181	mov	r0, sp
182	bl	sm_from_nsec
183	cmp	r0, #0
184	beq	.sm_ret_to_nsec
185
186	/*
187	 * Continue into secure world
188	 */
189	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
190
191.sm_exit:
	/* Restore r0-r7, then load pc and cpsr stored by srsdb above */
192	pop	{r0-r7}
193	rfefd	sp!
194UNWIND(	.fnend)
195END_FUNC sm_smc_entry
196
197/*
198 * FIQ handling
199 *
200 * Saves CPU context in the same way as sm_smc_entry() above. The CPU
201 * context will later be restored by sm_smc_entry() when handling a return
202 * from FIQ.
203 */
204LOCAL_FUNC sm_fiq_entry , :
205UNWIND(	.fnstart)
206UNWIND(	.cantunwind)
207	/* FIQ has a +4 offset for lr compared to preferred return address */
208	sub	lr, lr, #4
209	/* sp points just past struct sm_sec_ctx */
	/* Save the interrupted pc/cpsr pair and r0-r7 on the monitor stack */
210	srsdb	sp!, #CPSR_MODE_MON
211	push	{r0-r7}
212
213	clrex		/* Clear the exclusive monitor */
214
215	/*
216	 * As we're coming from non-secure world the stack pointer points
217	 * to sm_ctx.nsec.r0 at this stage. After the instruction below the
218	 * stack pointer points to sm_ctx.
219	 */
220	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
221
222	/* Update SCR */
223	read_scr r1
224	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
225	write_scr r1
226
227	/* Save non-secure context */
228	add	r0, sp, #SM_CTX_NSEC
229	bl	sm_save_modes_regs
	/* r0 was advanced by sm_save_modes_regs(); store r8-r12 after it */
230	stm	r0!, {r8-r12}
231
	/* Set FIQ entry of thread_vector_table as secure resume address */
232	/* Set FIQ entry */
233	ldr	r0, =(thread_vector_table + THREAD_VECTOR_TABLE_FIQ_ENTRY)
234	str	r0, [sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)]
235
236	/* Restore secure context */
237	add	r0, sp, #SM_CTX_SEC
238	bl	sm_restore_modes_regs
239
	/* Point sp at sm_ctx.sec.mon_lr so rfe loads pc/cpsr from there */
240	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)
241
242	rfefd	sp!
243UNWIND(	.fnend)
244END_FUNC sm_fiq_entry
245
246        .align	5
/*
 * Monitor mode exception vector table, installed into MVBAR by
 * sm_init(). Only SMC and FIQ are handled; any other exception
 * taken to monitor mode hangs in place (b .).
 */
247LOCAL_FUNC sm_vect_table , :
248UNWIND(	.fnstart)
249UNWIND(	.cantunwind)
250	b	.		/* Reset			*/
251	b	.		/* Undefined instruction	*/
252	b	sm_smc_entry	/* Secure monitor call		*/
253	b	.		/* Prefetch abort		*/
254	b	.		/* Data abort			*/
255	b	.		/* Reserved			*/
256	b	.		/* IRQ				*/
257	b	sm_fiq_entry	/* FIQ				*/
258UNWIND(	.fnend)
259END_FUNC sm_vect_table
260
261/* void sm_init(vaddr_t stack_pointer); */
/*
 * Initializes the secure monitor on this CPU.
 *
 * In:    r0 = top of the stack area; the range below it holds
 *             struct sm_ctx (see comment above sm_smc_entry)
 * Clobb: r0, r1
 *
 * Sets the banked sp_mon (mode is switched to monitor only for that
 * write, then the caller's mode is restored) and installs
 * sm_vect_table as the monitor vector base (MVBAR).
 */
262FUNC sm_init , :
263UNWIND(	.fnstart)
264	/* Set monitor stack (sp_mon is banked, reachable only from monitor mode) */
265	mrs	r1, cpsr
266	cps	#CPSR_MODE_MON
267	/* Point just beyond sm_ctx.sec */
268	sub	sp, r0, #(SM_CTX_SIZE - SM_CTX_NSEC)
269	msr	cpsr, r1
270
271	/* Set monitor vector (MVBAR) */
272	ldr	r0, =sm_vect_table
273	write_mvbar r0
274
275	bx	lr
	/* Balance the .fnstart above; it previously had no matching .fnend,
	 * which breaks assembly when unwind annotations are enabled */
UNWIND(	.fnend)
276END_FUNC sm_init
277
278
279/* struct sm_nsec_ctx *sm_get_nsec_ctx(void); */
/*
 * Returns (in r0) a pointer to the non-secure context within the
 * struct sm_ctx reserved on the monitor stack. Briefly switches to
 * monitor mode to read the banked sp_mon, then restores the caller's
 * mode. Clobbers: r1 (holds the saved cpsr).
 */
280FUNC sm_get_nsec_ctx , :
281	mrs	r1, cpsr
282	cps	#CPSR_MODE_MON
283	mov	r0, sp
284	msr	cpsr, r1
285
286	/*
287	 * As we're in secure mode mon_sp points just beyond sm_ctx.sec,
288	 * which is sm_ctx.nsec
289	 */
290	bx	lr
291END_FUNC sm_get_nsec_ctx
292