xref: /optee_os/core/arch/arm/sm/sm_a32.S (revision b1469ba0bfd0371eb52bd50f5c52eeda7a8f5f1e)
1/*
2 * Copyright (c) 2016, Linaro Limited
3 * Copyright (c) 2014, STMicroelectronics International N.V.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
11 *
12 * 2. Redistributions in binary form must reproduce the above copyright notice,
13 * this list of conditions and the following disclaimer in the documentation
14 * and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <arm32_macros.S>
30#include <arm.h>
31#include <asm-defines.h>
32#include <asm.S>
33#include <keep.h>
34#include <kernel/unwind.h>
35#include <sm/optee_smc.h>
36#include <sm/teesmc_opteed.h>
37#include <sm/teesmc_opteed_macros.h>
38
39	.section .text.sm_asm
40
/*
 * void sm_save_modes_regs(regs)
 *
 * Saves the banked registers of each non-monitor CPU mode into the
 * buffer at r0: {sp, lr} for system/user mode, then {spsr, sp, lr} for
 * IRQ, FIQ, SVC, ABT and UND, in that order.  r0 is advanced past the
 * stored words on return; r2 is clobbered.  Entered from, and returns
 * in, monitor mode.
 * NOTE(review): the store order presumably matches the struct the
 * callers pass (sm_ctx mode-regs area) — confirm against sm.h.
 * NOTE(review): the FIQ-banked r8-r12 are NOT saved here; callers that
 * need them store them separately.
 */
FUNC sm_save_modes_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be saved from system mode */
	cps	#CPSR_MODE_SYS
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_IRQ
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_FIQ
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_ABT
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	cps	#CPSR_MODE_UND
	mrs	r2, spsr
	stm	r0!, {r2, sp, lr}

	/* Return to the mode we are expected to be entered from */
	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_save_modes_regs
72
/*
 * void sm_restore_modes_regs(regs)
 *
 * Restores the mode specific registers from the buffer at r0 — the
 * exact inverse of sm_save_modes_regs(): {sp, lr} for system/user
 * mode, then {spsr, sp, lr} for IRQ, FIQ, SVC, ABT and UND.  Only the
 * f/s/x/c fields of each spsr are written (spsr_fsxc).  r0 is advanced
 * past the loaded words; r2 is clobbered.  Entered from, and returns
 * in, monitor mode.
 */
FUNC sm_restore_modes_regs , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* User mode registers have to be restored from system mode */
	cps	#CPSR_MODE_SYS
	ldm	r0!, {sp, lr}

	cps	#CPSR_MODE_IRQ
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_FIQ
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_SVC
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_ABT
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	cps	#CPSR_MODE_UND
	ldm	r0!, {r2, sp, lr}
	msr	spsr_fsxc, r2

	/* Return to the mode we are expected to be entered from */
	cps	#CPSR_MODE_MON
	bx	lr
UNWIND(	.fnend)
END_FUNC sm_restore_modes_regs
105
/*
 * stack_tmp is used as the stack; the top of the stack is reserved to hold
 * struct sm_ctx, everything below is for normal stack usage. As several
 * different CPU modes are using the same stack it's important that a switch
 * of CPU mode isn't done until one mode is finished. This means FIQ, IRQ and
 * Async abort have to be masked while using stack_tmp.
 */
/*
 * SMC exception entry (reached via sm_vect_table).  The srsdb below
 * pushes the interrupted world's return address (lr_mon) and spsr_mon
 * onto the monitor stack, then r0-r7 are pushed on top; together these
 * fill the r0..mon_spsr area of the current world's half of sm_ctx.
 * Which half depends on SCR.NS, tested below.
 */
LOCAL_FUNC sm_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/* Find out if we're doing a secure or non-secure entry */
	read_scr r1
	tst	r1, #SCR_NS
	bne	.smc_from_nsec

	/*
	 * As we're coming from secure world (NS bit cleared) the stack
	 * pointer points to sm_ctx.sec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

	/* Save secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_save_modes_regs

	/*
	 * On FIQ exit we're restoring the non-secure context unchanged, on
	 * all other exits we're shifting r1-r4 from secure context into
	 * r0-r3 in non-secure context.
	 * (r0 holds the TEESMC_OPTEED_RETURN_* code from secure world.)
	 */
	add	r8, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)
	ldm	r8, {r0-r4}
	mov_imm	r9, TEESMC_OPTEED_RETURN_FIQ_DONE
	cmp	r0, r9
	addne	r8, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	stmne	r8, {r1-r4}

	/* Restore non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_restore_modes_regs

.sm_ret_to_nsec:
	/*
	 * Return to non-secure world
	 */
	/* r8-r12 aren't covered by sm_restore_modes_regs, reload them here */
	add     r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	ldm	r0, {r8-r12}

	/* Update SCR */
	read_scr r0
	orr	r0, r0, #(SCR_NS | SCR_FIQ) /* Set NS and FIQ bit in SCR */
	write_scr r0

	/* Point sp at sm_ctx.nsec.r0 so .sm_exit unstacks the nsec half */
	add	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)
	b	.sm_exit

.smc_from_nsec:
	/*
	 * As we're coming from non-secure world (NS bit set) the stack
	 * pointer points to sm_ctx.nsec.r0 at this stage. After the
	 * instruction below the stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
	write_scr r1

	/* r8-r12 aren't covered by srsdb/push above, store them here */
	add	r0, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R8)
	stm	r0, {r8-r12}

	/*
	 * Hand the whole sm_ctx to the C handler; a zero return means
	 * return straight back to normal world (see beq below).
	 */
	mov	r0, sp
	bl	sm_from_nsec
	cmp	r0, #0
	beq	.sm_ret_to_nsec

	/*
	 * Continue into secure world
	 */
	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_R0)

.sm_exit:
	/* Restore r0-r7 and return via the stacked lr_mon/spsr_mon */
	pop	{r0-r7}
	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_smc_entry
197
198/*
199 * FIQ handling
200 *
201 * Saves CPU context in the same way as sm_smc_entry() above. The CPU
202 * context will later be restored by sm_smc_entry() when handling a return
203 * from FIQ.
204 */
/*
 * FIQ exception entry (reached via sm_vect_table while in non-secure
 * world; secure world runs with FIQ routed differently).  Saves the
 * full non-secure context into sm_ctx.nsec, switches SCR to secure,
 * and enters the secure world at thread_vector_table's FIQ entry by
 * patching sm_ctx.sec.mon_lr before restoring the secure context.
 */
LOCAL_FUNC sm_fiq_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* FIQ has a +4 offset for lr compared to preferred return address */
	sub	lr, lr, #4
	/* sp points just past struct sm_sec_ctx */
	srsdb	sp!, #CPSR_MODE_MON
	push	{r0-r7}

	clrex		/* Clear the exclusive monitor */

	/*
	 * As we're coming from non-secure world the stack pointer points
	 * to sm_ctx.nsec.r0 at this stage. After the instruction below the
	 * stack pointer points to sm_ctx.
	 */
	sub	sp, sp, #(SM_CTX_NSEC + SM_NSEC_CTX_R0)

	/* Update SCR */
	read_scr r1
	bic	r1, r1, #(SCR_NS | SCR_FIQ) /* Clear NS and FIQ bit in SCR */
	write_scr r1

	/* Save non-secure context */
	add	r0, sp, #SM_CTX_NSEC
	bl	sm_save_modes_regs
	/* r0 now points past the mode regs, i.e. at sm_ctx.nsec.r8 */
	stm	r0!, {r8-r12}

	/* Set FIQ entry: secure world will resume at the FIQ vector */
	ldr	r0, =(thread_vector_table + THREAD_VECTOR_TABLE_FIQ_ENTRY)
	str	r0, [sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)]

	/* Restore secure context */
	add	r0, sp, #SM_CTX_SEC
	bl	sm_restore_modes_regs

	/* Point sp at the patched mon_lr/spsr pair for the rfe below */
	add	sp, sp, #(SM_CTX_SEC + SM_SEC_CTX_MON_LR)

	rfefd	sp!
UNWIND(	.fnend)
END_FUNC sm_fiq_entry
246
	.section .text.sm_vect_table
	/* MVBAR requires 32-byte (2^5) alignment of the vector table */
        .align	5
/*
 * Monitor mode exception vector table, installed in MVBAR by sm_init().
 * Only SMC and FIQ are expected in monitor mode; all other entries
 * hang deliberately (branch-to-self) so a stray exception is caught.
 */
LOCAL_FUNC sm_vect_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.		/* Reset			*/
	b	.		/* Undefined instruction	*/
	b	sm_smc_entry	/* Secure monitor call		*/
	b	.		/* Prefetch abort		*/
	b	.		/* Data abort			*/
	b	.		/* Reserved			*/
	b	.		/* IRQ				*/
	b	sm_fiq_entry	/* FIQ				*/
UNWIND(	.fnend)
END_FUNC sm_vect_table
262
/* void sm_init(vaddr_t stack_pointer); */
/*
 * Initializes the secure monitor on this CPU: sets the monitor mode
 * stack pointer (leaving room at the top of the supplied stack for
 * struct sm_ctx, with sp_mon pointing just beyond sm_ctx.sec) and
 * installs sm_vect_table in MVBAR.  Clobbers r0 and r1; preserves the
 * caller's CPU mode by restoring cpsr around the mode switch.
 */
FUNC sm_init , :
UNWIND(	.fnstart)
	/* Set monitor stack */
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	/* Point just beyond sm_ctx.sec */
	sub	sp, r0, #(SM_CTX_SIZE - SM_CTX_NSEC)
	msr	cpsr, r1

	/* Set monitor vector (MVBAR) */
	ldr	r0, =sm_vect_table
	write_mvbar r0

	bx	lr
	/*
	 * Terminate the unwind region opened by .fnstart above; without
	 * this the EHABI unwind info for the next function is corrupted
	 * (or the assembler rejects the file) when unwinding is enabled.
	 */
UNWIND(	.fnend)
END_FUNC sm_init
KEEP_PAGER sm_init
280
281
/* struct sm_nsec_ctx *sm_get_nsec_ctx(void); */
FUNC sm_get_nsec_ctx , :
	/*
	 * Returns (in r0) a pointer to the current CPU's non-secure
	 * context within sm_ctx.  Briefly switches to monitor mode to
	 * read sp_mon, restoring the caller's cpsr (kept in r1, which
	 * is clobbered) afterwards.
	 */
	mrs	r1, cpsr
	cps	#CPSR_MODE_MON
	mov	r0, sp
	msr	cpsr, r1

	/*
	 * As we're in secure mode mon_sp points just beyond sm_ctx.sec
	 * which is sm_ctx.nsec
	 */
	bx	lr
END_FUNC sm_get_nsec_ctx
295