/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm.S>
#include <arm.h>
#include <arm32_macros.S>
#include <sm/teesmc.h>
#include <sm/teesmc_opteed_macros.h>
#include <sm/teesmc_opteed.h>
#include <kernel/abort.h>
#include <kernel/thread_defs.h>

	.section .text.thread_asm

LOCAL_FUNC vector_std_smc_entry , :
	push	{r0-r7}
	mov	r0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (an error was detected) it does a normal
	 * "C" return instead.
	 */
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_std_smc_entry
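
/*
 * The push {r0-r7} above hands thread_handle_std_smc() a pointer to the
 * eight SMC registers laid out contiguously on the stack. A minimal C
 * view of that layout (a hedged sketch; the authoritative definition of
 * struct thread_smc_args lives in the C headers and may differ):
 *
 *	struct thread_smc_args {
 *		uint32_t a0;	// SMC function ID on entry
 *		uint32_t a1;	// a1..a7: argument/result registers
 *		// ...
 *		uint32_t a7;
 *	};
 *
 * The handler updates the struct in place; the pop {r1-r8} then moves
 * the updated a0..a7 into r1-r8 for the return SMC, while r0 carries
 * TEESMC_OPTEED_RETURN_CALL_DONE.
 */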

LOCAL_FUNC vector_fast_smc_entry , :
	push	{r0-r7}
	mov	r0, sp
	bl	thread_handle_fast_smc
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	ldr	lr, =thread_fiq_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
	ldr	lr, =thread_cpu_on_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
	ldr	lr, =thread_cpu_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
	ldr	lr, =thread_cpu_suspend_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
	ldr	lr, =thread_cpu_resume_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
	ldr	lr, =thread_system_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
	ldr	lr, =thread_system_reset_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization. Also used when compiled with the internal monitor,
 * but the cpu_*_entry and system_*_entry are not used then.
 *
 * Note that ARM-TF depends on the layout of this vector table; any
 * change in layout has to be synced with ARM-TF. The layout must also
 * be kept in sync with sm_entry_vector in sm.c.
 */
FUNC thread_vector_table , :
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
END_FUNC thread_vector_table
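
/*
 * For reference, the entry indices implied by the branch order above
 * (each slot is one 4-byte branch instruction):
 *
 *	0: std_smc	1: fast_smc	2: cpu_on	3: cpu_off
 *	4: cpu_resume	5: cpu_suspend	6: fiq		7: system_off
 *	8: system_reset
 *
 * ARM-TF indexes into this table at these fixed offsets, which is why
 * reordering entries breaks the contract described above.
 */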

FUNC thread_set_abt_sp , :
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_abt_sp

FUNC thread_set_irq_sp , :
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_fiq_sp
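
/*
 * All three helpers follow the same pattern: save the current CPSR,
 * switch to the target mode to set its banked sp, then restore the
 * original mode. A hedged usage sketch in C (the stack symbol and size
 * are hypothetical; real init code reserves these stacks elsewhere):
 *
 *	static uint32_t abt_stack[256];
 *
 *	// Stacks grow downwards, so pass the end of the buffer.
 *	thread_set_abt_sp((uint32_t)(abt_stack + 256));
 */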

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldm	r12!, {sp, lr}

	cps	#CPSR_MODE_SVC
	ldm	r12!, {r1, sp, lr}
	msr	spsr_fsxc, r1

	cps	#CPSR_MODE_SVC
	ldm	r12, {r1, r2}
	push	{r1, r2}

	ldm	r0, {r0-r12}

	/* Restore CPSR and jump to the instruction to resume at */
	rfefd	sp!
END_FUNC thread_resume
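
/*
 * The offsets used above and in thread_save_state() imply a register
 * frame laid out roughly as below. This is a hedged sketch derived
 * from the assembly; the authoritative struct thread_ctx_regs is
 * defined in the C headers and the field names here are illustrative:
 *
 *	struct thread_ctx_regs {
 *		uint32_t r[13];		// r0-r12, offsets 0..12 * 4
 *		uint32_t usr_sp;	// banked SYS/USR sp
 *		uint32_t usr_lr;	// banked SYS/USR lr
 *		uint32_t svc_spsr;
 *		uint32_t svc_sp;
 *		uint32_t svc_lr;
 *		uint32_t pc;		// consumed by rfefd above
 *		uint32_t cpsr;		// consumed by rfefd above
 *	};
 */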

/*
 * Disables asynchronous aborts, IRQ and FIQ, saves the state of the
 * thread and returns the original CPSR.
 */
LOCAL_FUNC thread_save_state , :
	push	{r12, lr}
	/*
	 * Uses the stack for temporary storage while saving the needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps	#CPSR_MODE_SYS
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	stm	r0!, {r1, sp, lr}

	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
END_FUNC thread_save_state
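
/*
 * Note that thread_save_state() fills only the first 18 words of the
 * frame sketched above (r0-r12, usr_sp/lr and svc_spsr/sp/lr); the
 * resume pc and cpsr slots are supplied by the caller, which passes
 * them to thread_state_suspend() as seen in thread_rpc and
 * thread_irq_handler below.
 */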

FUNC thread_std_smc_entry , :
	/* Pass r0-r7 in a struct thread_smc_args */
	push	{r0-r7}
	mov	r0, sp
	bl	__thread_std_smc_entry
	/*
	 * Load the returned r0-r3 into preserved registers and skip the
	 * "returned" r4-r7 since they will not be returned to normal
	 * world.
	 */
	pop	{r4-r7}
	add	sp, #(4 * 4)

	/* Disable interrupts before switching to temporary stack */
	cpsid	if
	bl	thread_get_tmp_sp
	mov	sp, r0

	bl	thread_state_free

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	r1, r4
	mov	r2, r5
	mov	r3, r6
	mov	r4, r7
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_std_smc_entry
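
/*
 * The register shuffle before the SMC reflects the OPTEED return
 * convention used throughout this file: r0 holds the
 * TEESMC_OPTEED_RETURN_* function ID and the following registers carry
 * the values handed back to the normal world; here the four values
 * returned by __thread_std_smc_entry() travel in r1-r4.
 */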

/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
	push	{lr}
	push	{r0}

	bl	thread_save_state
	mov	r4, r0			/* Save original CPSR */

	/*
	 * Switch to temporary stack and SVC mode. Save CPSR to resume into.
	 */
	bl	thread_get_tmp_sp
	ldr	r5, [sp]		/* Get pointer to rv[] */
	cps	#CPSR_MODE_SVC		/* Change to SVC mode */
	mov	sp, r0			/* Switch to tmp stack */

	mov	r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	mov	r1, r4			/* CPSR to restore */
	ldr	r2, =.thread_rpc_return
	bl	thread_state_suspend
	mov	r4, r0			/* Supply thread index */
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldm	r5, {r1-r3}		/* Load rv[] into r1-r3 */
	smc	#0
	b	.	/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * it had when thread_save_state() was called above.
	 *
	 * Execution jumps here from thread_resume above when the RPC has
	 * returned. The IRQ and FIQ bits are restored to what they were
	 * when this function was originally entered.
	 */
	pop	{r12}			/* Get pointer to rv[] */
	stm	r12, {r0-r2}		/* Store r0-r2 into rv[] */
	pop	{pc}
END_FUNC thread_rpc
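
/*
 * A hedged C-level view of an RPC round trip. THREAD_RPC_NUM_ARGS is
 * assumed to be at least 3, matching the three registers loaded and
 * stored above; the meaning of the rv[] words follows the TEESMC RPC
 * protocol and is not defined here:
 *
 *	uint32_t rv[THREAD_RPC_NUM_ARGS];
 *
 *	rv[0] = ...;	// RPC request for the normal world
 *	rv[1] = ...;
 *	rv[2] = ...;
 *	thread_rpc(rv);	// suspends the thread, returns here on resume
 *	// rv[0..2] now hold the values the normal world passed back
 */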

LOCAL_FUNC thread_fiq_handler , :
	/* FIQ has a +4 offset for lr compared to preferred return address */
	sub	lr, lr, #4
	push	{r0-r12, lr}
	bl	thread_check_canaries
	ldr	lr, =thread_fiq_handler_ptr
	ldr	lr, [lr]
	blx	lr
	pop	{r0-r12, lr}
	movs	pc, lr
END_FUNC thread_fiq_handler

LOCAL_FUNC thread_irq_handler , :
	/*
	 * IRQ mode is set up to use the tmp stack, so FIQ has to be
	 * disabled before touching the stack. We can also assign
	 * SVC sp from IRQ sp to get SVC mode into the state we
	 * need when doing the SMC below.
	 */
	cpsid	f			/* Disable FIQ also */
	sub	lr, lr, #4
	push	{lr}
	push	{r12}

	bl	thread_save_state

	mov	r0, #THREAD_FLAGS_EXIT_ON_IRQ
	mrs	r1, spsr
	pop	{r12}
	pop	{r2}
	blx	thread_state_suspend
	mov	r4, r0		/* Supply thread index */

	/*
	 * Switch to SVC mode and copy the current stack pointer, since
	 * it already is the tmp stack.
	 */
	mov	r0, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r0

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	r1, =TEESMC_RETURN_RPC_IRQ
	mov	r2, #0
	mov	r3, #0
	/* r4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_irq_handler
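
/*
 * In other words: a secure-world thread interrupted by an IRQ is
 * suspended (with the adjusted lr as resume pc and the SPSR as resume
 * CPSR) and control returns to the normal world as a
 * TEESMC_RETURN_RPC_IRQ request, letting the normal world service the
 * interrupt before it resumes the thread.
 */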

FUNC thread_init_vbar , :
	/* Set vector (VBAR) */
	ldr	r0, =thread_vect_table
	write_vbar r0
	bx	lr
END_FUNC thread_init_vbar

/*
 * Below are low level routines handling entry to and return from user
 * mode.
 *
 * thread_enter_user_mode() saves all registers that user mode can
 * change so that kernel mode can restore needed registers when
 * resuming execution after the call to thread_enter_user_mode() has
 * returned. thread_enter_user_mode() doesn't return directly since it
 * enters user mode instead; it's thread_unwind_user_mode() that does
 * the returning by restoring the registers saved by
 * thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to its
 * caller: the user TA calls utee_return, the user TA calls utee_panic,
 * or an abort occurs.
 *
 * Calls to utee_return or utee_panic are handled as:
 * thread_svc_handler() -> tee_svc_handler() -> tee_svc_do_call() which
 * calls syscall_return() or syscall_panic().
 *
 * These functions return normally, except thread_svc_handler() which,
 * being an exception handling routine, reads the return address and
 * SPSR to restore from the stack. syscall_return() and syscall_panic()
 * change the return address and SPSR used by thread_svc_handler() so
 * that, instead of returning into user mode as with other syscalls, it
 * returns into thread_unwind_user_mode() in kernel mode. When
 * thread_svc_handler() returns, the stack pointer is at the point
 * where thread_enter_user_mode() left it, so this is where
 * thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way, but by thread_abort_handler()
 * instead: when the pager sees an abort from user mode that can't be
 * handled, it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode()
 * instead.
 */
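
/*
 * A hedged C-level sketch of the normal-return path described above
 * (the flow follows the comment; the actual signatures live in the C
 * sources):
 *
 *	// kernel mode
 *	ret = __thread_enter_user_mode(a0, a1, a2, a3, user_sp,
 *				       user_func, spsr,
 *				       &exit_status0, &exit_status1);
 *	// ...the user TA runs, eventually issuing utee_return...
 *	// thread_svc_handler() -> tee_svc_handler() -> tee_svc_do_call()
 *	// -> syscall_return() patches the exception return so that
 *	// thread_unwind_user_mode(ret, exit_status0, exit_status1)
 *	// runs and pops the registers pushed on entry.
 */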

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
 *               unsigned long user_func, unsigned long spsr,
 *               uint32_t *exit_status0, uint32_t *exit_status1)
 */
FUNC __thread_enter_user_mode , :
	/*
	 * Save all registers to allow syscall_return() to resume execution
	 * as if this function had returned. This is also used in
	 * syscall_panic().
	 *
	 * If the stack usage of this function is changed
	 * thread_unwind_user_mode() has to be updated.
	 */
	push	{r4-r12, lr}

	ldr	r4, [sp, #(10 * 0x4)]	/* user stack pointer */
	ldr	r5, [sp, #(11 * 0x4)]	/* user function */
	ldr	r6, [sp, #(12 * 0x4)]	/* spsr */

	/*
	 * Set the Saved Program Status Register to user mode to allow
	 * entry of user mode through the movs below.
	 */
	msr	spsr_cxsf, r6

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r6, sp
	mov	sp, r4
	cps	#CPSR_MODE_SVC
	push	{r6}

	/*
	 * Don't allow return from this function, return is done through
	 * thread_unwind_user_mode() below.
	 */
	mov	lr, #0
	/* Call the user function with its arguments */
	movs	pc, r5
END_FUNC __thread_enter_user_mode
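
/*
 * Why the offsets above work: per the AAPCS the first four arguments
 * arrive in r0-r3 and the rest on the stack, so on entry user_sp,
 * user_func and spsr sit at [sp, #0], [sp, #4] and [sp, #8]. The
 * push {r4-r12, lr} lowers sp by ten words, moving them to 10 * 4,
 * 11 * 4 and 12 * 4. After the additional push {r6} the two remaining
 * stack arguments, exit_status0 and exit_status1, end up at 14 * 4 and
 * 15 * 4, which is where thread_unwind_user_mode() reads them below.
 */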

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *              uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	ldr	ip, [sp, #(14 * 0x4)]	/* &ctx->panicked */
	str	r1, [ip]
	ldr	ip, [sp, #(15 * 0x4)]	/* &ctx->panic_code */
	str	r2, [ip]

	/* Restore old user sp */
	pop	{r4}
	cps	#CPSR_MODE_SYS
	mov	sp, r4
	cps	#CPSR_MODE_SVC

	pop	{r4-r12, pc}	/* Match the push in __thread_enter_user_mode() */
END_FUNC thread_unwind_user_mode

LOCAL_FUNC thread_abort_handler , :
thread_und_handler:
	/*
	 * Switch to abort mode to use that stack instead.
	 */
	cps	#CPSR_MODE_ABT
	push	{r0-r11, ip}
	cps	#CPSR_MODE_UND
	mrs	r0, spsr
	tst	r0, #CPSR_T
	subne	r1, lr, #2
	subeq	r1, lr, #4
	cps	#CPSR_MODE_ABT
	push	{r0, r1}
	msr	spsr_fsxc, r0	/* In case some code reads spsr directly */
	mov	r0, #ABORT_TYPE_UNDEF
	b	.thread_abort_generic

thread_dabort_handler:
	push	{r0-r11, ip}
	sub	r1, lr, #8
	mrs	r0, spsr
	push	{r0, r1}
	mov	r0, #ABORT_TYPE_DATA
	b	.thread_abort_generic

thread_pabort_handler:
	push	{r0-r11, ip}
	sub	r1, lr, #4
	mrs	r0, spsr
	push	{r0, r1}
	mov	r0, #ABORT_TYPE_PREFETCH
	b	.thread_abort_generic

.thread_abort_generic:
	cps	#CPSR_MODE_SYS
	mov	r1, sp
	mov	r2, lr
	cps	#CPSR_MODE_ABT
	push	{r1-r3}
	mov	r1, sp
	bl	thread_handle_abort
	pop	{r1-r3}
	cps	#CPSR_MODE_SYS
	mov	sp, r1
	mov	lr, r2
	cps	#CPSR_MODE_ABT
	pop	{r0, r1}
	mov	lr, r1
	msr	spsr_fsxc, r0
	pop	{r0-r11, ip}
	movs	pc, lr
END_FUNC thread_abort_handler
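
/*
 * The lr adjustments in the handlers above follow the ARMv7 exception
 * model, where lr_<mode> is offset from the preferred return address:
 *
 *	undefined instruction:	lr - 2 (Thumb) or lr - 4 (ARM)
 *	prefetch abort:		lr - 4
 *	data abort:		lr - 8
 *	IRQ/FIQ:		lr - 4
 *
 * which is why each handler computes the faulting/return address
 * differently before handing it to thread_handle_abort().
 */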

LOCAL_FUNC thread_svc_handler , :
	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
	mov	r0, sp
	ldr	lr, =thread_svc_handler_ptr
	ldr	lr, [lr]
	blx	lr
	pop	{r0}
	msr	spsr_fsxc, r0
	pop	{r0-r7, lr}
	movs	pc, lr
END_FUNC thread_svc_handler
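
/*
 * The pushes above leave sp pointing at a frame that the registered
 * handler receives as its argument. A hedged sketch of that layout
 * (the real struct is defined in the C headers; names here are
 * illustrative):
 *
 *	struct thread_svc_regs {
 *		uint32_t spsr;
 *		uint32_t r0;	// r0-r7: syscall arguments/results
 *		// ...
 *		uint32_t r7;
 *		uint32_t lr;	// return address into user mode
 *	};
 *
 * syscall_return()/syscall_panic() rewrite spsr and lr in this frame
 * to redirect the final movs pc, lr into thread_unwind_user_mode(), as
 * described in the comment block earlier in this file.
 */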

	.align	5
LOCAL_FUNC thread_vect_table , :
	b	.			/* Reset			*/
	b	thread_und_handler	/* Undefined instruction	*/
	b	thread_svc_handler	/* System call			*/
	b	thread_pabort_handler	/* Prefetch abort		*/
	b	thread_dabort_handler	/* Data abort			*/
	b	.			/* Reserved			*/
	b	thread_irq_handler	/* IRQ				*/
	b	thread_fiq_handler	/* FIQ				*/
END_FUNC thread_vect_table