xref: /optee_os/core/arch/arm/kernel/thread_a32.S (revision f17691b3f6b27866f66636a53685bd3a6f7daa8a)
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm.S>
#include <arm.h>
#include <arm32_macros.S>
#include <sm/teesmc.h>
#include <sm/teesmc_opteed_macros.h>
#include <sm/teesmc_opteed.h>
#include <kernel/abort.h>
#include <kernel/thread_defs.h>
#include <kernel/unwind.h>

	.section .text.thread_asm

LOCAL_FUNC vector_std_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r0-r7}
	mov	r0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (an error was detected) it does a normal
	 * "C" return.
	 */
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r0-r7}
	mov	r0, sp
	bl	thread_handle_fast_smc
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	ldr	lr, =thread_fiq_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_on_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_suspend_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_cpu_resume_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_system_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	lr, =thread_system_reset_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.  Also used when compiled with the internal monitor, but
 * the cpu_*_entry and system_*_entry functions are not used then.
 *
 * Note that ARM-TF depends on the layout of this vector table; any change
 * in layout has to be synced with ARM-TF. The layout must also be kept in
 * sync with sm_entry_vector in sm.c.
 */
FUNC thread_vector_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
UNWIND(	.fnend)
END_FUNC thread_vector_table
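
/*
 * For reference, a hedged sketch of how a consumer such as ARM-TF can be
 * expected to use this table: each slot is a single 4-byte branch
 * instruction, so entry N is reached by jumping to (table base + 4 * N).
 * The enum below is illustrative only; the names are assumptions, the
 * order is given by the branches above.
 *
 * enum thread_vector_slot {
 *	VECTOR_STD_SMC = 0,
 *	VECTOR_FAST_SMC,
 *	VECTOR_CPU_ON,
 *	VECTOR_CPU_OFF,
 *	VECTOR_CPU_RESUME,
 *	VECTOR_CPU_SUSPEND,
 *	VECTOR_FIQ,
 *	VECTOR_SYSTEM_OFF,
 *	VECTOR_SYSTEM_RESET,
 * };
 */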

FUNC thread_set_abt_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_abt_sp

FUNC thread_set_irq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_set_fiq_sp

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldm	r12!, {sp, lr}

	cps	#CPSR_MODE_SVC
	ldm	r12!, {r1, sp, lr}
	msr	spsr_fsxc, r1

	cps	#CPSR_MODE_SVC
	ldm	r12, {r1, r2}
	push	{r1, r2}

	ldm	r0, {r0-r12}

	/* Restore CPSR and jump to the instruction to resume at */
	rfefd	sp!
UNWIND(	.fnend)
END_FUNC thread_resume
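
/*
 * A hedged sketch of the struct thread_ctx_regs layout that the offsets
 * in thread_resume and thread_save_state assume, derived from the code:
 * 13 words of r0-r12, then the user and svc banked state, then the
 * return state consumed by rfefd. Field names are illustrative
 * assumptions; the authoritative definition lives in the C headers.
 *
 * struct thread_ctx_regs {
 *	uint32_t r[13];		// r0-r12
 *	uint32_t usr_sp;	// sp in SYS/USR mode
 *	uint32_t usr_lr;	// lr in SYS/USR mode
 *	uint32_t svc_spsr;	// spsr in SVC mode
 *	uint32_t svc_sp;	// sp in SVC mode
 *	uint32_t svc_lr;	// lr in SVC mode
 *	uint32_t pc;		// resume address, popped by rfefd
 *	uint32_t cpsr;		// resume cpsr, popped by rfefd
 * };
 */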

/*
 * Disables asynchronous aborts, IRQ and FIQ, saves the state of the
 * thread and returns the original CPSR.
 */
LOCAL_FUNC thread_save_state , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r12, lr}
	/*
	 * Uses the stack for temporary storage, while storing the needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps	#CPSR_MODE_SYS
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	stm	r0!, {r1, sp, lr}

	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_save_state

FUNC thread_std_smc_entry , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* Pass r0-r7 in a struct thread_smc_args */
	push	{r0-r7}
	mov	r0, sp
	bl	__thread_std_smc_entry
	/*
	 * Load the returned r0-r3 into preserved registers and skip the
	 * "returned" r4-r7 since they will not be returned to normal
	 * world.
	 */
	pop	{r4-r7}
	add	sp, #(4 * 4)

	/* Disable interrupts before switching to temporary stack */
	cpsid	if
	bl	thread_get_tmp_sp
	mov	sp, r0

	bl	thread_state_free

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	r1, r4
	mov	r2, r5
	mov	r3, r6
	mov	r4, r7
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC thread_std_smc_entry
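
/*
 * A hedged sketch of the struct thread_smc_args view of the eight words
 * pushed above: the push {r0-r7} makes sp point at a plain array of the
 * SMC argument registers. Field names are illustrative assumptions.
 *
 * struct thread_smc_args {
 *	uint32_t a0;	// SMC function ID on entry, return value on exit
 *	uint32_t a1;
 *	uint32_t a2;
 *	uint32_t a3;
 *	uint32_t a4;
 *	uint32_t a5;
 *	uint32_t a6;
 *	uint32_t a7;
 * };
 */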


/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
UNWIND(	.fnstart)
	push	{lr}
UNWIND(	.save	{lr})
	push	{r0}
UNWIND(	.save	{r0})

	bl	thread_save_state
	mov	r4, r0			/* Save original CPSR */

	/*
	 * Switch to temporary stack and SVC mode. Save CPSR to resume into.
	 */
	bl	thread_get_tmp_sp
	ldr	r5, [sp]		/* Get pointer to rv[] */
	cps	#CPSR_MODE_SVC		/* Change to SVC mode */
	mov	sp, r0			/* Switch to tmp stack */

	mov	r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	mov	r1, r4			/* CPSR to restore */
	ldr	r2, =.thread_rpc_return
	bl	thread_state_suspend
	mov	r4, r0			/* Supply thread index */
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldm	r5, {r1-r3}		/* Load rv[] into r1-r3 */
	smc	#0
	b	.	/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * it had when thread_save_state() was called above.
	 *
	 * Execution jumps here from thread_resume above when the RPC has
	 * returned. The IRQ and FIQ bits are restored to what they were
	 * when this function was originally entered.
	 */
	pop	{r12}			/* Get pointer to rv[] */
	stm	r12, {r0-r2}		/* Store r0-r2 into rv[] */
	pop	{pc}
UNWIND(	.fnend)
END_FUNC thread_rpc
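
/*
 * A hedged usage sketch of thread_rpc() from C, assuming
 * THREAD_RPC_NUM_ARGS >= 3 as implied by the three words loaded into
 * r1-r3 above and stored back on return. The request code and argument
 * meanings are defined by the protocol headers; the concrete value used
 * here is only a placeholder.
 *
 * uint32_t rv[THREAD_RPC_NUM_ARGS] = { 0 };
 *
 * rv[0] = TEESMC_RETURN_RPC_IRQ;	// request code for normal world
 * thread_rpc(rv);			// suspends thread, resumes here later
 * // On return, rv[0..2] hold the r0-r2 values passed back by the
 * // normal world when it resumed this thread.
 */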

LOCAL_FUNC thread_fiq_handler , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/* FIQ has a +4 offset for lr compared to preferred return address */
	sub	lr, lr, #4
	push	{r0-r12, lr}
	bl	thread_check_canaries
	ldr	lr, =thread_fiq_handler_ptr
	ldr	lr, [lr]
	blx	lr
	pop	{r0-r12, lr}
	movs	pc, lr
UNWIND(	.fnend)
END_FUNC thread_fiq_handler

LOCAL_FUNC thread_irq_handler , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/*
	 * IRQ mode is set up to use the tmp stack, so FIQ has to be
	 * disabled before touching the stack. We can also assign
	 * SVC sp from IRQ sp to get SVC mode into the state we
	 * need when doing the SMC below.
	 */
	cpsid	f			/* Disable FIQ also */
	sub	lr, lr, #4
	push	{lr}
	push	{r12}

	bl	thread_save_state

	mov	r0, #THREAD_FLAGS_EXIT_ON_IRQ
	mrs	r1, spsr
	pop	{r12}
	pop	{r2}
	blx	thread_state_suspend
	mov	r4, r0		/* Supply thread index */

	/*
	 * Switch to SVC mode and copy the current stack pointer, since it
	 * is already the tmp stack.
	 */
	mov	r0, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r0

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	r1, =TEESMC_RETURN_RPC_IRQ
	mov	r2, #0
	mov	r3, #0
	/* r4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC thread_irq_handler

FUNC thread_init_vbar , :
UNWIND(	.fnstart)
	/* Set vector (VBAR) */
	ldr	r0, =thread_vect_table
	write_vbar r0
	bx	lr
UNWIND(	.fnend)
END_FUNC thread_init_vbar

/*
 * Below are low level routines handling entry to and return from user mode.
 *
 * thread_enter_user_mode() saves all the registers that user mode can
 * change, so that kernel mode can restore the needed registers when
 * resuming execution after the call to thread_enter_user_mode() has
 * returned. thread_enter_user_mode() doesn't return directly since it
 * enters user mode instead; it's thread_unwind_user_mode() that does the
 * returning, by restoring the registers saved by thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to its
 * caller: the user TA calls utee_return, the user TA calls utee_panic,
 * or an abort occurs.
 *
 * Calls to utee_return or utee_panic are handled as:
 * thread_svc_handler() -> tee_svc_handler() -> tee_svc_do_call() which
 * calls syscall_return() or syscall_panic().
 *
 * These function calls return normally, except thread_svc_handler()
 * which is an exception handling routine, so it reads the return address
 * and SPSR to restore from the stack. syscall_return() and
 * syscall_panic() change the return address and SPSR used by
 * thread_svc_handler() so that, instead of returning into user mode as
 * with other syscalls, it returns into thread_unwind_user_mode() in
 * kernel mode. When thread_svc_handler() returns, the stack pointer is
 * at the point where thread_enter_user_mode() left it, which is where
 * thread_unwind_user_mode() expects it to be.
 *
 * Aborts are handled in a similar way, but by thread_abort_handler()
 * instead: when the pager sees that it's an abort from user mode that
 * can't be handled, it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode()
 * instead.
 */
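
/*
 * A hedged C sketch of the redirect described above, assuming a
 * hypothetical struct thread_svc_regs with r0, lr and spsr fields
 * matching the frame built by thread_svc_handler below. This is
 * illustrative only, not the actual syscall_return() implementation.
 *
 * void syscall_return(struct thread_svc_regs *regs, uint32_t ret)
 * {
 *	// Make the svc exception return land in kernel mode in
 *	// thread_unwind_user_mode() instead of back in user mode.
 *	regs->r0 = ret;
 *	regs->lr = (uint32_t)thread_unwind_user_mode;
 *	regs->spsr = read_cpsr();	// hypothetical helper: current
 *					// kernel mode CPSR
 * }
 */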

/*
 * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
 *               unsigned long a2, unsigned long a3, unsigned long user_sp,
 *               unsigned long user_func, unsigned long spsr,
 *               uint32_t *exit_status0, uint32_t *exit_status1)
 */
FUNC __thread_enter_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/*
	 * Save all registers to allow syscall_return() to resume execution
	 * as if this function had returned. This is also used in
	 * syscall_panic().
	 *
	 * If the stack usage of this function is changed,
	 * thread_unwind_user_mode() has to be updated.
	 */
	push	{r4-r12, lr}

	ldr	r4, [sp, #(10 * 0x4)]	/* user stack pointer */
	ldr	r5, [sp, #(11 * 0x4)]	/* user function */
	ldr	r6, [sp, #(12 * 0x4)]	/* spsr */

	/*
	 * Set the Saved Program Status Register (SPSR) to user mode to
	 * allow entry of user mode through the movs below.
	 */
	msr	spsr_cxsf, r6

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r6, sp
	mov	sp, r4
	cps	#CPSR_MODE_SVC
	push	{r6}

	/*
	 * Don't allow return from this function; the return is instead
	 * done through thread_unwind_user_mode() below.
	 */
	mov	lr, #0
	/* Call the user function with its arguments */
	movs	pc, r5
UNWIND(	.fnend)
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *              uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	ldr	ip, [sp, #(14 * 0x4)]	/* &ctx->panicked */
	str	r1, [ip]
	ldr	ip, [sp, #(15 * 0x4)]	/* &ctx->panic_code */
	str	r2, [ip]

	/* Restore old user sp */
	pop	{r4}
	cps	#CPSR_MODE_SYS
	mov	sp, r4
	cps	#CPSR_MODE_SVC

	pop	{r4-r12, pc}	/* Match the push in __thread_enter_user_mode() */
UNWIND(	.fnend)
END_FUNC thread_unwind_user_mode
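
/*
 * For reference, the SVC stack frame shared by __thread_enter_user_mode()
 * and thread_unwind_user_mode(), which the #(14 * 0x4) and #(15 * 0x4)
 * offsets above assume (word offsets from sp as thread_unwind_user_mode()
 * sees it):
 *
 *	 0:	saved user sp		(push {r6})
 *	 1-9:	saved r4-r12		(push {r4-r12, lr})
 *	10:	saved lr
 *	11:	user_sp argument
 *	12:	user_func argument
 *	13:	spsr argument
 *	14:	exit_status0 argument	(&ctx->panicked)
 *	15:	exit_status1 argument	(&ctx->panic_code)
 */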

LOCAL_FUNC thread_abort_handler , :
thread_und_handler:
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	/*
	 * Switch to abort mode to use that stack instead.
	 */
	cps	#CPSR_MODE_ABT
	push	{r0-r11, ip}
	cps	#CPSR_MODE_UND
	mrs	r0, spsr
	tst	r0, #CPSR_T
	subne	r1, lr, #2
	subeq	r1, lr, #4
	cps	#CPSR_MODE_ABT
	push	{r0, r1}
	msr	spsr_fsxc, r0	/* In case some code reads spsr directly */
	mov	r0, #ABORT_TYPE_UNDEF
	b	.thread_abort_generic

thread_dabort_handler:
	push	{r0-r11, ip}
	sub	r1, lr, #8
	mrs	r0, spsr
	push	{r0, r1}
	mov	r0, #ABORT_TYPE_DATA
	b	.thread_abort_generic

thread_pabort_handler:
	push	{r0-r11, ip}
	sub	r1, lr, #4
	mrs	r0, spsr
	push	{r0, r1}
	mov	r0, #ABORT_TYPE_PREFETCH
	b	.thread_abort_generic

.thread_abort_generic:
	cps	#CPSR_MODE_SYS
	mov	r1, sp
	mov	r2, lr
	cps	#CPSR_MODE_ABT
	push	{r1-r3}
	mov	r1, sp
	bl	abort_handler
	pop	{r1-r3}
	cps	#CPSR_MODE_SYS
	mov	sp, r1
	mov	lr, r2
	cps	#CPSR_MODE_ABT
	pop	{r0, r1}
	mov	lr, r1
	msr	spsr_fsxc, r0
	pop	{r0-r11, ip}
	movs	pc, lr
UNWIND(	.fnend)
END_FUNC thread_abort_handler

LOCAL_FUNC thread_svc_handler , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
	mov	r0, sp
	bl	tee_svc_handler
	pop	{r0}
	msr	spsr_fsxc, r0
	pop	{r0-r7, lr}
	movs	pc, lr
UNWIND(	.fnend)
END_FUNC thread_svc_handler
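
/*
 * A hedged sketch of the register frame that thread_svc_handler passes
 * to tee_svc_handler() via r0 (sp after the two pushes above). The
 * struct name and field names mirror the push order and are assumptions;
 * the authoritative definition lives in the C headers.
 *
 * struct thread_svc_regs {
 *	uint32_t spsr;	// pushed last, so at the lowest address
 *	uint32_t r0;	// syscall arguments / return values
 *	uint32_t r1;
 *	uint32_t r2;
 *	uint32_t r3;
 *	uint32_t r4;
 *	uint32_t r5;
 *	uint32_t r6;
 *	uint32_t r7;	// syscall number, by the usual svc convention
 *	uint32_t lr;	// return address into user mode
 * };
 */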

	.align	5
LOCAL_FUNC thread_vect_table , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	b	.			/* Reset			*/
	b	thread_und_handler	/* Undefined instruction	*/
	b	thread_svc_handler	/* System call			*/
	b	thread_pabort_handler	/* Prefetch abort		*/
	b	thread_dabort_handler	/* Data abort			*/
	b	.			/* Reserved			*/
	b	thread_irq_handler	/* IRQ				*/
	b	thread_fiq_handler	/* FIQ				*/
UNWIND(	.fnend)
END_FUNC thread_vect_table