/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm.S>
#include <arm.h>
#include <arm32_macros.S>
#include <sm/teesmc.h>
#include <sm/teesmc_opteed_macros.h>
#include <sm/teesmc_opteed.h>
#include <kernel/thread_defs.h>

	.section .text.thread_asm

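/*
 * The std/fast SMC entries below push r0-r7 and pass sp in r0, so the C
 * handlers (thread_handle_std_smc()/thread_handle_fast_smc()) see the
 * eight SMC argument registers as eight consecutive 32-bit words.  A
 * minimal sketch of how such an argument block could be declared in C,
 * assuming the field names a0-a7 (the authoritative struct
 * thread_smc_args lives in the thread headers):
 *
 *	#include <stdint.h>
 *
 *	struct smc_arg_block {		// hypothetical illustration only
 *		uint32_t a0;		// r0, SMC function ID on entry
 *		uint32_t a1;		// r1
 *		uint32_t a2;		// r2
 *		uint32_t a3;		// r3
 *		uint32_t a4;		// r4
 *		uint32_t a5;		// r5
 *		uint32_t a6;		// r6
 *		uint32_t a7;		// r7
 *	};
 */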
LOCAL_FUNC vector_std_smc_entry , :
	push	{r0-r7}
	mov	r0, sp
	bl	thread_handle_std_smc
	/*
	 * Normally thread_handle_std_smc() should return via
	 * thread_exit() or thread_rpc(), but if thread_handle_std_smc()
	 * hasn't switched stack (an error was detected) it does a normal
	 * "C" return.
	 */
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_std_smc_entry

LOCAL_FUNC vector_fast_smc_entry , :
	push	{r0-r7}
	mov	r0, sp
	bl	thread_handle_fast_smc
	pop	{r1-r8}
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fast_smc_entry

LOCAL_FUNC vector_fiq_entry , :
	/* Secure Monitor received a FIQ and passed control to us. */
	bl	thread_check_canaries
	ldr	lr, =thread_fiq_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_FIQ_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_fiq_entry

LOCAL_FUNC vector_cpu_on_entry , :
	ldr	lr, =thread_cpu_on_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_ON_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_on_entry

LOCAL_FUNC vector_cpu_off_entry , :
	ldr	lr, =thread_cpu_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_off_entry

LOCAL_FUNC vector_cpu_suspend_entry , :
	ldr	lr, =thread_cpu_suspend_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_suspend_entry

LOCAL_FUNC vector_cpu_resume_entry , :
	ldr	lr, =thread_cpu_resume_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_cpu_resume_entry

LOCAL_FUNC vector_system_off_entry , :
	ldr	lr, =thread_system_off_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_off_entry

LOCAL_FUNC vector_system_reset_entry , :
	ldr	lr, =thread_system_reset_handler_ptr
	ldr	lr, [lr]
	blx	lr
	mov	r1, r0
	ldr	r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
	smc	#0
	b	.	/* SMC should not return */
END_FUNC vector_system_reset_entry

/*
 * Vector table supplied to ARM Trusted Firmware (ARM-TF) at
 * initialization.  Also used when compiled with the internal monitor,
 * but the cpu_*_entry and system_*_entry are not used then.
 *
 * Note that ARM-TF depends on the layout of this vector table; any change
 * in layout has to be synced with ARM-TF. This layout must also be kept
 * in sync with sm_entry_vector in sm.c
 */
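/*
 * Each entry is a single 4-byte branch instruction, so the slots sit at
 * fixed offsets from thread_vector_table:
 *	0x00 std smc	0x04 fast smc	 0x08 cpu on	 0x0c cpu off
 *	0x10 cpu resume	0x14 cpu suspend 0x18 fiq	 0x1c system off
 *	0x20 system reset
 */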
FUNC thread_vector_table , :
	b	vector_std_smc_entry
	b	vector_fast_smc_entry
	b	vector_cpu_on_entry
	b	vector_cpu_off_entry
	b	vector_cpu_resume_entry
	b	vector_cpu_suspend_entry
	b	vector_fiq_entry
	b	vector_system_off_entry
	b	vector_system_reset_entry
END_FUNC thread_vector_table

FUNC thread_set_abt_sp , :
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_abt_sp

FUNC thread_set_irq_sp , :
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_fiq_sp

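/*
 * thread_resume() below and thread_save_state() further down both walk a
 * struct thread_ctx_regs by fixed offsets.  A minimal sketch of the
 * layout those offsets imply, assuming the field names used here (the
 * authoritative definition lives in the thread headers):
 *
 *	#include <stdint.h>
 *
 *	struct ctx_regs_sketch {	// hypothetical illustration only
 *		uint32_t r[13];		// r0-r12
 *		uint32_t usr_sp;	// restored/saved in SYS mode
 *		uint32_t usr_lr;
 *		uint32_t svc_spsr;	// restored/saved in SVC mode
 *		uint32_t svc_sp;
 *		uint32_t svc_lr;
 *		uint32_t pc;		// consumed by rfefd in thread_resume()
 *		uint32_t cpsr;
 *	};
 */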
/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldm	r12!, {sp, lr}

	cps	#CPSR_MODE_SVC
	ldm	r12!, {r1, sp, lr}
	msr	spsr_fsxc, r1

	cps	#CPSR_MODE_SVC
	ldm	r12, {r1, r2}
	push	{r1, r2}

	ldm	r0, {r0-r12}

	/* Restore CPSR and jump to the instruction to resume at */
	rfefd	sp!
END_FUNC thread_resume

/*
 * Disables asynchronous abort, IRQ and FIQ, saves the state of the
 * thread and returns the original CPSR.
 */
LOCAL_FUNC thread_save_state , :
	push	{r12, lr}
	/*
	 * Uses stack for temporary storage, while storing needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

	cps	#CPSR_MODE_SYS
	stm	r0!, {sp, lr}

	cps	#CPSR_MODE_SVC
	mrs	r1, spsr
	stm	r0!, {r1, sp, lr}

	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
END_FUNC thread_save_state

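/*
 * First function executed on the thread stack for a standard SMC.  On
 * completion it frees the thread state, switches back to the temporary
 * stack and issues TEESMC_OPTEED_RETURN_CALL_DONE with the four return
 * words in r1-r4, which the monitor/ARM-TF is expected to hand back to
 * the normal world.
 */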
FUNC thread_std_smc_entry , :
	/* Pass r0-r7 in a struct thread_smc_args */
	push	{r0-r7}
	mov	r0, sp
	ldr	lr, =thread_std_smc_handler_ptr
	ldr	lr, [lr]
	blx	lr
	/*
	 * Load the returned r0-r3 into preserved registers and skip the
	 * "returned" r4-r7 since they will not be returned to normal
	 * world.
	 */
	pop	{r4-r7}
	add	sp, #(4 * 4)

	/* Disable interrupts before switching to temporary stack */
	cpsid	if
	bl	thread_get_tmp_sp
	mov	sp, r0

	bl	thread_state_free

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	mov	r1, r4
	mov	r2, r5
	mov	r3, r6
	mov	r4, r7
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_std_smc_entry

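/*
 * Hedged usage sketch (illustrative only): a caller builds the request in
 * rv[] before calling and reads the normal world's answer from rv[0..2]
 * afterwards, since the code below loads rv[0..2] into r1-r3 for the SMC
 * and stores r0-r2 back into rv[] when the thread is resumed.
 * some_rpc_type and handle() are hypothetical names.
 *
 *	uint32_t rv[THREAD_RPC_NUM_ARGS] = { 0 };
 *
 *	rv[0] = some_rpc_type;	// e.g. a TEESMC_RETURN_RPC_* value
 *	thread_rpc(rv);		// suspends here, resumes once the RPC is done
 *	handle(rv[0], rv[1], rv[2]);	// words passed back from normal world
 */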
/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
	push	{lr}
	push	{r0}

	bl	thread_save_state
	mov	r4, r0			/* Save original CPSR */

	/*
	 * Switch to temporary stack and SVC mode. Save CPSR to resume into.
	 */
	bl	thread_get_tmp_sp
	ldr	r5, [sp]		/* Get pointer to rv[] */
	cps	#CPSR_MODE_SVC		/* Change to SVC mode */
	mov	sp, r0			/* Switch to tmp stack */

	mov	r0, #THREAD_FLAGS_COPY_ARGS_ON_RETURN
	mov	r1, r4			/* CPSR to restore */
	ldr	r2, =.thread_rpc_return
	bl	thread_state_suspend
	mov	r4, r0			/* Supply thread index */
	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldm	r5, {r1-r3}		/* Load rv[] into r1-r3 */
	smc	#0
	b	.	/* SMC should not return */

.thread_rpc_return:
	/*
	 * At this point the stack pointer has been restored to the value
	 * it had when thread_save_state() was called above.
	 *
	 * Execution jumps here from thread_resume() above when the RPC
	 * has returned. The IRQ and FIQ bits are restored to what they
	 * were when this function was originally entered.
	 */
	pop	{r12}			/* Get pointer to rv[] */
	stm	r12, {r0-r2}		/* Store r0-r2 into rv[] */
	pop	{pc}
END_FUNC thread_rpc

LOCAL_FUNC thread_fiq_handler , :
	/* FIQ has a +4 offset for lr compared to preferred return address */
	sub	lr, lr, #4
	push	{r0-r12, lr}
	bl	thread_check_canaries
	ldr	lr, =thread_fiq_handler_ptr
	ldr	lr, [lr]
	blx	lr
	pop	{r0-r12, lr}
	movs	pc, lr
END_FUNC thread_fiq_handler

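/*
 * A foreign interrupt (IRQ) hit while executing in the secure world: the
 * thread is suspended with THREAD_FLAGS_EXIT_ON_IRQ and the normal world
 * is entered with an IRQ RPC.  The thread index placed in r4 is expected
 * to let the normal world resume the suspended thread once the IRQ has
 * been handled.
 */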
LOCAL_FUNC thread_irq_handler , :
	/*
	 * IRQ mode is set up to use the tmp stack so FIQ has to be
	 * disabled before touching the stack. We can also assign
	 * SVC sp from IRQ sp to get SVC mode into the state we
	 * need when doing the SMC below.
	 */
	cpsid	f			/* Disable FIQ also */
	sub	lr, lr, #4
	push	{lr}
	push	{r12}

	bl	thread_save_state

	mov	r0, #THREAD_FLAGS_EXIT_ON_IRQ
	mrs	r1, spsr
	pop	{r12}
	pop	{r2}
	blx	thread_state_suspend
	mov	r4, r0		/* Supply thread index */

	/*
	 * Switch to SVC mode and copy the current stack pointer since it
	 * already is the tmp stack.
	 */
	mov	r0, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r0

	ldr	r0, =TEESMC_OPTEED_RETURN_CALL_DONE
	ldr	r1, =TEESMC_RETURN_RPC_IRQ
	mov	r2, #0
	mov	r3, #0
	/* r4 is already filled in above */
	smc	#0
	b	.	/* SMC should not return */
END_FUNC thread_irq_handler

FUNC thread_init_vbar , :
	/* Set vector (VBAR) */
	ldr	r0, =thread_vect_table
	write_vbar r0
	bx	lr
END_FUNC thread_init_vbar

/*
 * Below are low level routines handling entry and return from user mode.
 *
 * thread_enter_user_mode() saves all registers that user mode can change
 * so kernel mode can restore needed registers when resuming execution
 * after the call to thread_enter_user_mode() has returned.
 * thread_enter_user_mode() doesn't return directly since it enters user
 * mode instead; it's thread_unwind_user_mode() that does the
 * returning by restoring the registers saved by thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to the
 * caller: the user TA calls utee_return, the user TA calls utee_panic,
 * or an abort occurs.
 *
 * Calls to utee_return or utee_panic are handled as:
 * thread_svc_handler() -> tee_svc_handler() -> tee_svc_do_call() which
 * calls tee_svc_sys_return() or tee_svc_sys_panic().
 *
 * These function calls return normally except thread_svc_handler(),
 * which is an exception handling routine, so it reads the return address
 * and SPSR to restore from the stack. tee_svc_sys_return() and
 * tee_svc_sys_panic() change the return address and SPSR used by
 * thread_svc_handler() so that, instead of returning into user mode as
 * with other syscalls, it returns into thread_unwind_user_mode() in
 * kernel mode. When thread_svc_handler() returns, the stack pointer is
 * at the point where thread_enter_user_mode() left it, so this is where
 * thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way but by thread_abort_handler()
 * instead: when the pager sees that it's an abort from user mode that
 * can't be handled, it updates the SPSR and return address used by
 * thread_abort_handler() to return into thread_unwind_user_mode()
 * instead.
 */

/*
 * TEE_Result thread_enter_user_mode(
 *              uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
 *              tee_uaddr_t sp, tee_uaddr_t user_func,
 *              uint32_t *panicked, uint32_t *panic_code);
 * See description in thread.h
 */
FUNC thread_enter_user_mode , :
	/*
	 * Save all registers to allow tee_svc_sys_return() to
	 * resume execution as if this function had returned.
	 * This is also used in tee_svc_sys_panic().
	 *
	 * If the stack usage of this function is changed,
	 * thread_unwind_user_mode() has to be updated.
	 */
	push    {r4-r12,lr}

	ldr     r4, [sp, #(10 * 0x4)]   /* user stack pointer */
	ldr     r5, [sp, #(11 * 0x4)]   /* user function */

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r6, sp
	mov	sp, r4
	cps	#CPSR_MODE_SVC
	push	{r6}

	/*
	 * Set the saved Processor Status Register to user mode to allow
	 * entry of user mode through the movs below. Also update the
	 * Thumb state since movs doesn't do that automatically.
	 */
	mrs     r6, cpsr
	bic     r6, #CPSR_MODE_MASK
	orr	r6, #CPSR_MODE_USR
	tst     r5, #1	/* If it's odd we should switch to thumb mode */
	orrne   r6, #CPSR_T		/* Enable thumb mode */
	biceq   r6, #CPSR_T		/* Disable thumb mode */
	bicne   r6, #CPSR_IT_MASK1	/* Clear IT state for thumb mode */
	bicne   r6, #CPSR_IT_MASK2	/* Clear IT state for thumb mode */
	msr     spsr_cxsf, r6

	/*
	 * Don't allow return from this function; the return is done
	 * through thread_unwind_user_mode() below.
	 */
	mov     lr, #0
	/* Call the user function with its arguments */
	movs    pc, r5
END_FUNC thread_enter_user_mode

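/*
 * While the TA runs in user mode the SVC stack set up above holds, from
 * sp upwards: the saved old user sp, the saved {r4-r12, lr}, and then the
 * four stack arguments of thread_enter_user_mode() (user sp, user
 * function, panicked pointer, panic code pointer).  That is why
 * thread_unwind_user_mode() below finds the panicked and panic code
 * pointers at offsets 13*4 and 14*4 from sp.
 */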
/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *              uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
	ldr     ip, [sp, #(13 * 0x4)]   /* &ctx->panicked */
	str	r1, [ip]
	ldr     ip, [sp, #(14 * 0x4)]   /* &ctx->panic_code */
	str	r2, [ip]

	/* Restore old user sp */
	pop	{r4}
	cps	#CPSR_MODE_SYS
	mov	sp, r4
	cps	#CPSR_MODE_SVC

	pop     {r4-r12,pc}	/* Match the push in thread_enter_user_mode() */
END_FUNC thread_unwind_user_mode

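/*
 * The abort/undef handlers below build a register frame on the abort
 * mode stack and pass a pointer to it in r1 to thread_handle_abort().
 * A minimal sketch of the layout implied by the pushes below, assuming
 * the field names used here (the authoritative struct thread_abort_regs
 * is defined in the thread headers):
 *
 *	#include <stdint.h>
 *
 *	struct abort_frame_sketch {	// hypothetical illustration only
 *		uint32_t usr_sp;	// pushed in .thread_abort_generic
 *		uint32_t usr_lr;
 *		uint32_t pad;		// r3 is pushed only as a placeholder
 *		uint32_t spsr;		// CPSR of the aborted context
 *		uint32_t elr;		// adjusted return address
 *		uint32_t r0_to_r11[12];
 *		uint32_t ip;
 *	};
 */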
LOCAL_FUNC thread_abort_handler , :
thread_abort_handler:
thread_und_handler:
	/*
	 * Switch to abort mode to use that stack instead.
	 */
	cps	#CPSR_MODE_ABT
	push	{r0-r11, ip}
	cps	#CPSR_MODE_UND
	sub	r1, lr, #4
	mrs	r0, spsr
	cps	#CPSR_MODE_ABT
	push	{r0, r1}
	msr	spsr_fsxc, r0	/* In case some code reads spsr directly */
	mov	r0, #THREAD_ABORT_UNDEF
	b	.thread_abort_generic

thread_dabort_handler:
	push	{r0-r11, ip}
	sub	r1, lr, #8
	mrs	r0, spsr
	push	{r0, r1}
	mov	r0, #THREAD_ABORT_DATA
	b	.thread_abort_generic

thread_pabort_handler:
	push	{r0-r11, ip}
	sub	r1, lr, #4
	mrs	r0, spsr
	push	{r0, r1}
	mov	r0, #THREAD_ABORT_PREFETCH
	b	.thread_abort_generic

.thread_abort_generic:
	cps	#CPSR_MODE_SYS
	mov	r1, sp
	mov	r2, lr
	cps	#CPSR_MODE_ABT
	push	{r1-r3}
	mov	r1, sp
	bl	thread_handle_abort
	pop	{r1-r3}
	cps	#CPSR_MODE_SYS
	mov	sp, r1
	mov	lr, r2
	cps	#CPSR_MODE_ABT
	pop	{r0, r1}
	mov	lr, r1
	msr	spsr_fsxc, r0
	pop	{r0-r11, ip}
	movs	pc, lr
END_FUNC thread_abort_handler

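/*
 * thread_svc_handler() below passes sp in r0 to the registered handler,
 * so the handler sees the frame pushed here as consecutive 32-bit words.
 * A minimal sketch of the implied layout, assuming the field names used
 * here (the authoritative struct thread_svc_regs is defined in the
 * thread headers):
 *
 *	#include <stdint.h>
 *
 *	struct svc_frame_sketch {	// hypothetical illustration only
 *		uint32_t spsr;		// caller CPSR, pushed last
 *		uint32_t r0_to_r7[8];	// r0-r7 at the time of the SVC
 *		uint32_t lr;		// return address into user mode
 *	};
 */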
LOCAL_FUNC thread_svc_handler , :
	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
	mov	r0, sp
	ldr	lr, =thread_svc_handler_ptr
	ldr	lr, [lr]
	blx	lr
	pop	{r0}
	msr	spsr_fsxc, r0
	pop	{r0-r7, lr}
	movs	pc, lr
END_FUNC thread_svc_handler

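/*
 * The exception vector table is reached via VBAR, which requires the
 * table to be 32-byte aligned (the low five bits of VBAR are zero),
 * hence the .align 5 below.
 */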
	.align	5
LOCAL_FUNC thread_vect_table , :
	b	.			/* Reset			*/
	b	thread_und_handler	/* Undefined instruction	*/
	b	thread_svc_handler	/* System call			*/
	b	thread_pabort_handler	/* Prefetch abort		*/
	b	thread_dabort_handler	/* Data abort			*/
	b	.			/* Reserved			*/
	b	thread_irq_handler	/* IRQ				*/
	b	thread_fiq_handler	/* FIQ				*/
END_FUNC thread_vect_table