xref: /rk3399_ARM-atf/bl32/tsp/aarch64/tsp_entrypoint.S (revision 66fb7ee422c2de7a126c57945a245c855ba7d807)
1/*
2 * Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7#include <platform_def.h>
8
9#include <arch.h>
10#include <asm_macros.S>
11#include <bl32/tsp/tsp.h>
12#include <lib/xlat_tables/xlat_tables_defs.h>
13#include <smccc_helpers.h>
14
15#include "../tsp_private.h"
16
17
18	.globl	tsp_entrypoint
19	.globl  tsp_vector_table
20#if SPMC_AT_EL3
21	.globl tsp_cpu_on_entry
22#endif
23
24
25
	/* ---------------------------------------------
	 * Populate the params in x0-x7 from the pointer
	 * to the smc args structure in x0, then issue
	 * the SMC that hands control back to EL3.
	 * x0/x1 must be loaded last since x0 is the
	 * base pointer used by all the other loads.
	 * ---------------------------------------------
	 */
	.macro restore_args_call_smc
	ldp	x6, x7, [x0, #SMC_ARG6]	/* args 6-7 */
	ldp	x4, x5, [x0, #SMC_ARG4]	/* args 4-5 */
	ldp	x2, x3, [x0, #SMC_ARG2]	/* args 2-3 */
	ldp	x0, x1, [x0, #SMC_ARG0]	/* args 0-1; overwrites base ptr */
	smc	#0
	.endm
38
	/* ---------------------------------------------
	 * Save the state needed to later perform an
	 * exception return from S-EL1: ELR_EL1/SPSR_EL1
	 * (in the scratch regs \reg1/\reg2) plus the
	 * link register and x18, pushed as two 16-byte
	 * stack frames. Must be paired with
	 * restore_eret_context, which pops in reverse.
	 * ---------------------------------------------
	 */
	.macro	save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!	/* push ELR_EL1/SPSR_EL1 */
	stp	x30, x18, [sp, #-0x10]!		/* push LR and x18 */
	.endm
45
	/* ---------------------------------------------
	 * Undo save_eret_context: pop LR/x18 and the
	 * saved ELR_EL1/SPSR_EL1 (in reverse push
	 * order) and write them back to the system
	 * registers, ready for an ERET or SMC return.
	 * ---------------------------------------------
	 */
	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10
	ldp	\reg1, \reg2, [sp], #0x10
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm
52
	/* ---------------------------------------------
	 * Cold-boot entry point of the TSP. Entered
	 * with MMU and D-cache off; x0-x3 carry the
	 * arguments handed over by BL1. Initialises
	 * EL1 state, the C runtime and the stack, runs
	 * tsp_main() and then reports TSP_ENTRY_DONE
	 * to the TSPD via SMC. Never returns.
	 * ---------------------------------------------
	 */
func tsp_entrypoint _align=3
	/*---------------------------------------------
	 * Save arguments x0 - x3 from BL1 for future
	 * use. x19-x28 are callee-saved per AAPCS64,
	 * so x20-x23 survive the bl calls made below.
	 * ---------------------------------------------
	 */
	mov	x20, x0
	mov	x21, x1
	mov	x22, x2
	mov	x23, x3

#if ENABLE_PIE
		/*
		 * ------------------------------------------------------------
		 * If PIE is enabled fixup the Global descriptor Table only
		 * once during primary core cold boot path.
		 *
		 * Compile time base address, required for fixup, is calculated
		 * using "pie_fixup" label present within first page.
		 * ------------------------------------------------------------
		 */
	pie_fixup:
		ldr	x0, =pie_fixup
		and	x0, x0, #~(PAGE_SIZE_MASK)	/* x0 = page-aligned runtime base */
		mov_imm	x1, (BL32_LIMIT - BL32_BASE)	/* x1 = image size */
		add	x1, x1, x0			/* x1 = runtime limit */
		bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb					/* ensure vectors are in effect */

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been setup.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks and disable
	 * speculative loads.
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
#if ENABLE_BTI
	/* Enable PAC branch type compatibility */
	bic     x0, x0, #(SCTLR_BT0_BIT | SCTLR_BT1_BIT)
#endif
	bic	x0, x0, #SCTLR_DSSBS_BIT	/* disable speculative loads */
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Invalidate the RW memory used by the BL32
	 * image. This includes the data and NOBITS
	 * sections. This is done to safeguard against
	 * possible corruption of this memory by dirty
	 * cache lines in a system cache as a result of
	 * use by an earlier boot loader stage. If PIE
	 * is enabled however, RO sections including the
	 * GOT may be modified during pie fixup.
	 * Therefore, to be on the safe side, invalidate
	 * the entire image region if PIE is enabled.
	 * ---------------------------------------------
	 */
#if ENABLE_PIE
#if SEPARATE_CODE_AND_RODATA
	adrp	x0, __TEXT_START__
	add	x0, x0, :lo12:__TEXT_START__
#else
	adrp	x0, __RO_START__
	add	x0, x0, :lo12:__RO_START__
#endif /* SEPARATE_CODE_AND_RODATA */
#else
	adrp	x0, __RW_START__
	add	x0, x0, :lo12:__RW_START__
#endif /* ENABLE_PIE */
	adrp	x1, __RW_END__
	add     x1, x1, :lo12:__RW_END__
	sub	x1, x1, x0			/* x0 = base, x1 = length */
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 *   - the .bss section;
	 *   - the coherent memory section.
	 * ---------------------------------------------
	 */
	adrp	x0, __BSS_START__
	add	x0, x0, :lo12:__BSS_START__
	adrp	x1, __BSS_END__
	add	x1, x1, :lo12:__BSS_END__
	sub	x1, x1, x0			/* x0 = base, x1 = length */
	bl	zeromem

#if USE_COHERENT_MEM
	adrp	x0, __COHERENT_RAM_START__
	add	x0, x0, :lo12:__COHERENT_RAM_START__
	adrp	x1, __COHERENT_RAM_END_UNALIGNED__
	add	x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
	sub	x1, x1, x0
	bl	zeromem
#endif

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* ---------------------------------------------
	 * Initialize the stack protector canary before
	 * any C code is called.
	 * ---------------------------------------------
	 */
#if STACK_PROTECTOR_ENABLED
	bl	update_stack_protector_canary
#endif

	/*---------------------------------------------
	 * Restore arguments x0 - x3 from the prior
	 * stage (saved in x20-x23 above) before
	 * calling into C.
	 * ---------------------------------------------
	 */
	mov	x0, x20
	mov	x1, x21
	mov	x2, x22
	mov	x3, x23

	/* ---------------------------------------------
	 * Perform TSP setup
	 * ---------------------------------------------
	 */
	bl	tsp_setup

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1
	 * and enable pointer authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising.
	 * x1 carries tsp_main's return value back to
	 * the TSPD.
	 * ---------------------------------------------
	 */
	mov	x1, x0
	mov	x0, #TSP_ENTRY_DONE
	smc	#0

	/* The SMC above should not return; spin if it does */
tsp_entrypoint_panic:
	b	tsp_entrypoint_panic
endfunc tsp_entrypoint
227
228
	/* -------------------------------------------
	 * Table of entrypoint vectors provided to the
	 * TSPD for the various entrypoints.
	 * NOTE: each entry is a single branch; the
	 * entry order defines the fixed offsets the
	 * TSPD branches to, so entries must not be
	 * reordered (must match the TSPD's view of
	 * this table).
	 * -------------------------------------------
	 */
vector_base tsp_vector_table
	b	tsp_yield_smc_entry
	b	tsp_fast_smc_entry
	b	tsp_cpu_on_entry
	b	tsp_cpu_off_entry
	b	tsp_cpu_resume_entry
	b	tsp_cpu_suspend_entry
	b	tsp_sel1_intr_entry
	b	tsp_system_off_entry
	b	tsp_system_reset_entry
	b	tsp_abort_yield_smc_entry
245
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be turned off through a CPU_OFF
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD expects the TSP to
	 * re-initialise its state so nothing is done
	 * here except for acknowledging the request.
	 * Returns to the TSPD via SMC with the args
	 * structure returned by the C handler in x0.
	 * ---------------------------------------------
	 */
func tsp_cpu_off_entry
	bl	tsp_cpu_off_main	/* x0 = smc args to return */
	restore_args_call_smc
endfunc tsp_cpu_off_entry
260
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be switched off (through
	 * a SYSTEM_OFF psci call) to ask the TSP to
	 * perform any necessary bookkeeping. Returns
	 * to the TSPD via SMC with the args structure
	 * returned by the C handler in x0.
	 * ---------------------------------------------
	 */
func tsp_system_off_entry
	bl	tsp_system_off_main	/* x0 = smc args to return */
	restore_args_call_smc
endfunc tsp_system_off_entry
272
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be reset (through a
	 * SYSTEM_RESET psci call) to ask the TSP to
	 * perform any necessary bookkeeping. Returns
	 * to the TSPD via SMC with the args structure
	 * returned by the C handler in x0.
	 * ---------------------------------------------
	 */
func tsp_system_reset_entry
	bl	tsp_system_reset_main	/* x0 = smc args to return */
	restore_args_call_smc
endfunc tsp_system_reset_entry
284
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is turned on using a CPU_ON psci call to
	 * ask the TSP to initialise itself i.e. setup
	 * the mmu, stacks etc. Minimal architectural
	 * state will be initialised by the TSPD when
	 * this function is entered i.e. Caches and MMU
	 * will be turned off, the execution state
	 * will be aarch64 and exceptions masked.
	 * This is a subset of the cold-boot init done
	 * in tsp_entrypoint; it ends by returning to
	 * the TSPD via SMC.
	 * ---------------------------------------------
	 */
func tsp_cpu_on_entry
	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb					/* ensure vectors are in effect */

	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* --------------------------------------------
	 * Enable MMU and D-caches together.
	 * --------------------------------------------
	 */
	mov	x0, #0			/* flags argument for the platform hook */
	bl	bl32_plat_enable_mmu

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1
	 * and enable pointer authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Enter C runtime to perform any remaining
	 * book keeping
	 * ---------------------------------------------
	 */
	bl	tsp_cpu_on_main		/* x0 = smc args to return */
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_on_entry_panic:
	b	tsp_cpu_on_entry_panic
endfunc tsp_cpu_on_entry
355
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be suspended through a CPU_SUSPEND
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD saves and restores
	 * the EL1 state. Returns to the TSPD via SMC
	 * with the args structure returned by the C
	 * handler in x0.
	 * ---------------------------------------------
	 */
func tsp_cpu_suspend_entry
	bl	tsp_cpu_suspend_main	/* x0 = smc args to return */
	restore_args_call_smc
endfunc tsp_cpu_suspend_entry
369
	/*-------------------------------------------------
	 * This entrypoint is used by the TSPD to pass
	 * control for `synchronously` handling a S-EL1
	 * Interrupt which was triggered while executing
	 * in normal world. 'x0' contains a magic number
	 * which indicates this. TSPD expects control to
	 * be handed back at the end of interrupt
	 * processing. This is done through an SMC.
	 * The handover agreement is:
	 *
	 * 1. PSTATE.DAIF are set upon entry. 'x1' has
	 *    the ELR_EL3 from the non-secure state.
	 * 2. TSP has to preserve the callee saved
	 *    general purpose registers, SP_EL1/EL0 and
	 *    LR.
	 * 3. TSP has to preserve the system and vfp
	 *    registers (if applicable).
	 * 4. TSP can use 'x0-x18' to enable its C
	 *    runtime.
	 * 5. TSP returns to TSPD using an SMC with
	 *    'x0' = TSP_HANDLED_S_EL1_INTR
	 * ------------------------------------------------
	 */
func	tsp_sel1_intr_entry
#if DEBUG
	/* Debug builds verify the magic number handed over in x0 */
	mov_imm	x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
	cmp	x0, x2
	b.ne	tsp_sel1_int_entry_panic
#endif
	/*-------------------------------------------------
	 * Save any previous context needed to perform
	 * an exception return from S-EL1 e.g. context
	 * from a previous Non secure Interrupt.
	 * Update statistics and handle the S-EL1
	 * interrupt before returning to the TSPD.
	 * IRQ/FIQs are not enabled since that will
	 * complicate the implementation. Execution
	 * will be transferred back to the normal world
	 * in any case. The handler can return 0
	 * if the interrupt was handled or TSP_PREEMPTED
	 * if the expected interrupt was preempted
	 * by an interrupt that should be handled in EL3
	 * e.g. Group 0 interrupt in GICv3. In both
	 * the cases switch to EL3 using SMC with id
	 * TSP_HANDLED_S_EL1_INTR. Any other return value
	 * from the handler will result in panic.
	 * ------------------------------------------------
	 */
	save_eret_context x2 x3
	bl	tsp_update_sync_sel1_intr_stats
	bl	tsp_common_int_handler
	/* Check if the S-EL1 interrupt has been handled */
	cbnz	x0, tsp_sel1_intr_check_preemption
	b	tsp_sel1_intr_return
tsp_sel1_intr_check_preemption:
	/* Check if the S-EL1 interrupt has been preempted */
	mov_imm	x1, TSP_PREEMPTED
	cmp	x0, x1
	b.ne	tsp_sel1_int_entry_panic	/* unexpected handler result */
tsp_sel1_intr_return:
	mov_imm	x0, TSP_HANDLED_S_EL1_INTR
	restore_eret_context x2 x3
	smc	#0				/* hand control back to the TSPD */

	/* Should never reach here */
tsp_sel1_int_entry_panic:
	no_ret	plat_panic_handler
endfunc tsp_sel1_intr_entry
438
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu resumes execution after an earlier
	 * CPU_SUSPEND psci call to ask the TSP to
	 * restore its saved context. In the current
	 * implementation, the TSPD saves and restores
	 * EL1 state so nothing is done here apart from
	 * acknowledging the request via SMC.
	 * ---------------------------------------------
	 */
func tsp_cpu_resume_entry
	bl	tsp_cpu_resume_main	/* x0 = smc args to return */
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_cpu_resume_entry
456
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a fast smc request.
	 * Fast SMCs run with interrupts masked (no
	 * preemption window is opened here, unlike
	 * tsp_yield_smc_entry).
	 * ---------------------------------------------
	 */
func tsp_fast_smc_entry
	bl	tsp_smc_handler		/* x0 = smc args to return */
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_fast_smc_entry
469
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a Yielding SMC request.
	 * We will enable preemption during execution
	 * of tsp_smc_handler by unmasking IRQ/FIQ
	 * around the call, and mask them again before
	 * returning to the TSPD.
	 * ---------------------------------------------
	 */
func tsp_yield_smc_entry
	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT	/* allow preemption */
	bl	tsp_smc_handler
	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT	/* re-mask before SMC */
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_yield_smc_entry
486
487	/*---------------------------------------------------------------------
488	 * This entrypoint is used by the TSPD to abort a pre-empted Yielding
489	 * SMC. It could be on behalf of non-secure world or because a CPU
490	 * suspend/CPU off request needs to abort the preempted SMC.
491	 * --------------------------------------------------------------------
492	 */
493func tsp_abort_yield_smc_entry
494
495	/*
496	 * Exceptions masking is already done by the TSPD when entering this
497	 * hook so there is no need to do it here.
498	 */
499
500	/* Reset the stack used by the pre-empted SMC */
501	bl	plat_set_my_stack
502
503	/*
504	 * Allow some cleanup such as releasing locks.
505	 */
506	bl	tsp_abort_smc_handler
507
508	restore_args_call_smc
509
510	/* Should never reach here */
511	bl	plat_panic_handler
512endfunc tsp_abort_yield_smc_entry
513