/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <tsp.h>
#include <xlat_tables_defs.h>
#include "../tsp_private.h"


	.globl	tsp_entrypoint
	.globl	tsp_vector_table

	/* ---------------------------------------------
	 * Populate the params in x0-x7 from the pointer
	 * to the smc args structure in x0.
	 * ---------------------------------------------
	 */
	.macro restore_args_call_smc
	ldp	x6, x7, [x0, #TSP_ARG6]
	ldp	x4, x5, [x0, #TSP_ARG4]
	ldp	x2, x3, [x0, #TSP_ARG2]
	ldp	x0, x1, [x0, #TSP_ARG0]
	smc	#0
	.endm
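
	/*
	 * Note that x0 is deliberately loaded last: it holds the pointer
	 * to the smc args structure until that point, so clobbering it
	 * any earlier would corrupt the remaining loads.
	 *
	 * For reference, a sketch of the layout this macro assumes. This
	 * is an illustration only; the authoritative definition and the
	 * TSP_ARGn byte offsets live in tsp.h:
	 *
	 *	typedef struct tsp_args {
	 *		uint64_t _regs[TSP_ARGS_END >> 3];
	 *	} __aligned(CACHE_WRITEBACK_GRANULE) tsp_args_t;
	 */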

	.macro	save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!
	stp	x30, x18, [sp, #-0x10]!
	.endm

	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10
	ldp	\reg1, \reg2, [sp], #0x10
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm
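
	/*
	 * In the two macros above, x30 (LR) is saved because the 'bl'
	 * calls made between them clobber it. x18 is the AAPCS64
	 * platform register; item 4 of the handover agreement further
	 * below allows the C handlers to use x0-x18 as scratch, so the
	 * interrupted context's copy has to be preserved here as well.
	 */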

	.section	.text, "ax"
	.align 3

func tsp_entrypoint

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been set up.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT
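
	/*
	 * Note that only the SError (asynchronous abort) mask is cleared
	 * here; IRQ and FIQ remain masked and are only unmasked where
	 * explicitly required, e.g. on the preemptible std smc path
	 * below.
	 */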

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Invalidate the RW memory used by the BL32
	 * image. This includes the data and NOBITS
	 * sections. This is done to safeguard against
	 * possible corruption of this memory by dirty
	 * cache lines in a system cache as a result of
	 * use by an earlier boot loader stage.
	 * ---------------------------------------------
	 */
	adr	x0, __RW_START__
	adr	x1, __RW_END__
	sub	x1, x1, x0
	bl	inv_dcache_range
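
	/*
	 * inv_dcache_range expects the base address in x0 and the size
	 * in bytes in x1, hence the subtraction above.
	 */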

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 *   - the .bss section;
	 *   - the coherent memory section.
	 * ---------------------------------------------
	 */
	ldr	x0, =__BSS_START__
	ldr	x1, =__BSS_SIZE__
	bl	zeromem

#if USE_COHERENT_MEM
	ldr	x0, =__COHERENT_RAM_START__
	ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
	bl	zeromem
#endif

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* ---------------------------------------------
	 * Initialize the stack protector canary before
	 * any C code is called.
	 * ---------------------------------------------
	 */
#if STACK_PROTECTOR_ENABLED
	bl	update_stack_protector_canary
#endif

	/* ---------------------------------------------
	 * Perform early platform setup & platform-
	 * specific early arch setup, e.g. MMU setup.
	 * ---------------------------------------------
	 */
	bl	tsp_early_platform_setup
	bl	tsp_plat_arch_setup

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising
	 * ---------------------------------------------
	 */
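	/*
	 * tsp_main returns the address of tsp_vector_table in x0; hand
	 * it back to the TSPD in x1, with TSP_ENTRY_DONE in x0 as the
	 * function id.
	 */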
	mov	x1, x0
	mov	x0, #TSP_ENTRY_DONE
	smc	#0

tsp_entrypoint_panic:
	b	tsp_entrypoint_panic
endfunc tsp_entrypoint


	/* -------------------------------------------
	 * Table of entrypoint vectors provided to the
	 * TSPD for the various entrypoints
	 * -------------------------------------------
	 */
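	/*
	 * Each entry must be a single branch instruction: the TSPD
	 * derives the individual entrypoints from their fixed, 4-byte
	 * spaced offsets within this table.
	 */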
func tsp_vector_table
	b	tsp_std_smc_entry
	b	tsp_fast_smc_entry
	b	tsp_cpu_on_entry
	b	tsp_cpu_off_entry
	b	tsp_cpu_resume_entry
	b	tsp_cpu_suspend_entry
	b	tsp_sel1_intr_entry
	b	tsp_system_off_entry
	b	tsp_system_reset_entry
	b	tsp_abort_std_smc_entry
endfunc tsp_vector_table

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be turned off through a CPU_OFF
	 * psci call to ask the TSP to perform any
	 * necessary bookkeeping. In the current
	 * implementation, the TSPD expects the TSP to
	 * re-initialise its state so nothing is done
	 * here except for acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_off_entry
	bl	tsp_cpu_off_main
	restore_args_call_smc
endfunc tsp_cpu_off_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be switched off (through
	 * a SYSTEM_OFF psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_off_entry
	bl	tsp_system_off_main
	restore_args_call_smc
endfunc tsp_system_off_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be reset (through a
	 * SYSTEM_RESET psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_reset_entry
	bl	tsp_system_reset_main
	restore_args_call_smc
endfunc tsp_system_reset_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is turned on using a CPU_ON psci call to
	 * ask the TSP to initialise itself i.e. set up
	 * the MMU, stacks etc. Minimal architectural
	 * state will be initialised by the TSPD when
	 * this function is entered i.e. caches and MMU
	 * will be turned off, the execution state
	 * will be AArch64 and exceptions masked.
	 * ---------------------------------------------
	 */
func tsp_cpu_on_entry
	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* --------------------------------------------
	 * Enable the MMU with the DCache disabled. It
	 * is safe to use stacks allocated in normal
	 * memory as a result. All memory accesses are
	 * marked nGnRnE when the MMU is disabled. So
	 * all the stack writes will make it to memory.
	 * All memory accesses are marked Non-cacheable
	 * when the MMU is enabled but D$ is disabled.
	 * So used stack memory is guaranteed to be
	 * visible immediately after the MMU is enabled.
	 * Enabling the DCache at the same time as the
	 * MMU can lead to speculatively fetched and
	 * possibly stale stack memory being read from
	 * other caches. This can lead to coherency
	 * issues.
	 * --------------------------------------------
	 */
	mov	x0, #DISABLE_DCACHE
	bl	bl32_plat_enable_mmu
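
	/*
	 * bl32_plat_enable_mmu is a platform hook; a typical
	 * implementation (see plat/common) simply calls
	 * enable_mmu_el1() with the DISABLE_DCACHE flag passed in x0.
	 */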

	/* ---------------------------------------------
	 * Enable the Data cache now that the MMU has
	 * been enabled. The stack has been unwound. It
	 * will be written first before being read. This
	 * will invalidate any stale cache lines
	 * resident in other caches. We assume that
	 * interconnect coherency has been enabled for
	 * this cluster by EL3 firmware.
	 * ---------------------------------------------
	 */
	mrs	x0, sctlr_el1
	orr	x0, x0, #SCTLR_C_BIT
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Enter C runtime to perform any remaining
	 * bookkeeping
	 * ---------------------------------------------
	 */
	bl	tsp_cpu_on_main
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_on_entry_panic:
	b	tsp_cpu_on_entry_panic
endfunc tsp_cpu_on_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be suspended through a CPU_SUSPEND
	 * psci call to ask the TSP to perform any
	 * necessary bookkeeping. In the current
	 * implementation, the TSPD saves and restores
	 * the EL1 state.
	 * ---------------------------------------------
	 */
func tsp_cpu_suspend_entry
	bl	tsp_cpu_suspend_main
	restore_args_call_smc
endfunc tsp_cpu_suspend_entry

	/*-------------------------------------------------
	 * This entrypoint is used by the TSPD to pass
	 * control for `synchronously` handling an S-EL1
	 * interrupt which was triggered while executing
	 * in the normal world. 'x0' contains a magic number
	 * which indicates this. The TSPD expects control to
	 * be handed back at the end of interrupt
	 * processing. This is done through an SMC.
	 * The handover agreement is:
	 *
	 * 1. The PSTATE.DAIF bits are set upon entry. 'x1'
	 *    has the ELR_EL3 from the non-secure state.
	 * 2. TSP has to preserve the callee saved
	 *    general purpose registers, SP_EL1/EL0 and
	 *    LR.
	 * 3. TSP has to preserve the system and vfp
	 *    registers (if applicable).
	 * 4. TSP can use 'x0-x18' to enable its C
	 *    runtime.
	 * 5. TSP returns to TSPD using an SMC with
	 *    'x0' = TSP_HANDLED_S_EL1_INTR
	 * ------------------------------------------------
	 */
func	tsp_sel1_intr_entry
#if DEBUG
	mov_imm	x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
	cmp	x0, x2
	b.ne	tsp_sel1_int_entry_panic
#endif
	/*-------------------------------------------------
	 * Save any previous context needed to perform
	 * an exception return from S-EL1 e.g. context
	 * from a previous non-secure interrupt.
	 * Update statistics and handle the S-EL1
	 * interrupt before returning to the TSPD.
	 * IRQ/FIQs are not enabled since that will
	 * complicate the implementation. Execution
	 * will be transferred back to the normal world
	 * in any case. The handler can return 0
	 * if the interrupt was handled or TSP_PREEMPTED
	 * if the expected interrupt was preempted
	 * by an interrupt that should be handled in EL3
	 * e.g. Group 0 interrupt in GICv3. In both
	 * cases switch to EL3 using an SMC with id
	 * TSP_HANDLED_S_EL1_INTR. Any other return value
	 * from the handler will result in a panic.
	 * ------------------------------------------------
	 */
	save_eret_context x2 x3
	bl	tsp_update_sync_sel1_intr_stats
	bl	tsp_common_int_handler
	/* Check if the S-EL1 interrupt has been handled */
	cbnz	x0, tsp_sel1_intr_check_preemption
	b	tsp_sel1_intr_return
tsp_sel1_intr_check_preemption:
	/* Check if the S-EL1 interrupt has been preempted */
	mov_imm	x1, TSP_PREEMPTED
	cmp	x0, x1
	b.ne	tsp_sel1_int_entry_panic
tsp_sel1_intr_return:
	mov_imm	x0, TSP_HANDLED_S_EL1_INTR
	restore_eret_context x2 x3
	smc	#0

	/* Should never reach here */
tsp_sel1_int_entry_panic:
	no_ret	plat_panic_handler
endfunc tsp_sel1_intr_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu resumes execution after an earlier
	 * CPU_SUSPEND psci call to ask the TSP to
	 * restore its saved context. In the current
	 * implementation, the TSPD saves and restores
	 * EL1 state so nothing is done here apart from
	 * acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_resume_entry
	bl	tsp_cpu_resume_main
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_cpu_resume_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a fast smc request.
	 * ---------------------------------------------
	 */
func tsp_fast_smc_entry
	bl	tsp_smc_handler
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_fast_smc_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a std smc request.
	 * We will enable preemption during execution
	 * of tsp_smc_handler.
	 * ---------------------------------------------
	 */
func tsp_std_smc_entry
	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
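
	/*
	 * With IRQ/FIQ unmasked, tsp_smc_handler can be preempted by an
	 * interrupt that traps to EL3, in which case the TSPD may later
	 * resume the SMC or abort it via tsp_abort_std_smc_entry below.
	 */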
	bl	tsp_smc_handler
	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_std_smc_entry

	/*---------------------------------------------------------------------
	 * This entrypoint is used by the TSPD to abort a preempted Standard
	 * SMC. It could be on behalf of non-secure world or because a CPU
	 * suspend/CPU off request needs to abort the preempted SMC.
	 * --------------------------------------------------------------------
	 */
func tsp_abort_std_smc_entry

	/*
	 * Exception masking is already done by the TSPD when entering this
	 * hook so there is no need to do it here.
	 */

	/* Reset the stack used by the preempted SMC */
	bl	plat_set_my_stack

	/*
	 * Allow some cleanup such as releasing locks.
	 */
	bl	tsp_abort_smc_handler

	restore_args_call_smc

	/* Should never reach here */
	bl	plat_panic_handler
endfunc tsp_abort_std_smc_entry