xref: /rk3399_ARM-atf/bl32/tsp/aarch64/tsp_entrypoint.S (revision a806dad58c4cf752238d7bbffbc9a1ce17f63cea)
/*
 * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
30
31#include <arch.h>
32#include <asm_macros.S>
33#include <tsp.h>
34#include <xlat_tables.h>
35#include "../tsp_private.h"
36
37
38	.globl	tsp_entrypoint
39	.globl  tsp_vector_table
40
41
42
	/* ---------------------------------------------
	 * Populate the params in x0-x7 from the pointer
	 * to the smc args structure in x0, then issue
	 * an SMC to hand control back to the TSPD.
	 * x0 is deliberately reloaded LAST because it
	 * doubles as the base pointer for all of the
	 * loads; do not reorder these ldp instructions.
	 * ---------------------------------------------
	 */
	.macro restore_args_call_smc
	ldp	x6, x7, [x0, #TSP_ARG6]		/* args 6-7 */
	ldp	x4, x5, [x0, #TSP_ARG4]		/* args 4-5 */
	ldp	x2, x3, [x0, #TSP_ARG2]		/* args 2-3 */
	ldp	x0, x1, [x0, #TSP_ARG0]		/* args 0-1 (clobbers the base pointer) */
	smc	#0
	.endm
55
	/* ---------------------------------------------
	 * Save the current S-EL1 exception return state
	 * (ELR_EL1, SPSR_EL1) along with x30 (LR) and
	 * x18 on the stack, so that an outer exception
	 * context survives while the TSP handles a new
	 * event. \reg1/\reg2 are caller-chosen scratch
	 * registers (clobbered). Pushes 0x20 bytes in
	 * two 16-byte stores, keeping sp 16-aligned.
	 * Must be paired with restore_eret_context.
	 * NOTE(review): x18 is the AAPCS64 platform
	 * register -- presumably saved so the C runtime
	 * may use it; confirm against callers.
	 * ---------------------------------------------
	 */
	.macro	save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!	/* push ELR_EL1, SPSR_EL1 */
	stp	x30, x18, [sp, #-0x10]!		/* push LR and x18 */
	.endm
62
	/* ---------------------------------------------
	 * Inverse of save_eret_context: pop x30/x18 and
	 * the saved ELR_EL1/SPSR_EL1 pair (staged via
	 * the \reg1/\reg2 scratch registers, which are
	 * clobbered) and write the system registers
	 * back, so a subsequent eret resumes the saved
	 * context. Pop order exactly mirrors the push
	 * order in save_eret_context.
	 * ---------------------------------------------
	 */
	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10		/* pop LR and x18 */
	ldp	\reg1, \reg2, [sp], #0x10	/* pop ELR_EL1, SPSR_EL1 */
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm
69
70	.section	.text, "ax"
71	.align 3
72
73func tsp_entrypoint
74
75	/* ---------------------------------------------
76	 * Set the exception vector to something sane.
77	 * ---------------------------------------------
78	 */
79	adr	x0, tsp_exceptions
80	msr	vbar_el1, x0
81	isb
82
83	/* ---------------------------------------------
84	 * Enable the SError interrupt now that the
85	 * exception vectors have been setup.
86	 * ---------------------------------------------
87	 */
88	msr	daifclr, #DAIF_ABT_BIT
89
90	/* ---------------------------------------------
91	 * Enable the instruction cache, stack pointer
92	 * and data access alignment checks
93	 * ---------------------------------------------
94	 */
95	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
96	mrs	x0, sctlr_el1
97	orr	x0, x0, x1
98	msr	sctlr_el1, x0
99	isb
100
101	/* ---------------------------------------------
102	 * Invalidate the RW memory used by the BL32
103	 * image. This includes the data and NOBITS
104	 * sections. This is done to safeguard against
105	 * possible corruption of this memory by dirty
106	 * cache lines in a system cache as a result of
107	 * use by an earlier boot loader stage.
108	 * ---------------------------------------------
109	 */
110	adr	x0, __RW_START__
111	adr	x1, __RW_END__
112	sub	x1, x1, x0
113	bl	inv_dcache_range
114
115	/* ---------------------------------------------
116	 * Zero out NOBITS sections. There are 2 of them:
117	 *   - the .bss section;
118	 *   - the coherent memory section.
119	 * ---------------------------------------------
120	 */
121	ldr	x0, =__BSS_START__
122	ldr	x1, =__BSS_SIZE__
123	bl	zeromem16
124
125#if USE_COHERENT_MEM
126	ldr	x0, =__COHERENT_RAM_START__
127	ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
128	bl	zeromem16
129#endif
130
131	/* --------------------------------------------
132	 * Allocate a stack whose memory will be marked
133	 * as Normal-IS-WBWA when the MMU is enabled.
134	 * There is no risk of reading stale stack
135	 * memory after enabling the MMU as only the
136	 * primary cpu is running at the moment.
137	 * --------------------------------------------
138	 */
139	bl	plat_set_my_stack
140
141	/* ---------------------------------------------
142	 * Perform early platform setup & platform
143	 * specific early arch. setup e.g. mmu setup
144	 * ---------------------------------------------
145	 */
146	bl	tsp_early_platform_setup
147	bl	tsp_plat_arch_setup
148
149	/* ---------------------------------------------
150	 * Jump to main function.
151	 * ---------------------------------------------
152	 */
153	bl	tsp_main
154
155	/* ---------------------------------------------
156	 * Tell TSPD that we are done initialising
157	 * ---------------------------------------------
158	 */
159	mov	x1, x0
160	mov	x0, #TSP_ENTRY_DONE
161	smc	#0
162
163tsp_entrypoint_panic:
164	b	tsp_entrypoint_panic
165endfunc tsp_entrypoint
166
167
	/* -------------------------------------------
	 * Table of entrypoint vectors provided to the
	 * TSPD for the various entrypoints.
	 * Each slot is exactly one branch instruction,
	 * so the TSPD can enter the TSP at a fixed
	 * offset per event; neither the order of the
	 * entries nor the one-instruction-per-slot
	 * layout may change without updating the TSPD
	 * side (presumably the tsp_vectors_t layout in
	 * tsp.h -- confirm against that header).
	 * -------------------------------------------
	 */
func tsp_vector_table
	b	tsp_std_smc_entry	/* yielding ("std") SMC request */
	b	tsp_fast_smc_entry	/* fast SMC request */
	b	tsp_cpu_on_entry	/* CPU_ON power-up */
	b	tsp_cpu_off_entry	/* CPU_OFF notification */
	b	tsp_cpu_resume_entry	/* resume after CPU_SUSPEND */
	b	tsp_cpu_suspend_entry	/* CPU_SUSPEND notification */
	b	tsp_sel1_intr_entry	/* synchronous S-EL1 interrupt */
	b	tsp_system_off_entry	/* SYSTEM_OFF notification */
	b	tsp_system_reset_entry	/* SYSTEM_RESET notification */
endfunc tsp_vector_table
184
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be turned off through a CPU_OFF
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD expects the TSP to
	 * re-initialise its state so nothing is done
	 * here except for acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_off_entry
	bl	tsp_cpu_off_main	/* x0 = pointer to smc args on return */
	restore_args_call_smc		/* reload x0-x7 from *x0 and SMC back to the TSPD */
endfunc tsp_cpu_off_entry
199
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be switched off (through
	 * a SYSTEM_OFF psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_off_entry
	bl	tsp_system_off_main	/* x0 = pointer to smc args on return */
	restore_args_call_smc		/* reload x0-x7 from *x0 and SMC back to the TSPD */
endfunc tsp_system_off_entry
211
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be reset (through a
	 * SYSTEM_RESET psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_reset_entry
	bl	tsp_system_reset_main	/* x0 = pointer to smc args on return */
	restore_args_call_smc		/* reload x0-x7 from *x0 and SMC back to the TSPD */
endfunc tsp_system_reset_entry
223
224	/*---------------------------------------------
225	 * This entrypoint is used by the TSPD when this
226	 * cpu is turned on using a CPU_ON psci call to
227	 * ask the TSP to initialise itself i.e. setup
228	 * the mmu, stacks etc. Minimal architectural
229	 * state will be initialised by the TSPD when
230	 * this function is entered i.e. Caches and MMU
231	 * will be turned off, the execution state
232	 * will be aarch64 and exceptions masked.
233	 * ---------------------------------------------
234	 */
235func tsp_cpu_on_entry
236	/* ---------------------------------------------
237	 * Set the exception vector to something sane.
238	 * ---------------------------------------------
239	 */
240	adr	x0, tsp_exceptions
241	msr	vbar_el1, x0
242	isb
243
244	/* Enable the SError interrupt */
245	msr	daifclr, #DAIF_ABT_BIT
246
247	/* ---------------------------------------------
248	 * Enable the instruction cache, stack pointer
249	 * and data access alignment checks
250	 * ---------------------------------------------
251	 */
252	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
253	mrs	x0, sctlr_el1
254	orr	x0, x0, x1
255	msr	sctlr_el1, x0
256	isb
257
258	/* --------------------------------------------
259	 * Give ourselves a stack whose memory will be
260	 * marked as Normal-IS-WBWA when the MMU is
261	 * enabled.
262	 * --------------------------------------------
263	 */
264	bl	plat_set_my_stack
265
266	/* --------------------------------------------
267	 * Enable the MMU with the DCache disabled. It
268	 * is safe to use stacks allocated in normal
269	 * memory as a result. All memory accesses are
270	 * marked nGnRnE when the MMU is disabled. So
271	 * all the stack writes will make it to memory.
272	 * All memory accesses are marked Non-cacheable
273	 * when the MMU is enabled but D$ is disabled.
274	 * So used stack memory is guaranteed to be
275	 * visible immediately after the MMU is enabled
276	 * Enabling the DCache at the same time as the
277	 * MMU can lead to speculatively fetched and
278	 * possibly stale stack memory being read from
279	 * other caches. This can lead to coherency
280	 * issues.
281	 * --------------------------------------------
282	 */
283	mov	x0, #DISABLE_DCACHE
284	bl	bl32_plat_enable_mmu
285
286	/* ---------------------------------------------
287	 * Enable the Data cache now that the MMU has
288	 * been enabled. The stack has been unwound. It
289	 * will be written first before being read. This
290	 * will invalidate any stale cache lines resi-
291	 * -dent in other caches. We assume that
292	 * interconnect coherency has been enabled for
293	 * this cluster by EL3 firmware.
294	 * ---------------------------------------------
295	 */
296	mrs	x0, sctlr_el1
297	orr	x0, x0, #SCTLR_C_BIT
298	msr	sctlr_el1, x0
299	isb
300
301	/* ---------------------------------------------
302	 * Enter C runtime to perform any remaining
303	 * book keeping
304	 * ---------------------------------------------
305	 */
306	bl	tsp_cpu_on_main
307	restore_args_call_smc
308
309	/* Should never reach here */
310tsp_cpu_on_entry_panic:
311	b	tsp_cpu_on_entry_panic
312endfunc tsp_cpu_on_entry
313
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be suspended through a CPU_SUSPEND
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD saves and restores
	 * the EL1 state.
	 * ---------------------------------------------
	 */
func tsp_cpu_suspend_entry
	bl	tsp_cpu_suspend_main	/* x0 = pointer to smc args on return */
	restore_args_call_smc		/* reload x0-x7 from *x0 and SMC back to the TSPD */
endfunc tsp_cpu_suspend_entry
327
328	/*-------------------------------------------------
329	 * This entrypoint is used by the TSPD to pass
330	 * control for `synchronously` handling a S-EL1
331	 * Interrupt which was triggered while executing
332	 * in normal world. 'x0' contains a magic number
333	 * which indicates this. TSPD expects control to
334	 * be handed back at the end of interrupt
335	 * processing. This is done through an SMC.
336	 * The handover agreement is:
337	 *
338	 * 1. PSTATE.DAIF are set upon entry. 'x1' has
339	 *    the ELR_EL3 from the non-secure state.
340	 * 2. TSP has to preserve the callee saved
341	 *    general purpose registers, SP_EL1/EL0 and
342	 *    LR.
343	 * 3. TSP has to preserve the system and vfp
344	 *    registers (if applicable).
345	 * 4. TSP can use 'x0-x18' to enable its C
346	 *    runtime.
347	 * 5. TSP returns to TSPD using an SMC with
348	 *    'x0' = TSP_HANDLED_S_EL1_INTR
349	 * ------------------------------------------------
350	 */
351func	tsp_sel1_intr_entry
352#if DEBUG
353	mov_imm	x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
354	cmp	x0, x2
355	b.ne	tsp_sel1_int_entry_panic
356#endif
357	/*-------------------------------------------------
358	 * Save any previous context needed to perform
359	 * an exception return from S-EL1 e.g. context
360	 * from a previous Non secure Interrupt.
361	 * Update statistics and handle the S-EL1
362	 * interrupt before returning to the TSPD.
363	 * IRQ/FIQs are not enabled since that will
364	 * complicate the implementation. Execution
365	 * will be transferred back to the normal world
366	 * in any case. The handler can return 0
367	 * if the interrupt was handled or TSP_PREEMPTED
368	 * if the expected interrupt was preempted
369	 * by an interrupt that should be handled in EL3
370	 * e.g. Group 0 interrupt in GICv3. In both
371	 * the cases switch to EL3 using SMC with id
372	 * TSP_HANDLED_S_EL1_INTR. Any other return value
373	 * from the handler will result in panic.
374	 * ------------------------------------------------
375	 */
376	save_eret_context x2 x3
377	bl	tsp_update_sync_sel1_intr_stats
378	bl	tsp_common_int_handler
379	/* Check if the S-EL1 interrupt has been handled */
380	cbnz	x0, tsp_sel1_intr_check_preemption
381	b	tsp_sel1_intr_return
382tsp_sel1_intr_check_preemption:
383	/* Check if the S-EL1 interrupt has been preempted */
384	mov_imm	x1, TSP_PREEMPTED
385	cmp	x0, x1
386	b.ne	tsp_sel1_int_entry_panic
387tsp_sel1_intr_return:
388	mov_imm	x0, TSP_HANDLED_S_EL1_INTR
389	restore_eret_context x2 x3
390	smc	#0
391
392	/* Should never reach here */
393tsp_sel1_int_entry_panic:
394	no_ret	plat_panic_handler
395endfunc tsp_sel1_intr_entry
396
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu resumes execution after an earlier
	 * CPU_SUSPEND psci call to ask the TSP to
	 * restore its saved context. In the current
	 * implementation, the TSPD saves and restores
	 * EL1 state so nothing is done here apart from
	 * acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_resume_entry
	bl	tsp_cpu_resume_main	/* x0 = pointer to smc args on return */
	restore_args_call_smc		/* reload x0-x7 from *x0 and SMC back to the TSPD */

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_cpu_resume_entry
414
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a fast smc request.
	 * Interrupts remain masked throughout, unlike
	 * the std smc path below.
	 * ---------------------------------------------
	 */
func tsp_fast_smc_entry
	bl	tsp_smc_handler		/* x0 = pointer to smc args on return */
	restore_args_call_smc		/* reload x0-x7 from *x0 and SMC back to the TSPD */

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_fast_smc_entry
427
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a std smc request.
	 * We will enable preemption during execution
	 * of tsp_smc_handler.
	 * ---------------------------------------------
	 */
func tsp_std_smc_entry
	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT	/* unmask IRQ/FIQ: handler is preemptible */
	bl	tsp_smc_handler		/* x0 = pointer to smc args on return */
	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT	/* re-mask before returning to the TSPD */
	restore_args_call_smc		/* reload x0-x7 from *x0 and SMC back to the TSPD */

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_std_smc_entry
444