xref: /rk3399_ARM-atf/bl32/tsp/aarch64/tsp_entrypoint.S (revision fd6007de64fd7e16f6d96972643434c04a77f1c6)
1/*
2 * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <arch.h>
32#include <asm_macros.S>
33#include <tsp.h>
34#include <xlat_tables.h>
35#include "../tsp_private.h"
36
37
38	.globl	tsp_entrypoint
39	.globl  tsp_vector_table
40
41
42
	/* ---------------------------------------------
	 * Populate the SMC result/arguments in x0-x7
	 * from the tsp_args structure whose pointer is
	 * passed in x0, then issue the SMC that hands
	 * control back to the TSPD in EL3.
	 * TSP_ARG0..TSP_ARG6 are byte offsets into the
	 * structure; x0 is loaded last since it holds
	 * the base pointer until then.
	 * ---------------------------------------------
	 */
	.macro restore_args_call_smc
	ldp	x6, x7, [x0, #TSP_ARG6]
	ldp	x4, x5, [x0, #TSP_ARG4]
	ldp	x2, x3, [x0, #TSP_ARG2]
	ldp	x0, x1, [x0, #TSP_ARG0]	/* overwrites the base pointer - must be last */
	smc	#0
	.endm
55
	/* ---------------------------------------------
	 * Save the minimal S-EL1 state needed to
	 * perform an exception return later:
	 * ELR_EL1/SPSR_EL1 (staged through \reg1 and
	 * \reg2) plus x30 (LR) and x18, pushed on the
	 * current stack in two 16-byte frames.
	 * Counterpart of restore_eret_context below -
	 * push/pop order must stay in sync.
	 * ---------------------------------------------
	 */
	.macro	save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!
	stp	x30, x18, [sp, #-0x10]!
	.endm
62
	/* ---------------------------------------------
	 * Inverse of save_eret_context: pop x30/x18
	 * and the saved ELR_EL1/SPSR_EL1 values (via
	 * \reg1 and \reg2) from the stack and write
	 * them back to the system registers, restoring
	 * the context for a pending exception return.
	 * ---------------------------------------------
	 */
	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10
	ldp	\reg1, \reg2, [sp], #0x10
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm
69
70	.section	.text, "ax"
71	.align 3
72
func tsp_entrypoint

	/* ---------------------------------------------
	 * Cold-boot entry point of the TSP (S-EL1),
	 * entered from the TSPD on the primary CPU.
	 * Initialises vectors, SCTLR_EL1, RW memory,
	 * the stack and the C runtime, runs tsp_main()
	 * and then signals TSP_ENTRY_DONE to the TSPD
	 * via SMC. Control should never return here
	 * after that SMC.
	 * ---------------------------------------------
	 */

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been setup.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * (SCTLR_EL1.I, .A and .SA respectively).
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Invalidate the RW memory used by the BL32
	 * image. This includes the data and NOBITS
	 * sections. This is done to safeguard against
	 * possible corruption of this memory by dirty
	 * cache lines in a system cache as a result of
	 * use by an earlier boot loader stage.
	 * inv_dcache_range(x0 = base, x1 = size).
	 * ---------------------------------------------
	 */
	adr	x0, __RW_START__
	adr	x1, __RW_END__
	sub	x1, x1, x0			/* x1 = RW region size in bytes */
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 *   - the .bss section;
	 *   - the coherent memory section (only when
	 *     USE_COHERENT_MEM is enabled).
	 * zeromem16(x0 = base, x1 = size).
	 * ---------------------------------------------
	 */
	ldr	x0, =__BSS_START__
	ldr	x1, =__BSS_SIZE__
	bl	zeromem16

#if USE_COHERENT_MEM
	ldr	x0, =__COHERENT_RAM_START__
	ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
	bl	zeromem16
#endif

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* ---------------------------------------------
	 * Perform early platform setup & platform
	 * specific early arch. setup e.g. mmu setup
	 * ---------------------------------------------
	 */
	bl	tsp_early_platform_setup
	bl	tsp_plat_arch_setup

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising.
	 * tsp_main's return value (x0) is forwarded as
	 * the SMC's second argument in x1.
	 * ---------------------------------------------
	 */
	mov	x1, x0
	mov	x0, #TSP_ENTRY_DONE
	smc	#0

	/* The TSPD should not return to this SMC - spin if it does */
tsp_entrypoint_panic:
	b	tsp_entrypoint_panic
endfunc tsp_entrypoint
166
167
	/* -------------------------------------------
	 * Table of entrypoint vectors provided to the
	 * TSPD for the various entrypoints.
	 * Each slot is a single branch instruction.
	 * NOTE(review): slot order presumably mirrors
	 * the entry-vector structure the TSPD indexes
	 * into - do not reorder without checking the
	 * TSPD side.
	 * -------------------------------------------
	 */
func tsp_vector_table
	b	tsp_std_smc_entry	/* service a standard (preemptible) SMC */
	b	tsp_fast_smc_entry	/* service a fast SMC */
	b	tsp_cpu_on_entry	/* CPU brought online via CPU_ON */
	b	tsp_cpu_off_entry	/* CPU about to go offline via CPU_OFF */
	b	tsp_cpu_resume_entry	/* CPU resuming after CPU_SUSPEND */
	b	tsp_cpu_suspend_entry	/* CPU about to enter CPU_SUSPEND */
	b	tsp_fiq_entry		/* pending S-EL1 FIQ handed over by TSPD */
	b	tsp_system_off_entry	/* SYSTEM_OFF notification */
	b	tsp_system_reset_entry	/* SYSTEM_RESET notification */
endfunc tsp_vector_table
184
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be turned off through a CPU_OFF
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD expects the TSP to
	 * re-initialise its state so nothing is done
	 * here except for acknowledging the request.
	 * tsp_cpu_off_main returns (in x0) a pointer to
	 * the tsp_args to hand back to the TSPD.
	 * ---------------------------------------------
	 */
func tsp_cpu_off_entry
	bl	tsp_cpu_off_main
	restore_args_call_smc
endfunc tsp_cpu_off_entry
199
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be switched off (through
	 * a SYSTEM_OFF psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * tsp_system_off_main returns (in x0) a pointer
	 * to the tsp_args to hand back to the TSPD.
	 * ---------------------------------------------
	 */
func tsp_system_off_entry
	bl	tsp_system_off_main
	restore_args_call_smc
endfunc tsp_system_off_entry
211
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be reset (through a
	 * SYSTEM_RESET psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * tsp_system_reset_main returns (in x0) a
	 * pointer to the tsp_args to hand back to the
	 * TSPD.
	 * ---------------------------------------------
	 */
func tsp_system_reset_entry
	bl	tsp_system_reset_main
	restore_args_call_smc
endfunc tsp_system_reset_entry
223
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is turned on using a CPU_ON psci call to
	 * ask the TSP to initialise itself i.e. setup
	 * the mmu, stacks etc. Minimal architectural
	 * state will be initialised by the TSPD when
	 * this function is entered i.e. Caches and MMU
	 * will be turned off, the execution state
	 * will be aarch64 and exceptions masked.
	 * Mirrors the cold-boot path in tsp_entrypoint
	 * but enables the MMU/D-cache in two explicit
	 * steps (see comments below).
	 * ---------------------------------------------
	 */
func tsp_cpu_on_entry
	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * (SCTLR_EL1.I, .A and .SA respectively).
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* --------------------------------------------
	 * Enable the MMU with the DCache disabled. It
	 * is safe to use stacks allocated in normal
	 * memory as a result. All memory accesses are
	 * marked nGnRnE when the MMU is disabled. So
	 * all the stack writes will make it to memory.
	 * All memory accesses are marked Non-cacheable
	 * when the MMU is enabled but D$ is disabled.
	 * So used stack memory is guaranteed to be
	 * visible immediately after the MMU is enabled
	 * Enabling the DCache at the same time as the
	 * MMU can lead to speculatively fetched and
	 * possibly stale stack memory being read from
	 * other caches. This can lead to coherency
	 * issues.
	 * --------------------------------------------
	 */
	mov	x0, #DISABLE_DCACHE
	bl	bl32_plat_enable_mmu

	/* ---------------------------------------------
	 * Enable the Data cache now that the MMU has
	 * been enabled. The stack has been unwound. It
	 * will be written first before being read. This
	 * will invalidate any stale cache lines resi-
	 * -dent in other caches. We assume that
	 * interconnect coherency has been enabled for
	 * this cluster by EL3 firmware.
	 * ---------------------------------------------
	 */
	mrs	x0, sctlr_el1
	orr	x0, x0, #SCTLR_C_BIT
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Enter C runtime to perform any remaining
	 * book keeping. tsp_cpu_on_main returns (in x0)
	 * a pointer to the tsp_args handed back to the
	 * TSPD via the SMC in restore_args_call_smc.
	 * ---------------------------------------------
	 */
	bl	tsp_cpu_on_main
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_on_entry_panic:
	b	tsp_cpu_on_entry_panic
endfunc tsp_cpu_on_entry
313
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be suspended through a CPU_SUSPEND
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD saves and restores
	 * the EL1 state. tsp_cpu_suspend_main returns
	 * (in x0) a pointer to the tsp_args to hand
	 * back to the TSPD.
	 * ---------------------------------------------
	 */
func tsp_cpu_suspend_entry
	bl	tsp_cpu_suspend_main
	restore_args_call_smc
endfunc tsp_cpu_suspend_entry
327
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to pass
	 * control for handling a pending S-EL1 FIQ.
	 * 'x0' contains a magic number which indicates
	 * this. TSPD expects control to be handed back
	 * at the end of FIQ processing. This is done
	 * through an SMC. The handover agreement is:
	 *
	 * 1. PSTATE.DAIF are set upon entry. 'x1' has
	 *    the ELR_EL3 from the non-secure state.
	 * 2. TSP has to preserve the callee saved
	 *    general purpose registers, SP_EL1/EL0 and
	 *    LR.
	 * 3. TSP has to preserve the system and vfp
	 *    registers (if applicable).
	 * 4. TSP can use 'x0-x18' to enable its C
	 *    runtime.
	 * 5. TSP returns to TSPD using an SMC with
	 *    'x0' = TSP_HANDLED_S_EL1_FIQ
	 * ---------------------------------------------
	 */
func	tsp_fiq_entry
#if DEBUG
	/* Debug builds sanity-check the magic number passed in x0.
	 * The constant is built with mov+movk since it may not fit
	 * a single mov immediate encoding.
	 */
	mov	x2, #(TSP_HANDLE_FIQ_AND_RETURN & ~0xffff)
	movk	x2, #(TSP_HANDLE_FIQ_AND_RETURN &  0xffff)
	cmp	x0, x2
	b.ne	tsp_fiq_entry_panic
#endif
	/*---------------------------------------------
	 * Save any previous context needed to perform
	 * an exception return from S-EL1 e.g. context
	 * from a previous IRQ. Update statistics and
	 * handle the FIQ before returning to the TSPD.
	 * IRQ/FIQs are not enabled since that will
	 * complicate the implementation. Execution
	 * will be transferred back to the normal world
	 * in any case. A non-zero return value from the
	 * fiq handler is an error.
	 * ---------------------------------------------
	 */
	save_eret_context x2 x3
	bl	tsp_update_sync_fiq_stats
	bl	tsp_fiq_handler
	cbnz	x0, tsp_fiq_entry_panic		/* non-zero x0 = handler error */
	restore_eret_context x2 x3
	/* Hand control back to the TSPD per point 5 of the contract above */
	mov	x0, #(TSP_HANDLED_S_EL1_FIQ & ~0xffff)
	movk	x0, #(TSP_HANDLED_S_EL1_FIQ &  0xffff)
	smc	#0

tsp_fiq_entry_panic:
	b	tsp_fiq_entry_panic
endfunc tsp_fiq_entry
380
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu resumes execution after an earlier
	 * CPU_SUSPEND psci call to ask the TSP to
	 * restore its saved context. In the current
	 * implementation, the TSPD saves and restores
	 * EL1 state so nothing is done here apart from
	 * acknowledging the request.
	 * tsp_cpu_resume_main returns (in x0) a pointer
	 * to the tsp_args to hand back to the TSPD.
	 * ---------------------------------------------
	 */
func tsp_cpu_resume_entry
	bl	tsp_cpu_resume_main
	restore_args_call_smc
	/* The SMC above should not return - spin if it does */
tsp_cpu_resume_panic:
	b	tsp_cpu_resume_panic
endfunc tsp_cpu_resume_entry
397
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a fast smc request.
	 * Interrupts stay masked for the duration
	 * (contrast with tsp_std_smc_entry below).
	 * tsp_smc_handler returns (in x0) a pointer to
	 * the tsp_args to hand back to the TSPD.
	 * ---------------------------------------------
	 */
func tsp_fast_smc_entry
	bl	tsp_smc_handler
	restore_args_call_smc
	/* The SMC above should not return - spin if it does */
tsp_fast_smc_entry_panic:
	b	tsp_fast_smc_entry_panic
endfunc tsp_fast_smc_entry
409
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a std smc request.
	 * We will enable preemption during execution
	 * of tsp_smc_handler by unmasking FIQ/IRQ
	 * around the call, and re-masking them before
	 * returning to the TSPD.
	 * tsp_smc_handler returns (in x0) a pointer to
	 * the tsp_args to hand back to the TSPD.
	 * ---------------------------------------------
	 */
func tsp_std_smc_entry
	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT	/* allow preemption */
	bl	tsp_smc_handler
	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT	/* re-mask before SMC */
	restore_args_call_smc
	/* The SMC above should not return - spin if it does */
tsp_std_smc_entry_panic:
	b	tsp_std_smc_entry_panic
endfunc tsp_std_smc_entry
425