/*
 * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef EL3_COMMON_MACROS_S
#define EL3_COMMON_MACROS_S

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <context.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

	/*
	 * Helper macro to initialise EL3 registers we care about.
	 */
	.macro el3_arch_init_common
	/* ---------------------------------------------------------------------
	 * SCTLR_EL3 has already been initialised - read current value before
	 * modifying.
	 *
	 * SCTLR_EL3.I: Enable the instruction cache.
	 *
	 * SCTLR_EL3.SA: Enable Stack Alignment check. A SP alignment fault
	 *  exception is generated if a load or store instruction executed at
	 *  EL3 uses the SP as the base address and the SP is not aligned to a
	 *  16-byte boundary.
	 *
	 * SCTLR_EL3.A: Enable Alignment fault checking. All instructions that
	 *  load or store one or more registers have an alignment check that the
	 *  address being accessed is aligned to the size of the data element(s)
	 *  being accessed.
	 * ---------------------------------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el3
	orr	x0, x0, x1
	msr	sctlr_el3, x0
	isb

#ifdef IMAGE_BL31
	/* ---------------------------------------------------------------------
	 * Initialise the per-cpu cache pointer to the CPU.
	 * This is done early to enable crash reporting to have access to crash
	 * stack. Since crash reporting depends on cpu_data to report the
	 * unhandled exception, not doing so can lead to recursive exceptions
	 * due to a NULL TPIDR_EL3.
	 * ---------------------------------------------------------------------
	 */
	bl	init_cpu_data_ptr
#endif /* IMAGE_BL31 */

	/* ---------------------------------------------------------------------
	 * Initialise SCR_EL3, setting all fields rather than relying on hw.
	 * All fields are architecturally UNKNOWN on reset. The following fields
	 * do not change during the TF lifetime. The remaining fields are set to
	 * zero here but are updated ahead of transitioning to a lower EL in the
	 * function cm_init_context_common().
	 *
	 * SCR_EL3.EEL2: Set to one if S-EL2 is present and enabled.
	 *
	 * NOTE: Modifying EEL2 bit along with EA bit ensures that we mitigate
	 * against ERRATA_V2_3099206.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, SCR_RESET_VAL
#if IMAGE_BL31 && defined(SPD_spmd) && SPMD_SPM_AT_SEL2
	mrs	x1, id_aa64pfr0_el1
	and	x1, x1, #(ID_AA64PFR0_SEL2_MASK << ID_AA64PFR0_SEL2_SHIFT)
	cbz	x1, 1f
	orr	x0, x0, #SCR_EEL2_BIT
#endif
1:
	msr	scr_el3, x0

	/* ---------------------------------------------------------------------
	 * Initialise MDCR_EL3, setting all fields rather than relying on hw.
	 * Some fields are architecturally UNKNOWN on reset.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, MDCR_EL3_RESET_VAL
	msr	mdcr_el3, x0

	/* ---------------------------------------------------------------------
	 * Initialise CPTR_EL3, setting all fields rather than relying on hw.
	 * All fields are architecturally UNKNOWN on reset.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, CPTR_EL3_RESET_VAL
	msr	cptr_el3, x0

	.endm

/* -----------------------------------------------------------------------------
 * This is the superset of actions that need to be performed during a cold boot
 * or a warm boot in EL3. This code is shared by BL1 and BL31.
 *
 * This macro will always perform reset handling, architectural initialisations
 * and stack setup. The rest of the actions are optional because they might not
 * be needed, depending on the context in which this macro is called. This is
 * why this macro is parameterised; each parameter allows an action to be
 * enabled or disabled.
 *
 *  _init_sctlr:
 *	Whether the macro needs to initialise SCTLR_EL3, including configuring
 *	the endianness of data accesses.
 *
 *  _warm_boot_mailbox:
 *	Whether the macro needs to detect the type of boot (cold/warm). The
 *	detection is based on the platform entrypoint address: if it is zero
 *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
 *	this macro jumps to the platform entrypoint address.
 *
 *  _secondary_cold_boot:
 *	Whether the macro needs to identify the CPU that is calling it: primary
 *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
 *	the platform initialisations, while the secondaries will be put in a
 *	platform-specific state in the meantime.
 *
 *	If the caller knows this macro will only be called by the primary CPU
 *	then this parameter can be defined to 0 to skip this step.
 *
 *  _init_memory:
 *	Whether the macro needs to initialise the memory.
 *
 *  _init_c_runtime:
 *	Whether the macro needs to initialise the C runtime environment.
 *
 *  _exception_vectors:
 *	Address of the exception vectors to program in the VBAR_EL3 register.
 *
 *  _pie_fixup_size:
 *	Size of memory region to fixup Global Descriptor Table (GDT).
 *
 *	A non-zero value is expected when firmware needs GDT to be fixed-up.
 *
 * -----------------------------------------------------------------------------
 */
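/* -----------------------------------------------------------------------------
 * Illustrative only: a representative invocation of this macro from a BL image
 * entrypoint might look like the following. The parameter values and the
 * "bl1_exceptions" vector table symbol are examples, not a prescription; each
 * image chooses them according to its own boot flow.
 *
 *	el3_entrypoint_common					\
 *		_init_sctlr=1					\
 *		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
 *		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
 *		_init_memory=1					\
 *		_init_c_runtime=1				\
 *		_exception_vectors=bl1_exceptions		\
 *		_pie_fixup_size=0
 * -----------------------------------------------------------------------------
 */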
	.macro el3_entrypoint_common					\
		_init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,	\
		_init_memory, _init_c_runtime, _exception_vectors,	\
		_pie_fixup_size

	.if \_init_sctlr
		/* -------------------------------------------------------------
		 * This is the initialisation of SCTLR_EL3 and so must ensure
		 * that all fields are explicitly set rather than relying on hw.
		 * Some fields reset to an IMPLEMENTATION DEFINED value and
		 * others are architecturally UNKNOWN on reset.
		 *
		 * SCTLR.EE: Set the CPU endianness before doing anything that
		 *  might involve memory reads or writes. Set to zero to select
		 *  Little Endian.
		 *
		 * SCTLR_EL3.WXN: For the EL3 translation regime, this field can
		 *  force all memory regions that are writeable to be treated as
		 *  XN (Execute-never). Set to zero so that this control has no
		 *  effect on memory access permissions.
		 *
		 * SCTLR_EL3.SA: Set to zero to disable Stack Alignment check.
		 *
		 * SCTLR_EL3.A: Set to zero to disable Alignment fault checking.
		 *
		 * SCTLR.DSSBS: Set to zero to disable speculation store bypass
		 *  safe behaviour upon exception entry to EL3.
		 * -------------------------------------------------------------
		 */
		mov_imm	x0, (SCTLR_RESET_VAL & ~(SCTLR_EE_BIT | SCTLR_WXN_BIT \
				| SCTLR_SA_BIT | SCTLR_A_BIT | SCTLR_DSSBS_BIT))
#if ENABLE_FEAT_RAS
		/* If FEAT_RAS is present assume FEAT_IESB is also present */
		orr	x0, x0, #SCTLR_IESB_BIT
#endif
		msr	sctlr_el3, x0
		isb
	.endif /* _init_sctlr */

	.if \_warm_boot_mailbox
		/* -------------------------------------------------------------
		 * This code will be executed for both warm and cold resets.
		 * Now is the time to distinguish between the two.
		 * Query the platform entrypoint address and if it is not zero
		 * then it means it is a warm boot so jump to this address.
		 * -------------------------------------------------------------
		 */
		bl	plat_get_my_entrypoint
		cbz	x0, do_cold_boot
		br	x0

	do_cold_boot:
	.endif /* _warm_boot_mailbox */

	.if \_pie_fixup_size
#if ENABLE_PIE
		/*
		 * ------------------------------------------------------------
		 * If PIE is enabled, fix up the Global Descriptor Table (GDT)
		 * only once, during the primary core's cold boot path.
		 *
		 * The compile-time base address required for the fixup is
		 * calculated using the "pie_fixup" label, which lies within
		 * the first page of the image.
		 * ------------------------------------------------------------
		 */
	pie_fixup:
		ldr	x0, =pie_fixup
		and	x0, x0, #~(PAGE_SIZE_MASK)
		mov_imm	x1, \_pie_fixup_size
		add	x1, x1, x0
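		/*
		 * At this point x0 holds the page-aligned runtime base of the
		 * region and x1 its end address (base + _pie_fixup_size);
		 * these are the values passed to fixup_gdt_reloc below.
		 */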
		bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */
	.endif /* _pie_fixup_size */

	/* ---------------------------------------------------------------------
	 * Set the exception vectors.
	 * ---------------------------------------------------------------------
	 */
	adr	x0, \_exception_vectors
	msr	vbar_el3, x0
	isb

#if !(defined(IMAGE_BL2) && ENABLE_RME)
	/* ---------------------------------------------------------------------
	 * It is a cold boot.
	 * Perform any processor specific actions upon reset e.g. cache, TLB
	 * invalidations etc.
	 * ---------------------------------------------------------------------
	 */
	bl	reset_handler
#endif

	el3_arch_init_common

	/* ---------------------------------------------------------------------
	 * Set up the EL3 execution context (i.e. root_context).
	 * ---------------------------------------------------------------------
	 */
	setup_el3_execution_context

	.if \_secondary_cold_boot
		/* -------------------------------------------------------------
		 * Check if this is a primary or secondary CPU cold boot.
		 * The primary CPU will set up the platform while the
		 * secondaries are placed in a platform-specific state until the
		 * primary CPU performs the necessary actions to bring them out
		 * of that state and allows entry into the OS.
		 * -------------------------------------------------------------
		 */
		bl	plat_is_my_cpu_primary
		cbnz	w0, do_primary_cold_boot

		/* This is a cold boot on a secondary CPU */
		bl	plat_secondary_cold_boot_setup
		/* plat_secondary_cold_boot_setup() is not supposed to return */
		bl	el3_panic

	do_primary_cold_boot:
	.endif /* _secondary_cold_boot */

	/* ---------------------------------------------------------------------
	 * Initialise memory now. Secondary CPU initialisation won't get to this
	 * point.
	 * ---------------------------------------------------------------------
	 */

	.if \_init_memory
		bl	platform_mem_init
	.endif /* _init_memory */

	/* ---------------------------------------------------------------------
	 * Init C runtime environment:
	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
	 *       - the .bss section;
	 *       - the coherent memory section (if any).
	 *   - Relocate the data section from ROM to RAM, if required.
	 * ---------------------------------------------------------------------
	 */
	.if \_init_c_runtime
#if defined(IMAGE_BL31) || (defined(IMAGE_BL2) && \
	((RESET_TO_BL2 && BL2_INV_DCACHE) || ENABLE_RME))
		/* -------------------------------------------------------------
		 * Invalidate the RW memory used by this BL image. This
		 * includes the data and NOBITS sections. This is done to
		 * safeguard against possible corruption of this memory by
		 * dirty cache lines in a system cache as a result of use by
		 * an earlier boot loader stage. If PIE is enabled however,
		 * RO sections including the GOT may be modified during
		 * PIE fixup. Therefore, to be on the safe side, invalidate
		 * the entire image region if PIE is enabled.
		 * -------------------------------------------------------------
		 */
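		/*
		 * Each inv_dcache_range call below is given x0 = start address
		 * of the region and x1 = its size in bytes (computed as
		 * end - start).
		 */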
#if ENABLE_PIE
#if SEPARATE_CODE_AND_RODATA
		adrp	x0, __TEXT_START__
		add	x0, x0, :lo12:__TEXT_START__
#else
		adrp	x0, __RO_START__
		add	x0, x0, :lo12:__RO_START__
#endif /* SEPARATE_CODE_AND_RODATA */
#else
		adrp	x0, __RW_START__
		add	x0, x0, :lo12:__RW_START__
#endif /* ENABLE_PIE */
		adrp	x1, __RW_END__
		add	x1, x1, :lo12:__RW_END__
		sub	x1, x1, x0
		bl	inv_dcache_range
#if defined(IMAGE_BL31) && SEPARATE_NOBITS_REGION
		adrp	x0, __NOBITS_START__
		add	x0, x0, :lo12:__NOBITS_START__
		adrp	x1, __NOBITS_END__
		add	x1, x1, :lo12:__NOBITS_END__
		sub	x1, x1, x0
		bl	inv_dcache_range
#endif
#if defined(IMAGE_BL2) && SEPARATE_BL2_NOLOAD_REGION
		adrp	x0, __BL2_NOLOAD_START__
		add	x0, x0, :lo12:__BL2_NOLOAD_START__
		adrp	x1, __BL2_NOLOAD_END__
		add	x1, x1, :lo12:__BL2_NOLOAD_END__
		sub	x1, x1, x0
		bl	inv_dcache_range
#endif
#endif
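		/*
		 * Zero-initialise the .bss section. zeromem expects x0 = base
		 * address and x1 = length in bytes.
		 */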
		adrp	x0, __BSS_START__
		add	x0, x0, :lo12:__BSS_START__

		adrp	x1, __BSS_END__
		add	x1, x1, :lo12:__BSS_END__
		sub	x1, x1, x0
		bl	zeromem

#if USE_COHERENT_MEM
		adrp	x0, __COHERENT_RAM_START__
		add	x0, x0, :lo12:__COHERENT_RAM_START__
		adrp	x1, __COHERENT_RAM_END_UNALIGNED__
		add	x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
		sub	x1, x1, x0
		bl	zeromem
#endif

#if defined(IMAGE_BL1) ||	\
	(defined(IMAGE_BL2) && RESET_TO_BL2 && BL2_IN_XIP_MEM)
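		/*
		 * Relocate the data section from its ROM load address to its
		 * RAM execution address: memcpy16 is called with
		 * x0 = destination, x1 = source and x2 = length in bytes.
		 */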
		adrp	x0, __DATA_RAM_START__
		add	x0, x0, :lo12:__DATA_RAM_START__
		adrp	x1, __DATA_ROM_START__
		add	x1, x1, :lo12:__DATA_ROM_START__
		adrp	x2, __DATA_RAM_END__
		add	x2, x2, :lo12:__DATA_RAM_END__
		sub	x2, x2, x0
		bl	memcpy16
#endif
	.endif /* _init_c_runtime */

	/* ---------------------------------------------------------------------
	 * Use SP_EL0 for the C runtime stack.
	 * ---------------------------------------------------------------------
	 */
	msr	spsel, #0

	/* ---------------------------------------------------------------------
	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
	 * the MMU is enabled. There is no risk of reading stale stack memory
	 * after enabling the MMU as only the primary CPU is running at the
	 * moment.
	 * ---------------------------------------------------------------------
	 */
	bl	plat_set_my_stack

#if STACK_PROTECTOR_ENABLED
	.if \_init_c_runtime
	bl	update_stack_protector_canary
	.endif /* _init_c_runtime */
#endif
	.endm

	.macro	apply_at_speculative_wa
#if ERRATA_SPECULATIVE_AT
	/*
	 * This macro expects that x30 has already been saved.
	 * It also saves x29, which is used by the called function.
	 */
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	bl	save_and_update_ptw_el1_sys_regs
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif
	.endm

	.macro	restore_ptw_el1_sys_regs
#if ERRATA_SPECULATIVE_AT
	/* -----------------------------------------------------------
	 * In case of ERRATA_SPECULATIVE_AT, the order below must be
	 * followed to ensure that the page table walk is not enabled
	 * until all EL1 system registers have been restored. TCR_EL1
	 * must be updated last, as it restores the previous stage 1
	 * page table walk setting (the TCR_EL1.EPDx bits). The ISBs
	 * ensure that the CPU performs the steps below in order:
	 *
	 * 1. Ensure all other system registers are written before
	 *    updating SCTLR_EL1, using an ISB.
	 * 2. Restore the SCTLR_EL1 register.
	 * 3. Ensure SCTLR_EL1 is written successfully, using an ISB.
	 * 4. Restore the TCR_EL1 register.
	 * -----------------------------------------------------------
	 */
	isb
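	/*
	 * x28 and x29 receive the SCTLR_EL1 and TCR_EL1 values that were
	 * saved earlier by save_and_update_ptw_el1_sys_regs.
	 */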
	ldp	x28, x29, [sp, #CTX_ERRATA_SPEC_AT_OFFSET + CTX_ERRATA_SPEC_AT_SCTLR_EL1]
	msr	sctlr_el1, x28
	isb
	msr	tcr_el1, x29
#endif
	.endm

/* -----------------------------------------------------------------
 * The macro below determines the security state of the context that
 * will be entered upon ERET, from the SCR_EL3 value provided in
 * \_scr_reg (as read from the context structure).
 * ------------------------------------------------------------------
 */
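/* -----------------------------------------------------------------
 * On exit, \_ret holds 0 for a Secure context, 1 for a Non-secure
 * context (taken from SCR_EL3.NS) or 2 for a Realm context
 * (SCR_EL3.NSE set). Typical usage, assuming the SCR_EL3 value has
 * already been loaded into a scratch register such as x1:
 *
 *	get_security_state x0, x1
 * ------------------------------------------------------------------
 */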
	.macro get_security_state _ret:req, _scr_reg:req
		ubfx	\_ret, \_scr_reg, #SCR_NSE_SHIFT, #1
		cmp	\_ret, #1
		beq	realm_state
		bfi	\_ret, \_scr_reg, #0, #1
		b	end
	realm_state:
		mov	\_ret, #2
	end:
	.endm

/*-----------------------------------------------------------------------------
 * Helper macro to configure EL3 registers we care about, while executing
 * at EL3/Root world. Root world has its own execution environment and
 * needs to have its settings configured to be independent of other worlds.
 * -----------------------------------------------------------------------------
 */
	.macro setup_el3_execution_context

	/* ---------------------------------------------------------------------
	 * The following registers need to be part of the separate root context
	 * as their values are of importance during EL3 execution.
	 * Hence these registers are overwritten to their initial values,
	 * irrespective of which world execution returns from, to ensure EL3
	 * has a consistent execution context throughout the lifetime of TF-A.
	 *
	 * DAIF.A: Enable External Aborts and SError Interrupts at EL3.
	 *
	 * MDCR_EL3.SDD: Set to one to disable AArch64 Secure self-hosted debug.
	 *  Debug exceptions, other than Breakpoint Instruction exceptions, are
	 *  disabled from all ELs in Secure state.
	 *
	 * SCR_EL3.EA: Set to one to enable SError interrupts at EL3.
	 *
	 * SCR_EL3.SIF: Set to one to disable instruction fetches from
	 *  Non-secure memory.
	 *
	 * PMCR_EL0.DP: Set to one so that the cycle counter,
	 *  PMCCNTR_EL0 does not count when event counting is prohibited.
	 *  Necessary on PMUv3 <= p7 where MDCR_EL3.{SCCD,MCCD} are not
	 *  available.
	 *
	 * PSTATE.DIT: Set to one to enable the Data Independent Timing (DIT)
	 *  functionality, if implemented in EL3.
	 * ---------------------------------------------------------------------
	 */
		msr	daifclr, #DAIF_ABT_BIT

		mrs	x15, mdcr_el3
		orr	x15, x15, #MDCR_SDD_BIT
		msr	mdcr_el3, x15

		mrs	x15, scr_el3
		orr	x15, x15, #SCR_EA_BIT
		orr	x15, x15, #SCR_SIF_BIT
		msr	scr_el3, x15

		mrs	x15, pmcr_el0
		orr	x15, x15, #PMCR_EL0_DP_BIT
		msr	pmcr_el0, x15

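		/*
		 * When ENABLE_FEAT_DIT is greater than 1 (dynamic detection),
		 * ID_AA64PFR0_EL1.DIT is probed at runtime before PSTATE.DIT
		 * is set; otherwise the feature is assumed to be present.
		 */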
#if ENABLE_FEAT_DIT
#if ENABLE_FEAT_DIT > 1
		mrs	x15, id_aa64pfr0_el1
		ubfx	x15, x15, #ID_AA64PFR0_DIT_SHIFT, #ID_AA64PFR0_DIT_LENGTH
		cbz	x15, 1f
#endif
		mov	x15, #DIT_BIT
		msr	DIT, x15
	1:
#endif

		isb
	.endm

#endif /* EL3_COMMON_MACROS_S */