/*
 * Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef EL3_COMMON_MACROS_S
#define EL3_COMMON_MACROS_S

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <context.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/per_cpu/per_cpu_macros.S>

	/*
	 * Helper macro to initialise EL3 registers we care about.
	 */
	.macro el3_arch_init_common
	/* ---------------------------------------------------------------------
	 * SCTLR_EL3 has already been initialised - read current value before
	 * modifying.
	 *
	 * SCTLR_EL3.I: Enable the instruction cache.
	 *
	 * SCTLR_EL3.SA: Enable Stack Alignment check. A SP alignment fault
	 *  exception is generated if a load or store instruction executed at
	 *  EL3 uses the SP as the base address and the SP is not aligned to a
	 *  16-byte boundary.
	 *
	 * SCTLR_EL3.A: Enable Alignment fault checking. All instructions that
	 *  load or store one or more registers have an alignment check that the
	 *  address being accessed is aligned to the size of the data element(s)
	 *  being accessed.
	 *
	 * SCTLR_EL3.BT: PAuth instructions are compatible with bti jc
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x1, (SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el3
#if ENABLE_BTI
	bic	x0, x0, #SCTLR_BT_BIT
#endif
	orr	x0, x0, x1
	msr	sctlr_el3, x0
	isb

#if ENABLE_FEAT_SCTLR2
#if ENABLE_FEAT_SCTLR2 > 1
	is_feat_sctlr2_present_asm x1
	beq	feat_sctlr2_not_supported\@
#endif
	mov	x1, #SCTLR2_RESET_VAL
	msr	SCTLR2_EL3, x1
feat_sctlr2_not_supported\@:
#endif
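
/*
 * Note: as with other TF-A feature flags, ENABLE_FEAT_SCTLR2 follows the
 * FEAT_STATE convention (summarised here for reference, not new behaviour):
 * 0 disables the feature, 1 assumes it is always present, and a value
 * greater than 1 adds a runtime presence check, as in the block above.
 */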

#ifdef IMAGE_BL31
	/* ---------------------------------------------------------------------
	 * Initialise the per-cpu framework to use tpidr_el3.
	 *
	 * This is done early so that crash reporting has access to the crash
	 * stack. Crash reporting depends on cpu_data to report an unhandled
	 * exception, so deferring this initialisation can lead to recursive
	 * exceptions due to a NULL TPIDR_EL3.
	 * ---------------------------------------------------------------------
	 */
	per_cpu_init
#endif /* IMAGE_BL31 */

	/* ---------------------------------------------------------------------
	 * Initialise SCR_EL3, setting all fields rather than relying on hw.
	 * All fields are architecturally UNKNOWN on reset. The following fields
	 * do not change during the TF lifetime. The remaining fields are set to
	 * zero here but are updated ahead of transitioning to a lower EL in the
	 * function cm_init_context_common().
	 *
	 * SCR_EL3.EEL2: Set to one if S-EL2 is present and enabled.
	 *
	 * NOTE: Modifying EEL2 bit along with EA bit ensures that we mitigate
	 * against ERRATA_V2_3099206.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, SCR_RESET_VAL
#if IMAGE_BL31 && defined(SPD_spmd) && SPMD_SPM_AT_SEL2
	mrs	x1, id_aa64pfr0_el1
	and	x1, x1, #(ID_AA64PFR0_SEL2_MASK << ID_AA64PFR0_SEL2_SHIFT)
	cbz	x1, 1f
	orr	x0, x0, #SCR_EEL2_BIT
#endif
1:
	msr	scr_el3, x0

	/* ---------------------------------------------------------------------
	 * Initialise MDCR_EL3, setting all fields rather than relying on hw.
	 * Some fields are architecturally UNKNOWN on reset.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, MDCR_EL3_RESET_VAL
	msr	mdcr_el3, x0

	/* ---------------------------------------------------------------------
	 * Initialise CPTR_EL3, setting all fields rather than relying on hw.
	 * All fields are architecturally UNKNOWN on reset.
	 * ---------------------------------------------------------------------
	 */
	mov_imm	x0, CPTR_EL3_RESET_VAL
	msr	cptr_el3, x0

#if ENABLE_FEAT_MORELLO
#if ENABLE_FEAT_MORELLO == 2
	is_feat_morello_present_asm	x10
	beq	2f
#endif /* ENABLE_FEAT_MORELLO == 2 */
	/*
	 * SETTAG: disable privileged tag-setting operations
	 */
	mov	x0, #CSCR_EL3_SETTAG
	msr	cscr_el3, x0

	/*
	 * Disable PCC/DCC base offset
	 */
	msr	cctlr_el3, xzr
2:
#endif /* ENABLE_FEAT_MORELLO */
	.endm

/* -----------------------------------------------------------------------------
 * This is the superset of actions that need to be performed during a cold boot
 * or a warm boot in EL3. This code is shared by BL1 and BL31.
 *
 * This macro will always perform reset handling, architectural initialisations
 * and stack setup. The rest of the actions are optional because they might not
 * be needed, depending on the context in which this macro is called. This is
 * why this macro is parameterised; each parameter allows some actions to be
 * enabled or disabled.
 *
 *  _init_sctlr:
 *	Whether the macro needs to initialise SCTLR_EL3, including configuring
 *	the endianness of data accesses.
 *
 *  _warm_boot_mailbox:
 *	Whether the macro needs to detect the type of boot (cold/warm). The
 *	detection is based on the platform entrypoint address: if it is zero
 *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
 *	this macro jumps to the platform entrypoint address.
 *
 *  _secondary_cold_boot:
 *	Whether the macro needs to identify the CPU that is calling it: primary
 *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
 *	the platform initialisations, while the secondaries will be put in a
 *	platform-specific state in the meantime.
 *
 *	If the caller knows this macro will only be called by the primary CPU
 *	then this parameter can be defined to 0 to skip this step.
 *
 *  _init_memory:
 *	Whether the macro needs to initialise the memory.
 *
 *  _init_c_runtime:
 *	Whether the macro needs to initialise the C runtime environment.
 *
 *  _exception_vectors:
 *	Address of the exception vectors to program in the VBAR_EL3 register.
 *
 *  _pie_fixup_size:
 *	Size of the memory region in which the Global Descriptor Table (GDT)
 *	is fixed up.
 *
 *	A non-zero value is expected when the firmware needs the GDT to be
 *	fixed up.
 *
 * -----------------------------------------------------------------------------
 */
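/*
 * Illustrative invocation, modelled on a typical BL1-style entrypoint (the
 * parameter values and the vector symbol shown are examples rather than a
 * prescription; each image supplies its own):
 *
 *	el3_entrypoint_common					\
 *		_init_sctlr=1					\
 *		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
 *		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
 *		_init_memory=1					\
 *		_init_c_runtime=1				\
 *		_exception_vectors=bl1_exceptions		\
 *		_pie_fixup_size=0
 */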
	.macro el3_entrypoint_common					\
		_init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,	\
		_init_memory, _init_c_runtime, _exception_vectors,	\
		_pie_fixup_size

	.if \_init_sctlr
		/* -------------------------------------------------------------
		 * This is the initialisation of SCTLR_EL3 and so must ensure
		 * that all fields are explicitly set rather than relying on hw.
		 * Some fields reset to an IMPLEMENTATION DEFINED value and
		 * others are architecturally UNKNOWN on reset.
		 *
		 * SCTLR.EE: Set the CPU endianness before doing anything that
		 *  might involve memory reads or writes. Set to zero to select
		 *  Little Endian.
		 *
		 * SCTLR_EL3.WXN: For the EL3 translation regime, this field can
		 *  force all memory regions that are writeable to be treated as
		 *  XN (Execute-never). Set to zero so that this control has no
		 *  effect on memory access permissions.
		 *
		 * SCTLR_EL3.SA: Set to zero to disable Stack Alignment check.
		 *
		 * SCTLR_EL3.A: Set to zero to disable Alignment fault checking.
		 *
		 * SCTLR.DSSBS: Set to zero to disable speculation store bypass
		 *  safe behaviour upon exception entry to EL3.
		 * -------------------------------------------------------------
		 */
		mov_imm	x0, (SCTLR_RESET_VAL & ~(SCTLR_EE_BIT | SCTLR_WXN_BIT \
				| SCTLR_SA_BIT | SCTLR_A_BIT | SCTLR_DSSBS_BIT))
#if ENABLE_FEAT_RAS
		/* If FEAT_RAS is present, assume FEAT_IESB is also present */
		orr	x0, x0, #SCTLR_IESB_BIT
#endif
		msr	sctlr_el3, x0
		isb
	.endif /* _init_sctlr */

	.if \_warm_boot_mailbox
		/* -------------------------------------------------------------
		 * This code will be executed for both warm and cold resets.
		 * Now is the time to distinguish between the two.
		 * Query the platform entrypoint address: if it is not zero,
		 * this is a warm boot, so jump to that address.
		 * -------------------------------------------------------------
		 */
		bl	plat_get_my_entrypoint
		cbz	x0, do_cold_boot
		br	x0

	do_cold_boot:
	.endif /* _warm_boot_mailbox */

	.if \_pie_fixup_size
#if ENABLE_PIE
		/*
		 * ------------------------------------------------------------
		 * If PIE is enabled, fix up the Global Descriptor Table. This
		 * is done only once, during the primary core's cold boot path.
		 *
		 * The compile-time base address, required for the fixup, is
		 * calculated using the "pie_fixup" label present within the
		 * first page.
		 * ------------------------------------------------------------
		 */
	pie_fixup:
		ldr	x0, =pie_fixup
		and	x0, x0, #~(PAGE_SIZE_MASK)
		mov_imm	x1, \_pie_fixup_size
		add	x1, x1, x0
		bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */
	.endif /* _pie_fixup_size */

	/* ---------------------------------------------------------------------------
	 * Set the exception vectors. The cptr_el3 write needs to be done
	 * before the first capability register access.
	 * ---------------------------------------------------------------------------
	 */
#if ENABLE_FEAT_MORELLO
	mrs	x0, cptr_el3
	orr	x0, x0, #EC_BIT
	msr	cptr_el3, x0
#endif /* ENABLE_FEAT_MORELLO */
	adr	x0, \_exception_vectors
	msr_wide_reg	vbar_el3, 0
	isb

	call_reset_handler

	el3_arch_init_common

	/* ---------------------------------------------------------------------
	 * Set up the EL3 execution context (i.e. root_context).
	 * ---------------------------------------------------------------------
	 */
	setup_el3_execution_context

	.if \_secondary_cold_boot
		/* -------------------------------------------------------------
		 * Check if this is a primary or secondary CPU cold boot.
		 * The primary CPU will set up the platform while the
		 * secondaries are placed in a platform-specific state until the
		 * primary CPU performs the necessary actions to bring them out
		 * of that state and allows entry into the OS.
		 * -------------------------------------------------------------
		 */
		bl	plat_is_my_cpu_primary
		cbnz	w0, do_primary_cold_boot

		/* This is a cold boot on a secondary CPU */
		bl	plat_secondary_cold_boot_setup
		/* plat_secondary_cold_boot_setup() is not supposed to return */
		bl	el3_panic

	do_primary_cold_boot:
	.endif /* _secondary_cold_boot */

	/* ---------------------------------------------------------------------
	 * Initialize memory now. Secondary CPU initialization won't get to this
	 * point.
	 * ---------------------------------------------------------------------
	 */

	.if \_init_memory
		bl	platform_mem_init
	.endif /* _init_memory */
	/* ---------------------------------------------------------------------
	 * Init C runtime environment:
	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
	 *       - the .bss section;
	 *       - the coherent memory section (if any).
	 *   - Relocate the data section from ROM to RAM, if required.
	 * ---------------------------------------------------------------------
	 */
	.if \_init_c_runtime
#if defined(IMAGE_BL31) || (defined(IMAGE_BL2) && \
	((RESET_TO_BL2 && BL2_INV_DCACHE) || ENABLE_RME))
		/* -------------------------------------------------------------
		 * Invalidate the RW memory used by the BL31 image. This
		 * includes the data and NOBITS sections. This is done to
		 * safeguard against possible corruption of this memory by
		 * dirty cache lines in a system cache as a result of use by
		 * an earlier boot loader stage. If PIE is enabled however,
		 * RO sections including the GOT may be modified during
		 * pie fixup. Therefore, to be on the safe side, invalidate
		 * the entire image region if PIE is enabled.
		 * -------------------------------------------------------------
		 */
#if ENABLE_PIE
#if SEPARATE_CODE_AND_RODATA
		adrp	x0, __TEXT_START__
		add	x0, x0, :lo12:__TEXT_START__
#else
		adrp	x0, __RO_START__
		add	x0, x0, :lo12:__RO_START__
#endif /* SEPARATE_CODE_AND_RODATA */
#else
		adrp	x0, __RW_START__
		add	x0, x0, :lo12:__RW_START__
#endif /* ENABLE_PIE */
		adrp	x1, __RW_END__
		add	x1, x1, :lo12:__RW_END__
		sub	x1, x1, x0
		bl	inv_dcache_range
#if defined(IMAGE_BL31) && SEPARATE_NOBITS_REGION
		adrp	x0, __NOBITS_START__
		add	x0, x0, :lo12:__NOBITS_START__
		adrp	x1, __NOBITS_END__
		add	x1, x1, :lo12:__NOBITS_END__
		sub	x1, x1, x0
		bl	inv_dcache_range
#endif
#if defined(IMAGE_BL2) && SEPARATE_BL2_NOLOAD_REGION
		adrp	x0, __BL2_NOLOAD_START__
		add	x0, x0, :lo12:__BL2_NOLOAD_START__
		adrp	x1, __BL2_NOLOAD_END__
		add	x1, x1, :lo12:__BL2_NOLOAD_END__
		sub	x1, x1, x0
		bl	inv_dcache_range
#endif
#endif
#if defined(IMAGE_BL31)
		adrp	x0, __PER_CPU_START__
		add	x0, x0, :lo12:__PER_CPU_START__
		adrp	x1, __PER_CPU_END__
		add	x1, x1, :lo12:__PER_CPU_END__
		sub	x1, x1, x0
#if (PLATFORM_NODE_COUNT > 1)
		mov	x9, x1
#endif /* (PLATFORM_NODE_COUNT > 1) */
		bl	zeromem
#if (PLATFORM_NODE_COUNT > 1)
		/*
		 * Zero-initialise the per-cpu sections defined by the
		 * platform. Care must be taken to preserve the registers
		 * clobbered by the calls below; a standard layout for the
		 * container of per-cpu nodes is not yet defined.
		 */
		mov	x10, #1
		mov	x11, #PLATFORM_NODE_COUNT

		1:
			cmp	x10, x11
			b.hs	2f

			mov	x0, x10
			bl	plat_per_cpu_node_base
			cmn	x0, #1
			b.eq	3f

			/* x1 contains the size parameter */
			mov	x1, x9
			bl	zeromem

		3:
			add	x10, x10, #1
			b	1b

		2:
#endif /* (PLATFORM_NODE_COUNT > 1) */
#endif /* defined(IMAGE_BL31) */

		adrp	x0, __BSS_START__
		add	x0, x0, :lo12:__BSS_START__

		adrp	x1, __BSS_END__
		add	x1, x1, :lo12:__BSS_END__
		sub	x1, x1, x0
		bl	zeromem

#if USE_COHERENT_MEM
		adrp	x0, __COHERENT_RAM_START__
		add	x0, x0, :lo12:__COHERENT_RAM_START__
		adrp	x1, __COHERENT_RAM_END_UNALIGNED__
		add	x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
		sub	x1, x1, x0
		bl	zeromem
#endif

#if defined(IMAGE_BL1) ||	\
	(defined(IMAGE_BL2) && RESET_TO_BL2 && BL2_IN_XIP_MEM) || \
	(defined(IMAGE_BL31) && SEPARATE_RWDATA_REGION)

		adrp	x0, __DATA_RAM_START__
		add	x0, x0, :lo12:__DATA_RAM_START__
		adrp	x1, __DATA_ROM_START__
		add	x1, x1, :lo12:__DATA_ROM_START__
		adrp	x2, __DATA_RAM_END__
		add	x2, x2, :lo12:__DATA_RAM_END__
		sub	x2, x2, x0
		bl	memcpy16
#endif
	.endif /* _init_c_runtime */

	/* ---------------------------------------------------------------------
	 * Use SP_EL0 for the C runtime stack.
	 * ---------------------------------------------------------------------
	 */
	msr	spsel, #0

	/* ---------------------------------------------------------------------
	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
	 * the MMU is enabled. There is no risk of reading stale stack memory
	 * after enabling the MMU as only the primary CPU is running at the
	 * moment.
	 * ---------------------------------------------------------------------
	 */
	bl	plat_set_my_stack

#if STACK_PROTECTOR_ENABLED
	.if \_init_c_runtime
	bl	update_stack_protector_canary
	.endif /* _init_c_runtime */
#endif
	.endm

	.macro	apply_at_speculative_wa
#if ERRATA_SPECULATIVE_AT
	/*
	 * This macro expects that x30 has already been saved.
	 * Also save x29, which is used by the called function.
	 */
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	bl	save_and_update_ptw_el1_sys_regs
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif
	.endm

	.macro	restore_ptw_el1_sys_regs
#if ERRATA_SPECULATIVE_AT
	/* -----------------------------------------------------------
	 * In the ERRATA_SPECULATIVE_AT case, the order below must be
	 * followed so that the page table walk is not re-enabled until
	 * all other EL1 system registers have been restored. TCR_EL1
	 * is updated last, as it restores the previous stage 1 page
	 * table walk setting (the TCR_EL1.EPDx bits). The ISBs ensure
	 * that the CPU performs the following steps in order:
	 *
	 * 1. Ensure all other system registers are written before
	 *    updating SCTLR_EL1, using an ISB.
	 * 2. Restore the SCTLR_EL1 register.
	 * 3. Ensure SCTLR_EL1 is written successfully, using an ISB.
	 * 4. Restore the TCR_EL1 register.
	 * -----------------------------------------------------------
	 */
	isb
	ldp	x28, x29, [sp, #CTX_ERRATA_SPEC_AT_OFFSET + CTX_ERRATA_SPEC_AT_SCTLR_EL1]
	msr	sctlr_el1, x28
	isb
	msr	tcr_el1, x29
#endif
	.endm

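/*
 * Illustrative usage of the two macros above (a sketch only; the real call
 * sites live in the EL3 exception handling code): the workaround is applied
 * on exception entry, after x30 has been saved to the context, and the PTW
 * registers are restored on the exit path before returning to the lower EL:
 *
 *	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
 *	apply_at_speculative_wa
 *	...
 *	restore_ptw_el1_sys_regs
 *	...
 *	eret
 */
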
/* -----------------------------------------------------------------
 * The macro below reads SCR_EL3 from the context structure to
 * determine the security state of the context upon ERET.
 * -----------------------------------------------------------------
 */
	.macro get_security_state _ret:req, _scr_reg:req
		ubfx	\_ret, \_scr_reg, #SCR_NSE_SHIFT, #1
		cmp	\_ret, #1
		beq	realm_state
		bfi	\_ret, \_scr_reg, #0, #1
		b	end
	realm_state:
		mov	\_ret, #2
	end:
	.endm

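/*
 * Illustrative use (a sketch; the register choice is arbitrary): with the
 * saved SCR_EL3 value of the target context in x1, the macro leaves 0
 * (Secure), 1 (Non-secure) or 2 (Realm) in x0:
 *
 *	ldr	x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
 *	get_security_state x0, x1
 */
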
/*-----------------------------------------------------------------------------
 * Helper macro to configure the EL3 registers we care about while executing
 * at EL3/in the Root world. The Root world has its own execution environment
 * and needs its settings to be configured independently of the other worlds.
 * -----------------------------------------------------------------------------
 */
	.macro setup_el3_execution_context

	/* ---------------------------------------------------------------------
	 * The following registers need to be part of the separate root context
	 * as their values are of importance during EL3 execution.
	 * Hence these registers are overwritten to their initial values,
	 * irrespective of the world being returned from, to ensure EL3 has a
	 * consistent execution context throughout the lifetime of TF-A.
	 *
	 * DAIF.A: Enable External Aborts and SError Interrupts at EL3.
	 *
	 * MDCR_EL3.SDD: Set to one to disable AArch64 Secure self-hosted debug.
	 *  Debug exceptions, other than Breakpoint Instruction exceptions, are
	 *  disabled from all ELs in Secure state.
	 *
	 * SCR_EL3.EA: Set to one to enable SError interrupts at EL3.
	 *
	 * SCR_EL3.SIF: Set to one to disable instruction fetches from
	 *  Non-secure memory.
	 *
	 * SCR_EL3.TRNDR: Set to zero so that reads of RNDR and RNDRRS are not
	 *  trapped to EL3.
	 *
	 * PMCR_EL0.DP: Set to one so that the cycle counter, PMCCNTR_EL0, does
	 *  not count when event counting is prohibited. Necessary on
	 *  PMUv3 <= p7, where MDCR_EL3.{SCCD,MCCD} are not available.
	 *
	 * CPTR_EL3.EZ: Set to one so that accesses to ZCR_EL3 do not trap.
	 * CPTR_EL3.ESM: Set to one so that SME-related registers do not trap.
	 * CPTR_EL3.EC: Set to one when Morello is enabled so that accesses to
	 *  the Morello registers are not trapped.
	 *
	 * PSTATE.DIT: Set to one to enable the Data Independent Timing (DIT)
	 *  functionality, if implemented in EL3.
	 * ---------------------------------------------------------------------
	 */
		msr	daifclr, #DAIF_ABT_BIT

		mrs	x15, mdcr_el3
		orr	x15, x15, #MDCR_SDD_BIT
		msr	mdcr_el3, x15

		mrs	x15, scr_el3
		orr	x15, x15, #SCR_EA_BIT
		orr	x15, x15, #SCR_SIF_BIT
		bic	x15, x15, #SCR_TRNDR_BIT
		msr	scr_el3, x15

		mrs	x15, pmcr_el0
		orr	x15, x15, #PMCR_EL0_DP_BIT
		msr	pmcr_el0, x15

		mrs	x15, cptr_el3
		orr	x15, x15, #CPTR_EZ_BIT
		orr	x15, x15, #ESM_BIT
#if ENABLE_FEAT_MORELLO
		orr	x15, x15, #EC_BIT
#endif /* ENABLE_FEAT_MORELLO */
		msr	cptr_el3, x15

#if ENABLE_FEAT_DIT
#if ENABLE_FEAT_DIT > 1
		mrs	x15, id_aa64pfr0_el1
		ubfx	x15, x15, #ID_AA64PFR0_DIT_SHIFT, #ID_AA64PFR0_DIT_LENGTH
		cbz	x15, 1f
#endif
		mov	x15, #DIT_BIT
		msr	DIT, x15
	1:
#endif

		isb
	.endm

#endif /* EL3_COMMON_MACROS_S */