/*
 * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef EL3_COMMON_MACROS_S
#define EL3_COMMON_MACROS_S

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <lib/xlat_tables/xlat_tables_defs.h>

#define PAGE_START_MASK		~(PAGE_SIZE_MASK)
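/*
 * Note: AND-ing an address with PAGE_START_MASK clears its page-offset bits,
 * i.e. rounds the address down to the start of the page containing it. It is
 * used below to derive the page-aligned base address for the PIE fixup.
 */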

	/*
	 * Helper macro to initialise EL3 registers we care about.
	 */
	.macro el3_arch_init_common
	/* ---------------------------------------------------------------------
	 * SCTLR has already been initialised - read current value before
	 * modifying.
	 *
	 * SCTLR.I: Enable the instruction cache.
	 *
	 * SCTLR.A: Enable Alignment fault checking. All instructions that load
	 *  or store one or more registers have an alignment check that the
	 *  address being accessed is aligned to the size of the data element(s)
	 *  being accessed.
	 * ---------------------------------------------------------------------
	 */
	ldr	r1, =(SCTLR_I_BIT | SCTLR_A_BIT)
	ldcopr	r0, SCTLR
	orr	r0, r0, r1
	stcopr	r0, SCTLR
	isb

	/* ---------------------------------------------------------------------
	 * Initialise SCR, setting all fields rather than relying on the hw.
	 *
	 * SCR.SIF: Enabled so that Secure state instruction fetches from
	 *  Non-secure memory are not permitted.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =(SCR_RESET_VAL | SCR_SIF_BIT)
	stcopr	r0, SCR

	/* -----------------------------------------------------
	 * Enable asynchronous data abort exceptions now that
	 * the exception vectors have been set up.
	 * -----------------------------------------------------
	 */
	cpsie   a
	isb

	/* ---------------------------------------------------------------------
	 * Initialise NSACR, setting all the fields, except for the
	 * IMPLEMENTATION DEFINED field, rather than relying on the hw. Some
	 * fields are architecturally UNKNOWN on reset.
	 *
	 * NSACR_ENABLE_FP_ACCESS: Represents NSACR.cp11 and NSACR.cp10. The
	 *  cp11 field is ignored, but is set to the same value as cp10. The
	 *  cp10 field is set to allow access to Advanced SIMD and floating
	 *  point features from both Security states.
	 *
	 * NSACR.NSTRCDIS: When system register trace is implemented, set to
	 *  one so that NS System register accesses to all implemented trace
	 *  registers are disabled.
	 *  When system register trace is not implemented, this bit is RES0 and
	 *  hence set to zero.
	 * ---------------------------------------------------------------------
	 */
	ldcopr	r0, NSACR
	and	r0, r0, #NSACR_IMP_DEF_MASK
	orr	r0, r0, #(NSACR_RESET_VAL | NSACR_ENABLE_FP_ACCESS)
	ldcopr	r1, ID_DFR0
	ubfx	r1, r1, #ID_DFR0_COPTRC_SHIFT, #ID_DFR0_COPTRC_LENGTH
	cmp	r1, #ID_DFR0_COPTRC_SUPPORTED
	bne	1f
	orr	r0, r0, #NSTRCDIS_BIT
1:
	stcopr	r0, NSACR
	isb

	/* ---------------------------------------------------------------------
	 * Initialise CPACR, setting all fields rather than relying on hw. Some
	 * fields are architecturally UNKNOWN on reset.
	 *
	 * CPACR.TRCDIS: Trap control for PL0 and PL1 System register accesses
	 *  to trace registers. Set to zero to allow access.
	 *
	 * CPACR_ENABLE_FP_ACCESS: Represents CPACR.cp11 and CPACR.cp10. The
	 *  cp11 field is ignored, but is set to the same value as cp10. The
	 *  cp10 field is set to allow full access from PL0 and PL1 to
	 *  floating-point and Advanced SIMD features.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =((CPACR_RESET_VAL | CPACR_ENABLE_FP_ACCESS) & ~(TRCDIS_BIT))
	stcopr	r0, CPACR
	isb

	/* ---------------------------------------------------------------------
	 * Initialise FPEXC, setting all fields rather than relying on hw. Some
	 * fields are architecturally UNKNOWN on reset and are set to zero
	 * except for field(s) listed below.
	 *
	 * FPEXC.EN: Enable access to Advanced SIMD and floating point features
	 *  from all exception levels.
	 *
	 * __SOFTFP__: Predefined macro exposed by soft-float toolchains.
	 *  ARMv7 and Cortex-A32 (ARMv8/AArch32) have both soft-float and
	 *  hard-float toolchain variants; avoid compiling the code below with a
	 *  soft-float toolchain, as the "vmsr" instruction would not be
	 *  recognised.
	 * ---------------------------------------------------------------------
	 */
#if ((ARM_ARCH_MAJOR > 7) || defined(ARMV7_SUPPORTS_VFP)) && !(__SOFTFP__)
	ldr	r0, =(FPEXC_RESET_VAL | FPEXC_EN_BIT)
	vmsr	FPEXC, r0
	isb
#endif

#if (ARM_ARCH_MAJOR > 7)
	/* ---------------------------------------------------------------------
	 * Initialise SDCR, setting all the fields rather than relying on hw.
	 *
	 * SDCR.SPD: Disable AArch32 privileged debug. Debug exceptions from
	 *  Secure EL1 are disabled.
	 *
	 * SDCR.SCCD: Set to one so that cycle counting by PMCCNTR is prohibited
	 *  in Secure state. This bit is RES0 in versions of the architecture
	 *  earlier than ARMv8.5; setting it to 1 has no effect on them.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =(SDCR_RESET_VAL | SDCR_SPD(SDCR_SPD_DISABLE) | SDCR_SCCD_BIT)
	stcopr	r0, SDCR

	/* ---------------------------------------------------------------------
	 * Initialise PMCR, setting all fields rather than relying
	 * on hw. Some fields are architecturally UNKNOWN on reset.
	 *
	 * PMCR.LP: Set to one so that event counter overflow, that
	 *  is recorded in PMOVSCLR[0-30], occurs on the increment
	 *  that changes PMEVCNTR<n>[63] from 1 to 0, when ARMv8.5-PMU
	 *  is implemented. This bit is RES0 in versions of the architecture
	 *  earlier than ARMv8.5; setting it to 1 has no effect on them.
	 *  This bit is Reserved, UNK/SBZP in ARMv7.
	 *
	 * PMCR.LC: Set to one so that cycle counter overflow, that
	 *  is recorded in PMOVSCLR[31], occurs on the increment
	 *  that changes PMCCNTR[63] from 1 to 0.
	 *  This bit is Reserved, UNK/SBZP in ARMv7.
	 *
	 * PMCR.DP: Set to one to prohibit cycle counting whilst in Secure mode.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =(PMCR_RESET_VAL | PMCR_DP_BIT | PMCR_LC_BIT | \
		      PMCR_LP_BIT)
#else
	ldr	r0, =(PMCR_RESET_VAL | PMCR_DP_BIT)
#endif
	stcopr	r0, PMCR

	/*
	 * If Data Independent Timing (DIT) functionality is implemented,
	 * always enable DIT in EL3.
	 */
	ldcopr	r0, ID_PFR0
	and	r0, r0, #(ID_PFR0_DIT_MASK << ID_PFR0_DIT_SHIFT)
	cmp	r0, #ID_PFR0_DIT_SUPPORTED
	bne	1f
	mrs	r0, cpsr
	orr	r0, r0, #CPSR_DIT_BIT
	msr	cpsr_cxsf, r0
1:
	.endm

/* -----------------------------------------------------------------------------
 * This is the superset of actions that need to be performed during a cold boot
 * or a warm boot in EL3. This code is shared by BL1 and BL32 (SP_MIN).
 *
 * This macro will always perform reset handling, architectural initialisations
 * and stack setup. The rest of the actions are optional because they might not
 * be needed, depending on the context in which this macro is called. This is
 * why this macro is parameterised; each parameter allows enabling or disabling
 * some of these actions.
 *
 *  _init_sctlr:
 *	Whether the macro needs to initialise the SCTLR register, including
 *	configuring the endianness of data accesses.
 *
 *  _warm_boot_mailbox:
 *	Whether the macro needs to detect the type of boot (cold/warm). The
 *	detection is based on the platform entrypoint address: if it is zero
 *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
 *	this macro jumps to the platform entrypoint address.
 *
 *  _secondary_cold_boot:
 *	Whether the macro needs to identify the CPU that is calling it: primary
 *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
 *	the platform initialisations, while the secondaries will be put in a
 *	platform-specific state in the meantime.
 *
 *	If the caller knows this macro will only be called by the primary CPU
 *	then this parameter can be defined to 0 to skip this step.
 *
 *  _init_memory:
 *	Whether the macro needs to initialise the memory.
 *
 *  _init_c_runtime:
 *	Whether the macro needs to initialise the C runtime environment.
 *
 *  _exception_vectors:
 *	Address of the exception vectors to program in the VBAR and MVBAR
 *	registers.
 *
 *  _pie_fixup_size:
 *	Size of the memory region over which to fix up the Global Descriptor
 *	Table (GDT).
 *
 *	A non-zero value is expected when the firmware needs the GDT to be
 *	fixed up.
 *
 *	An illustrative invocation of this macro is sketched in the comment
 *	after the macro definition below.
 * -----------------------------------------------------------------------------
 */
	.macro el3_entrypoint_common					\
		_init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,	\
		_init_memory, _init_c_runtime, _exception_vectors,	\
		_pie_fixup_size

	/* Make sure we are in Secure Mode */
#if ENABLE_ASSERTIONS
	ldcopr	r0, SCR
	tst	r0, #SCR_NS_BIT
	ASM_ASSERT(eq)
#endif

	.if \_init_sctlr
		/* -------------------------------------------------------------
		 * This is the initialisation of SCTLR and so must ensure that
		 * all fields are explicitly set rather than relying on hw. Some
		 * fields reset to an IMPLEMENTATION DEFINED value.
		 *
		 * SCTLR.TE: Set to zero so that exceptions to an Exception
		 *  Level executing at PL1 are taken to A32 state.
		 *
		 * SCTLR.EE: Set the CPU endianness before doing anything that
		 *  might involve memory reads or writes. Set to zero to select
		 *  Little Endian.
		 *
		 * SCTLR.V: Set to zero to select the normal exception vectors
		 *  with base address held in VBAR.
		 *
		 * SCTLR.DSSBS: Set to zero to disable speculation store bypass
		 *  safe behaviour upon exception entry to EL3.
		 * -------------------------------------------------------------
		 */
		ldr	r0, =(SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_EE_BIT | \
				SCTLR_V_BIT | SCTLR_DSSBS_BIT))
		stcopr	r0, SCTLR
		isb
	.endif /* _init_sctlr */

	/* Switch to monitor mode */
	cps	#MODE32_mon
	isb

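	/*
	 * Note: when the DISABLE_MTPMU build option is set, the mtpmu_disable
	 * helper (provided elsewhere in TF-A) is expected to turn off the
	 * multi-threaded PMU extension (FEAT_MTPMU) where it is implemented.
	 */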
#if DISABLE_MTPMU
	bl	mtpmu_disable
#endif

	.if \_warm_boot_mailbox
		/* -------------------------------------------------------------
		 * This code will be executed for both warm and cold resets.
		 * Now is the time to distinguish between the two.
		 * Query the platform entrypoint address: if it is not zero,
		 * then it is a warm boot, so jump to that address.
		 * -------------------------------------------------------------
		 */
		bl	plat_get_my_entrypoint
		cmp	r0, #0
		bxne	r0
	.endif /* _warm_boot_mailbox */

	.if \_pie_fixup_size
#if ENABLE_PIE
		/*
		 * ------------------------------------------------------------
		 * If PIE is enabled, fix up the Global Descriptor Table. This
		 * is done only once, during the primary core's cold boot path.
		 *
		 * The compile-time base address required for the fixup is
		 * calculated using the "pie_fixup" label, which is placed
		 * within the first page of the image.
		 * ------------------------------------------------------------
		 */
	pie_fixup:
		ldr	r0, =pie_fixup
		ldr	r1, =PAGE_START_MASK
		and	r0, r0, r1
		mov_imm	r1, \_pie_fixup_size
		add	r1, r1, r0
		bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */
	.endif /* _pie_fixup_size */

	/* ---------------------------------------------------------------------
	 * Set the exception vectors (VBAR/MVBAR).
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =\_exception_vectors
	stcopr	r0, VBAR
	stcopr	r0, MVBAR
	isb

	/* ---------------------------------------------------------------------
	 * It is a cold boot.
	 * Perform any processor-specific actions upon reset, e.g. cache and
	 * TLB invalidations.
	 * ---------------------------------------------------------------------
	 */
	bl	reset_handler

	el3_arch_init_common

	.if \_secondary_cold_boot
		/* -------------------------------------------------------------
		 * Check if this is a primary or secondary CPU cold boot.
		 * The primary CPU will set up the platform while the
		 * secondaries are placed in a platform-specific state until the
		 * primary CPU performs the necessary actions to bring them out
		 * of that state and allows entry into the OS.
		 * -------------------------------------------------------------
		 */
		bl	plat_is_my_cpu_primary
		cmp	r0, #0
		bne	do_primary_cold_boot

		/* This is a cold boot on a secondary CPU */
		bl	plat_secondary_cold_boot_setup
		/* plat_secondary_cold_boot_setup() is not supposed to return */
		no_ret	plat_panic_handler

	do_primary_cold_boot:
	.endif /* _secondary_cold_boot */

	/* ---------------------------------------------------------------------
	 * Initialise memory now. Secondary CPU initialisation won't get to this
	 * point.
	 * ---------------------------------------------------------------------
	 */

	.if \_init_memory
		bl	platform_mem_init
	.endif /* _init_memory */

	/* ---------------------------------------------------------------------
	 * Init C runtime environment:
	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
	 *       - the .bss section;
	 *       - the coherent memory section (if any).
	 *   - Relocate the data section from ROM to RAM, if required.
	 * ---------------------------------------------------------------------
	 */
	.if \_init_c_runtime
#if defined(IMAGE_BL32) || (defined(IMAGE_BL2) && BL2_AT_EL3)
		/* -----------------------------------------------------------------
		 * Invalidate the RW memory used by the image. This
		 * includes the data and NOBITS sections. This is done to
		 * safeguard against possible corruption of this memory by
		 * dirty cache lines in a system cache as a result of use by
		 * an earlier boot loader stage.
		 * -----------------------------------------------------------------
		 */
		ldr	r0, =__RW_START__
		ldr	r1, =__RW_END__
		sub	r1, r1, r0
		bl	inv_dcache_range
#endif

		/*
		 * zeromem clobbers r12, which holds arg3 from the previous BL
		 * stage; preserve it in r7.
		 */
		mov	r7, r12
		ldr	r0, =__BSS_START__
		ldr	r1, =__BSS_END__
		sub	r1, r1, r0
		bl	zeromem

#if USE_COHERENT_MEM
		ldr	r0, =__COHERENT_RAM_START__
		ldr	r1, =__COHERENT_RAM_END_UNALIGNED__
		sub	r1, r1, r0
		bl	zeromem
#endif

		/* Restore r12 */
		mov	r12, r7

#if defined(IMAGE_BL1) || (defined(IMAGE_BL2) && BL2_AT_EL3 && BL2_IN_XIP_MEM)
		/* -----------------------------------------------------
		 * Copy data from ROM to RAM.
		 * -----------------------------------------------------
		 */
		ldr	r0, =__DATA_RAM_START__
		ldr	r1, =__DATA_ROM_START__
		ldr	r2, =__DATA_RAM_END__
		sub	r2, r2, r0
		bl	memcpy4
#endif
	.endif /* _init_c_runtime */

	/* ---------------------------------------------------------------------
	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
	 * the MMU is enabled. There is no risk of reading stale stack memory
	 * after enabling the MMU as only the primary CPU is running at the
	 * moment.
	 * ---------------------------------------------------------------------
	 */
	bl	plat_set_my_stack

#if STACK_PROTECTOR_ENABLED
	.if \_init_c_runtime
	bl	update_stack_protector_canary
	.endif /* _init_c_runtime */
#endif
	.endm
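
/* -----------------------------------------------------------------------------
 * Illustrative sketch (comment only, not assembled): a BL image entrypoint,
 * such as SP_MIN's, would typically invoke el3_entrypoint_common along the
 * lines below. The argument values and the "sp_min_vector_table" symbol are
 * assumptions made for illustration; the real values depend on the image and
 * its build options.
 *
 *	el3_entrypoint_common					\
 *		_init_sctlr=1					\
 *		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
 *		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
 *		_init_memory=1					\
 *		_init_c_runtime=1				\
 *		_exception_vectors=sp_min_vector_table		\
 *		_pie_fixup_size=BL32_LIMIT - BL32_BASE
 * -----------------------------------------------------------------------------
 */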

#endif /* EL3_COMMON_MACROS_S */