/*
 * Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef ASM_MACROS_S
#define ASM_MACROS_S

#include <arch.h>
#include <common/asm_macros_common.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/spinlock.h>

/*
 * TLBI instruction with type specifier that implements the workaround for
 * erratum 813419 of Cortex-A57 or erratum 1286807 of Cortex-A76.
 */
#if ERRATA_A57_813419 || ERRATA_A76_1286807
#define TLB_INVALIDATE(_type) \
	tlbi	_type; \
	dsb	ish; \
	tlbi	_type
#else
#define TLB_INVALIDATE(_type) \
	tlbi	_type
#endif
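
/*
 * Usage sketch (illustrative only): invalidate all EL3 TLB entries and wait
 * for completion. The macro transparently repeats the TLBI when one of the
 * errata workarounds above is compiled in.
 *
 *	TLB_INVALIDATE(alle3)
 *	dsb	ish
 *	isb
 */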


	/*
	 * Create a stack frame at the start of an assembly function. Will also
	 * add all necessary call frame information (cfi) directives for a
	 * pretty stack trace. This is necessary as there is quite a bit of
	 * flexibility within a stack frame and the stack pointer can move
	 * around throughout the function. If the debugger isn't told where to
	 * find things, it gets lost, gives up and displays nothing. So inform
	 * the debugger of what's where. Anchor the Canonical Frame Address
	 * (CFA; the thing used to track what's where) to the frame pointer, as
	 * that's not expected to change in the function body and no extra
	 * bookkeeping will be necessary, allowing free movement of the sp.
	 *
	 *   _frame_size: requested space for the caller to use. Must be a
	 *     multiple of 16 for stack pointer alignment.
	 */
	.macro	func_prologue _frame_size=0
	.if \_frame_size & 0xf
	.error "frame_size must have stack pointer alignment (multiple of 16)"
	.endif

	/* put frame record at top of frame */
	stp	x29, x30, [sp, #-0x10]!
	mov	x29, sp
	.if \_frame_size
	sub	sp, sp, #\_frame_size
	.endif

	/* point CFA to start of frame record, i.e. x29 + 0x10 */
	.cfi_def_cfa	x29, 0x10
	/* inform it about x29, x30 locations */
	.cfi_offset	x30, -0x8
	.cfi_offset	x29, -0x10
	.endm

	/*
	 * Clear the stack frame at the end of an assembly function.
	 *
	 *   _frame_size: the value passed to func_prologue
	 */
	.macro	func_epilogue _frame_size=0
	/* remove requested space */
	.if \_frame_size
	add	sp, sp, #\_frame_size
	.endif
	ldp	x29, x30, [sp], #0x10
	.endm
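
	/*
	 * Usage sketch (illustrative only; 'my_helper' and
	 * 'some_other_function' are made-up names): reserve 16 bytes of
	 * scratch space on the stack and release it before returning.
	 *
	 * func my_helper
	 *	func_prologue	16
	 *	str	x19, [sp]		// use the reserved space
	 *	bl	some_other_function
	 *	ldr	x19, [sp]
	 *	func_epilogue	16
	 *	ret
	 * endfunc my_helper
	 */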


	.macro	dcache_line_size  reg, tmp
	/* CTR_EL0.DminLine [19:16] holds log2(line size in words) */
	mrs	\tmp, ctr_el0
	ubfx	\tmp, \tmp, #16, #4
	/* convert from words to bytes */
	mov	\reg, #4
	lsl	\reg, \reg, \tmp
	.endm


	.macro	icache_line_size  reg, tmp
	/* CTR_EL0.IminLine [3:0] holds log2(line size in words) */
	mrs	\tmp, ctr_el0
	and	\tmp, \tmp, #0xf
	/* convert from words to bytes */
	mov	\reg, #4
	lsl	\reg, \reg, \tmp
	.endm
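
	/*
	 * Usage sketch (illustrative only): clean and invalidate the data
	 * cache lines covering [x0, x0 + x1). Modifies x0 and x1, clobbers
	 * x2 and x3.
	 *
	 *	dcache_line_size x2, x3
	 *	add	x1, x0, x1		// end of the region
	 *	sub	x3, x2, #1
	 *	bic	x0, x0, x3		// align base to a line
	 * 1:
	 *	dc	civac, x0
	 *	add	x0, x0, x2
	 *	cmp	x0, x1
	 *	b.lo	1b
	 *	dsb	sy
	 */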


	.macro	smc_check  label
	mrs	x0, esr_el3
	ubfx	x0, x0, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	cmp	x0, #EC_AARCH64_SMC
	b.ne	\label
	.endm
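
	/*
	 * Usage sketch (illustrative only; 'not_an_smc' is a made-up label):
	 * in a synchronous exception handler, branch away when the exception
	 * was not caused by an AArch64 SMC. Note that x0 is clobbered.
	 *
	 *	smc_check	not_an_smc
	 */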

	/*
	 * Declare the exception vector table, enforcing it is aligned on a
	 * 2KB boundary, as required by the ARMv8 architecture.
	 * Use zero bytes as the fill value in the padding bytes so that they
	 * decode as illegal AArch64 instructions. This increases security,
	 * robustness and potentially facilitates debugging.
	 */
	.macro vector_base  label, section_name=.vectors
	.section \section_name, "ax"
	.align 11, 0
	\label:
	.endm

	/*
	 * Create an entry in the exception vector table, enforcing it is
	 * aligned on a 128-byte boundary, as required by the ARMv8 architecture.
	 * Use zero bytes as the fill value in the padding bytes so that they
	 * decode as illegal AArch64 instructions. This increases security,
	 * robustness and potentially facilitates debugging.
	 */
	.macro vector_entry  label, section_name=.vectors
	.cfi_sections .debug_frame
	.section \section_name, "ax"
	.align 7, 0
	.type \label, %function
	.cfi_startproc
	\label:
	.endm

	/*
	 * Pad with bytes until the end of the current exception vector entry,
	 * whose size is always 32 instructions. If the entry contains more
	 * than 32 instructions, an error is emitted.
	 */
	.macro end_vector_entry label
	.cfi_endproc
	.fill	\label + (32 * 4) - .
	.endm
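
	/*
	 * Usage sketch (illustrative only, with made-up labels): declare a
	 * vector table and populate one of its 32-instruction entries.
	 *
	 *	vector_base	my_vectors
	 *
	 *	vector_entry	my_sync_exception_sp_el0
	 *	b	my_sync_handler
	 *	end_vector_entry my_sync_exception_sp_el0
	 */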

	/*
	 * This macro calculates the base address of the current CPU's MP stack
	 * using the plat_my_core_pos() index, the name of the stack storage
	 * and the size of each stack.
	 * Out: X0 = physical address of stack base
	 * Clobber: X30, X1, X2
	 */
	.macro get_my_mp_stack _name, _size
	bl	plat_my_core_pos
	adrp	x2, (\_name + \_size)
	add	x2, x2, :lo12:(\_name + \_size)
	mov	x1, #\_size
	madd	x0, x0, x1, x2
	.endm

	/*
	 * This macro calculates the base address of a UP stack using the
	 * name of the stack storage and the size of the stack.
	 * Out: X0 = physical address of stack base
	 */
	.macro get_up_stack _name, _size
	adrp	x0, (\_name + \_size)
	add	x0, x0, :lo12:(\_name + \_size)
	.endm
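
	/*
	 * Usage sketch (illustrative only): 'platform_stacks' is a made-up
	 * symbol, and PLATFORM_CORE_COUNT/PLATFORM_STACK_SIZE are assumed to
	 * come from the platform; the backing storage would normally live in
	 * a dedicated stacks section.
	 *
	 * platform_stacks:
	 *	.space	PLATFORM_CORE_COUNT * PLATFORM_STACK_SIZE
	 *
	 *	get_my_mp_stack	platform_stacks, PLATFORM_STACK_SIZE
	 *	mov	sp, x0
	 */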

	/*
	 * Helper macro to generate the best mov/movk combinations according
	 * to the value to be moved. The 16 bits from '_shift' are tested and,
	 * if not zero, they are moved into '_reg' without affecting
	 * other bits.
	 */
	.macro _mov_imm16 _reg, _val, _shift
		.if (\_val >> \_shift) & 0xffff
			/* if any lower bits are already set, merge with movk */
			.if (\_val & ((1 << \_shift) - 1))
				movk	\_reg, (\_val >> \_shift) & 0xffff, LSL \_shift
			.else
				mov	\_reg, \_val & (0xffff << \_shift)
			.endif
		.endif
	.endm

	/*
	 * Helper macro to load arbitrary values into 32 or 64-bit registers
	 * which generates the best mov/movk combinations. Since many base
	 * addresses are 64KB aligned, the macro will skip updating bits 15:0
	 * in that case.
	 */
	.macro mov_imm _reg, _val
		.if (\_val) == 0
			mov	\_reg, #0
		.else
			_mov_imm16	\_reg, (\_val), 0
			_mov_imm16	\_reg, (\_val), 16
			_mov_imm16	\_reg, (\_val), 32
			_mov_imm16	\_reg, (\_val), 48
		.endif
	.endm
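
	/*
	 * For example (illustrative only), 'mov_imm x0, 0x20000' expands to a
	 * single 'mov x0, #0x20000' since only bits 31:16 are set, whereas
	 * the value below expands to a 'mov' of the low halfword followed by
	 * a 'movk ..., LSL 16' of the high one:
	 *
	 *	mov_imm	x0, 0x12345678
	 */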

	/*
	 * Macro to mark instances where we're jumping to a function and don't
	 * expect a return. To provide the function being jumped to with
	 * additional information, we use the 'bl' instruction to jump rather
	 * than 'b'.
	 *
	 * Debuggers infer the location of a call from where LR points to, which
	 * is usually the instruction after 'bl'. If this macro expansion
	 * happens to be the last location in a function, that'll cause the LR
	 * to point to a location beyond the function, thereby misleading the
	 * debugger's back trace. We therefore insert a 'nop' after the function
	 * call for debug builds, unless the 'skip_nop' parameter is non-zero.
	 */
	.macro no_ret _func:req, skip_nop=0
	bl	\_func
#if DEBUG
	.ifeq \skip_nop
	nop
	.endif
#endif
	.endm
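
	/*
	 * Usage sketch (illustrative only): hand off to the platform panic
	 * handler, which is known never to return.
	 *
	 *	no_ret	plat_panic_handler
	 */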

	/*
	 * Reserve space for a spin lock in an assembly file.
	 */
	.macro define_asm_spinlock _name:req
	.align	SPINLOCK_ASM_ALIGN
	\_name:
	.space	SPINLOCK_ASM_SIZE
	.endm
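
	/*
	 * Usage sketch (illustrative only; 'my_lock' is a made-up name):
	 * define the lock storage, then take it via the spin_lock helper.
	 *
	 *	define_asm_spinlock	my_lock
	 *
	 *	adr_l	x0, my_lock
	 *	bl	spin_lock
	 */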

	/*
	 * Helper macro to read a system register's value into x0
	 */
	.macro	read reg:req
#if ENABLE_BTI
	bti	j
#endif
	mrs	x0, \reg
	ret
	.endm

	/*
	 * Helper macro to write the value in x1 to a system register
	 */
	.macro	write reg:req
#if ENABLE_BTI
	bti	j
#endif
	msr	\reg, x1
	ret
	.endm
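
	/*
	 * Usage sketch (illustrative only): these macros are meant to be
	 * expanded at branch targets, e.g. the entries of a jump table that
	 * accesses a system register selected at runtime. The labels below
	 * are made up.
	 *
	 * read_actlr_el1:
	 *	read	actlr_el1
	 * write_actlr_el1:
	 *	write	actlr_el1
	 */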

	/*
	 * The "sb" instruction was introduced later into the architecture,
	 * so not all toolchains understand it. Some deny its usage unless
	 * a supported processor is specified on the build command line.
	 * Use sb's system register encoding to work around this; we already
	 * guard the sb execution with a feature flag.
	 */

	.macro sb_barrier_insn
	msr	SYSREG_SB, xzr
	.endm

	.macro psb_csync
	hint #17 /* psb csync; use the hint synonym for compatibility */
	.endm

	.macro tsb_csync
	hint #18 /* tsb csync; use the hint synonym for compatibility */
	.endm

	/*
	 * Macro for using the speculation barrier instruction introduced by
	 * FEAT_SB, if it's enabled.
	 */
	.macro speculation_barrier
#if ENABLE_FEAT_SB
	sb_barrier_insn
#else
	dsb	sy
	isb
#endif
	.endm

	/*
	 * Macro for mitigating against speculative execution beyond ERET. Uses the
	 * speculation barrier instruction introduced by FEAT_SB, if it's enabled.
	 */
	.macro exception_return
	eret
#if ENABLE_FEAT_SB
	sb_barrier_insn
#else
	dsb	nsh
	isb
#endif
	.endm

	/*
	 * Macro to unmask External Aborts by clearing the PSTATE.A bit.
	 * Includes an explicit synchronization event to ensure a newly
	 * unmasked interrupt is taken immediately.
	 */
	.macro  unmask_async_ea
	msr     daifclr, #DAIF_ABT_BIT
	isb
	.endm

	/*
	 * Macro for error synchronization on exception boundaries.
	 * With FEAT_RAS enabled, it is assumed that FEAT_IESB is also present
	 * and enabled.
	 * FEAT_IESB provides an implicit error synchronization event at exception
	 * entry and exception return, so there is no need for any explicit instruction.
	 */
	.macro synchronize_errors
#if !ENABLE_FEAT_RAS
	/* Complete any stores that may return an abort */
	dsb	sy
	/* Synchronise the CPU context with the completion of the dsb */
	isb
#endif
	.endm

	/*
	 * Helper macro equivalent to an 'adr <reg>, <symbol>' instruction for
	 * a <symbol> anywhere within a +/- 4 GB range.
	 */
	.macro adr_l, dst, sym
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.endm

	/*
	 * is_feat_XYZ_present_asm - Set the condition flags and reg if
	 * FEAT_XYZ is enabled at runtime.
	 *
	 * Arguments:
	 * reg: Register for temporary use.
	 *
	 * Clobbers: reg
	 */
	.macro is_feat_sysreg128_present_asm reg:req
	mrs	\reg, ID_AA64ISAR2_EL1
	ands	\reg, \reg, #(ID_AA64ISAR2_SYSREG128_MASK << ID_AA64ISAR2_SYSREG128_SHIFT)
	.endm

	.macro is_feat_pauth_present_asm reg:req, clobber:req
	mrs	\reg, ID_AA64ISAR1_EL1
	mov_imm	\clobber, ((ID_AA64ISAR1_GPI_MASK << ID_AA64ISAR1_GPI_SHIFT) \
			 | (ID_AA64ISAR1_GPA_MASK << ID_AA64ISAR1_GPA_SHIFT) \
			 | (ID_AA64ISAR1_API_MASK << ID_AA64ISAR1_API_SHIFT) \
			 | (ID_AA64ISAR1_APA_MASK << ID_AA64ISAR1_APA_SHIFT))
	tst	\reg, \clobber
	.endm
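
	/*
	 * Usage sketch (illustrative only): both feature checks set the Z
	 * flag when the feature is absent, so callers can branch on it.
	 *
	 *	is_feat_pauth_present_asm x0, x1
	 *	b.eq	1f		// Z set: FEAT_PAUTH is absent
	 *	...			// enable pointer authentication
	 * 1:
	 */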
353*8d9f5f25SBoyan Karatotev
3540d020822SBoyan Karatotev.macro call_reset_handler
3550d020822SBoyan Karatotev#if !(defined(IMAGE_BL2) && ENABLE_RME)
3560d020822SBoyan Karatotev	/* ---------------------------------------------------------------------
3570d020822SBoyan Karatotev	 * It is a cold boot.
3580d020822SBoyan Karatotev	 * Perform any processor specific actions upon reset e.g. cache, TLB
3590d020822SBoyan Karatotev	 * invalidations etc.
3600d020822SBoyan Karatotev	 * ---------------------------------------------------------------------
3610d020822SBoyan Karatotev	 */
3620d020822SBoyan Karatotev	/* The plat_reset_handler can clobber x0 - x18, x30 */
3630d020822SBoyan Karatotev	bl	plat_reset_handler
3640d020822SBoyan Karatotev
3650d020822SBoyan Karatotev	/* Get the matching cpu_ops pointer */
3660d020822SBoyan Karatotev	bl	get_cpu_ops_ptr
3670d020822SBoyan Karatotev
3680d020822SBoyan Karatotev	/* Get the cpu_ops reset handler */
3690d020822SBoyan Karatotev	ldr	x2, [x0, #CPU_RESET_FUNC]
3700d020822SBoyan Karatotev
3710d020822SBoyan Karatotev	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
3720d020822SBoyan Karatotev	blr	x2
3730d020822SBoyan Karatotev#endif
3740d020822SBoyan Karatotev.endm
#endif /* ASM_MACROS_S */