xref: /rk3399_ARM-atf/include/arch/aarch32/asm_macros.S (revision 38e580e6411c7a2eb2801a6aacb0a19bb9b1ac46)
1/*
2 * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6#ifndef ASM_MACROS_S
7#define ASM_MACROS_S
8
9#include <arch.h>
10#include <common/asm_macros_common.S>
11#include <lib/cpus/cpu_ops.h>
12#include <lib/spinlock.h>
13
/*
 * TLBI instruction with type specifier that implements the workaround for
 * errata 813419 of Cortex-A57: per the published erratum workaround, the
 * TLBI is issued twice with a DSB ISH between the two invalidations.
 * `_coproc` is expected to be a register-name macro from arch.h expanding
 * to the full coproc/opc1/CRn/CRm/opc2 operand list of stcopr.
 */
#if ERRATA_A57_813419
#define TLB_INVALIDATE(_reg, _coproc) \
	stcopr	_reg, _coproc; \
	dsb	ish; \
	stcopr	_reg, _coproc
#else
#define TLB_INVALIDATE(_reg, _coproc) \
	stcopr	_reg, _coproc
#endif
27
	/*
	 * Co-processor register accessors.
	 * The coproc, opc1, CRn, CRm, opc2 operands are normally supplied by
	 * a single register-name macro from arch.h (e.g. `ldcopr r0, CTR`).
	 */

	/* Read a 32-bit co-processor register into \reg. */
	.macro ldcopr reg, coproc, opc1, CRn, CRm, opc2
	mrc	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm

	/* Read a 64-bit co-processor register; \reg1 = low word, \reg2 = high. */
	.macro ldcopr16 reg1, reg2, coproc, opc1, CRm
	mrrc	\coproc, \opc1, \reg1, \reg2, \CRm
	.endm

	/* Write \reg to a 32-bit co-processor register. */
	.macro stcopr reg, coproc, opc1, CRn, CRm, opc2
	mcr	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm

	/* Write to a 64-bit co-processor register; \reg1 = low word, \reg2 = high. */
	.macro stcopr16 reg1, reg2, coproc, opc1, CRm
	mcrr	\coproc, \opc1, \reg1, \reg2, \CRm
	.endm
46
	/* Cache line size helpers */

	/*
	 * Compute the data cache line size in bytes.
	 * Out: \reg = D-cache line size
	 * Clobbers: \tmp
	 */
	.macro	dcache_line_size  reg, tmp
	ldcopr	\tmp, CTR			/* Cache Type Register */
	ubfx	\tmp, \tmp, #CTR_DMINLINE_SHIFT, #CTR_DMINLINE_WIDTH
	mov	\reg, #CPU_WORD_SIZE
	lsl	\reg, \reg, \tmp		/* size = word size << DminLine */
	.endm
54
	/*
	 * Compute the instruction cache line size in bytes.
	 * CTR.IminLine sits in the bottom bits of CTR, so a plain AND with
	 * the mask extracts it without a shift.
	 * Out: \reg = I-cache line size
	 * Clobbers: \tmp
	 */
	.macro	icache_line_size  reg, tmp
	ldcopr	\tmp, CTR			/* Cache Type Register */
	and	\tmp, \tmp, #CTR_IMINLINE_MASK
	mov	\reg, #CPU_WORD_SIZE
	lsl	\reg, \reg, \tmp		/* size = word size << IminLine */
	.endm
61
	/*
	 * Declare the exception vector table, enforcing it is aligned on a
	 * 32 byte boundary (.align 5 == 2^5 bytes).  The table is emitted
	 * into its own ".vectors" executable section.
	 */
	.macro vector_base  label
	.section .vectors, "ax"
	.align 5
	\label:
	.endm
71
	/*
	 * This macro calculates the base address of the current CPU's multi
	 * processor(MP) stack using the plat_my_core_pos() index, the name of
	 * the stack storage and the size of each stack.
	 * The computed value is \_name + (index + 1) * \_size, i.e. the high
	 * end of this CPU's stack slot (the stack presumably grows down from
	 * here -- consistent with get_up_stack below).
	 * Out: r0 = physical address of stack base
	 * Clobber: r14, r1, r2
	 */
	.macro get_my_mp_stack _name, _size
	bl	plat_my_core_pos
	ldr r2, =(\_name + \_size)	/* r2 = top of CPU0's slot */
	mov r1, #\_size
	mla r0, r0, r1, r2		/* r0 = index * size + r2 */
	.endm
85
	/*
	 * This macro calculates the base address of a uniprocessor(UP) stack
	 * using the name of the stack storage and the size of the stack,
	 * i.e. the high end \_name + \_size of the reserved storage.
	 * Out: r0 = physical address of stack base
	 */
	.macro get_up_stack _name, _size
	ldr r0, =(\_name + \_size)
	.endm
94
#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
	/*
	 * Macro for mitigating against speculative execution.
	 * ARMv7 cores without Virtualization extension do not support the
	 * eret instruction, so the return is done with a mode-restoring
	 * `movs pc, lr`.  The dsb/isb that follow are never architecturally
	 * executed; they act as a barrier against speculation continuing
	 * past the exception return.
	 */
	.macro exception_return
	movs	pc, lr
	dsb	nsh
	isb
	.endm

#else
	/*
	 * Macro for mitigating against speculative execution beyond ERET. Uses the
	 * speculation barrier instruction introduced by FEAT_SB, if it's enabled.
	 * As above, the instructions after eret are never architecturally
	 * executed -- they only block speculation.
	 */
	.macro exception_return
	eret
#if ENABLE_FEAT_SB
	sb
#else
	dsb	nsh
	isb
#endif
	.endm
#endif
122
	/*
	 * Macro for error synchronization: make sure any outstanding memory
	 * accesses -- and any aborts they may raise -- are complete and
	 * visible before execution continues.
	 */
	.macro synchronize_errors
	/* Complete any stores that may return an abort */
	dsb	sy
	/* Synchronise the CPU context with the completion of the dsb */
	isb
	.endm
130
	/*
	 * Helper macro to generate the best mov/movw/movt combinations
	 * according to the value to be moved: a single mov when the value
	 * fits in the bottom 16 bits, otherwise a movw/movt pair.
	 * \_val must be a constant known at assembly time, since it is
	 * evaluated by `.if`.
	 */
	.macro mov_imm _reg, _val
		.if ((\_val) & 0xffff0000) == 0
			mov	\_reg, #(\_val)
		.else
			movw	\_reg, #((\_val) & 0xffff)
			movt	\_reg, #((\_val) >> 16)
		.endif
	.endm
143
	/*
	 * Macro to mark instances where we're jumping to a function and don't
	 * expect a return. To provide the function being jumped to with
	 * additional information, we use 'bl' instruction to jump rather than
	 * 'b'.
	 *
	 * Debuggers infer the location of a call from where LR points to, which
	 * is usually the instruction after 'bl'. If this macro expansion
	 * happens to be the last location in a function, that'll cause the LR
	 * to point a location beyond the function, thereby misleading debugger
	 * back trace. We therefore insert a 'nop' after the function call for
	 * debug builds, unless 'skip_nop' parameter is non-zero.
	 */
	.macro no_ret _func:req, skip_nop=0
	bl	\_func
#if DEBUG
	.ifeq \skip_nop
	nop
	.endif
#endif
	.endm
165
	/*
	 * Reserve space for a spin lock in assembly file, aligned and sized
	 * per lib/spinlock.h.  \_name becomes the lock's label and must be
	 * unique within the file.
	 */
	.macro define_asm_spinlock _name:req
	.align	SPINLOCK_ASM_ALIGN
	\_name:
	.space	SPINLOCK_ASM_SIZE
	.endm
174
	/*
	 * Helper macro to OR the bottom 32 bits of `_val` into `_reg_l`
	 * and the top 32 bits of `_val` into `_reg_h`.  If either the bottom
	 * or top word of `_val` is zero, the corresponding OR operation
	 * is skipped.
	 * NOTE(review): `_val` must be an assembly-time constant, and each
	 * non-zero halfword presumably has to be encodable as an A32
	 * modified immediate (8-bit value rotated) or assembly fails --
	 * confirm against callers.
	 */
	.macro orr64_imm _reg_l, _reg_h, _val
		.if (\_val >> 32)
			orr \_reg_h, \_reg_h, #(\_val >> 32)
		.endif
		.if (\_val & 0xffffffff)
			orr \_reg_l, \_reg_l, #(\_val & 0xffffffff)
		.endif
	.endm
189
	/*
	 * Helper macro to bitwise-clear bits in `_reg_l` and
	 * `_reg_h` given a 64 bit immediate `_val`.  The set bits
	 * in the bottom word of `_val` dictate which bits from
	 * `_reg_l` should be cleared.  Similarly, the set bits in
	 * the top word of `_val` dictate which bits from `_reg_h`
	 * should be cleared.  If either the bottom or top word of
	 * `_val` is zero, the corresponding BIC operation is skipped.
	 * NOTE(review): as with orr64_imm, `_val` must be an assembly-time
	 * constant and each non-zero halfword presumably has to be a valid
	 * A32 modified immediate -- confirm against callers.
	 */
	.macro bic64_imm _reg_l, _reg_h, _val
		.if (\_val >> 32)
			bic \_reg_h, \_reg_h, #(\_val >> 32)
		.endif
		.if (\_val & 0xffffffff)
			bic \_reg_l, \_reg_l, #(\_val & 0xffffffff)
		.endif
	.endm
207
208	/*
209	 * Helper macro for carrying out division in software when
210	 * hardware division is not suported. \top holds the dividend
211	 * in the function call and the remainder after
212	 * the function is executed. \bot holds the divisor. \div holds
213	 * the quotient and \temp is a temporary registed used in calcualtion.
214	 * The division algorithm has been obtained from:
215	 * http://www.keil.com/support/man/docs/armasm/armasm_dom1359731155623.htm
216	 */
217	.macro	softudiv	div:req,top:req,bot:req,temp:req
218
219	mov     \temp, \bot
220	cmp     \temp, \top, lsr #1
221div1:
222	movls   \temp, \temp, lsl #1
223	cmp     \temp, \top, lsr #1
224	bls     div1
225	mov     \div, #0
226
227div2:
228	cmp     \top, \temp
229	subcs   \top, \top,\temp
230	ADC     \div, \div, \div
231	mov     \temp, \temp, lsr #1
232	cmp     \temp, \bot
233	bhs     div2
234	.endm
235
	/*
	 * Helper macro to instruction adr <reg>, <symbol> where <symbol> is
	 * within the range +/- 4 GB.
	 * NOTE(review): `adrp` and the :lo12: relocation are AArch64-only
	 * constructs and cannot be assembled for AArch32.  This is latent
	 * because macro bodies are only checked on expansion; any use of
	 * this macro in AArch32 code will fail to build -- confirm it is
	 * unused, or replace with an AArch32 equivalent (e.g. movw/movt).
	 */
	.macro adr_l, dst, sym
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.endm
244#endif /* ASM_MACROS_S */
245