/*
 * Copyright (c) 2016-2023, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#ifndef ASM_MACROS_S
#define ASM_MACROS_S

#include <arch.h>
#include <common/asm_macros_common.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/spinlock.h>

/*
 * TLBI instruction with type specifier that implements the workaround for
 * Cortex-A57 erratum 813419.
 */
#if ERRATA_A57_813419
#define TLB_INVALIDATE(_reg, _coproc) \
	stcopr	_reg, _coproc; \
	dsb	ish; \
	stcopr	_reg, _coproc
#else
#define TLB_INVALIDATE(_reg, _coproc) \
	stcopr	_reg, _coproc
#endif

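	/*
	 * Illustrative use (a sketch, assuming TLBIALL is defined in
	 * arch.h as the "invalidate entire unified TLB" operation; the
	 * register value is ignored by that operation):
	 *
	 *	mov	r0, #0
	 *	TLB_INVALIDATE(r0, TLBIALL)
	 *
	 * With ERRATA_A57_813419 this emits the write twice with a DSB in
	 * between; otherwise a single write is emitted.
	 */
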
	/*
	 * Coprocessor register accessors
	 */
	.macro ldcopr reg, coproc, opc1, CRn, CRm, opc2
	mrc	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm

	.macro ldcopr16 reg1, reg2, coproc, opc1, CRm
	mrrc	\coproc, \opc1, \reg1, \reg2, \CRm
	.endm

	.macro stcopr reg, coproc, opc1, CRn, CRm, opc2
	mcr	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm

	.macro stcopr16 reg1, reg2, coproc, opc1, CRm
	mcrr	\coproc, \opc1, \reg1, \reg2, \CRm
	.endm

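	/*
	 * Illustrative usage (a sketch; SCTLR and CNTPCT_64 are assumed to
	 * be defined in arch.h as "coproc, opc1, CRn, CRm, opc2" and
	 * "coproc, opc1, CRm" tuples respectively):
	 *
	 *	ldcopr	r0, SCTLR		@ 32-bit read via MRC
	 *	stcopr	r0, SCTLR		@ 32-bit write via MCR
	 *	ldcopr16 r0, r1, CNTPCT_64	@ 64-bit read via MRRC
	 */
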
	/* Cache line size helpers */
	.macro	dcache_line_size  reg, tmp
	ldcopr	\tmp, CTR
	ubfx	\tmp, \tmp, #CTR_DMINLINE_SHIFT, #CTR_DMINLINE_WIDTH
	mov	\reg, #CPU_WORD_SIZE
	lsl	\reg, \reg, \tmp
	.endm

	.macro	icache_line_size  reg, tmp
	ldcopr	\tmp, CTR
	and	\tmp, \tmp, #CTR_IMINLINE_MASK
	mov	\reg, #CPU_WORD_SIZE
	lsl	\reg, \reg, \tmp
	.endm

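	/*
	 * Illustrative usage (a sketch): compute the data cache line size
	 * in bytes into r2, using r3 as scratch, e.g. before a cache
	 * maintenance loop:
	 *
	 *	dcache_line_size r2, r3
	 *	sub	r3, r2, #1		@ line mask
	 *	bic	r0, r0, r3		@ align address in r0 to a line
	 */
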
	/*
	 * Declare the exception vector table, enforcing that it is aligned
	 * to a 32 byte boundary.
	 */
	.macro vector_base  label
	.section .vectors, "ax"
	.align 5
	\label:
	.endm

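	/*
	 * Illustrative use (a sketch with hypothetical handler names),
	 * declaring a vector table with one branch per exception type:
	 *
	 * vector_base example_vector_table
	 *	b	example_reset
	 *	b	example_undef
	 *	b	example_svc_smc
	 *	b	example_pabort
	 *	b	example_dabort
	 *	b	.			@ reserved
	 *	b	example_irq
	 *	b	example_fiq
	 */
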
	/*
	 * This macro calculates the base address of the current CPU's
	 * multiprocessor (MP) stack using the plat_my_core_pos() index, the
	 * name of the stack storage and the size of each stack.
	 * Out: r0 = physical address of stack base
	 * Clobber: r14, r1, r2
	 */
	.macro get_my_mp_stack _name, _size
	bl	plat_my_core_pos
	ldr r2, =(\_name + \_size)
	mov r1, #\_size
	mla r0, r0, r1, r2
	.endm

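	/*
	 * Typical use, a sketch along the lines of the common
	 * plat_get_my_stack() helper: preserve lr first, since the macro
	 * clobbers r14 via the plat_my_core_pos call.
	 *
	 *	mov	r3, lr
	 *	get_my_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
	 *	mov	sp, r0
	 *	bx	r3
	 */
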
	/*
	 * This macro calculates the base address of a uniprocessor (UP) stack
	 * using the name of the stack storage and the size of the stack.
	 * Out: r0 = physical address of stack base
	 */
	.macro get_up_stack _name, _size
	ldr r0, =(\_name + \_size)
	.endm

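	/*
	 * Illustrative use (a sketch), mirroring the MP variant above but
	 * without a per-CPU index:
	 *
	 *	get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
	 *	mov	sp, r0
	 */
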
#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
	/*
	 * Macro for mitigating against speculative execution beyond the
	 * exception return. ARMv7 cores without the Virtualization
	 * extension do not support the 'eret' instruction, so 'movs pc, lr'
	 * is used instead.
	 */
	.macro exception_return
	movs	pc, lr
	dsb	nsh
	isb
	.endm

#else
	/*
	 * Macro for mitigating against speculative execution beyond ERET.
	 * It uses the speculation barrier instruction introduced by FEAT_SB
	 * when that feature is enabled, and a DSB/ISB sequence otherwise.
	 */
	.macro exception_return
	eret
#if ENABLE_FEAT_SB
	sb
#else
	dsb	nsh
	isb
#endif
	.endm
#endif

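	/*
	 * Illustrative use at the end of an exception handler (a sketch
	 * with a hypothetical context-restore sequence):
	 *
	 *	pop	{r0-r12, lr}		@ restore saved context
	 *	exception_return		@ no speculation past the return
	 */
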
#if (ARM_ARCH_MAJOR == 7)
	/* ARMv7 does not support the 'stl' instruction; emulate a store-release */
	.macro stl _reg, _write_lock
	dmb
	str	\_reg, \_write_lock
	dsb
	.endm
#endif

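	/*
	 * Illustrative store-release of a lock word whose address is in r0
	 * (a sketch; on ARMv8 AArch32 'stl' is the native instruction, so
	 * the same source works on both):
	 *
	 *	mov	r1, #0
	 *	stl	r1, [r0]
	 */
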
	/*
	 * Helper macro to generate the best mov/movw/movt combination
	 * for the value to be moved.
	 */
	.macro mov_imm _reg, _val
		.if ((\_val) & 0xffff0000) == 0
			mov	\_reg, #(\_val)
		.else
			movw	\_reg, #((\_val) & 0xffff)
			movt	\_reg, #((\_val) >> 16)
		.endif
	.endm

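	/*
	 * Illustrative expansions (a sketch):
	 *
	 *	mov_imm	r0, 0x1000		@ single mov, high half is zero
	 *	mov_imm	r1, 0x12345678		@ movw 0x5678 then movt 0x1234
	 */
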
	/*
	 * Macro to mark instances where we're jumping to a function and don't
	 * expect a return. To provide the function being jumped to with
	 * additional information, we use the 'bl' instruction to jump rather
	 * than 'b'.
	 *
	 * Debuggers infer the location of a call from where LR points to,
	 * which is usually the instruction after 'bl'. If this macro expansion
	 * happens to be the last location in a function, that will cause LR
	 * to point to a location beyond the function, thereby misleading the
	 * debugger's backtrace. We therefore insert a 'nop' after the function
	 * call for debug builds, unless the 'skip_nop' parameter is non-zero.
	 */
	.macro no_ret _func:req, skip_nop=0
	bl	\_func
#if DEBUG
	.ifeq \skip_nop
	nop
	.endif
#endif
	.endm

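	/*
	 * Illustrative use for a call that never returns:
	 *
	 *	no_ret	plat_panic_handler
	 */
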
	/*
	 * Reserve space for a spin lock in an assembly file.
	 */
	.macro define_asm_spinlock _name:req
	.align	SPINLOCK_ASM_ALIGN
	\_name:
	.space	SPINLOCK_ASM_SIZE
	.endm

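	/*
	 * Illustrative use (a sketch with a hypothetical lock name); the
	 * resulting symbol can then be passed to the spin_lock/spin_unlock
	 * helpers:
	 *
	 *	define_asm_spinlock example_lock
	 */
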
	/*
	 * Helper macro to OR the bottom 32 bits of `_val` into `_reg_l`
	 * and the top 32 bits of `_val` into `_reg_h`.  If either the bottom
	 * or top word of `_val` is zero, the corresponding OR operation
	 * is skipped.
	 */
	.macro orr64_imm _reg_l, _reg_h, _val
		.if (\_val >> 32)
			orr \_reg_h, \_reg_h, #(\_val >> 32)
		.endif
		.if (\_val & 0xffffffff)
			orr \_reg_l, \_reg_l, #(\_val & 0xffffffff)
		.endif
	.endm

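	/*
	 * Illustrative use (a sketch): set bit 0 of the low word in r0 and
	 * bit 0 of the high word in r1, i.e. bits 0 and 32 of the combined
	 * 64-bit value:
	 *
	 *	orr64_imm r0, r1, 0x100000001
	 */
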
	/*
	 * Helper macro to bitwise-clear bits in `_reg_l` and
	 * `_reg_h` given a 64 bit immediate `_val`.  The set bits
	 * in the bottom word of `_val` dictate which bits from
	 * `_reg_l` should be cleared.  Similarly, the set bits in
	 * the top word of `_val` dictate which bits from `_reg_h`
	 * should be cleared.  If either the bottom or top word of
	 * `_val` is zero, the corresponding BIC operation is skipped.
	 */
	.macro bic64_imm _reg_l, _reg_h, _val
		.if (\_val >> 32)
			bic \_reg_h, \_reg_h, #(\_val >> 32)
		.endif
		.if (\_val & 0xffffffff)
			bic \_reg_l, \_reg_l, #(\_val & 0xffffffff)
		.endif
	.endm

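	/*
	 * Illustrative use (a sketch): clear bit 63 of a 64-bit value held
	 * in r0 (low word) and r1 (high word), e.g. between a ldcopr16 and
	 * a stcopr16 of a 64-bit system register:
	 *
	 *	bic64_imm r0, r1, 0x8000000000000000
	 */
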
	/*
	 * Helper macro for carrying out division in software when
	 * hardware division is not supported. \top holds the dividend
	 * on entry and the remainder after the macro has executed.
	 * \bot holds the divisor. \div holds the quotient and \temp is a
	 * temporary register used in the calculation.
	 * The division algorithm has been obtained from:
	 * http://www.keil.com/support/man/docs/armasm/armasm_dom1359731155623.htm
	 */
	.macro	softudiv	div:req,top:req,bot:req,temp:req

	mov     \temp, \bot
	cmp     \temp, \top, lsr #1
div1:
	movls   \temp, \temp, lsl #1
	cmp     \temp, \top, lsr #1
	bls     div1
	mov     \div, #0

div2:
	cmp     \top, \temp
	subcs   \top, \top, \temp
	adc     \div, \div, \div
	mov     \temp, \temp, lsr #1
	cmp     \temp, \bot
	bhs     div2
	.endm
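
	/*
	 * Illustrative use (a sketch): divide r1 by r2 (the divisor must be
	 * non-zero), leaving the quotient in r0, the remainder in r1 and
	 * using r3 as scratch:
	 *
	 *	softudiv r0, r1, r2, r3
	 */
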
#endif /* ASM_MACROS_S */