xref: /rk3399_ARM-atf/include/arch/aarch32/asm_macros.S (revision 665e71b8ea28162ec7737c1411bca3ea89e5957e)
1/*
2 * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6#ifndef ASM_MACROS_S
7#define ASM_MACROS_S
8
9#include <arch.h>
10#include <common/asm_macros_common.S>
11#include <lib/spinlock.h>
12
/*
 * TLBI instruction with type specifier that implements the workaround for
 * errata 813419 of Cortex-A57.
 */
#if ERRATA_A57_813419
/*
 * The erratum workaround repeats the TLB invalidate, with a DSB ISH in
 * between, to guarantee the first invalidation is complete before any
 * translation that could have been cached in parallel is used.
 */
#define TLB_INVALIDATE(_reg, _coproc) \
	stcopr	_reg, _coproc; \
	dsb	ish; \
	stcopr	_reg, _coproc
#else
#define TLB_INVALIDATE(_reg, _coproc) \
	stcopr	_reg, _coproc
#endif
26
/* Size in bytes of an AArch32 machine word */
#define WORD_SIZE	4
28
	/*
	 * Coprocessor register accessors.
	 *
	 * These thin wrappers give MRC/MCR (32-bit) and MRRC/MCRR (64-bit)
	 * coprocessor accesses a load/store-style spelling, so callers can
	 * write e.g. "ldcopr r0, SCR" with SCR defined as the coproc
	 * operand list.
	 */

	/* Read a 32-bit coprocessor register into \reg */
	.macro ldcopr reg, coproc, opc1, CRn, CRm, opc2
	mrc	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm

	/* Read a 64-bit coprocessor register: \reg1 = low word, \reg2 = high word */
	.macro ldcopr16 reg1, reg2, coproc, opc1, CRm
	mrrc	\coproc, \opc1, \reg1, \reg2, \CRm
	.endm

	/* Write \reg to a 32-bit coprocessor register */
	.macro stcopr reg, coproc, opc1, CRn, CRm, opc2
	mcr	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm

	/* Write a 64-bit coprocessor register: \reg1 = low word, \reg2 = high word */
	.macro stcopr16 reg1, reg2, coproc, opc1, CRm
	mcrr	\coproc, \opc1, \reg1, \reg2, \CRm
	.endm
47
	/*
	 * Cache line size helpers.
	 *
	 * CTR encodes the minimum D-cache/I-cache line size as
	 * log2(words), so the byte size is WORD_SIZE << field.
	 * Out:     \reg = smallest data (resp. instruction) cache line
	 *                 size in bytes
	 * Clobber: \tmp
	 */
	.macro	dcache_line_size  reg, tmp
	ldcopr	\tmp, CTR
	/* Extract DminLine: log2 of the line size in words */
	ubfx	\tmp, \tmp, #CTR_DMINLINE_SHIFT, #CTR_DMINLINE_WIDTH
	mov	\reg, #WORD_SIZE
	lsl	\reg, \reg, \tmp
	.endm

	.macro	icache_line_size  reg, tmp
	ldcopr	\tmp, CTR
	/* IminLine occupies the bottom bits of CTR, so a mask suffices */
	and	\tmp, \tmp, #CTR_IMINLINE_MASK
	mov	\reg, #WORD_SIZE
	lsl	\reg, \reg, \tmp
	.endm
62
	/*
	 * Declare the exception vector table, enforcing it is aligned on a
	 * 32 byte boundary (".align 5" is a power-of-two alignment, i.e.
	 * 2^5 bytes), as required for a vector base address.
	 */
	.macro vector_base  label
	.section .vectors, "ax"
	.align 5
	\label:
	.endm
72
	/*
	 * This macro calculates the base address of the current CPU's multi
	 * processor (MP) stack using the plat_my_core_pos() index, the name
	 * of the stack storage and the size of each stack.
	 * Out: r0 = physical address of stack base
	 * Clobber: r14, r1, r2 (r14 is clobbered by the bl)
	 */
	.macro get_my_mp_stack _name, _size
	bl	plat_my_core_pos	/* r0 = this core's linear index */
	ldr r2, =(\_name + \_size)	/* r2 = top of the first stack */
	mov r1, #\_size
	/* r0 = core_pos * stack_size + (storage base + stack_size) */
	mla r0, r0, r1, r2
	.endm
86
	/*
	 * This macro calculates the base address of a uniprocessor (UP)
	 * stack using the name of the stack storage and the size of the
	 * stack. The returned address is the end of the storage, since
	 * stacks grow downwards.
	 * Out: r0 = physical address of stack base
	 */
	.macro get_up_stack _name, _size
	ldr r0, =(\_name + \_size)
	.endm
95
#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
	/*
	 * Macro for mitigating against speculative execution beyond the
	 * exception return. ARMv7 cores without the Virtualization
	 * extension do not support the eret instruction, so the return is
	 * performed with 'movs pc, lr' instead. The dsb/isb pair that
	 * follows is never architecturally executed; it only serves as a
	 * speculation barrier so the CPU cannot speculate past the return.
	 */
	.macro exception_return
	movs	pc, lr
	dsb	nsh
	isb
	.endm

#else
	/*
	 * Macro for mitigating against speculative execution beyond ERET.
	 * The dsb/isb pair after eret is never architecturally executed;
	 * it only stops speculation past the exception return.
	 */
	.macro exception_return
	eret
	dsb	nsh
	isb
	.endm
#endif
118
#if (ARM_ARCH_MAJOR == 7)
	/*
	 * ARMv7 does not support the stl (store-release) instruction, so
	 * emulate it: the dmb orders all prior accesses before the store,
	 * and the dsb ensures the store has completed before continuing.
	 */
	.macro stl _reg, _write_lock
	dmb
	str	\_reg, \_write_lock
	dsb
	.endm
#endif
127
	/*
	 * Helper macro to generate the best mov/movw/movt combinations
	 * according to the value to be moved: a single mov when \_val fits
	 * in 16 bits, otherwise a movw (low halfword) / movt (high
	 * halfword) pair. \_val must be an assembly-time constant.
	 */
	.macro mov_imm _reg, _val
		.if ((\_val) & 0xffff0000) == 0
			mov	\_reg, #(\_val)
		.else
			movw	\_reg, #((\_val) & 0xffff)
			movt	\_reg, #((\_val) >> 16)
		.endif
	.endm
140
	/*
	 * Macro to mark instances where we're jumping to a function and
	 * don't expect a return. To provide the function being jumped to
	 * with additional information, we use 'bl' instruction to jump
	 * rather than 'b'.
	 *
	 * Debuggers infer the location of a call from where LR points to,
	 * which is usually the instruction after 'bl'. If this macro
	 * expansion happens to be the last location in a function, that'll
	 * cause the LR to point a location beyond the function, thereby
	 * misleading debugger back trace. We therefore insert a 'nop' after
	 * the function call for debug builds, unless 'skip_nop' parameter
	 * is non-zero.
	 */
	.macro no_ret _func:req, skip_nop=0
	bl	\_func
#if DEBUG
	.ifeq \skip_nop
	nop
	.endif
#endif
	.endm
162
	/*
	 * Reserve space for a spin lock in assembly file, with the
	 * alignment and size the spinlock implementation expects
	 * (SPINLOCK_ASM_ALIGN / SPINLOCK_ASM_SIZE from lib/spinlock.h).
	 */
	.macro define_asm_spinlock _name:req
	.align	SPINLOCK_ASM_ALIGN
	\_name:
	.space	SPINLOCK_ASM_SIZE
	.endm
171
	/*
	 * Helper macro to OR the bottom 32 bits of `_val` into `_reg_l`
	 * and the top 32 bits of `_val` into `_reg_h`. If either the
	 * bottom or top word of `_val` is zero, the corresponding OR
	 * operation is skipped (it would be a no-op). `_val` must be an
	 * assembly-time constant whose halves are encodable as ARM
	 * immediates.
	 */
	.macro orr64_imm _reg_l, _reg_h, _val
		.if (\_val >> 32)
			orr \_reg_h, \_reg_h, #(\_val >> 32)
		.endif
		.if (\_val & 0xffffffff)
			orr \_reg_l, \_reg_l, #(\_val & 0xffffffff)
		.endif
	.endm
186
	/*
	 * Helper macro to bitwise-clear bits in `_reg_l` and
	 * `_reg_h` given a 64 bit immediate `_val`. The set bits
	 * in the bottom word of `_val` dictate which bits from
	 * `_reg_l` should be cleared. Similarly, the set bits in
	 * the top word of `_val` dictate which bits from `_reg_h`
	 * should be cleared. If either the bottom or top word of
	 * `_val` is zero, the corresponding BIC operation is skipped
	 * (it would be a no-op).
	 */
	.macro bic64_imm _reg_l, _reg_h, _val
		.if (\_val >> 32)
			bic \_reg_h, \_reg_h, #(\_val >> 32)
		.endif
		.if (\_val & 0xffffffff)
			bic \_reg_l, \_reg_l, #(\_val & 0xffffffff)
		.endif
	.endm
204
205	/*
206	 * Helper macro for carrying out division in software when
207	 * hardware division is not suported. \top holds the dividend
208	 * in the function call and the remainder after
209	 * the function is executed. \bot holds the divisor. \div holds
210	 * the quotient and \temp is a temporary registed used in calcualtion.
211	 * The division algorithm has been obtained from:
212	 * http://www.keil.com/support/man/docs/armasm/armasm_dom1359731155623.htm
213	 */
214	.macro	softudiv	div:req,top:req,bot:req,temp:req
215
216	mov     \temp, \bot
217	cmp     \temp, \top, lsr #1
218div1:
219	movls   \temp, \temp, lsl #1
220	cmp     \temp, \top, lsr #1
221	bls     div1
222	mov     \div, #0
223
224div2:
225	cmp     \top, \temp
226	subcs   \top, \top,\temp
227	ADC     \div, \div, \div
228	mov     \temp, \temp, lsr #1
229	cmp     \temp, \bot
230	bhs     div2
231	.endm
232#endif /* ASM_MACROS_S */
233