xref: /rk3399_ARM-atf/lib/aarch64/misc_helpers.S (revision 7d37aa171158422b5ee7ee6c3cdad58f6aa066b4)
/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>

	.globl	get_afflvl_shift
	.globl	mpidr_mask_lower_afflvls
	.globl	eret
	.globl	smc

	.globl	zeromem16
	.globl	memcpy16

	.globl	disable_mmu_el3
	.globl	disable_mmu_icache_el3

#if SUPPORT_VFP
	.globl	enable_vfp
#endif

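/* -----------------------------------------------------------------------
 * Returns, in x0, the bit position of the MPIDR affinity field for the
 * affinity level passed in x0: level 0 -> 0, 1 -> 8, 2 -> 16 and 3 -> 32
 * (Aff3 lives in MPIDR bits [39:32], above the MT and U flags).
 * -----------------------------------------------------------------------
 */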
func get_afflvl_shift
	cmp	x0, #3
	cinc	x0, x0, eq
	mov	x1, #MPIDR_AFFLVL_SHIFT
	lsl	x0, x0, x1
	ret
endfunc get_afflvl_shift

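/* -----------------------------------------------------------------------
 * Clears the affinity fields of the MPIDR value in x0 that lie below the
 * affinity level passed in x1 and returns the result in x0.
 * -----------------------------------------------------------------------
 */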
func mpidr_mask_lower_afflvls
	cmp	x1, #3
	cinc	x1, x1, eq
	mov	x2, #MPIDR_AFFLVL_SHIFT
	lsl	x2, x1, x2
	lsr	x0, x0, x2
	lsl	x0, x0, x2
	ret
endfunc mpidr_mask_lower_afflvls


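/* -----------------------------------------------------------------------
 * Wrapper around the ERET instruction so that C code can perform an
 * exception return using the current ELR_ELx and SPSR_ELx.
 * -----------------------------------------------------------------------
 */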
func eret
	eret
endfunc eret


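/* -----------------------------------------------------------------------
 * Wrapper around the SMC instruction (immediate 0). Arguments and return
 * values are passed in the general purpose registers as set up by the
 * caller.
 * -----------------------------------------------------------------------
 */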
func smc
	smc	#0
endfunc smc

/* -----------------------------------------------------------------------
 * void zeromem16(void *mem, unsigned int length);
 *
 * Initialise a memory region to 0.
 * The memory address must be 16-byte aligned.
 * -----------------------------------------------------------------------
 */
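/* -----------------------------------------------------------------------
 * Illustrative usage: a minimal sketch of a call from C, assuming a
 * hypothetical buffer declared with GCC's aligned attribute so that the
 * 16-byte alignment requirement holds:
 *
 *	static unsigned char scratch[256] __attribute__((aligned(16)));
 *	zeromem16(scratch, sizeof(scratch));
 *
 * The length need not be a multiple of 16; any remaining tail is zeroed
 * one byte at a time in z_loop1 below.
 * -----------------------------------------------------------------------
 */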
func zeromem16
#if ASM_ASSERTION
	tst	x0, #0xf
	ASM_ASSERT(eq)
#endif
	add	x2, x0, x1
/* zero 16 bytes at a time */
z_loop16:
	sub	x3, x2, x0
	cmp	x3, #16
	b.lt	z_loop1
	stp	xzr, xzr, [x0], #16
	b	z_loop16
/* zero byte by byte */
z_loop1:
	cmp	x0, x2
	b.eq	z_end
	strb	wzr, [x0], #1
	b	z_loop1
z_end:
	ret
endfunc zeromem16


/* --------------------------------------------------------------------------
 * void memcpy16(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 16-byte aligned.
 * --------------------------------------------------------------------------
 */
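/* --------------------------------------------------------------------------
 * Illustrative usage: a minimal sketch of a call from C, assuming two
 * hypothetical, non-overlapping buffers that satisfy the 16-byte
 * alignment requirement:
 *
 *	static unsigned char src_buf[128] __attribute__((aligned(16)));
 *	static unsigned char dst_buf[128] __attribute__((aligned(16)));
 *	memcpy16(dst_buf, src_buf, sizeof(src_buf));
 *
 * As with zeromem16, any tail smaller than 16 bytes is copied one byte at
 * a time in m_loop1 below.
 * --------------------------------------------------------------------------
 */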
func memcpy16
#if ASM_ASSERTION
	orr	x3, x0, x1
	tst	x3, #0xf
	ASM_ASSERT(eq)
#endif
/* copy 16 bytes at a time */
m_loop16:
	cmp	x2, #16
	b.lt	m_loop1
	ldp	x3, x4, [x1], #16
	stp	x3, x4, [x0], #16
	sub	x2, x2, #16
	b	m_loop16
/* copy byte by byte */
m_loop1:
	cbz	x2, m_end
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	subs	x2, x2, #1
	b.ne	m_loop1
m_end:
	ret
endfunc memcpy16

/* ---------------------------------------------------------------------------
 * Disable the MMU at EL3
 * This is implemented in assembler to ensure that the data cache is cleaned
 * and invalidated after the MMU is disabled without any intervening cacheable
 * data accesses
 * ---------------------------------------------------------------------------
 */

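/* ---------------------------------------------------------------------------
 * Note: both routines below clear the relevant SCTLR_EL3 bits and then
 * branch to dcsw_op_all with x0 set to DCCISW and the link register left
 * untouched, so the clean and invalidate by set/way returns directly to
 * the original caller (dcsw_op_all is assumed to be provided by the cache
 * helpers). A caller in C is assumed to simply invoke disable_mmu_el3()
 * or disable_mmu_icache_el3() with no arguments.
 * ---------------------------------------------------------------------------
 */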
func disable_mmu_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu:
	mrs	x0, sctlr_el3
	bic	x0, x0, x1
	msr	sctlr_el3, x0
	isb				// ensure MMU is off
	mov	x0, #DCCISW		// DCache clean and invalidate
	b	dcsw_op_all
endfunc disable_mmu_el3


func disable_mmu_icache_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu
endfunc disable_mmu_icache_el3

/* ---------------------------------------------------------------------------
 * Enable the use of VFP at EL3
 * ---------------------------------------------------------------------------
 */
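/* ---------------------------------------------------------------------------
 * The routine below ORs CPACR_VFP_BITS into CPACR_EL1 and clears the
 * AARCH64_CPTR_TFP bit in CPTR_EL3 so that FP/Advanced SIMD instructions
 * are not trapped to EL3. A caller in C is assumed to simply invoke
 * enable_vfp() with no arguments before any floating point is used.
 * ---------------------------------------------------------------------------
 */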
#if SUPPORT_VFP
func enable_vfp
	mrs	x0, cpacr_el1
	orr	x0, x0, #CPACR_VFP_BITS
	msr	cpacr_el1, x0
	mrs	x0, cptr_el3
	mov	x1, #AARCH64_CPTR_TFP
	bic	x0, x0, x1
	msr	cptr_el3, x0
	isb
	ret
endfunc enable_vfp
#endif
