/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>

	.globl	get_afflvl_shift
	.globl	mpidr_mask_lower_afflvls
	.globl	eret
	.globl	smc

	.globl	zeromem16
	.globl	memcpy16

	.globl	disable_mmu_el3
	.globl	disable_mmu_icache_el3

#if SUPPORT_VFP
	.globl	enable_vfp
#endif


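/* -----------------------------------------------------------------------
 * Compute the bit offset of affinity level x0 within an MPIDR value.
 * A rough C-level sketch of the logic below (illustrative only, not
 * assembled; the exact C prototype is not asserted here):
 *
 *	unsigned int get_afflvl_shift(unsigned int aff_lvl)
 *	{
 *		if (aff_lvl == 3)
 *			aff_lvl++;	// Aff3 lives in MPIDR bits [39:32]
 *		return aff_lvl << MPIDR_AFFLVL_SHIFT;
 *	}
 * -----------------------------------------------------------------------
 */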
func get_afflvl_shift
	cmp	x0, #3
	cinc	x0, x0, eq
	mov	x1, #MPIDR_AFFLVL_SHIFT
	lsl	x0, x0, x1
	ret

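/* -----------------------------------------------------------------------
 * Mask off all affinity fields of the MPIDR in x0 below affinity level x1.
 * A rough C-level sketch of the logic below (illustrative only, not
 * assembled; the exact C prototype is not asserted here):
 *
 *	unsigned long mpidr_mask_lower_afflvls(unsigned long mpidr,
 *					       unsigned int afflvl)
 *	{
 *		unsigned int shift;
 *
 *		if (afflvl == 3)
 *			afflvl++;
 *		shift = afflvl << MPIDR_AFFLVL_SHIFT;
 *		return (mpidr >> shift) << shift;	// clear lower fields
 *	}
 * -----------------------------------------------------------------------
 */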
func mpidr_mask_lower_afflvls
	cmp	x1, #3
	cinc	x1, x1, eq
	mov	x2, #MPIDR_AFFLVL_SHIFT
	lsl	x2, x1, x2
	lsr	x0, x0, x2
	lsl	x0, x0, x2
	ret


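/* -----------------------------------------------------------------------
 * C-callable wrapper around the ERET instruction. Whatever the caller has
 * set up in the AAPCS64 argument registers (x0-x7) is still in place when
 * the exception return happens, so the target exception level sees those
 * values. Note there is no 'ret': control does not come back here.
 * -----------------------------------------------------------------------
 */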
func eret
	eret


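/* -----------------------------------------------------------------------
 * C-callable wrapper that issues SMC #0 with whatever arguments the caller
 * has placed in x0-x7. As with eret above, there is no 'ret' after the
 * instruction, so the caller does not get control back through this
 * wrapper.
 * -----------------------------------------------------------------------
 */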
func smc
	smc	#0

/* -----------------------------------------------------------------------
 * void zeromem16(void *mem, unsigned int length);
 *
 * Initialise a memory region to 0.
 * The memory address must be 16-byte aligned.
 * -----------------------------------------------------------------------
 */
func zeromem16
	add	x2, x0, x1
/* zero 16 bytes at a time */
z_loop16:
	sub	x3, x2, x0
	cmp	x3, #16
	b.lt	z_loop1
	stp	xzr, xzr, [x0], #16
	b	z_loop16
/* zero byte per byte */
z_loop1:
	cmp	x0, x2
	b.eq	z_end
	strb	wzr, [x0], #1
	b	z_loop1
z_end:	ret

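/* -----------------------------------------------------------------------
 * Illustrative caller for zeromem16 (a sketch only, not part of this
 * file; the buffer name and size are made up for the example):
 *
 *	static unsigned char scratch[256] __attribute__((aligned(16)));
 *
 *	zeromem16(scratch, sizeof(scratch));
 * -----------------------------------------------------------------------
 */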

/* --------------------------------------------------------------------------
 * void memcpy16(void *dest, const void *src, unsigned int length);
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 16-byte aligned.
 * --------------------------------------------------------------------------
 */
func memcpy16
/* copy 16 bytes at a time */
m_loop16:
	cmp	x2, #16
	b.lt	m_loop1
	ldp	x3, x4, [x1], #16
	stp	x3, x4, [x0], #16
	sub	x2, x2, #16
	b	m_loop16
/* copy byte per byte */
m_loop1:
	cbz	x2, m_end
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	subs	x2, x2, #1
	b.ne	m_loop1
m_end:	ret

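/* --------------------------------------------------------------------------
 * Illustrative caller for memcpy16 (a sketch only, not part of this file;
 * the buffer names and size are made up for the example):
 *
 *	static char src_buf[64] __attribute__((aligned(16)));
 *	static char dst_buf[64] __attribute__((aligned(16)));
 *
 *	memcpy16(dst_buf, src_buf, sizeof(dst_buf));
 * --------------------------------------------------------------------------
 */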
/* ---------------------------------------------------------------------------
 * Disable the MMU at EL3
 * This is implemented in assembler to ensure that the data cache is cleaned
 * and invalidated after the MMU is disabled without any intervening cacheable
 * data accesses
 * ---------------------------------------------------------------------------
 */

func disable_mmu_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu:
	mrs	x0, sctlr_el3
	bic	x0, x0, x1
	msr	sctlr_el3, x0
	isb				// ensure MMU is off
	mov	x0, #DCCISW		// DCache clean and invalidate
	b	dcsw_op_all


func disable_mmu_icache_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu


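/* ---------------------------------------------------------------------------
 * Note on the two helpers above: each loads the SCTLR bits to clear into x1,
 * turns the MMU (and caches) off, then tail-branches to dcsw_op_all with
 * x0 = DCCISW, so the clean and invalidate by set/way runs with the MMU
 * already disabled and the 'ret' in dcsw_op_all returns to the original
 * caller. From C they are plain calls taking no arguments (prototypes
 * assumed here, not asserted):
 *
 *	void disable_mmu_el3(void);
 *	void disable_mmu_icache_el3(void);
 * ---------------------------------------------------------------------------
 */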

/* ---------------------------------------------------------------------------
 * Enable the use of VFP at EL3
 * ---------------------------------------------------------------------------
 */
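/* ---------------------------------------------------------------------------
 * Illustrative caller (a sketch only, not part of this file); the helper
 * takes no arguments and returns nothing:
 *
 *	#if SUPPORT_VFP
 *	enable_vfp();	// stop FP/SIMD accesses from trapping before first use
 *	#endif
 * ---------------------------------------------------------------------------
 */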
#if SUPPORT_VFP
func enable_vfp
	mrs	x0, cpacr_el1
	orr	x0, x0, #CPACR_VFP_BITS		// don't trap FP/SIMD at EL1/EL0
	msr	cpacr_el1, x0
	mrs	x0, cptr_el3
	mov	x1, #AARCH64_CPTR_TFP
	bic	x0, x0, x1			// don't trap FP/SIMD to EL3
	msr	cptr_el3, x0
	isb
	ret
#endif
