xref: /rk3399_ARM-atf/lib/aarch64/misc_helpers.S (revision a1ec2f4c9aa089fe938b8d233ebd18c389cbad78)
/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>

	.globl	enable_irq
	.globl	disable_irq

	.globl	enable_fiq
	.globl	disable_fiq

	.globl	enable_serror
	.globl	disable_serror

	.globl	enable_debug_exceptions
	.globl	disable_debug_exceptions

	.globl	read_daif
	.globl	write_daif

	.globl	read_spsr
	.globl	read_spsr_el1
	.globl	read_spsr_el2
	.globl	read_spsr_el3

	.globl	write_spsr
	.globl	write_spsr_el1
	.globl	write_spsr_el2
	.globl	write_spsr_el3

	.globl	read_elr
	.globl	read_elr_el1
	.globl	read_elr_el2
	.globl	read_elr_el3

	.globl	write_elr
	.globl	write_elr_el1
	.globl	write_elr_el2
	.globl	write_elr_el3

	.globl	get_afflvl_shift
	.globl	mpidr_mask_lower_afflvls
	.globl	dsb
	.globl	isb
	.globl	sev
	.globl	wfe
	.globl	wfi
	.globl	eret
	.globl	smc

	.globl	zeromem16
	.globl	memcpy16

	.globl	disable_mmu_el3
	.globl	disable_mmu_icache_el3

func get_afflvl_shift
	cmp	x0, #3
	cinc	x0, x0, eq
	mov	x1, #MPIDR_AFFLVL_SHIFT
	lsl	x0, x0, x1
	ret

func mpidr_mask_lower_afflvls
	cmp	x1, #3
	cinc	x1, x1, eq
	mov	x2, #MPIDR_AFFLVL_SHIFT
	lsl	x2, x1, x2
	lsr	x0, x0, x2
	lsl	x0, x0, x2
	ret

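	/* -----------------------------------------------------
	 * Illustrative sketch of the contracts of the two
	 * helpers above (prototypes are assumed, not taken from
	 * this file; MPIDR_AFFLVL_SHIFT comes from arch.h and
	 * gives 8 bits per affinity field):
	 *
	 *	unsigned int get_afflvl_shift(unsigned int lvl);
	 *		returns the MPIDR bit position of the given
	 *		affinity level: 0, 8, 16, and 32 for level 3,
	 *		since Aff3 lives in MPIDR[39:32].
	 *
	 *	unsigned long mpidr_mask_lower_afflvls(unsigned long mpidr,
	 *					       unsigned int lvl);
	 *		e.g. with lvl == 1 it clears the Aff0 field,
	 *		keeping only the cluster and higher fields.
	 * -----------------------------------------------------
	 */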
	/* -----------------------------------------------------
	 * Asynchronous exception manipulation accessors
	 * -----------------------------------------------------
	 */
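	/* -----------------------------------------------------
	 * Illustrative C-level usage of the accessors below.
	 * The prototypes are assumed (they are normally declared
	 * in the arch helpers header, not in this file); the
	 * save/restore pattern is only a sketch:
	 *
	 *	unsigned long flags = read_daif();
	 *	disable_irq();
	 *	... critical section ...
	 *	write_daif(flags);
	 * -----------------------------------------------------
	 */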
func enable_irq
	msr	daifclr, #DAIF_IRQ_BIT
	ret


func enable_fiq
	msr	daifclr, #DAIF_FIQ_BIT
	ret


func enable_serror
	msr	daifclr, #DAIF_ABT_BIT
	ret


func enable_debug_exceptions
	msr	daifclr, #DAIF_DBG_BIT
	ret


func disable_irq
	msr	daifset, #DAIF_IRQ_BIT
	ret


func disable_fiq
	msr	daifset, #DAIF_FIQ_BIT
	ret


func disable_serror
	msr	daifset, #DAIF_ABT_BIT
	ret


func disable_debug_exceptions
	msr	daifset, #DAIF_DBG_BIT
	ret


func read_daif
	mrs	x0, daif
	ret


func write_daif
	msr	daif, x0
	ret

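	/* -----------------------------------------------------
	 * SPSR and ELR accessors. The EL-agnostic read_spsr,
	 * write_spsr, read_elr and write_elr entry points read
	 * CurrentEL and branch to the matching per-EL accessor.
	 * -----------------------------------------------------
	 */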
func read_spsr
	mrs	x0, CurrentEl
	cmp	x0, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	read_spsr_el1
	cmp	x0, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	read_spsr_el2
	cmp	x0, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	read_spsr_el3


func read_spsr_el1
	mrs	x0, spsr_el1
	ret


func read_spsr_el2
	mrs	x0, spsr_el2
	ret


func read_spsr_el3
	mrs	x0, spsr_el3
	ret


func write_spsr
	mrs	x1, CurrentEl
	cmp	x1, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	write_spsr_el1
	cmp	x1, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	write_spsr_el2
	cmp	x1, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	write_spsr_el3


func write_spsr_el1
	msr	spsr_el1, x0
	ret


func write_spsr_el2
	msr	spsr_el2, x0
	ret


func write_spsr_el3
	msr	spsr_el3, x0
	ret


func read_elr
	mrs	x0, CurrentEl
	cmp	x0, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	read_elr_el1
	cmp	x0, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	read_elr_el2
	cmp	x0, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	read_elr_el3


func read_elr_el1
	mrs	x0, elr_el1
	ret


func read_elr_el2
	mrs	x0, elr_el2
	ret


func read_elr_el3
	mrs	x0, elr_el3
	ret


func write_elr
	mrs	x1, CurrentEl
	cmp	x1, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	write_elr_el1
	cmp	x1, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	write_elr_el2
	cmp	x1, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	write_elr_el3


func write_elr_el1
	msr	elr_el1, x0
	ret


func write_elr_el2
	msr	elr_el2, x0
	ret


func write_elr_el3
	msr	elr_el3, x0
	ret

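	/* -----------------------------------------------------
	 * Wrappers that expose individual barrier, hint and
	 * control instructions (dsb, isb, sev, wfe, wfi, eret,
	 * smc) as callable functions.
	 * -----------------------------------------------------
	 */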
func dsb
	dsb	sy
	ret


func isb
	isb
	ret


func sev
	sev
	ret


func wfe
	wfe
	ret


func wfi
	wfi
	ret


func eret
	eret


func smc
	smc	#0

/* -----------------------------------------------------------------------
 * void zeromem16(void *mem, unsigned int length);
 *
 * Initialise a memory region to 0.
 * The memory address must be 16-byte aligned.
 * -----------------------------------------------------------------------
 */
func zeromem16
	add	x2, x0, x1
/* zero 16 bytes at a time */
z_loop16:
	sub	x3, x2, x0
	cmp	x3, #16
	b.lt	z_loop1
	stp	xzr, xzr, [x0], #16
	b	z_loop16
/* zero byte per byte */
z_loop1:
	cmp	x0, x2
	b.eq	z_end
	strb	wzr, [x0], #1
	b	z_loop1
z_end:	ret

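/* -----------------------------------------------------------------------
 * Illustrative C-level use of zeromem16, assuming the prototype above.
 * The buffer name and GCC attribute syntax are a sketch, not part of
 * this file; the only requirement taken from this file is the 16-byte
 * alignment of the start address:
 *
 *	static unsigned char scratch[256] __attribute__((aligned(16)));
 *	zeromem16(scratch, sizeof(scratch));
 * -----------------------------------------------------------------------
 */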

/* --------------------------------------------------------------------------
 * void memcpy16(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 16-byte aligned.
 * --------------------------------------------------------------------------
 */
func memcpy16
/* copy 16 bytes at a time */
m_loop16:
	cmp	x2, #16
	b.lt	m_loop1
	ldp	x3, x4, [x1], #16
	stp	x3, x4, [x0], #16
	sub	x2, x2, #16
	b	m_loop16
/* copy byte per byte */
m_loop1:
	cbz	x2, m_end
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	subs	x2, x2, #1
	b.ne	m_loop1
m_end:	ret

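/* --------------------------------------------------------------------------
 * Illustrative C-level use of memcpy16; dst and src below are hypothetical
 * non-overlapping, 16-byte aligned buffers (the alignment and no-overlap
 * requirements are stated in the comment above and, unlike standard
 * memcpy, must be respected by the caller):
 *
 *	memcpy16(dst, src, len);
 * --------------------------------------------------------------------------
 */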
/* ---------------------------------------------------------------------------
 * Disable the MMU at EL3
 * This is implemented in assembler to ensure that the data cache is cleaned
 * and invalidated after the MMU is disabled without any intervening cacheable
 * data accesses
 * ---------------------------------------------------------------------------
 */

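/* ---------------------------------------------------------------------------
 * Assumed C-level view of the two entry points below (the prototypes are
 * not part of this file):
 *
 *	void disable_mmu_el3(void);
 *	void disable_mmu_icache_el3(void);
 *
 * Both clear the relevant SCTLR_EL3 bits and then tail-branch to
 * dcsw_op_all to clean and invalidate the data cache by set/way.
 * ---------------------------------------------------------------------------
 */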
func disable_mmu_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu:
	mrs	x0, sctlr_el3
	bic	x0, x0, x1
	msr	sctlr_el3, x0
	isb				// ensure MMU is off
	mov	x0, #DCCISW		// DCache clean and invalidate
	b	dcsw_op_all


func disable_mmu_icache_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu
