xref: /rk3399_ARM-atf/lib/aarch64/misc_helpers.S (revision fb037bfb7cbf7b404c069b4ebac5a10059d948b1)
1/*
2 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * Redistributions of source code must retain the above copyright notice, this
8 * list of conditions and the following disclaimer.
9 *
10 * Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 *
14 * Neither the name of ARM nor the names of its contributors may be used
15 * to endorse or promote products derived from this software without specific
16 * prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <arch_helpers.h>
32#include <runtime_svc.h>
33#include <asm_macros.S>
34
35	.globl	enable_irq
36	.globl	disable_irq
37
38	.globl	enable_fiq
39	.globl	disable_fiq
40
41	.globl	enable_serror
42	.globl	disable_serror
43
44	.globl	enable_debug_exceptions
45	.globl	disable_debug_exceptions
46
47	.globl	read_daif
48	.globl	write_daif
49
50	.globl	read_spsr
51	.globl	read_spsr_el1
52	.globl	read_spsr_el2
53	.globl	read_spsr_el3
54
55	.globl	write_spsr
56	.globl	write_spsr_el1
57	.globl	write_spsr_el2
58	.globl	write_spsr_el3
59
60	.globl	read_elr
61	.globl	read_elr_el1
62	.globl	read_elr_el2
63	.globl	read_elr_el3
64
65	.globl	write_elr
66	.globl	write_elr_el1
67	.globl	write_elr_el2
68	.globl	write_elr_el3
69
70	.globl	get_afflvl_shift
71	.globl	mpidr_mask_lower_afflvls
72	.globl	dsb
73	.globl	isb
74	.globl	sev
75	.globl	wfe
76	.globl	wfi
77	.globl	eret
78	.globl	smc
79
80	.globl	zeromem16
81	.globl	memcpy16
82
83
func get_afflvl_shift
	/*
	 * Map an affinity level (0-3, in x0) to the shift needed to
	 * extract that affinity field from an MPIDR.  Level 3 is not
	 * contiguous with levels 0-2 in MPIDR, so it is bumped to
	 * slot 4 before scaling by MPIDR_AFFLVL_SHIFT (defined in
	 * the arch headers).  Clobbers x1.
	 */
	cmp	x0, #3
	csinc	x0, x0, x0, ne		/* x0 += 1 only when x0 == 3 */
	mov	x1, #MPIDR_AFFLVL_SHIFT
	lsl	x0, x0, x1		/* shift = slot << MPIDR_AFFLVL_SHIFT */
	ret
90
func mpidr_mask_lower_afflvls
	/*
	 * Zero every affinity field of the MPIDR in x0 below the
	 * affinity level given in x1, by shifting the value right
	 * then left again.  As in get_afflvl_shift, level 3 maps to
	 * slot 4.  Clobbers x1, x2.
	 */
	cmp	x1, #3
	csinc	x1, x1, x1, ne		/* x1 += 1 only when x1 == 3 */
	mov	x2, #MPIDR_AFFLVL_SHIFT
	lsl	x2, x1, x2		/* x2 = bit shift for this level */
	lsr	x0, x0, x2		/* drop the lower fields... */
	lsl	x0, x0, x2		/* ...and restore the alignment */
	ret
99
100	/* -----------------------------------------------------
101	 * Asynchronous exception manipulation accessors
102	 * -----------------------------------------------------
103	 */
func enable_irq
	/* Unmask IRQs: clear the PSTATE.I bit via the DAIFClr view */
	msr	daifclr, #DAIF_IRQ_BIT
	ret


func enable_fiq
	/* Unmask FIQs: clear the PSTATE.F bit */
	msr	daifclr, #DAIF_FIQ_BIT
	ret


func enable_serror
	/* Unmask SError/asynchronous aborts: clear the PSTATE.A bit */
	msr	daifclr, #DAIF_ABT_BIT
	ret


func enable_debug_exceptions
	/* Unmask debug exceptions: clear the PSTATE.D bit */
	msr	daifclr, #DAIF_DBG_BIT
	ret


func disable_irq
	/* Mask IRQs: set the PSTATE.I bit via the DAIFSet view */
	msr	daifset, #DAIF_IRQ_BIT
	ret


func disable_fiq
	/* Mask FIQs: set the PSTATE.F bit */
	msr	daifset, #DAIF_FIQ_BIT
	ret


func disable_serror
	/* Mask SError/asynchronous aborts: set the PSTATE.A bit */
	msr	daifset, #DAIF_ABT_BIT
	ret


func disable_debug_exceptions
	/* Mask debug exceptions: set the PSTATE.D bit */
	msr	daifset, #DAIF_DBG_BIT
	ret
142
143
func read_daif
	/* unsigned int read_daif(void): return the PSTATE.DAIF bits in x0 */
	mrs	x0, daif
	ret


func write_daif
	/* void write_daif(unsigned int daif): install x0 into PSTATE.DAIF */
	msr	daif, x0
	ret
152
153
func read_spsr
	/*
	 * unsigned int read_spsr(void)
	 * Return the SPSR of the current exception level.  Dispatches
	 * on CurrentEL; each match tail-branches to the EL-specific
	 * reader below, whose ret goes straight back to the caller.
	 * NOTE(review): there is no default case -- if CurrentEL
	 * matches none of EL1/EL2/EL3 (e.g. EL0), execution falls
	 * through into read_spsr_el1; confirm this is intended.
	 */
	mrs	x0, CurrentEl
	cmp	x0, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	read_spsr_el1
	cmp	x0, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	read_spsr_el2
	cmp	x0, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	read_spsr_el3


func read_spsr_el1
	/* Return SPSR_EL1 in x0 */
	mrs	x0, spsr_el1
	ret


func read_spsr_el2
	/* Return SPSR_EL2 in x0 */
	mrs	x0, spsr_el2
	ret


func read_spsr_el3
	/* Return SPSR_EL3 in x0 */
	mrs	x0, spsr_el3
	ret
177
178
func write_spsr
	/*
	 * void write_spsr(unsigned int spsr)
	 * Write x0 to the SPSR of the current exception level.  x1 is
	 * used as scratch for CurrentEL so the value being written
	 * stays in x0 for the EL-specific writer.
	 * NOTE(review): there is no default case -- if CurrentEL
	 * matches none of EL1/EL2/EL3, execution falls through into
	 * write_spsr_el1; confirm this is intended.
	 */
	mrs	x1, CurrentEl
	cmp	x1, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	write_spsr_el1
	cmp	x1, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	write_spsr_el2
	cmp	x1, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	write_spsr_el3


func write_spsr_el1
	/* Write x0 to SPSR_EL1; isb synchronizes the register write */
	msr	spsr_el1, x0
	isb
	ret


func write_spsr_el2
	/* Write x0 to SPSR_EL2; isb synchronizes the register write */
	msr	spsr_el2, x0
	isb
	ret


func write_spsr_el3
	/* Write x0 to SPSR_EL3; isb synchronizes the register write */
	msr	spsr_el3, x0
	isb
	ret
205
206
func read_elr
	/*
	 * unsigned long read_elr(void)
	 * Return the ELR of the current exception level.  Dispatches
	 * on CurrentEL; each match tail-branches to the EL-specific
	 * reader below, whose ret goes straight back to the caller.
	 * NOTE(review): there is no default case -- if CurrentEL
	 * matches none of EL1/EL2/EL3, execution falls through into
	 * read_elr_el1; confirm this is intended.
	 */
	mrs	x0, CurrentEl
	cmp	x0, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	read_elr_el1
	cmp	x0, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	read_elr_el2
	cmp	x0, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	read_elr_el3


func read_elr_el1
	/* Return ELR_EL1 in x0 */
	mrs	x0, elr_el1
	ret


func read_elr_el2
	/* Return ELR_EL2 in x0 */
	mrs	x0, elr_el2
	ret


func read_elr_el3
	/* Return ELR_EL3 in x0 */
	mrs	x0, elr_el3
	ret
230
231
func write_elr
	/*
	 * void write_elr(unsigned long elr)
	 * Write x0 to the ELR of the current exception level.  x1 is
	 * used as scratch for CurrentEL so the value being written
	 * stays in x0 for the EL-specific writer.
	 * NOTE(review): there is no default case -- if CurrentEL
	 * matches none of EL1/EL2/EL3, execution falls through into
	 * write_elr_el1; confirm this is intended.
	 */
	mrs	x1, CurrentEl
	cmp	x1, #(MODE_EL1 << MODE_EL_SHIFT)
	b.eq	write_elr_el1
	cmp	x1, #(MODE_EL2 << MODE_EL_SHIFT)
	b.eq	write_elr_el2
	cmp	x1, #(MODE_EL3 << MODE_EL_SHIFT)
	b.eq	write_elr_el3


func write_elr_el1
	/* Write x0 to ELR_EL1; isb synchronizes the register write */
	msr	elr_el1, x0
	isb
	ret


func write_elr_el2
	/* Write x0 to ELR_EL2; isb synchronizes the register write */
	msr	elr_el2, x0
	isb
	ret


func write_elr_el3
	/* Write x0 to ELR_EL3; isb synchronizes the register write */
	msr	elr_el3, x0
	isb
	ret
258
259
func dsb
	/* Full-system data synchronization barrier */
	dsb	sy
	ret


func isb
	/* Instruction synchronization barrier */
	isb
	ret


func sev
	/* Send-event hint (wakes PEs waiting in wfe) */
	sev
	ret


func wfe
	/* Wait-for-event hint */
	wfe
	ret


func wfi
	/* Wait-for-interrupt hint */
	wfi
	ret


func eret
	/*
	 * Exception return: control transfers to the ELR of the
	 * current EL with PSTATE restored from its SPSR.  This never
	 * returns to the caller, hence no ret.
	 */
	eret


func smc
	/*
	 * Issue an SMC; arguments and the function identifier are
	 * expected in registers as loaded by the caller (the #0
	 * immediate is the conventional SMC id -- TODO confirm the
	 * handler ignores it).
	 * NOTE(review): there is no ret after the smc -- if the SMC
	 * handler ever returns to the next instruction, execution
	 * falls through into the code that follows this function;
	 * confirm callers treat this as non-returning.
	 */
	smc	#0
291
292/* -----------------------------------------------------------------------
293 * void zeromem16(void *mem, unsigned int length);
294 *
295 * Initialise a memory region to 0.
296 * The memory address must be 16-byte aligned.
297 * -----------------------------------------------------------------------
298 */
func zeromem16
	/* x0 = write cursor, x1 = bytes remaining to clear */
z_bulk:
	cmp	x1, #16
	b.lt	z_tail			/* less than one 16-byte chunk left */
	stp	xzr, xzr, [x0], #16
	sub	x1, x1, #16
	b	z_bulk
	/* clear the trailing 0-15 bytes one at a time */
z_tail:
	cbz	x1, z_done
	strb	wzr, [x0], #1
	sub	x1, x1, #1
	b	z_tail
z_done:
	ret
315
316
317/* --------------------------------------------------------------------------
318 * void memcpy16(void *dest, const void *src, unsigned int length)
319 *
320 * Copy length bytes from memory area src to memory area dest.
321 * The memory areas should not overlap.
322 * Destination and source addresses must be 16-byte aligned.
323 * --------------------------------------------------------------------------
324 */
func memcpy16
	/* x0 = dest cursor, x1 = src cursor, x2 = bytes remaining */
m_bulk:
	cmp	x2, #16
	b.lt	m_tail			/* less than one 16-byte chunk left */
	ldp	x3, x4, [x1], #16
	stp	x3, x4, [x0], #16
	sub	x2, x2, #16
	b	m_bulk
	/* copy the trailing 0-15 bytes one at a time */
m_tail:
	cbz	x2, m_done
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	sub	x2, x2, #1
	b	m_tail
m_done:
	ret
342