/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>

	.globl	get_afflvl_shift
	.globl	mpidr_mask_lower_afflvls
	.globl	eret
	.globl	smc

	.globl	zero_normalmem
	.globl	zeromem
	.globl	zeromem16
	.globl	memcpy16

	.globl	disable_mmu_el3
	.globl	disable_mmu_icache_el3

#if SUPPORT_VFP
	.globl	enable_vfp
#endif

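/*
 * Return, in x0, the bit shift within an MPIDR value of the affinity level
 * passed in x0. Levels 0 to 2 map to shifts 0, 8 and 16; level 3 lives in
 * MPIDR bits [39:32], hence the increment before shifting.
 */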
func get_afflvl_shift
	cmp	x0, #3
	cinc	x0, x0, eq
	mov	x1, #MPIDR_AFFLVL_SHIFT
	lsl	x0, x0, x1
	ret
endfunc get_afflvl_shift

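/*
 * Take an MPIDR value in x0 and an affinity level in x1, and return in x0
 * the MPIDR with all affinity fields below that level cleared.
 */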
func mpidr_mask_lower_afflvls
	cmp	x1, #3
	cinc	x1, x1, eq
	mov	x2, #MPIDR_AFFLVL_SHIFT
	lsl	x2, x1, x2
	lsr	x0, x0, x2
	lsl	x0, x0, x2
	ret
endfunc mpidr_mask_lower_afflvls


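/* Perform an exception return using the current ELR and SPSR */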
func eret
	eret
endfunc eret


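/*
 * Issue an SMC. The function identifier, arguments and return values are
 * passed in registers x0-x7 as set up by the caller.
 */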
func smc
	smc	#0
endfunc smc

/* -----------------------------------------------------------------------
 * void zeromem16(void *mem, unsigned int length);
 *
 * Initialise a memory region to 0.
 * The memory address must be 16-byte aligned.
 * NOTE: This function is deprecated and zeromem should be used instead.
 * -----------------------------------------------------------------------
 */
.equ	zeromem16, zeromem

/* -----------------------------------------------------------------------
 * void zero_normalmem(void *mem, unsigned int length);
 *
 * Initialise a region in normal memory to 0. This function complies with the
 * AAPCS and can be called from C code.
 *
 * NOTE: The MMU must be enabled when using this function as it can only
 *       operate on normal memory. It is intended to be mainly used from C
 *       code when the MMU is usually enabled.
 * -----------------------------------------------------------------------
 */
.equ	zero_normalmem, zeromem_dczva
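/*
 * Illustrative use from C, with the MMU and data cache enabled (the buffer
 * name below is hypothetical):
 *
 *   extern void zero_normalmem(void *mem, unsigned int length);
 *   static unsigned char scratch[128];
 *   zero_normalmem(scratch, sizeof(scratch));
 */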

/* -----------------------------------------------------------------------
 * void zeromem(void *mem, unsigned int length);
 *
 * Initialise a region of device memory to 0. This function complies with the
 * AAPCS and can be called from C code.
 *
 * NOTE: When data caches and MMU are enabled, zero_normalmem can usually be
 *       used instead for faster zeroing.
 *
 * -----------------------------------------------------------------------
 */
func zeromem
	/* x2 is the address past the last zeroed address */
	add	x2, x0, x1
	/*
	 * Use the fallback path that does not use the DC ZVA instruction and
	 * therefore does not require the MMU to be enabled.
	 */
	b	.Lzeromem_dczva_fallback_entry
endfunc zeromem

/* -----------------------------------------------------------------------
 * void zeromem_dczva(void *mem, unsigned int length);
 *
 * Fill a region of normal memory of size "length" in bytes with null bytes.
 * The MMU must be enabled and the memory must be of normal type. This is
 * because this function internally uses the DC ZVA instruction, which
 * generates an Alignment fault if used on any type of Device memory (see
 * section D3.4.9 of the ARMv8 ARM, issue k). When the MMU is disabled, all
 * memory behaves like Device-nGnRnE memory (see section D4.2.8), hence the
 * requirement on the MMU being enabled.
 * NOTE: The code assumes that the block size as defined in the DCZID_EL0
 *       register is at least 16 bytes.
 * -----------------------------------------------------------------------
 */
func zeromem_dczva

	/*
	 * The function consists of a series of loops that zero memory one byte
	 * at a time, 16 bytes at a time or using the DC ZVA instruction to
	 * zero aligned blocks, whose size is assumed to be at least 16 bytes.
	 * In the case where the DC ZVA instruction cannot be used or if the
	 * first 16 bytes loop would overflow, there is a fallback path that
	 * does not use DC ZVA.
	 * Note: The fallback path is also used by the zeromem function that
	 *       branches to it directly.
	 *
	 *              +---------+   zeromem_dczva
	 *              |  entry  |
	 *              +----+----+
	 *                   |
	 *                   v
	 *              +---------+
	 *              | checks  |>o-------+ (If any check fails, fallback)
	 *              +----+----+         |
	 *                   |              |---------------+
	 *                   v              | Fallback path |
	 *            +------+------+       |---------------+
	 *            | 1 byte loop |       |
	 *            +------+------+ .Lzeromem_dczva_initial_1byte_aligned_end
	 *                   |              |
	 *                   v              |
	 *           +-------+-------+      |
	 *           | 16 bytes loop |      |
	 *           +-------+-------+      |
	 *                   |              |
	 *                   v              |
	 *            +------+------+ .Lzeromem_dczva_blocksize_aligned
	 *            | DC ZVA loop |       |
	 *            +------+------+       |
	 *       +--------+  |              |
	 *       |        |  |              |
	 *       |        v  v              |
	 *       |   +-------+-------+ .Lzeromem_dczva_final_16bytes_aligned
	 *       |   | 16 bytes loop |      |
	 *       |   +-------+-------+      |
	 *       |           |              |
	 *       |           v              |
	 *       |    +------+------+ .Lzeromem_dczva_final_1byte_aligned
	 *       |    | 1 byte loop |       |
	 *       |    +-------------+       |
	 *       |           |              |
	 *       |           v              |
	 *       |       +---+--+           |
	 *       |       | exit |           |
	 *       |       +------+           |
	 *       |                          |
	 *       |           +--------------+    +------------------+ zeromem
	 *       |           |  +----------------| zeromem function |
	 *       |           |  |                +------------------+
	 *       |           v  v
	 *       |    +-------------+ .Lzeromem_dczva_fallback_entry
	 *       |    | 1 byte loop |
	 *       |    +------+------+
	 *       |           |
	 *       +-----------+
	 */

	/*
	 * Readable names for registers
	 *
	 * Registers x0, x1 and x2 are also set by zeromem which
	 * branches into the fallback path directly, so cursor, length and
	 * stop_address should not be retargeted to other registers.
	 */
	cursor       .req x0 /* Start address and then current address */
	length       .req x1 /* Length in bytes of the region to zero out */
	/* Reusing x1 as length is never used after block_mask is set */
	block_mask   .req x1 /* Bitmask of the block size read in DCZID_EL0 */
	stop_address .req x2 /* Address past the last zeroed byte */
	block_size   .req x3 /* Size of a block in bytes as read in DCZID_EL0 */
	tmp1         .req x4
	tmp2         .req x5

#if ENABLE_ASSERTIONS
	/*
	 * Check for the M bit (MMU enabled) in the current SCTLR_EL(1|3)
	 * register value and panic if the MMU is disabled.
	 */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31)
	mrs	tmp1, sctlr_el3
#else
	mrs	tmp1, sctlr_el1
#endif

	tst	tmp1, #SCTLR_M_BIT
	ASM_ASSERT(ne)
#endif /* ENABLE_ASSERTIONS */

	/* stop_address is the address past the last byte to zero */
	add	stop_address, cursor, length

	/*
	 * Read dczid_el0: its 4 lowest bits hold log2(<block size in words>),
	 * i.e. log2(<block size in bytes>) - 2 (see encoding of the
	 * dczid_el0 register)
	 */
	mrs	block_size, dczid_el0

	/*
	 * Select the 4 lowest bits and convert the extracted log2(<block size
	 * in words>) to <block size in bytes>
	 */
	ubfx	block_size, block_size, #0, #4
	mov	tmp2, #(1 << 2)
	lsl	block_size, tmp2, block_size
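	/*
	 * For example, a DCZID_EL0.BS value of 4 (a common setting) gives
	 * block_size = 4 << 4 = 64 bytes.
	 */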

#if ENABLE_ASSERTIONS
	/*
	 * Assumes the block size is at least 16 bytes to avoid manual
	 * realignment of the cursor at the end of the DCZVA loop.
	 */
	cmp	block_size, #16
	ASM_ASSERT(hs)
#endif
	/*
	 * It is not worth doing all the setup for a region smaller than a
	 * block, and this check also protects against zeroing a whole block
	 * when the area to zero is smaller than that. As the block size is
	 * assumed to be at least 16 bytes, it also protects the initial
	 * aligning loops from trying to zero 16 bytes when length is less
	 * than 16.
	 */
	cmp	length, block_size
	b.lo	.Lzeromem_dczva_fallback_entry

	/*
	 * Calculate the bitmask of the block alignment. It will never
	 * underflow as the block size is between 4 bytes and 2kB.
	 * block_mask = block_size - 1
	 */
	sub	block_mask, block_size, #1

	/*
	 * The length alias should not be used after this point unless it is
	 * defined as a register other than block_mask's.
	 */
	.unreq	length

	/*
	 * If the start address is already aligned to the zero block size, go
	 * straight to the cache zeroing loop. This is safe because at this
	 * point, the length cannot be smaller than a block size.
	 */
	tst	cursor, block_mask
	b.eq	.Lzeromem_dczva_blocksize_aligned

	/*
	 * Calculate the first block-size-aligned address. It is assumed that
	 * the zero block size is at least 16 bytes. This address is the stop
	 * address of the initial 16-byte loop.
	 */
	orr	tmp1, cursor, block_mask
	add	tmp1, tmp1, #1
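	/*
	 * ORR-ing with block_mask then adding 1 rounds up to the next block
	 * boundary, e.g. with a hypothetical cursor of 0x1234 and block_mask
	 * of 0x3f: (0x1234 | 0x3f) + 1 = 0x1240.
	 */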

	/*
	 * If the addition overflows, skip the cache zeroing loops. This is
	 * quite unlikely however.
	 */
	cbz	tmp1, .Lzeromem_dczva_fallback_entry

	/*
	 * If the first block-size-aligned address is past the last address,
	 * fall back to the simpler code.
	 */
	cmp	tmp1, stop_address
	b.hi	.Lzeromem_dczva_fallback_entry

	/*
	 * If the start address is already aligned to 16 bytes, skip this loop.
	 * It is safe to do this because tmp1 (the stop address of the initial
	 * 16 bytes loop) will never be greater than the final stop address.
	 */
	tst	cursor, #0xf
	b.eq	.Lzeromem_dczva_initial_1byte_aligned_end

	/* Calculate the next address aligned to 16 bytes */
	orr	tmp2, cursor, #0xf
	add	tmp2, tmp2, #1
	/* If it overflows, fall back to the simple path (unlikely) */
	cbz	tmp2, .Lzeromem_dczva_fallback_entry
	/*
	 * The next aligned address cannot be after the stop address because
	 * the length cannot be smaller than 16 at this point.
	 */

	/* First loop: zero byte per byte */
1:
	strb	wzr, [cursor], #1
	cmp	cursor, tmp2
	b.ne	1b
.Lzeromem_dczva_initial_1byte_aligned_end:

	/*
	 * Second loop: we need to zero 16 bytes at a time from cursor to tmp1
	 * before being able to use the code that deals with block-size-aligned
	 * addresses.
	 */
	cmp	cursor, tmp1
	b.hs	2f
1:
	stp	xzr, xzr, [cursor], #16
	cmp	cursor, tmp1
	b.lo	1b
2:

	/*
	 * Third loop: zero a block at a time using the DC ZVA cache block
	 * zeroing instruction.
	 */
.Lzeromem_dczva_blocksize_aligned:
	/*
	 * Calculate the last block-size-aligned address. If the result equals
	 * the start address, the loop will exit immediately.
	 */
	bic	tmp1, stop_address, block_mask
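	/*
	 * BIC-ing with block_mask rounds down to a block boundary, e.g. with
	 * a hypothetical stop_address of 0x12f0 and block_mask of 0x3f:
	 * tmp1 = 0x12c0, the end of the last whole block.
	 */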

	cmp	cursor, tmp1
	b.hs	2f
1:
	/* Zero the block containing the cursor */
	dc	zva, cursor
	/* Increment the cursor by the size of a block */
	add	cursor, cursor, block_size
	cmp	cursor, tmp1
	b.lo	1b
2:

	/*
	 * Fourth loop: zero 16 bytes at a time and then byte per byte the
	 * remaining area
	 */
.Lzeromem_dczva_final_16bytes_aligned:
	/*
	 * Calculate the last 16-byte aligned address. It is assumed that the
	 * block size will never be smaller than 16 bytes so that the current
	 * cursor is aligned to at least a 16-byte boundary.
	 */
	bic	tmp1, stop_address, #15

	cmp	cursor, tmp1
	b.hs	2f
1:
	stp	xzr, xzr, [cursor], #16
	cmp	cursor, tmp1
	b.lo	1b
2:

	/* Fifth and final loop: zero byte per byte */
.Lzeromem_dczva_final_1byte_aligned:
	cmp	cursor, stop_address
	b.eq	2f
1:
	strb	wzr, [cursor], #1
	cmp	cursor, stop_address
	b.ne	1b
2:
	ret

	/* Fallback for unaligned start addresses */
.Lzeromem_dczva_fallback_entry:
	/*
	 * If the start address is already aligned to 16 bytes, skip this loop.
	 */
	tst	cursor, #0xf
	b.eq	.Lzeromem_dczva_final_16bytes_aligned

	/* Calculate the next address aligned to 16 bytes */
	orr	tmp1, cursor, #15
	add	tmp1, tmp1, #1
	/* If it overflows, fall back to byte per byte zeroing */
	cbz	tmp1, .Lzeromem_dczva_final_1byte_aligned
	/* If the next aligned address is after the stop address, fall back */
	cmp	tmp1, stop_address
	b.hs	.Lzeromem_dczva_final_1byte_aligned

	/* Fallback entry loop: zero byte per byte */
1:
	strb	wzr, [cursor], #1
	cmp	cursor, tmp1
	b.ne	1b

	b	.Lzeromem_dczva_final_16bytes_aligned

	.unreq	cursor
	/*
	 * length is already unreq'ed to reuse the register for another
	 * variable.
	 */
	.unreq	stop_address
	.unreq	block_size
	.unreq	block_mask
	.unreq	tmp1
	.unreq	tmp2
endfunc zeromem_dczva

/* --------------------------------------------------------------------------
 * void memcpy16(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 16-byte aligned.
 * --------------------------------------------------------------------------
 */
func memcpy16
#if ENABLE_ASSERTIONS
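	/*
	 * OR the two addresses together: if either of them is not 16-byte
	 * aligned, at least one of the low four bits of the result is set.
	 */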
	orr	x3, x0, x1
	tst	x3, #0xf
	ASM_ASSERT(eq)
#endif
/* copy 16 bytes at a time */
m_loop16:
	cmp	x2, #16
	b.lo	m_loop1
	ldp	x3, x4, [x1], #16
	stp	x3, x4, [x0], #16
	sub	x2, x2, #16
	b	m_loop16
/* copy byte per byte */
m_loop1:
	cbz	x2, m_end
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	subs	x2, x2, #1
	b.ne	m_loop1
m_end:
	ret
endfunc memcpy16

/* ---------------------------------------------------------------------------
 * Disable the MMU at EL3
 * ---------------------------------------------------------------------------
 */

func disable_mmu_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu:
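	/*
	 * Shared tail: x1 holds the set of SCTLR_EL3 bits to clear. It is
	 * also reached from disable_mmu_icache_el3 below, which additionally
	 * clears the I bit.
	 */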
	mrs	x0, sctlr_el3
	bic	x0, x0, x1
	msr	sctlr_el3, x0
	isb				// ensure MMU is off
	dsb	sy
	ret
endfunc disable_mmu_el3


func disable_mmu_icache_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu
endfunc disable_mmu_icache_el3

/* ---------------------------------------------------------------------------
 * Enable the use of VFP at EL3
 * ---------------------------------------------------------------------------
 */
#if SUPPORT_VFP
func enable_vfp
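	/* Don't trap EL0/EL1 FP or Advanced SIMD accesses (CPACR_EL1.FPEN) */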
	mrs	x0, cpacr_el1
	orr	x0, x0, #CPACR_VFP_BITS
	msr	cpacr_el1, x0
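	/* Clear CPTR_EL3.TFP so that FP accesses are not trapped to EL3 */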
	mrs	x0, cptr_el3
	mov	x1, #AARCH64_CPTR_TFP
	bic	x0, x0, x1
	msr	cptr_el3, x0
	isb
	ret
endfunc enable_vfp
#endif