/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

	.globl	smc

	.globl	zero_normalmem
	.globl	zeromem
	.globl	memcpy16

	.globl	disable_mmu_el1
	.globl	disable_mmu_el3
	.globl	disable_mmu_icache_el1
	.globl	disable_mmu_icache_el3
	.globl	fixup_gdt_reloc
#if SUPPORT_VFP
	.globl	enable_vfp
#endif

func smc
	smc	#0
endfunc smc

/* -----------------------------------------------------------------------
 * void zero_normalmem(void *mem, unsigned int length);
 *
 * Initialise a region in normal memory to 0. This function complies with the
 * AAPCS and can be called from C code.
 *
 * NOTE: MMU must be enabled when using this function as it can only operate
 *       on normal memory. It is intended mainly for use from C code, where
 *       the MMU is usually enabled.
 * -----------------------------------------------------------------------
 */
.equ	zero_normalmem, zeromem_dczva

/* -----------------------------------------------------------------------
 * void zeromem(void *mem, unsigned int length);
 *
 * Initialise a region of device memory to 0. This function complies with the
 * AAPCS and can be called from C code.
 *
 * NOTE: When data caches and MMU are enabled, zero_normalmem can usually be
 *       used instead for faster zeroing.
 *
 * -----------------------------------------------------------------------
 */
func zeromem
	/* x2 is the address past the last zeroed address */
	add	x2, x0, x1
	/*
	 * Use the fallback path, which does not use the DC ZVA instruction
	 * and therefore does not require the MMU to be enabled
	 */
	b	.Lzeromem_dczva_fallback_entry
endfunc zeromem
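
/* -----------------------------------------------------------------------
 * Illustrative only: a hedged C-side sketch of choosing between the two
 * zeroing helpers above. zero_normalmem takes the fast DC ZVA path and
 * needs the MMU on; zeromem is safe either way. The buffer and wrapper
 * function are hypothetical, not part of this file.
 *
 *	#include <stdbool.h>
 *
 *	extern void zero_normalmem(void *mem, unsigned int length);
 *	extern void zeromem(void *mem, unsigned int length);
 *
 *	static unsigned char scratch[256];
 *
 *	void clear_scratch(bool mmu_enabled)
 *	{
 *		if (mmu_enabled)
 *			zero_normalmem(scratch, sizeof(scratch));
 *		else
 *			zeromem(scratch, sizeof(scratch));
 *	}
 * -----------------------------------------------------------------------
 */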

/* -----------------------------------------------------------------------
 * void zeromem_dczva(void *mem, unsigned int length);
 *
 * Fill a region of normal memory of size "length" in bytes with null bytes.
 * The MMU must be enabled and the memory must be of normal type. This is
 * because this function internally uses the DC ZVA instruction, which
 * generates an Alignment fault if used on any type of Device memory (see
 * section D3.4.9 of the ARMv8 ARM, issue k). When the MMU is disabled, all
 * memory behaves like Device-nGnRnE memory (see section D4.2.8), hence the
 * requirement on the MMU being enabled.
 * NOTE: The code assumes that the block size as defined in the DCZID_EL0
 *       register is at least 16 bytes.
 *
 * -----------------------------------------------------------------------
 */
func zeromem_dczva

	/*
	 * The function consists of a series of loops that zero memory one byte
	 * at a time, 16 bytes at a time or using the DC ZVA instruction to
	 * zero aligned blocks, whose size is assumed to be at least 16 bytes.
	 * In the case where the DC ZVA instruction cannot be used or if the
	 * first 16 bytes loop would overflow, there is a fallback path that
	 * does not use DC ZVA.
	 * Note: The fallback path is also used by the zeromem function, which
	 *       branches to it directly.
	 *
	 *              +---------+   zeromem_dczva
	 *              |  entry  |
	 *              +----+----+
	 *                   |
	 *                   v
	 *              +---------+
	 *              | checks  |>o-------+ (If any check fails, fallback)
	 *              +----+----+         |
	 *                   |              |---------------+
	 *                   v              | Fallback path |
	 *            +------+------+       |---------------+
	 *            | 1 byte loop |       |
	 *            +------+------+ .Lzeromem_dczva_initial_1byte_aligned_end
	 *                   |              |
	 *                   v              |
	 *           +-------+-------+      |
	 *           | 16 bytes loop |      |
	 *           +-------+-------+      |
	 *                   |              |
	 *                   v              |
	 *            +------+------+ .Lzeromem_dczva_blocksize_aligned
	 *            | DC ZVA loop |       |
	 *            +------+------+       |
	 *       +--------+  |              |
	 *       |        |  |              |
	 *       |        v  v              |
	 *       |   +-------+-------+ .Lzeromem_dczva_final_16bytes_aligned
	 *       |   | 16 bytes loop |      |
	 *       |   +-------+-------+      |
	 *       |           |              |
	 *       |           v              |
	 *       |    +------+------+ .Lzeromem_dczva_final_1byte_aligned
	 *       |    | 1 byte loop |       |
	 *       |    +-------------+       |
	 *       |           |              |
	 *       |           v              |
	 *       |       +---+--+           |
	 *       |       | exit |           |
	 *       |       +------+           |
	 *       |                          |
	 *       |           +--------------+    +------------------+ zeromem
	 *       |           |  +----------------| zeromem function |
	 *       |           |  |                +------------------+
	 *       |           v  v
	 *       |    +-------------+ .Lzeromem_dczva_fallback_entry
	 *       |    | 1 byte loop |
	 *       |    +------+------+
	 *       |           |
	 *       +-----------+
	 */

	/*
	 * Readable names for registers
	 *
	 * Registers x0, x1 and x2 are also set by zeromem which
	 * branches into the fallback path directly, so cursor, length and
	 * stop_address should not be retargeted to other registers.
	 */
	cursor       .req x0 /* Start address and then current address */
	length       .req x1 /* Length in bytes of the region to zero out */
	/* Reusing x1 as length is never used after block_mask is set */
	block_mask   .req x1 /* Bitmask of the block size read in DCZID_EL0 */
	stop_address .req x2 /* Address past the last zeroed byte */
	block_size   .req x3 /* Size of a block in bytes as read in DCZID_EL0 */
	tmp1         .req x4
	tmp2         .req x5

#if ENABLE_ASSERTIONS
	/*
	 * Check for M bit (MMU enabled) of the current SCTLR_EL(1|3)
	 * register value and panic if the MMU is disabled.
	 */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
	mrs	tmp1, sctlr_el3
#else
	mrs	tmp1, sctlr_el1
#endif

	tst	tmp1, #SCTLR_M_BIT
	ASM_ASSERT(ne)
#endif /* ENABLE_ASSERTIONS */

	/* stop_address is the address past the last to zero */
	add	stop_address, cursor, length

	/*
	 * Get the block size encoding: bits [3:0] of dczid_el0 hold
	 * log2(<block size in words>) (see the encoding of the dczid_el0
	 * register)
	 */
	mrs	block_size, dczid_el0

	/*
	 * Select the 4 lowest bits and convert the extracted log2(<block size
	 * in words>) to <block size in bytes>
	 */
	ubfx	block_size, block_size, #0, #4
	mov	tmp2, #(1 << 2)
	lsl	block_size, tmp2, block_size
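	/*
	 * Equivalent C sketch of the three instructions above (the sysreg
	 * accessor name is an assumption, not defined in this file):
	 *	unsigned int block_size = 4U << (read_dczid_el0() & 0xfU);
	 * The BS field holds log2 of the block size in 4-byte words, so the
	 * common value 4 yields a 64-byte block.
	 */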

#if ENABLE_ASSERTIONS
	/*
	 * Assumes block size is at least 16 bytes to avoid manual realignment
	 * of the cursor at the end of the DCZVA loop.
	 */
	cmp	block_size, #16
	ASM_ASSERT(hs)
#endif
	/*
	 * It is not worth doing all the setup for a region smaller than a
	 * block, and this check protects against zeroing a whole block when
	 * the area to zero is smaller than that. Also, as the block size is
	 * assumed to be at least 16 bytes, this protects the initial aligning
	 * loops from trying to zero 16 bytes when length is less than 16.
	 */
	cmp	length, block_size
	b.lo	.Lzeromem_dczva_fallback_entry

	/*
	 * Calculate the bitmask of the block alignment. It will never
	 * underflow as the block size is between 4 bytes and 2kB.
	 * block_mask = block_size - 1
	 */
	sub	block_mask, block_size, #1

	/*
	 * length alias should not be used after this point unless it is
	 * defined as a register other than block_mask's.
	 */
	 .unreq length

	/*
	 * If the start address is already aligned to zero block size, go
	 * straight to the cache zeroing loop. This is safe because at this
	 * point, the length cannot be smaller than a block size.
	 */
	tst	cursor, block_mask
	b.eq	.Lzeromem_dczva_blocksize_aligned

	/*
	 * Calculate the first block-size-aligned address. It is assumed that
	 * the zero block size is at least 16 bytes. This address is the last
	 * address of this initial loop.
	 */
	orr	tmp1, cursor, block_mask
	add	tmp1, tmp1, #1
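	/*
	 * In C terms: tmp1 = (cursor | block_mask) + 1, the usual
	 * power-of-two round-up idiom. Cursor is known to be unaligned here,
	 * so this yields the next block_size boundary above it.
	 */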

	/*
	 * If the addition overflows, skip the cache zeroing loops. This is
	 * quite unlikely however.
	 */
	cbz	tmp1, .Lzeromem_dczva_fallback_entry

	/*
	 * If the first block-size-aligned address is past the last address,
	 * fall back to the simpler code.
	 */
	cmp	tmp1, stop_address
	b.hi	.Lzeromem_dczva_fallback_entry

	/*
	 * If the start address is already aligned to 16 bytes, skip this loop.
	 * It is safe to do this because tmp1 (the stop address of the initial
	 * 16 bytes loop) will never be greater than the final stop address.
	 */
	tst	cursor, #0xf
	b.eq	.Lzeromem_dczva_initial_1byte_aligned_end

	/* Calculate the next address aligned to 16 bytes */
	orr	tmp2, cursor, #0xf
	add	tmp2, tmp2, #1
	/* If it overflows, fall back to the simple path (unlikely) */
	cbz	tmp2, .Lzeromem_dczva_fallback_entry
	/*
	 * Next aligned address cannot be after the stop address because the
	 * length cannot be smaller than 16 at this point.
	 */

	/* First loop: zero byte per byte */
1:
	strb	wzr, [cursor], #1
	cmp	cursor, tmp2
	b.ne	1b
.Lzeromem_dczva_initial_1byte_aligned_end:

	/*
	 * Second loop: we need to zero 16 bytes at a time from cursor to tmp1
	 * before being able to use the code that deals with block-size-aligned
	 * addresses.
	 */
	cmp	cursor, tmp1
	b.hs	2f
1:
	stp	xzr, xzr, [cursor], #16
	cmp	cursor, tmp1
	b.lo	1b
2:

	/*
	 * Third loop: zero a block at a time using DC ZVA cache block zeroing
	 * instruction.
	 */
.Lzeromem_dczva_blocksize_aligned:
	/*
	 * Calculate the last block-size-aligned address. If the result equals
	 * the start address, the loop will exit immediately.
	 */
	bic	tmp1, stop_address, block_mask

	cmp	cursor, tmp1
	b.hs	2f
1:
	/* Zero the block containing the cursor */
	dc	zva, cursor
	/* Increment the cursor by the size of a block */
	add	cursor, cursor, block_size
	cmp	cursor, tmp1
	b.lo	1b
2:

	/*
	 * Fourth loop: zero 16 bytes at a time and then byte per byte the
	 * remaining area
	 */
.Lzeromem_dczva_final_16bytes_aligned:
	/*
	 * Calculate the last 16-byte-aligned address. It is assumed that the
	 * block size will never be smaller than 16 bytes, so that the current
	 * cursor is aligned to at least a 16-byte boundary.
	 */
	bic	tmp1, stop_address, #15

	cmp	cursor, tmp1
	b.hs	2f
1:
	stp	xzr, xzr, [cursor], #16
	cmp	cursor, tmp1
	b.lo	1b
2:

	/* Fifth and final loop: zero byte per byte */
.Lzeromem_dczva_final_1byte_aligned:
	cmp	cursor, stop_address
	b.eq	2f
1:
	strb	wzr, [cursor], #1
	cmp	cursor, stop_address
	b.ne	1b
2:
	ret

	/* Fallback for unaligned start addresses */
.Lzeromem_dczva_fallback_entry:
	/*
	 * If the start address is already aligned to 16 bytes, skip this loop.
	 */
	tst	cursor, #0xf
	b.eq	.Lzeromem_dczva_final_16bytes_aligned

	/* Calculate the next address aligned to 16 bytes */
	orr	tmp1, cursor, #15
	add	tmp1, tmp1, #1
	/* If it overflows, fall back to byte-per-byte zeroing */
	cbz	tmp1, .Lzeromem_dczva_final_1byte_aligned
	/* If the next aligned address is after the stop address, fall back */
	cmp	tmp1, stop_address
	b.hs	.Lzeromem_dczva_final_1byte_aligned

	/* Fallback entry loop: zero byte per byte */
1:
	strb	wzr, [cursor], #1
	cmp	cursor, tmp1
	b.ne	1b

	b	.Lzeromem_dczva_final_16bytes_aligned

	.unreq	cursor
	/*
	 * length is already unreq'ed to reuse the register for another
	 * variable.
	 */
	.unreq	stop_address
	.unreq	block_size
	.unreq	block_mask
	.unreq	tmp1
	.unreq	tmp2
endfunc zeromem_dczva

/* --------------------------------------------------------------------------
 * void memcpy16(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 16-byte aligned.
 * --------------------------------------------------------------------------
 */
func memcpy16
#if ENABLE_ASSERTIONS
	orr	x3, x0, x1
	tst	x3, #0xf
	ASM_ASSERT(eq)
#endif
/* copy 16 bytes at a time */
m_loop16:
	cmp	x2, #16
	b.lo	m_loop1
	ldp	x3, x4, [x1], #16
	stp	x3, x4, [x0], #16
	sub	x2, x2, #16
	b	m_loop16
/* copy byte per byte */
m_loop1:
	cbz	x2, m_end
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	subs	x2, x2, #1
	b.ne	m_loop1
m_end:
	ret
endfunc memcpy16
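
/* --------------------------------------------------------------------------
 * Illustrative only: a hedged C-side sketch of calling memcpy16. The 16-byte
 * alignment of both pointers is what the assertion above checks; the buffers
 * shown are hypothetical.
 *
 *	extern void memcpy16(void *dest, const void *src, unsigned int length);
 *
 *	static unsigned char src_buf[64] __attribute__((aligned(16)));
 *	static unsigned char dst_buf[64] __attribute__((aligned(16)));
 *
 *	void copy_example(void)
 *	{
 *		memcpy16(dst_buf, src_buf, sizeof(src_buf));
 *	}
 * --------------------------------------------------------------------------
 */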

/* ---------------------------------------------------------------------------
 * Disable the MMU at EL3
 * ---------------------------------------------------------------------------
 */

func disable_mmu_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu_el3:
	mrs	x0, sctlr_el3
	bic	x0, x0, x1
	msr	sctlr_el3, x0
	isb	/* ensure MMU is off */
	dsb	sy
	ret
endfunc disable_mmu_el3


func disable_mmu_icache_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu_el3
endfunc disable_mmu_icache_el3

/* ---------------------------------------------------------------------------
 * Disable the MMU at EL1
 * ---------------------------------------------------------------------------
 */

func disable_mmu_el1
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu_el1:
	mrs	x0, sctlr_el1
	bic	x0, x0, x1
	msr	sctlr_el1, x0
	isb	/* ensure MMU is off */
	dsb	sy
	ret
endfunc disable_mmu_el1


func disable_mmu_icache_el1
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu_el1
endfunc disable_mmu_icache_el1
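
/* ---------------------------------------------------------------------------
 * For reference, the C-level view of the four MMU helpers above, as a
 * sketch (the real declarations live in the arch helpers header; treat
 * these prototypes as assumptions):
 *	void disable_mmu_el1(void);
 *	void disable_mmu_icache_el1(void);
 *	void disable_mmu_el3(void);
 *	void disable_mmu_icache_el3(void);
 * ---------------------------------------------------------------------------
 */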

/* ---------------------------------------------------------------------------
 * Enable the use of VFP at EL3
 * ---------------------------------------------------------------------------
 */
#if SUPPORT_VFP
func enable_vfp
	mrs	x0, cpacr_el1
	orr	x0, x0, #CPACR_VFP_BITS
	msr	cpacr_el1, x0
	mrs	x0, cptr_el3
	mov	x1, #AARCH64_CPTR_TFP
	bic	x0, x0, x1
	msr	cptr_el3, x0
	isb
	ret
endfunc enable_vfp
#endif

/* ---------------------------------------------------------------------------
 * Helper to fix up the Global Offset Table (GOT) and dynamic relocations
 * (.rela.dyn) at runtime.
 *
 * This function is meant to be used when the firmware is compiled with -fpie
 * and linked with -pie options. We rely on the linker script exporting
 * appropriate markers for the start and end of the section. For the GOT, we
 * expect __GOT_START__ and __GOT_END__. Similarly for .rela.dyn, we expect
 * __RELA_START__ and __RELA_END__.
 *
 * The function takes the limits of the memory to apply fixups to as
 * arguments (which are usually the limits of the relocatable BL image).
 *   x0 -  the start of the fixup region
 *   x1 -  the limit of the fixup region
 * These addresses have to be page (4KB) aligned.
 * ---------------------------------------------------------------------------
 */
func fixup_gdt_reloc
	mov	x6, x0
	mov	x7, x1

	/* Test if the limits are 4K aligned */
#if ENABLE_ASSERTIONS
	orr	x0, x0, x1
	tst	x0, #(PAGE_SIZE_MASK)
	ASM_ASSERT(eq)
#endif
	/*
	 * Calculate the offset based on the return address in x30.
	 * Assume that this function is called within a page at the start of
	 * the fixup region.
	 */
	and	x2, x30, #~(PAGE_SIZE_MASK)
	sub	x0, x2, x6	/* Diff(S) = Current Address - Compiled Address */

	adrp	x1, __GOT_START__
	add	x1, x1, :lo12:__GOT_START__
	adrp	x2, __GOT_END__
	add	x2, x2, :lo12:__GOT_END__

	/*
	 * GOT is an array of 64-bit addresses which must be fixed up as
	 * new_addr = old_addr + Diff(S).
	 * new_addr is the address the binary is currently executing from,
	 * and old_addr is the address at compile time.
	 */
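	/*
	 * C sketch of the GOT fixup loop below (names are illustrative;
	 * start/limit are x6/x7 and diff is Diff(S) in x0):
	 *	for (uint64_t *got = got_start; got < got_end; got++) {
	 *		if (*got >= start && *got < limit)
	 *			*got += diff;
	 *	}
	 */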
1:
	ldr	x3, [x1]
	/* Skip adding offset if address is < lower limit */
	cmp	x3, x6
	b.lo	2f
	/* Skip adding offset if address is >= upper limit */
	cmp	x3, x7
	b.hs	2f	/* unsigned compare: these are addresses */
	add	x3, x3, x0
	str	x3, [x1]
2:
	add	x1, x1, #8
	cmp	x1, x2
	b.lo	1b

	/* Starting dynamic relocations. Use adrp/add to get RELA_START and END */
	adrp	x1, __RELA_START__
	add	x1, x1, :lo12:__RELA_START__
	adrp	x2, __RELA_END__
	add	x2, x2, :lo12:__RELA_END__
	/*
	 * According to the ELF-64 specification, the RELA data structure is
	 * as follows:
	 *	typedef struct {
	 *		Elf64_Addr r_offset;
	 *		Elf64_Xword r_info;
	 *		Elf64_Sxword r_addend;
	 *	} Elf64_Rela;
	 *
	 * r_offset is the address of the reference.
	 * r_info is the symbol index and the type of relocation (in this case
	 * 0x403, which corresponds to R_AARCH64_RELATIVE).
	 * r_addend is the constant part of the expression.
	 *
	 * The size of the Elf64_Rela structure is 24 bytes.
	 */
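	/*
	 * C sketch of the fixup applied below for each R_AARCH64_RELATIVE
	 * entry (names are illustrative; start/limit are x6/x7 and diff is
	 * Diff(S) in x0):
	 *	if (rela->r_addend >= start && rela->r_addend < limit)
	 *		*(uint64_t *)(rela->r_offset + diff) =
	 *			rela->r_addend + diff;
	 */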
1:
	/* Assert that the relocation type is R_AARCH64_RELATIVE */
#if ENABLE_ASSERTIONS
	ldr	x3, [x1, #8]
	cmp	x3, #0x403
	ASM_ASSERT(eq)
#endif
	ldr	x3, [x1]	/* r_offset */
	add	x3, x0, x3
	ldr	x4, [x1, #16]	/* r_addend */

	/* Skip adding offset if r_addend is < lower limit */
	cmp	x4, x6
	b.lo	2f
	/* Skip adding offset if r_addend entry is >= upper limit */
	cmp	x4, x7
	b.hs	2f	/* unsigned compare: these are addresses */

	add	x4, x0, x4	/* Diff(S) + r_addend */
	str	x4, [x3]

2:	add	x1, x1, #24
	cmp	x1, x2
	b.lo	1b

	ret
endfunc fixup_gdt_reloc