xref: /rk3399_ARM-atf/lib/aarch64/misc_helpers.S (revision 6c09af9f8b36cdfa1dc4d5052f7e4792f63fa88a)
/*
 * Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

	.globl	smc

	.globl	zero_normalmem
	.globl	zeromem
	.globl	memcpy16
	.globl	gpt_tlbi_by_pa

	.globl	disable_mmu_el1
	.globl	disable_mmu_el3
	.globl	disable_mmu_icache_el1
	.globl	disable_mmu_icache_el3
	.globl	fixup_gdt_reloc
#if SUPPORT_VFP
	.globl	enable_vfp
#endif

func smc
	smc	#0
endfunc smc

/* -----------------------------------------------------------------------
 * void zero_normalmem(void *mem, unsigned int length);
 *
 * Initialise a region of normal memory to 0. This function complies with the
 * AAPCS and can be called from C code.
 *
 * NOTE: The MMU must be enabled when using this function as it can only
 *       operate on normal memory. It is intended to be used mainly from C
 *       code, where the MMU is usually enabled.
 * -----------------------------------------------------------------------
 */
.equ	zero_normalmem, zeromem_dczva

/* -----------------------------------------------------------------------
 * void zeromem(void *mem, unsigned int length);
 *
 * Initialise a region of device memory to 0. This function complies with the
 * AAPCS and can be called from C code.
 *
 * NOTE: When data caches and MMU are enabled, zero_normalmem can usually be
 *       used instead for faster zeroing.
 *
 * -----------------------------------------------------------------------
 */
func zeromem
	/* x2 is the address past the last zeroed address */
	add	x2, x0, x1
	/*
	 * Uses the fallback path that does not use the DC ZVA instruction and
	 * therefore does not require the MMU to be enabled.
	 */
	b	.Lzeromem_dczva_fallback_entry
endfunc zeromem
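
/*
 * Illustrative usage from C (a sketch only, not part of this file; the
 * scratch buffer below is hypothetical):
 *
 *   extern void zeromem(void *mem, unsigned int length);
 *   extern void zero_normalmem(void *mem, unsigned int length);
 *
 *   static unsigned char scratch[256];
 *
 *   zeromem(scratch, sizeof(scratch));         // works with the MMU off
 *   zero_normalmem(scratch, sizeof(scratch));  // faster, requires the MMU on
 */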

/* -----------------------------------------------------------------------
 * void zeromem_dczva(void *mem, unsigned int length);
 *
 * Fill a region of normal memory of size "length" in bytes with null bytes.
 * The MMU must be enabled and the memory must be of normal type. This is
 * because this function internally uses the DC ZVA instruction, which
 * generates an Alignment fault if used on any type of Device memory (see
 * section D3.4.9 of the ARMv8 ARM, issue k). When the MMU is disabled, all
 * memory behaves like Device-nGnRnE memory (see section D4.2.8), hence the
 * requirement on the MMU being enabled.
 * NOTE: The code assumes that the block size as defined in the DCZID_EL0
 *       register is at least 16 bytes.
 *
 * -----------------------------------------------------------------------
 */
func zeromem_dczva

	/*
	 * The function consists of a series of loops that zero memory one byte
	 * at a time, 16 bytes at a time, or using the DC ZVA instruction to
	 * zero aligned blocks of bytes, whose size is assumed to be at least
	 * 16 bytes. In the case where the DC ZVA instruction cannot be used or
	 * where the first 16-byte loop would overflow, there is a fallback
	 * path that does not use DC ZVA.
	 * Note: The fallback path is also used by the zeromem function, which
	 *       branches to it directly.
	 *
	 *              +---------+   zeromem_dczva
	 *              |  entry  |
	 *              +----+----+
	 *                   |
	 *                   v
	 *              +---------+
	 *              | checks  |>o-------+ (If any check fails, fallback)
	 *              +----+----+         |
	 *                   |              |---------------+
	 *                   v              | Fallback path |
	 *            +------+------+       |---------------+
	 *            | 1 byte loop |       |
	 *            +------+------+ .Lzeromem_dczva_initial_1byte_aligned_end
	 *                   |              |
	 *                   v              |
	 *           +-------+-------+      |
	 *           | 16 bytes loop |      |
	 *           +-------+-------+      |
	 *                   |              |
	 *                   v              |
	 *            +------+------+ .Lzeromem_dczva_blocksize_aligned
	 *            | DC ZVA loop |       |
	 *            +------+------+       |
	 *       +--------+  |              |
	 *       |        |  |              |
	 *       |        v  v              |
	 *       |   +-------+-------+ .Lzeromem_dczva_final_16bytes_aligned
	 *       |   | 16 bytes loop |      |
	 *       |   +-------+-------+      |
	 *       |           |              |
	 *       |           v              |
	 *       |    +------+------+ .Lzeromem_dczva_final_1byte_aligned
	 *       |    | 1 byte loop |       |
	 *       |    +-------------+       |
	 *       |           |              |
	 *       |           v              |
	 *       |       +---+--+           |
	 *       |       | exit |           |
	 *       |       +------+           |
	 *       |                          |
	 *       |           +--------------+    +------------------+ zeromem
	 *       |           |  +----------------| zeromem function |
	 *       |           |  |                +------------------+
	 *       |           v  v
	 *       |    +-------------+ .Lzeromem_dczva_fallback_entry
	 *       |    | 1 byte loop |
	 *       |    +------+------+
	 *       |           |
	 *       +-----------+
	 */

	/*
	 * Readable names for registers
	 *
	 * Registers x0, x1 and x2 are also set by zeromem, which branches
	 * into the fallback path directly, so cursor, length and
	 * stop_address should not be retargeted to other registers.
	 */
	cursor       .req x0 /* Start address and then current address */
	length       .req x1 /* Length in bytes of the region to zero out */
	/* Reusing x1, as length is never used after block_mask is set */
	block_mask   .req x1 /* Bitmask of the block size read in DCZID_EL0 */
	stop_address .req x2 /* Address past the last zeroed byte */
	block_size   .req x3 /* Size of a block in bytes as read in DCZID_EL0 */
	tmp1         .req x4
	tmp2         .req x5

#if ENABLE_ASSERTIONS
	/*
	 * Check for M bit (MMU enabled) of the current SCTLR_EL(1|3)
	 * register value and panic if the MMU is disabled.
	 */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && \
	(BL2_AT_EL3 || ENABLE_RME))
	mrs	tmp1, sctlr_el3
#else
	mrs	tmp1, sctlr_el1
#endif

	tst	tmp1, #SCTLR_M_BIT
	ASM_ASSERT(ne)
#endif /* ENABLE_ASSERTIONS */

	/* stop_address is the address past the last byte to zero */
	add	stop_address, cursor, length

	/*
	 * Read dczid_el0 into block_size: its four lowest bits hold
	 * log2(<block size in words>) (see the encoding of the dczid_el0
	 * register).
	 */
	mrs	block_size, dczid_el0

	/*
	 * Select the 4 lowest bits and convert the extracted log2(<block size
	 * in words>) to <block size in bytes>
	 */
	ubfx	block_size, block_size, #0, #4
	mov	tmp2, #(1 << 2)
	lsl	block_size, tmp2, block_size
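	/*
	 * Worked example (illustrative, assuming a typical 64-byte zero
	 * block): DCZID_EL0.BS = 4 means 2^4 = 16 words per block, and the
	 * sequence above computes (1 << 2) << 4 = 64 bytes in block_size.
	 */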

#if ENABLE_ASSERTIONS
	/*
	 * Assumes block size is at least 16 bytes to avoid manual realignment
	 * of the cursor at the end of the DCZVA loop.
	 */
	cmp	block_size, #16
	ASM_ASSERT(hs)
#endif
	/*
	 * It is not worth doing all the setup for a region smaller than a
	 * block; bailing out here also protects against zeroing a whole block
	 * when the area to zero is smaller than that. In addition, as the
	 * block size is assumed to be at least 16 bytes, this protects the
	 * initial alignment loops from trying to zero 16 bytes when length is
	 * less than 16.
	 */
	cmp	length, block_size
	b.lo	.Lzeromem_dczva_fallback_entry

	/*
	 * Calculate the bitmask of the block alignment. It will never
	 * underflow as the block size is between 4 bytes and 2kB.
	 * block_mask = block_size - 1
	 */
	sub	block_mask, block_size, #1

	/*
	 * length alias should not be used after this point unless it is
	 * defined as a register other than block_mask's.
	 */
	 .unreq length

	/*
	 * If the start address is already aligned to zero block size, go
	 * straight to the cache zeroing loop. This is safe because at this
	 * point, the length cannot be smaller than a block size.
	 */
	tst	cursor, block_mask
	b.eq	.Lzeromem_dczva_blocksize_aligned

	/*
	 * Calculate the first block-size-aligned address. It is assumed that
	 * the zero block size is at least 16 bytes. This address is the last
	 * address of this initial loop.
	 */
	orr	tmp1, cursor, block_mask
	add	tmp1, tmp1, #1
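	/*
	 * Worked example with illustrative values: if cursor = 0x1007 and the
	 * zero block is 64 bytes (block_mask = 0x3f), the ORR yields 0x103f
	 * and the ADD rounds it up to 0x1040, the first block-aligned address.
	 */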

	/*
	 * If the addition overflows, skip the cache zeroing loops. This is
	 * quite unlikely however.
	 */
	cbz	tmp1, .Lzeromem_dczva_fallback_entry

	/*
	 * If the first block-size-aligned address is past the last address,
	 * fall back to the simpler code.
	 */
	cmp	tmp1, stop_address
	b.hi	.Lzeromem_dczva_fallback_entry

	/*
	 * If the start address is already aligned to 16 bytes, skip this loop.
	 * It is safe to do this because tmp1 (the stop address of the initial
	 * 16 bytes loop) will never be greater than the final stop address.
	 */
	tst	cursor, #0xf
	b.eq	.Lzeromem_dczva_initial_1byte_aligned_end

	/* Calculate the next address aligned to 16 bytes */
	orr	tmp2, cursor, #0xf
	add	tmp2, tmp2, #1
	/* If it overflows, fall back to the simple path (unlikely) */
	cbz	tmp2, .Lzeromem_dczva_fallback_entry
	/*
	 * Next aligned address cannot be after the stop address because the
	 * length cannot be smaller than 16 at this point.
	 */

	/* First loop: zero byte per byte */
1:
	strb	wzr, [cursor], #1
	cmp	cursor, tmp2
	b.ne	1b
.Lzeromem_dczva_initial_1byte_aligned_end:

	/*
	 * Second loop: we need to zero 16 bytes at a time from cursor to tmp1
	 * before being able to use the code that deals with block-size-aligned
	 * addresses.
	 */
	cmp	cursor, tmp1
	b.hs	2f
1:
	stp	xzr, xzr, [cursor], #16
	cmp	cursor, tmp1
	b.lo	1b
2:

	/*
	 * Third loop: zero a block at a time using DC ZVA cache block zeroing
	 * instruction.
	 */
.Lzeromem_dczva_blocksize_aligned:
	/*
	 * Calculate the last block-size-aligned address. If the result equals
	 * the start address, the loop will exit immediately.
	 */
	bic	tmp1, stop_address, block_mask

	cmp	cursor, tmp1
	b.hs	2f
1:
	/* Zero the block containing the cursor */
	dc	zva, cursor
	/* Increment the cursor by the size of a block */
	add	cursor, cursor, block_size
	cmp	cursor, tmp1
	b.lo	1b
2:

	/*
	 * Fourth loop: zero 16 bytes at a time and then byte per byte the
	 * remaining area
	 */
.Lzeromem_dczva_final_16bytes_aligned:
	/*
	 * Calculate the last 16-byte-aligned address. It is assumed that the
	 * block size will never be smaller than 16 bytes, so that the current
	 * cursor is aligned to at least a 16-byte boundary.
	 */
	bic	tmp1, stop_address, #15

	cmp	cursor, tmp1
	b.hs	2f
1:
	stp	xzr, xzr, [cursor], #16
	cmp	cursor, tmp1
	b.lo	1b
2:

	/* Fifth and final loop: zero byte per byte */
.Lzeromem_dczva_final_1byte_aligned:
	cmp	cursor, stop_address
	b.eq	2f
1:
	strb	wzr, [cursor], #1
	cmp	cursor, stop_address
	b.ne	1b
2:
	ret

	/* Fallback for unaligned start addresses */
.Lzeromem_dczva_fallback_entry:
	/*
	 * If the start address is already aligned to 16 bytes, skip this loop.
	 */
	tst	cursor, #0xf
	b.eq	.Lzeromem_dczva_final_16bytes_aligned

	/* Calculate the next address aligned to 16 bytes */
	orr	tmp1, cursor, #15
	add	tmp1, tmp1, #1
	/* If it overflows, fall back to byte per byte zeroing */
	cbz	tmp1, .Lzeromem_dczva_final_1byte_aligned
	/* If the next aligned address is after the stop address, fall back */
	cmp	tmp1, stop_address
	b.hs	.Lzeromem_dczva_final_1byte_aligned

	/* Fallback entry loop: zero byte per byte */
1:
	strb	wzr, [cursor], #1
	cmp	cursor, tmp1
	b.ne	1b

	b	.Lzeromem_dczva_final_16bytes_aligned

	.unreq	cursor
	/*
	 * length is already unreq'ed to reuse the register for another
	 * variable.
	 */
	.unreq	stop_address
	.unreq	block_size
	.unreq	block_mask
	.unreq	tmp1
	.unreq	tmp2
endfunc zeromem_dczva

/* --------------------------------------------------------------------------
 * void memcpy16(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 16-byte aligned.
 * --------------------------------------------------------------------------
 */
func memcpy16
#if ENABLE_ASSERTIONS
	orr	x3, x0, x1
	tst	x3, #0xf
	ASM_ASSERT(eq)
#endif
/* copy 16 bytes at a time */
m_loop16:
	cmp	x2, #16
	b.lo	m_loop1
	ldp	x3, x4, [x1], #16
	stp	x3, x4, [x0], #16
	sub	x2, x2, #16
	b	m_loop16
/* copy byte per byte */
m_loop1:
	cbz	x2, m_end
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	subs	x2, x2, #1
	b.ne	m_loop1
m_end:
	ret
endfunc memcpy16
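
/*
 * Illustrative call from C (a sketch only; the 16-byte aligned buffers below
 * are hypothetical and not part of this file):
 *
 *   extern void memcpy16(void *dest, const void *src, unsigned int length);
 *
 *   static uint64_t src_buf[4] __attribute__((aligned(16)));
 *   static uint64_t dst_buf[4] __attribute__((aligned(16)));
 *
 *   memcpy16(dst_buf, src_buf, sizeof(src_buf));
 */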

/* ---------------------------------------------------------------------------
 * Disable the MMU at EL3
 * ---------------------------------------------------------------------------
 */

func disable_mmu_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu_el3:
	mrs	x0, sctlr_el3
	bic	x0, x0, x1
	msr	sctlr_el3, x0
	isb	/* ensure MMU is off */
	dsb	sy
	ret
endfunc disable_mmu_el3


func disable_mmu_icache_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu_el3
endfunc disable_mmu_icache_el3

/* ---------------------------------------------------------------------------
 * Disable the MMU at EL1
 * ---------------------------------------------------------------------------
 */

func disable_mmu_el1
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu_el1:
	mrs	x0, sctlr_el1
	bic	x0, x0, x1
	msr	sctlr_el1, x0
	isb	/* ensure MMU is off */
	dsb	sy
	ret
endfunc disable_mmu_el1


func disable_mmu_icache_el1
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu_el1
endfunc disable_mmu_icache_el1

/* ---------------------------------------------------------------------------
 * Enable the use of VFP at EL3
 * ---------------------------------------------------------------------------
 */
#if SUPPORT_VFP
func enable_vfp
	mrs	x0, cpacr_el1
	orr	x0, x0, #CPACR_VFP_BITS
	msr	cpacr_el1, x0
	mrs	x0, cptr_el3
	mov	x1, #AARCH64_CPTR_TFP
	bic	x0, x0, x1
	msr	cptr_el3, x0
	isb
	ret
endfunc enable_vfp
#endif

/* ---------------------------------------------------------------------------
 * Helper to fixup the Global Offset Table (GOT) and dynamic relocations
 * (.rela.dyn) at runtime.
 *
 * This function is meant to be used when the firmware is compiled with -fpie
 * and linked with -pie options. We rely on the linker script exporting
 * appropriate markers for the start and end of each section. For the GOT, we
 * expect __GOT_START__ and __GOT_END__. Similarly for .rela.dyn, we expect
 * __RELA_START__ and __RELA_END__.
 *
 * The function takes the limits of the memory to apply fixups to as
 * arguments (which are usually the limits of the relocatable BL image).
 *   x0 -  the start of the fixup region
 *   x1 -  the limit of the fixup region
 * These addresses have to be 4KB page aligned.
 * ---------------------------------------------------------------------------
 */
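
/*
 * Illustrative call site (a sketch only; real callers live in the BL
 * entrypoint code and pass their own image limits, so the BL31_BASE and
 * BL31_LIMIT symbols below are assumptions):
 *
 *   mov_imm	x0, BL31_BASE
 *   mov_imm	x1, BL31_LIMIT
 *   bl	fixup_gdt_reloc
 */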

/* Relocation codes */
#define	R_AARCH64_NONE		0
#define	R_AARCH64_RELATIVE	1027

func fixup_gdt_reloc
	mov	x6, x0
	mov	x7, x1

#if ENABLE_ASSERTIONS
	/* Test if the limits are 4KB aligned */
	orr	x0, x0, x1
	tst	x0, #(PAGE_SIZE_MASK)
	ASM_ASSERT(eq)
#endif
	/*
	 * Calculate the offset based on the return address in x30.
	 * Assume that this function is called from within the first page of
	 * the fixup region.
	 */
	and	x2, x30, #~(PAGE_SIZE_MASK)
	subs	x0, x2, x6	/* Diff(S) = Current Address - Compiled Address */
	b.eq	3f		/* Diff(S) = 0. No relocation needed */
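
	/*
	 * Illustrative example (hypothetical addresses): if the image was
	 * linked to run at 0x04001000 (so x6 = 0x04001000) but the page-
	 * aligned return address is 0x80001000, then Diff(S) = 0x7c000000 and
	 * every in-range GOT entry and R_AARCH64_RELATIVE relocation below is
	 * shifted by that amount.
	 */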

	adrp	x1, __GOT_START__
	add	x1, x1, :lo12:__GOT_START__
	adrp	x2, __GOT_END__
	add	x2, x2, :lo12:__GOT_END__

	/*
	 * The GOT is an array of 64-bit addresses which must be fixed up as
	 * new_addr = old_addr + Diff(S).
	 * new_addr is the address the binary is currently executing from
	 * and old_addr is the address at compile time.
	 */
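	/*
	 * Equivalent C sketch of the loop below (illustrative only; "diff",
	 * "region_start" and "region_limit" stand for x0, x6 and x7):
	 *
	 *   for (uint64_t *entry = __GOT_START__; entry < __GOT_END__; entry++) {
	 *       if (*entry >= region_start && *entry < region_limit)
	 *           *entry += diff;
	 *   }
	 */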
1:	ldr	x3, [x1]

	/* Skip adding offset if address is < lower limit */
	cmp	x3, x6
	b.lo	2f

	/* Skip adding offset if address is >= upper limit */
	cmp	x3, x7
	b.hs	2f
	add	x3, x3, x0
	str	x3, [x1]

2:	add	x1, x1, #8
	cmp	x1, x2
	b.lo	1b

	/* Starting dynamic relocations. Use adrp/add to get RELA_START and END */
3:	adrp	x1, __RELA_START__
	add	x1, x1, :lo12:__RELA_START__
	adrp	x2, __RELA_END__
	add	x2, x2, :lo12:__RELA_END__

	/*
	 * According to the ELF-64 specification, the RELA data structure is
	 * as follows:
	 *	typedef struct {
	 *		Elf64_Addr r_offset;
	 *		Elf64_Xword r_info;
	 *		Elf64_Sxword r_addend;
	 *	} Elf64_Rela;
	 *
	 * r_offset is the address of the reference.
	 * r_info is the symbol index and the type of relocation (in this case
	 * code 1027, which corresponds to R_AARCH64_RELATIVE).
	 * r_addend is the constant part of the expression.
	 *
	 * The size of the Elf64_Rela structure is 24 bytes.
	 */
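
	/*
	 * Equivalent C sketch of the loop below (illustrative only; "diff",
	 * "region_start" and "region_limit" stand for x0, x6 and x7):
	 *
	 *   for (Elf64_Rela *r = __RELA_START__; r < __RELA_END__; r++) {
	 *       if (r->r_info == R_AARCH64_NONE)
	 *           continue;	// only R_AARCH64_RELATIVE entries expected
	 *       if (r->r_addend >= region_start && r->r_addend < region_limit)
	 *           *(uint64_t *)(r->r_offset + diff) = r->r_addend + diff;
	 *   }
	 */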

	/* Skip R_AARCH64_NONE entry with code 0 */
1:	ldr	x3, [x1, #8]
	cbz	x3, 2f

#if ENABLE_ASSERTIONS
	/* Assert that the relocation type is R_AARCH64_RELATIVE */
	cmp	x3, #R_AARCH64_RELATIVE
	ASM_ASSERT(eq)
#endif
	ldr	x3, [x1]	/* r_offset */
	add	x3, x0, x3
	ldr	x4, [x1, #16]	/* r_addend */

	/* Skip adding offset if r_addend is < lower limit */
	cmp	x4, x6
	b.lo	2f

	/* Skip adding offset if r_addend entry is >= upper limit */
	cmp	x4, x7
	b.hs	2f

	add	x4, x0, x4	/* Diff(S) + r_addend */
	str	x4, [x3]

2:	add	x1, x1, #24
	cmp	x1, x2
	b.lo	1b
	ret
endfunc fixup_gdt_reloc

/*
 * TODO: Currently only a size of 4KB is supported;
 * add support for other sizes as well.
 */
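/*
 * void gpt_tlbi_by_pa(uint64_t pa, size_t size);
 *
 * Invalidate any TLB-cached granule protection information for the 4KB
 * granule containing the given physical address, using TLBI RPAOS as below.
 * The C prototype above is an illustrative sketch (the exact declaration
 * lives with the callers); as asserted below, "pa" must be 4KB aligned and
 * "size" must be PAGE_SIZE_4KB.
 */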
func gpt_tlbi_by_pa
#if ENABLE_ASSERTIONS
	cmp	x1, #PAGE_SIZE_4KB
	ASM_ASSERT(eq)
	tst	x0, #(PAGE_SIZE_MASK)
	ASM_ASSERT(eq)
#endif
	lsr	x0, x0, #FOUR_KB_SHIFT	/* 4KB size encoding is zero */
	sys	#6, c8, c4, #3, x0	/* TLBI RPAOS, <Xt> */
	dsb	sy
	ret
endfunc gpt_tlbi_by_pa