/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>

	.globl	smc
	.globl	zeromem
	.globl	zero_normalmem
	.globl	memcpy4
	.globl	disable_mmu_icache_secure
	.globl	disable_mmu_secure

func smc
	/*
	 * On AArch32, only the first four arguments are passed in registers
	 * r0-r3; the remaining three are passed on the stack, so load them
	 * into r4-r6 explicitly before issuing the SMC.
	 * Clobbers: r4-r6
	 */
	ldm	sp, {r4, r5, r6}
	smc	#0
endfunc smc
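
/*
 * A minimal C-side sketch of calling this trampoline. The seven-argument
 * prototype is an assumption based on the comment above (r0-r3 in
 * registers, three more on the stack per the AAPCS); the PSCI CPU_OFF
 * function ID is used purely as an illustration.
 *
 *	#include <stdint.h>
 *
 *	void smc(uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3,
 *		 uint32_t r4, uint32_t r5, uint32_t r6);
 *
 *	static void cpu_off_example(void)
 *	{
 *		// 0x84000002 is the SMC32 PSCI CPU_OFF function ID
 *		smc(0x84000002, 0, 0, 0, 0, 0, 0);
 *	}
 */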

/* -----------------------------------------------------------------------
 * void zeromem(void *mem, unsigned int length)
 *
 * Initialise a region in normal memory to 0. This function complies with
 * the AAPCS and can be called from C code.
 *
 * -----------------------------------------------------------------------
 */
func zeromem
	/*
	 * Readable names for registers
	 *
	 * Registers r0, r1 and r2 are also set by zeromem which
	 * branches into the fallback path directly, so cursor, length and
	 * stop_address should not be retargeted to other registers.
	 */
	cursor       .req r0 /* Start address and then current address */
	length       .req r1 /* Length in bytes of the region to zero out */
	/*
	 * Reusing the r1 register as length is only used at the beginning of
	 * the function.
	 */
	stop_address .req r1  /* Address past the last zeroed byte */
	zeroreg1     .req r2  /* Source register filled with 0 */
	zeroreg2     .req r3  /* Source register filled with 0 */
	tmp	     .req r12 /* Temporary scratch register */

	mov	zeroreg1, #0

	/* stop_address is the address one past the last byte to zero */
	add	stop_address, cursor, length

	/*
	 * Length cannot be used anymore as it shares the same register with
	 * stop_address.
	 */
	.unreq	length

	/*
	 * If the start address is already aligned to 8 bytes, skip this loop.
	 */
	tst	cursor, #(8-1)
	beq	.Lzeromem_8bytes_aligned

	/* Calculate the next address aligned to 8 bytes */
	orr	tmp, cursor, #(8-1)
	adds	tmp, tmp, #1
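	/*
	 * For example, cursor = 0x1003: the orr yields 0x1007 and the add
	 * yields 0x1008, the next 8-byte boundary.
	 */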
	/* If it overflows, fall back to byte-per-byte zeroing */
	beq	.Lzeromem_1byte_aligned
	/* If the next aligned address is after the stop address, fall back */
	cmp	tmp, stop_address
	bhs	.Lzeromem_1byte_aligned

	/* zero byte per byte */
1:
	strb	zeroreg1, [cursor], #1
	cmp	cursor, tmp
	bne	1b

	/* zero 8 bytes at a time */
.Lzeromem_8bytes_aligned:

	/* Calculate the last 8-byte aligned address (round stop_address down) */
	bic	tmp, stop_address, #(8-1)

	cmp	cursor, tmp
	bhs	2f

	mov	zeroreg2, #0
1:
	stmia	cursor!, {zeroreg1, zeroreg2}
	cmp	cursor, tmp
	blo	1b
2:

	/* zero byte per byte */
.Lzeromem_1byte_aligned:
	cmp	cursor, stop_address
	beq	2f
1:
	strb	zeroreg1, [cursor], #1
	cmp	cursor, stop_address
	bne	1b
2:
	bx	lr

	.unreq	cursor
	/*
	 * length is already unreq'ed to reuse the register for another
	 * variable.
	 */
	.unreq	stop_address
	.unreq	zeroreg1
	.unreq	zeroreg2
	.unreq	tmp
endfunc zeromem

/*
 * AArch32 has no special way of zeroing normal memory equivalent to the
 * DC ZVA instruction on AArch64, so zero_normalmem is simply aliased to
 * zeromem.
 */
.equ	zero_normalmem, zeromem
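
/*
 * For reference, a C sketch of the algorithm implemented by zeromem above:
 * zero byte by byte up to an 8-byte boundary, then 8 bytes at a time with
 * stmia, then any remaining tail bytes. The function name zeromem_model is
 * hypothetical and the sketch is not part of the build.
 *
 *	#include <stdint.h>
 *
 *	void zeromem_model(void *mem, unsigned int length)
 *	{
 *		uint8_t *cursor = mem;
 *		uint8_t *stop = cursor + length;
 *
 *		// Head: zero bytes until cursor reaches an 8-byte boundary
 *		while (cursor != stop && ((uintptr_t)cursor & 7u) != 0u)
 *			*cursor++ = 0;
 *
 *		// Middle: zero 8 bytes at a time (stmia of two registers)
 *		while ((uintptr_t)(stop - cursor) >= 8u) {
 *			uint32_t *word = (uint32_t *)cursor;
 *			word[0] = 0;
 *			word[1] = 0;
 *			cursor += 8;
 *		}
 *
 *		// Tail: zero any remaining bytes
 *		while (cursor != stop)
 *			*cursor++ = 0;
 *	}
 */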

/* --------------------------------------------------------------------------
 * void memcpy4(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 4-byte aligned.
 * --------------------------------------------------------------------------
 */
func memcpy4
#if ENABLE_ASSERTIONS
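	/* Both addresses are 4-byte aligned iff their OR has low two bits clear */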
	orr	r3, r0, r1
	tst	r3, #0x3
	ASM_ASSERT(eq)
#endif
/* copy 4 bytes at a time */
m_loop4:
	cmp	r2, #4
	blo	m_loop1
	ldr	r3, [r1], #4
	str	r3, [r0], #4
	sub	r2, r2, #4
	b	m_loop4
/* copy byte per byte */
m_loop1:
	cmp	r2, #0
	beq	m_end
	ldrb	r3, [r1], #1
	strb	r3, [r0], #1
	subs	r2, r2, #1
	bne	m_loop1
m_end:
	bx	lr
endfunc memcpy4
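
/*
 * A minimal C usage sketch, assuming the buffer names shown; both pointers
 * must be 4-byte aligned or the assertion above fires when
 * ENABLE_ASSERTIONS is set.
 *
 *	void memcpy4(void *dest, const void *src, unsigned int length);
 *
 *	static unsigned char src_buf[16] __attribute__((aligned(4)));
 *	static unsigned char dst_buf[16] __attribute__((aligned(4)));
 *
 *	static void copy_example(void)
 *	{
 *		memcpy4(dst_buf, src_buf, sizeof(src_buf));
 *	}
 */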

/* ---------------------------------------------------------------------------
 * Disable the MMU in Secure State
 * ---------------------------------------------------------------------------
 */

func disable_mmu_secure
	mov	r1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu:
	ldcopr	r0, SCTLR
	bic	r0, r0, r1
	stcopr	r0, SCTLR
	isb				// ensure MMU is off
	dsb	sy
	bx	lr
endfunc disable_mmu_secure


func disable_mmu_icache_secure
	ldr	r1, =(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu
endfunc disable_mmu_icache_secure
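
/*
 * A minimal C-side sketch of the two entry points above; the prototypes
 * follow directly from the assembly (no arguments, no return value) and
 * the caller name is hypothetical.
 *
 *	void disable_mmu_secure(void);
 *	void disable_mmu_icache_secure(void);
 *
 *	static void prepare_mmu_off(void)
 *	{
 *		// Clears SCTLR.M, SCTLR.C and SCTLR.I, then isb/dsb
 *		disable_mmu_icache_secure();
 *	}
 */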