/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>

	.globl	smc
	.globl	zeromem
	.globl	zero_normalmem
	.globl	memcpy4
	.globl	disable_mmu_icache_secure
	.globl	disable_mmu_secure

func smc
	/*
	 * On AArch32, the AAPCS passes only the first four arguments of a
	 * call in r0-r3; the remaining arguments arrive on the caller's
	 * stack, so load them into r4-r6 explicitly here before issuing the
	 * SMC (see the illustrative caller sketch after this function).
	 * Clobbers: r4-r6
	 */
	ldm	sp, {r4, r5, r6}
	smc	#0
endfunc smc
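
/*
 * Illustrative caller sketch (an assumption for documentation, not part of
 * the original code): per the AAPCS only the first four arguments of a call
 * are passed in r0-r3; any further arguments are placed on the caller's
 * stack, which is why smc() above reloads r4-r6 from the stack. MY_SMC_FID
 * and the zero argument values below are placeholders.
 *
 *	ldr	r0, =MY_SMC_FID		// arguments 1-4 in r0-r3
 *	mov	r1, #0
 *	mov	r2, #0
 *	mov	r3, #0
 *	mov	r4, #0			// arguments 5-7 go on the stack
 *	mov	r5, #0
 *	mov	r6, #0
 *	push	{r4-r6}
 *	bl	smc
 *	add	sp, sp, #12		// caller drops the stacked arguments
 */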

/* -----------------------------------------------------------------------
 * void zeromem(void *mem, unsigned int length)
 *
 * Initialise a region in normal memory to 0. This function complies with the
 * AAPCS and can be called from C code.
 *
 * -----------------------------------------------------------------------
 */
func zeromem
	/*
	 * Readable names for registers
	 *
	 * Registers r0, r1 and r2 are also set by zeromem which
	 * branches into the fallback path directly, so cursor, length and
	 * stop_address should not be retargeted to other registers.
	 */
	cursor       .req r0 /* Start address and then current address */
	length       .req r1 /* Length in bytes of the region to zero out */
	/*
	 * Reuse the r1 register: length is only needed at the beginning of
	 * the function.
	 */
	stop_address .req r1  /* Address past the last zeroed byte */
	zeroreg1     .req r2  /* Source register filled with 0 */
	zeroreg2     .req r3  /* Source register filled with 0 */
	tmp	     .req r12 /* Temporary scratch register */

	mov	zeroreg1, #0

	/* stop_address is the address past the last byte to zero */
	add	stop_address, cursor, length

	/*
	 * Length cannot be used anymore as it shares the same register with
	 * stop_address.
	 */
	.unreq	length

	/*
	 * If the start address is already aligned to 8 bytes, skip this loop.
	 */
	tst	cursor, #(8-1)
	beq	.Lzeromem_8bytes_aligned

	/* Calculate the next address aligned to 8 bytes */
	orr	tmp, cursor, #(8-1)
	adds	tmp, tmp, #1
	/* If it overflows, fall back to byte-by-byte zeroing */
	beq	.Lzeromem_1byte_aligned
	/* If the next aligned address is after the stop address, fall back */
	cmp	tmp, stop_address
	bhs	.Lzeromem_1byte_aligned

	/* zero byte by byte */
1:
	strb	zeroreg1, [cursor], #1
	cmp	cursor, tmp
	bne	1b

	/* zero 8 bytes at a time */
.Lzeromem_8bytes_aligned:

	/* Calculate the last 8-byte aligned address. */
	bic	tmp, stop_address, #(8-1)

	cmp	cursor, tmp
	bhs	2f

	mov	zeroreg2, #0
1:
	stmia	cursor!, {zeroreg1, zeroreg2}
	cmp	cursor, tmp
	blo	1b
2:

	/* zero byte by byte */
.Lzeromem_1byte_aligned:
	cmp	cursor, stop_address
	beq	2f
1:
	strb	zeroreg1, [cursor], #1
	cmp	cursor, stop_address
	bne	1b
2:
	bx	lr

	.unreq	cursor
	/*
	 * length is already unreq'ed to reuse the register for another
	 * variable.
	 */
	.unreq	stop_address
	.unreq	zeroreg1
	.unreq	zeroreg2
	.unreq	tmp
endfunc zeromem

/*
 * AArch32 has no special way of zeroing normal memory (such as AArch64's
 * DC ZVA instruction), so zero_normalmem is simply an alias for zeromem.
 */
.equ	zero_normalmem, zeromem

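/*
 * Illustrative caller sketch (an assumption for documentation, not part of
 * the original code): zeromem() and its zero_normalmem alias follow the
 * AAPCS, so a caller only needs the base address in r0 and the length in
 * bytes in r1. The scratch_buffer symbol and the length are placeholders.
 *
 *	ldr	r0, =scratch_buffer	// start of the region to clear
 *	mov	r1, #256		// length in bytes
 *	bl	zeromem			// or: bl zero_normalmem
 */
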
/* --------------------------------------------------------------------------
 * void memcpy4(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 4-byte aligned.
 * --------------------------------------------------------------------------
 */
func memcpy4
#if ENABLE_ASSERTIONS
	/* Check that both the source and destination are 4-byte aligned */
	orr	r3, r0, r1
	tst	r3, #0x3
	ASM_ASSERT(eq)
#endif
/* copy 4 bytes at a time */
m_loop4:
	cmp	r2, #4
	blo	m_loop1
	ldr	r3, [r1], #4
	str	r3, [r0], #4
	sub	r2, r2, #4
	b	m_loop4
/* copy byte by byte */
m_loop1:
	cmp	r2, #0
	beq	m_end
	ldrb	r3, [r1], #1
	strb	r3, [r0], #1
	subs	r2, r2, #1
	bne	m_loop1
m_end:
	bx	lr
endfunc memcpy4
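
/*
 * Illustrative caller sketch (an assumption for documentation, not part of
 * the original code): both pointers passed to memcpy4() must be 4-byte
 * aligned, otherwise the ENABLE_ASSERTIONS check above fires. The dst_data
 * and src_data symbols and the length are placeholders.
 *
 *	ldr	r0, =dst_data		// destination, 4-byte aligned
 *	ldr	r1, =src_data		// source, 4-byte aligned
 *	mov	r2, #32			// length in bytes
 *	bl	memcpy4
 */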

/* ---------------------------------------------------------------------------
 * Disable the MMU in Secure State
 * ---------------------------------------------------------------------------
 */

func disable_mmu_secure
	mov	r1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu:
	ldcopr	r0, SCTLR
	bic	r0, r0, r1
	stcopr	r0, SCTLR
	isb				// ensure MMU is off
	dsb	sy
	bx	lr
endfunc disable_mmu_secure


func disable_mmu_icache_secure
	ldr	r1, =(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu
endfunc disable_mmu_icache_secure
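
/*
 * Illustrative usage note (an assumption for documentation, not part of the
 * original code): both entry points take no arguments and return nothing.
 * disable_mmu_icache_secure simply adds SCTLR_I_BIT to the clear mask and
 * reuses do_disable_mmu, so a caller picks one of:
 *
 *	bl	disable_mmu_secure		// clear SCTLR M and C bits
 *	bl	disable_mmu_icache_secure	// clear SCTLR M, C and I bits
 */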