/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>

	.globl	smc
	.globl	zeromem
	.globl	zero_normalmem
	.globl	memcpy4
	.globl	disable_mmu_icache_secure
	.globl	disable_mmu_secure

func smc
	/*
	 * For AArch32 only r0-r3 will be in the registers; the rest,
	 * r4-r6, will have been pushed onto the stack. So here, we have
	 * to load them from the stack into registers r4-r6 explicitly.
	 * Clobbers: r4-r6
	 */
	ldm	sp, {r4, r5, r6}
	smc	#0
endfunc smc

/* -----------------------------------------------------------------------
 * void zeromem(void *mem, unsigned int length)
 *
 * Initialise a region in normal memory to 0. This function complies with
 * the AAPCS and can be called from C code.
 *
 * -----------------------------------------------------------------------
 */
func zeromem
	/*
	 * Readable names for registers
	 *
	 * Registers r0, r1 and r2 are also set by zeromem which
	 * branches into the fallback path directly, so cursor, length and
	 * stop_address should not be retargeted to other registers.
	 */
	cursor       .req r0  /* Start address and then current address */
	length       .req r1  /* Length in bytes of the region to zero out */
	/*
	 * Reusing the r1 register, as length is only used at the beginning of
	 * the function.
	 */
	stop_address .req r1  /* Address past the last zeroed byte */
	zeroreg1     .req r2  /* Source register filled with 0 */
	zeroreg2     .req r3  /* Source register filled with 0 */
	tmp          .req r12 /* Temporary scratch register */

	mov	zeroreg1, #0

	/* stop_address is the address past the last byte to zero */
	add	stop_address, cursor, length

	/*
	 * Length cannot be used anymore as it shares the same register with
	 * stop_address.
	 */
	.unreq	length

	/*
	 * If the start address is already aligned to 8 bytes, skip this loop.
	 */
	tst	cursor, #(8-1)
	beq	.Lzeromem_8bytes_aligned

	/* Calculate the next address aligned to 8 bytes */
	orr	tmp, cursor, #(8-1)
	adds	tmp, tmp, #1
	/* If it overflows, fall back to byte-per-byte zeroing */
	beq	.Lzeromem_1byte_aligned
	/* If the next aligned address is after the stop address, fall back */
	cmp	tmp, stop_address
	bhs	.Lzeromem_1byte_aligned

	/* zero byte per byte */
1:
	strb	zeroreg1, [cursor], #1
	cmp	cursor, tmp
	bne	1b

	/* zero 8 bytes at a time */
.Lzeromem_8bytes_aligned:

	/* Calculate the last 8-byte aligned address. */
	bic	tmp, stop_address, #(8-1)

	cmp	cursor, tmp
	bhs	2f

	mov	zeroreg2, #0
1:
	stmia	cursor!, {zeroreg1, zeroreg2}
	cmp	cursor, tmp
	blo	1b
2:

	/* zero byte per byte */
.Lzeromem_1byte_aligned:
	cmp	cursor, stop_address
	beq	2f
1:
	strb	zeroreg1, [cursor], #1
	cmp	cursor, stop_address
	bne	1b
2:
	bx	lr

	.unreq	cursor
	/*
	 * length is already unreq'ed to reuse the register for another
	 * variable.
	 */
	.unreq	stop_address
	.unreq	zeroreg1
	.unreq	zeroreg2
	.unreq	tmp
endfunc zeromem

/*
 * AArch32 does not have special ways of zeroing normal memory as AArch64 does
 * using the DC ZVA instruction, so we just alias zero_normalmem to zeromem.
 */
.equ	zero_normalmem, zeromem

/* --------------------------------------------------------------------------
 * void memcpy4(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 4-byte aligned.
 * --------------------------------------------------------------------------
 */
func memcpy4
#if ASM_ASSERTION
	orr	r3, r0, r1
	tst	r3, #0x3
	ASM_ASSERT(eq)
#endif
/* copy 4 bytes at a time */
m_loop4:
	cmp	r2, #4
	blo	m_loop1
	ldr	r3, [r1], #4
	str	r3, [r0], #4
	sub	r2, r2, #4
	b	m_loop4
/* copy byte per byte */
m_loop1:
	cmp	r2, #0
	beq	m_end
	ldrb	r3, [r1], #1
	strb	r3, [r0], #1
	subs	r2, r2, #1
	bne	m_loop1
m_end:
	bx	lr
endfunc memcpy4

/* ---------------------------------------------------------------------------
 * Disable the MMU in Secure State
 * ---------------------------------------------------------------------------
 */

func disable_mmu_secure
	mov	r1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu:
	ldcopr	r0, SCTLR
	bic	r0, r0, r1
	stcopr	r0, SCTLR
	isb				// ensure MMU is off
	dsb	sy
	bx	lr
endfunc disable_mmu_secure

func disable_mmu_icache_secure
	ldr	r1, =(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu
endfunc disable_mmu_icache_secure
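
/* ---------------------------------------------------------------------------
 * Usage sketch (illustrative only; this comment is not part of the original
 * file and nothing in it is assembled).
 *
 * zeromem, zero_normalmem and memcpy4 follow the AAPCS, so they can be called
 * directly from C. The function and buffer names below are hypothetical and
 * only show the expected argument order and the 4-byte alignment that
 * memcpy4 requires.
 *
 *     extern void zeromem(void *mem, unsigned int length);
 *     extern void zero_normalmem(void *mem, unsigned int length);
 *     extern void memcpy4(void *dest, const void *src, unsigned int length);
 *
 *     static unsigned char src_buf[64] __attribute__((aligned(4)));
 *     static unsigned char dst_buf[64] __attribute__((aligned(4)));
 *
 *     void copy_then_scrub(void)
 *     {
 *         memcpy4(dst_buf, src_buf, sizeof(dst_buf));  // dest, src both 4-byte aligned
 *         zeromem(src_buf, sizeof(src_buf));           // clear the source region
 *     }
 * ---------------------------------------------------------------------------
 */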