/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>

	.globl	get_afflvl_shift
	.globl	mpidr_mask_lower_afflvls
	.globl	eret
	.globl	smc

	.globl	zero_normalmem
	.globl	zeromem
	.globl	zeromem16
	.globl	memcpy16

	.globl	disable_mmu_el3
	.globl	disable_mmu_icache_el3

#if SUPPORT_VFP
	.globl	enable_vfp
#endif

func get_afflvl_shift
	cmp	x0, #3
	cinc	x0, x0, eq
	mov	x1, #MPIDR_AFFLVL_SHIFT
	lsl	x0, x0, x1
	ret
endfunc get_afflvl_shift

func mpidr_mask_lower_afflvls
	cmp	x1, #3
	cinc	x1, x1, eq
	mov	x2, #MPIDR_AFFLVL_SHIFT
	lsl	x2, x1, x2
	lsr	x0, x0, x2
	lsl	x0, x0, x2
	ret
endfunc mpidr_mask_lower_afflvls


func eret
	eret
endfunc eret


func smc
	smc	#0
endfunc smc

/* -----------------------------------------------------------------------
 * void zeromem16(void *mem, unsigned int length);
 *
 * Initialise a memory region to 0.
 * The memory address must be 16-byte aligned.
 * NOTE: This function is deprecated and zeromem should be used instead.
 * -----------------------------------------------------------------------
 */
.equ	zeromem16, zeromem

/* -----------------------------------------------------------------------
 * void zero_normalmem(void *mem, unsigned int length);
 *
 * Initialise a region in normal memory to 0. This function complies with the
 * AAPCS and can be called from C code.
 *
 * NOTE: The MMU must be enabled when using this function, as it can only
 *       operate on normal memory. It is intended mainly for use from C code,
 *       where the MMU is usually enabled.
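 *
 * A minimal illustrative call from C (the buffer name is hypothetical):
 *
 *     extern void zero_normalmem(void *mem, unsigned int length);
 *
 *     static char pool[128];
 *     zero_normalmem(pool, sizeof(pool));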
 * -----------------------------------------------------------------------
 */
.equ	zero_normalmem, zeromem_dczva

/* -----------------------------------------------------------------------
 * void zeromem(void *mem, unsigned int length);
 *
 * Initialise a region of device memory to 0. This function complies with the
 * AAPCS and can be called from C code.
 *
 * NOTE: When data caches and MMU are enabled, zero_normalmem can usually be
 *       used instead for faster zeroing.
 *
 * -----------------------------------------------------------------------
 */
func zeromem
	/* x2 is the address past the last zeroed byte */
	add	x2, x0, x1
	/*
	 * Use the fallback path, which does not use the DC ZVA instruction
	 * and therefore does not require the MMU to be enabled.
	 */
	b	.Lzeromem_dczva_fallback_entry
endfunc zeromem

/* -----------------------------------------------------------------------
 * void zeromem_dczva(void *mem, unsigned int length);
 *
 * Fill a region of normal memory of size "length" in bytes with null bytes.
 * The MMU must be enabled and the memory must be of Normal type. This is
 * because this function internally uses the DC ZVA instruction, which
 * generates an Alignment fault if used on any type of Device memory (see
 * section D3.4.9 of the ARMv8 ARM, issue k). When the MMU is disabled, all
 * memory behaves like Device-nGnRnE memory (see section D4.2.8), hence the
 * requirement on the MMU being enabled.
 * NOTE: The code assumes that the block size as defined in the DCZID_EL0
 *       register is at least 16 bytes.
 *
 * -----------------------------------------------------------------------
 */
func zeromem_dczva

	/*
	 * The function consists of a series of loops that zero memory one
	 * byte at a time, 16 bytes at a time, or using the DC ZVA instruction
	 * to zero an aligned block of bytes at a time; the block size is
	 * assumed to be at least 16 bytes. In the case where the DC ZVA
	 * instruction cannot be used, or if the first 16 bytes loop would
	 * overflow, there is a fallback path that does not use DC ZVA.
	 * Note: The fallback path is also used by the zeromem function, which
	 *       branches to it directly.
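	 *
	 * A rough C equivalent of the overall structure, ignoring the
	 * overflow checks and the fallback path (illustrative only; zero16()
	 * and dc_zva() stand in for the STP and DC ZVA steps):
	 *
	 *     uintptr_t c = (uintptr_t)mem, stop = c + length;
	 *     while (c & 0xf)        { *(char *)c = 0; c += 1;  } // align to 16
	 *     while (c & block_mask) { zero16(c);      c += 16; } // align to block
	 *     while (stop - c >= block_size) { dc_zva(c); c += block_size; }
	 *     while (stop - c >= 16) { zero16(c);      c += 16; }
	 *     while (c < stop)       { *(char *)c = 0; c += 1;  }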
	 *
	 * +---------+   zeromem_dczva
	 * |  entry  |
	 * +----+----+
	 *      |
	 *      v
	 * +---------+
	 * | checks  |>o-------+ (If any check fails, fall back)
	 * +----+----+         |
	 *      |              |---------------+
	 *      v              | Fallback path |
	 * +------+------+     |---------------+
	 * | 1 byte loop |     |
	 * +------+------+     | .Lzeromem_dczva_initial_1byte_aligned_end
	 *        |            |
	 *        v            |
	 * +------+--------+   |
	 * | 16 bytes loop |   |
	 * +------+--------+   |
	 *        |            |
	 *        v            |
	 * +------+------+     | .Lzeromem_dczva_blocksize_aligned
	 * | DC ZVA loop |     |
	 * +------+------+     |
	 * +--------+  |       |
	 * |        |  |       |
	 * |        v  v       |
	 * | +------+--------+ | .Lzeromem_dczva_final_16bytes_aligned
	 * | | 16 bytes loop | |
	 * | +------+--------+ |
	 * |        |          |
	 * |        v          |
	 * | +------+------+   | .Lzeromem_dczva_final_1byte_aligned
	 * | | 1 byte loop |   |
	 * | +-------------+   |
	 * |        |          |
	 * |        v          |
	 * |    +---+--+       |
	 * |    | exit |       |
	 * |    +------+       |
	 * |                   |
	 * |  +----------------+   +------------------+ zeromem
	 * |  |              +-----| zeromem function |
	 * |  |              |     +------------------+
	 * |  v              v
	 * |  +-------------+  .Lzeromem_dczva_fallback_entry
	 * |  | 1 byte loop |
	 * |  +------+------+
	 * |         |
	 * +---------+
	 */

	/*
	 * Readable names for the registers
	 *
	 * Registers x0, x1 and x2 are also set by zeromem, which branches
	 * into the fallback path directly, so cursor, length and
	 * stop_address should not be retargeted to other registers.
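	 *
	 * Per the AAPCS64 parameter passing rules, the C arguments arrive in
	 * x0 (mem) and x1 (length), which is why those registers double as
	 * cursor and length below.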
	 */
	cursor       .req x0 /* Start address and then current address */
	length       .req x1 /* Length in bytes of the region to zero out */
	/* Reusing x1 as length is never used after block_mask is set */
	block_mask   .req x1 /* Bitmask of the block size read in DCZID_EL0 */
	stop_address .req x2 /* Address past the last zeroed byte */
	block_size   .req x3 /* Size of a block in bytes as read in DCZID_EL0 */
	tmp1         .req x4
	tmp2         .req x5

#if ENABLE_ASSERTIONS
	/*
	 * Check for the M bit (MMU enabled) in the current SCTLR_EL(1|3)
	 * register value and panic if the MMU is disabled.
	 */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31)
	mrs	tmp1, sctlr_el3
#else
	mrs	tmp1, sctlr_el1
#endif

	tst	tmp1, #SCTLR_M_BIT
	ASM_ASSERT(ne)
#endif /* ENABLE_ASSERTIONS */

	/* stop_address is the address past the last byte to zero */
	add	stop_address, cursor, length

	/*
	 * Read DCZID_EL0: its 4 lowest bits hold log2(<block size in words>)
	 * (see the encoding of the dczid_el0 register).
	 */
	mrs	block_size, dczid_el0

	/*
	 * Select the 4 lowest bits and convert the extracted
	 * log2(<block size in words>) to <block size in bytes>, i.e.
	 * block_size = 4 << DCZID_EL0.BS (e.g. a BS field of 4 means
	 * 16-word blocks: 4 << 4 = 64 bytes).
	 */
	ubfx	block_size, block_size, #0, #4
	mov	tmp2, #(1 << 2)
	lsl	block_size, tmp2, block_size

#if ENABLE_ASSERTIONS
	/*
	 * Assume the block size is at least 16 bytes to avoid manual
	 * realignment of the cursor at the end of the DCZVA loop.
	 */
	cmp	block_size, #16
	ASM_ASSERT(hs)
#endif
	/*
	 * It is not worth doing all the setup for a region smaller than a
	 * block, and this check also protects against zeroing a whole block
	 * when the area to zero is smaller than that. As the block size is
	 * assumed to be at least 16 bytes, it also protects the initial
	 * aligning loops from trying to zero 16 bytes when length is less
	 * than 16.
	 */
	cmp	length, block_size
	b.lo	.Lzeromem_dczva_fallback_entry

	/*
	 * Calculate the bitmask of the block alignment. It will never
	 * underflow as the block size is between 4 bytes and 2kB.
	 * block_mask = block_size - 1
	 */
	sub	block_mask, block_size, #1
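
	/*
	 * As a worked example (illustrative values): a 64-byte zero block
	 * gives block_mask = 0x3f, so "address AND block_mask" is the offset
	 * of an address within its block, zero exactly when the address is
	 * block-aligned.
	 */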

	/*
	 * The length alias should not be used after this point unless it is
	 * defined as a register other than block_mask's.
	 */
	.unreq	length

	/*
	 * If the start address is already aligned to the zero block size, go
	 * straight to the cache zeroing loop. This is safe because at this
	 * point, the length cannot be smaller than a block size.
	 */
	tst	cursor, block_mask
	b.eq	.Lzeromem_dczva_blocksize_aligned

	/*
	 * Calculate the first block-size-aligned address. It is assumed that
	 * the zero block size is at least 16 bytes. This address is the last
	 * address of this initial loop.
	 */
	orr	tmp1, cursor, block_mask
	add	tmp1, tmp1, #1

	/*
	 * If the addition overflows, skip the cache zeroing loops. This is
	 * quite unlikely however.
	 */
	cbz	tmp1, .Lzeromem_dczva_fallback_entry

	/*
	 * If the first block-size-aligned address is past the last address,
	 * fall back to the simpler code.
	 */
	cmp	tmp1, stop_address
	b.hi	.Lzeromem_dczva_fallback_entry

	/*
	 * If the start address is already aligned to 16 bytes, skip this
	 * loop. It is safe to do this because tmp1 (the stop address of the
	 * initial 16 bytes loop) will never be greater than the final stop
	 * address.
	 */
	tst	cursor, #0xf
	b.eq	.Lzeromem_dczva_initial_1byte_aligned_end

	/* Calculate the next address aligned to 16 bytes */
	orr	tmp2, cursor, #0xf
	add	tmp2, tmp2, #1
	/* If it overflows, fall back to the simple path (unlikely) */
	cbz	tmp2, .Lzeromem_dczva_fallback_entry
	/*
	 * The next aligned address cannot be after the stop address because
	 * the length cannot be smaller than 16 at this point.
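	 * As a worked example of the round-up idiom above: for an unaligned
	 * cursor of 0x1003, (0x1003 | 0xf) + 1 = 0x100f + 1 = 0x1010, the
	 * next 16-byte boundary.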
	 */

	/* First loop: zero byte by byte */
1:
	strb	wzr, [cursor], #1
	cmp	cursor, tmp2
	b.ne	1b
.Lzeromem_dczva_initial_1byte_aligned_end:

	/*
	 * Second loop: we need to zero 16 bytes at a time from cursor to
	 * tmp1 before being able to use the code that deals with
	 * block-size-aligned addresses.
	 */
	cmp	cursor, tmp1
	b.hs	2f
1:
	stp	xzr, xzr, [cursor], #16
	cmp	cursor, tmp1
	b.lo	1b
2:

	/*
	 * Third loop: zero a block at a time using the DC ZVA cache block
	 * zeroing instruction.
	 */
.Lzeromem_dczva_blocksize_aligned:
	/*
	 * Calculate the last block-size-aligned address. If the result
	 * equals the start address, the loop will exit immediately.
	 */
	bic	tmp1, stop_address, block_mask

	cmp	cursor, tmp1
	b.hs	2f
1:
	/* Zero the block containing the cursor */
	dc	zva, cursor
	/* Increment the cursor by the size of a block */
	add	cursor, cursor, block_size
	cmp	cursor, tmp1
	b.lo	1b
2:

	/*
	 * Fourth loop: zero 16 bytes at a time, then zero the remaining area
	 * byte by byte.
	 */
.Lzeromem_dczva_final_16bytes_aligned:
	/*
	 * Calculate the last 16-byte-aligned address. It is assumed that the
	 * block size will never be smaller than 16 bytes, so that the
	 * current cursor is aligned to at least a 16-byte boundary.
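	 * The BIC below computes stop_address AND NOT(0xf): e.g. a
	 * stop_address of 0x2019 yields 0x2010, the last 16-byte boundary
	 * not past the end of the region.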
	 */
	bic	tmp1, stop_address, #15

	cmp	cursor, tmp1
	b.hs	2f
1:
	stp	xzr, xzr, [cursor], #16
	cmp	cursor, tmp1
	b.lo	1b
2:

	/* Fifth and final loop: zero byte by byte */
.Lzeromem_dczva_final_1byte_aligned:
	cmp	cursor, stop_address
	b.eq	2f
1:
	strb	wzr, [cursor], #1
	cmp	cursor, stop_address
	b.ne	1b
2:
	ret

	/* Fallback for unaligned start addresses */
.Lzeromem_dczva_fallback_entry:
	/*
	 * If the start address is already aligned to 16 bytes, skip this
	 * loop.
	 */
	tst	cursor, #0xf
	b.eq	.Lzeromem_dczva_final_16bytes_aligned

	/* Calculate the next address aligned to 16 bytes */
	orr	tmp1, cursor, #15
	add	tmp1, tmp1, #1
	/* If it overflows, fall back to byte-by-byte zeroing */
	cbz	tmp1, .Lzeromem_dczva_final_1byte_aligned
	/* If the next aligned address is after the stop address, fall back */
	cmp	tmp1, stop_address
	b.hs	.Lzeromem_dczva_final_1byte_aligned

	/* Fallback entry loop: zero byte by byte */
1:
	strb	wzr, [cursor], #1
	cmp	cursor, tmp1
	b.ne	1b

	b	.Lzeromem_dczva_final_16bytes_aligned

	.unreq	cursor
	/*
	 * length is already unreq'ed to reuse the register for another
	 * variable.
	 */
	.unreq	stop_address
	.unreq	block_size
	.unreq	block_mask
	.unreq	tmp1
	.unreq	tmp2
endfunc zeromem_dczva

/* --------------------------------------------------------------------------
 * void memcpy16(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 16-byte aligned.
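 *
 * A minimal illustrative call from C (hypothetical buffers, forced to the
 * required 16-byte alignment):
 *
 *     extern void memcpy16(void *dest, const void *src, unsigned int length);
 *
 *     static char src_buf[64] __attribute__((aligned(16)));
 *     static char dst_buf[64] __attribute__((aligned(16)));
 *     memcpy16(dst_buf, src_buf, sizeof(dst_buf));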
 * --------------------------------------------------------------------------
 */
func memcpy16
#if ENABLE_ASSERTIONS
	orr	x3, x0, x1
	tst	x3, #0xf
	ASM_ASSERT(eq)
#endif
/* copy 16 bytes at a time */
m_loop16:
	cmp	x2, #16
	b.lo	m_loop1
	ldp	x3, x4, [x1], #16
	stp	x3, x4, [x0], #16
	sub	x2, x2, #16
	b	m_loop16
/* copy byte by byte */
m_loop1:
	cbz	x2, m_end
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	subs	x2, x2, #1
	b.ne	m_loop1
m_end:
	ret
endfunc memcpy16

/* ---------------------------------------------------------------------------
 * Disable the MMU at EL3
 * ---------------------------------------------------------------------------
 */

func disable_mmu_el3
	/* Clear the M (MMU enable) and C (data cache) bits of SCTLR_EL3 */
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu:
	mrs	x0, sctlr_el3
	bic	x0, x0, x1
	msr	sctlr_el3, x0
	isb	// ensure MMU is off
	dsb	sy
	ret
endfunc disable_mmu_el3


func disable_mmu_icache_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu
endfunc disable_mmu_icache_el3

/* ---------------------------------------------------------------------------
 * Enable the use of VFP at EL3
 * ---------------------------------------------------------------------------
 */
#if SUPPORT_VFP
func enable_vfp
	/* Allow FP/SIMD accesses at EL1 and EL0 (CPACR_EL1.FPEN) */
	mrs	x0, cpacr_el1
	orr	x0, x0, #CPACR_VFP_BITS
	msr	cpacr_el1, x0
	/* Clear CPTR_EL3.TFP so FP accesses are not trapped to EL3 */
	mrs	x0, cptr_el3
	mov	x1, #AARCH64_CPTR_TFP
	bic	x0, x0, x1
	msr	cptr_el3, x0
	isb
	ret
endfunc enable_vfp
#endif