/*
 * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

#if !ERROR_DEPRECATED
	.globl	get_afflvl_shift
	.globl	mpidr_mask_lower_afflvls
	.globl	eret
#endif /* ERROR_DEPRECATED */
	.globl	smc

	.globl	zero_normalmem
	.globl	zeromem
	.globl	memcpy16

	.globl	disable_mmu_el1
	.globl	disable_mmu_el3
	.globl	disable_mmu_icache_el1
	.globl	disable_mmu_icache_el3
	.globl	fixup_gdt_reloc
#if SUPPORT_VFP
	.globl	enable_vfp
#endif

#if !ERROR_DEPRECATED
func get_afflvl_shift
	cmp	x0, #3
	cinc	x0, x0, eq
	mov	x1, #MPIDR_AFFLVL_SHIFT
	lsl	x0, x0, x1
	ret
endfunc get_afflvl_shift

func mpidr_mask_lower_afflvls
	cmp	x1, #3
	cinc	x1, x1, eq
	mov	x2, #MPIDR_AFFLVL_SHIFT
	lsl	x2, x1, x2
	lsr	x0, x0, x2
	lsl	x0, x0, x2
	ret
endfunc mpidr_mask_lower_afflvls


func eret
	eret
endfunc eret
#endif /* ERROR_DEPRECATED */

func smc
	smc	#0
endfunc smc

/* -----------------------------------------------------------------------
 * void zero_normalmem(void *mem, unsigned int length);
 *
 * Initialise a region in normal memory to 0. This function complies with the
 * AAPCS and can be called from C code.
 *
 * NOTE: MMU must be enabled when using this function as it can only operate
 *       on normal memory. It is intended to be mainly used from C code when
 *       the MMU is usually enabled.
 * -----------------------------------------------------------------------
 */
.equ	zero_normalmem, zeromem_dczva

/* -----------------------------------------------------------------------
 * void zeromem(void *mem, unsigned int length);
 *
 * Initialise a region of device memory to 0. This function complies with the
 * AAPCS and can be called from C code.
 *
 * NOTE: When data caches and MMU are enabled, zero_normalmem can usually be
 *       used instead for faster zeroing.
 *
 * -----------------------------------------------------------------------
 */
func zeromem
	/* x2 is the address past the last zeroed address */
	add	x2, x0, x1
	/*
	 * Uses the fallback path that does not use the DC ZVA instruction and
	 * therefore does not need the MMU to be enabled
	 */
	b	.Lzeromem_dczva_fallback_entry
endfunc zeromem
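
/* -----------------------------------------------------------------------
 * Illustrative only (not part of the build): a minimal C sketch of how a
 * caller might choose between the two zeroing helpers above. The helper
 * prototypes match the comments above; clear_buffer() and mmu_is_enabled()
 * are hypothetical names.
 *
 *	void zero_normalmem(void *mem, unsigned int length);
 *	void zeromem(void *mem, unsigned int length);
 *
 *	static void clear_buffer(void *buf, unsigned int len)
 *	{
 *		if (mmu_is_enabled())
 *			zero_normalmem(buf, len);   // fast path, uses DC ZVA
 *		else
 *			zeromem(buf, len);          // safe with the MMU off
 *	}
 * -----------------------------------------------------------------------
 */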

/* -----------------------------------------------------------------------
 * void zeromem_dczva(void *mem, unsigned int length);
 *
 * Fill a region of normal memory of size "length" in bytes with null bytes.
 * The MMU must be enabled and the memory must be of normal type. This is
 * because this function internally uses the DC ZVA instruction, which
 * generates an Alignment fault if used on any type of Device memory (see
 * section D3.4.9 of the ARMv8 ARM, issue k). When the MMU is disabled, all
 * memory behaves like Device-nGnRnE memory (see section D4.2.8), hence the
 * requirement on the MMU being enabled.
 * NOTE: The code assumes that the block size as defined in the DCZID_EL0
 *       register is at least 16 bytes.
 *
 * -----------------------------------------------------------------------
 */
func zeromem_dczva

	/*
	 * The function consists of a series of loops that zero memory one byte
	 * at a time, 16 bytes at a time, or using the DC ZVA instruction to
	 * zero aligned blocks of bytes, whose size is assumed to be at least
	 * 16. In the case where the DC ZVA instruction cannot be used, or if
	 * the first 16 bytes loop would overflow, there is a fallback path
	 * that does not use DC ZVA.
	 * Note: The fallback path is also used by the zeromem function that
	 *       branches to it directly.
	 *
	 *              +---------+   zeromem_dczva
	 *              |  entry  |
	 *              +----+----+
	 *                   |
	 *                   v
	 *              +---------+
	 *              | checks  |>o-------+ (If any check fails, fallback)
	 *              +----+----+         |
	 *                   |              |---------------+
	 *                   v              | Fallback path |
	 *            +------+------+       |---------------+
	 *            | 1 byte loop |       |
	 *            +------+------+ .Lzeromem_dczva_initial_1byte_aligned_end
	 *                   |              |
	 *                   v              |
	 *           +-------+-------+      |
	 *           | 16 bytes loop |      |
	 *           +-------+-------+      |
	 *                   |              |
	 *                   v              |
	 *            +------+------+ .Lzeromem_dczva_blocksize_aligned
	 *            | DC ZVA loop |       |
	 *            +------+------+       |
	 *      +---------+  |              |
	 *      |         |  |              |
	 *      |         v  v              |
	 *      |    +-------+-------+ .Lzeromem_dczva_final_16bytes_aligned
	 *      |    | 16 bytes loop |      |
	 *      |    +-------+-------+      |
	 *      |            |              |
	 *      |            v              |
	 *      |     +------+------+ .Lzeromem_dczva_final_1byte_aligned
	 *      |     | 1 byte loop |       |
	 *      |     +-------------+       |
	 *      |            |              |
	 *      |            v              |
	 *      |        +---+--+           |
	 *      |        | exit |           |
	 *      |        +------+           |
	 *      |                           |
	 *      |       +--------------------+   +------------------+ zeromem
	 *      |       |  +---------------------| zeromem function |
	 *      |       |  |                     +------------------+
	 *      |       v  v
	 *      |    +-------------+ .Lzeromem_dczva_fallback_entry
	 *      |    | 1 byte loop |
	 *      |    +------+------+
	 *      |           |
	 *      +-----------+
	 */
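
	/*
	 * Illustrative only (not part of the build): a rough C equivalent of
	 * the strategy above, assuming block_size >= 16. The overflow checks
	 * are omitted, and zeromem_fallback(), zero16() and dc_zva() are
	 * hypothetical helpers standing in for the byte-by-byte path, an
	 * stp of two xzr registers and the DC ZVA instruction respectively.
	 *
	 *	void zeromem_dczva_c(void *mem, size_t length, size_t block_size)
	 *	{
	 *		unsigned char *cur = mem;
	 *		unsigned char *end = cur + length;
	 *
	 *		if (length < block_size) {
	 *			zeromem_fallback(cur, end);   // simple path
	 *			return;
	 *		}
	 *		while (((uintptr_t)cur & 0xf) != 0)   // 1 byte loop
	 *			*cur++ = 0;
	 *		while (((uintptr_t)cur & (block_size - 1)) != 0)
	 *			cur = zero16(cur);            // 16 bytes loop
	 *		while ((size_t)(end - cur) >= block_size)
	 *			cur = dc_zva(cur);            // DC ZVA loop
	 *		while ((size_t)(end - cur) >= 16)
	 *			cur = zero16(cur);            // final 16 bytes loop
	 *		while (cur != end)
	 *			*cur++ = 0;                   // final 1 byte loop
	 *	}
	 */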

	/*
	 * Readable names for registers
	 *
	 * Registers x0, x1 and x2 are also set by zeromem which
	 * branches into the fallback path directly, so cursor, length and
	 * stop_address should not be retargeted to other registers.
	 */
	cursor       .req x0 /* Start address and then current address */
	length       .req x1 /* Length in bytes of the region to zero out */
	/* Reusing x1 as length is never used after block_mask is set */
	block_mask   .req x1 /* Bitmask of the block size read in DCZID_EL0 */
	stop_address .req x2 /* Address past the last zeroed byte */
	block_size   .req x3 /* Size of a block in bytes as read in DCZID_EL0 */
	tmp1         .req x4
	tmp2         .req x5

#if ENABLE_ASSERTIONS
	/*
	 * Check for the M bit (MMU enabled) of the current SCTLR_EL(1|3)
	 * register value and panic if the MMU is disabled.
	 */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
	mrs	tmp1, sctlr_el3
#else
	mrs	tmp1, sctlr_el1
#endif

	tst	tmp1, #SCTLR_M_BIT
	ASM_ASSERT(ne)
#endif /* ENABLE_ASSERTIONS */

	/* stop_address is the address past the last to zero */
	add	stop_address, cursor, length

	/*
	 * Read DCZID_EL0; its 4 lowest bits encode log2(<block size in words>)
	 * (see the encoding of the dczid_el0 register).
	 */
	mrs	block_size, dczid_el0

	/*
	 * Select the 4 lowest bits and convert the extracted log2(<block size
	 * in words>) to <block size in bytes>
	 */
	ubfx	block_size, block_size, #0, #4
	mov	tmp2, #(1 << 2)
	lsl	block_size, tmp2, block_size

#if ENABLE_ASSERTIONS
	/*
	 * Assumes block size is at least 16 bytes to avoid manual realignment
	 * of the cursor at the end of the DCZVA loop.
	 */
	cmp	block_size, #16
	ASM_ASSERT(hs)
#endif
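
	/*
	 * Illustrative only (not part of the build): the equivalent C
	 * computation, where "dczid" is a hypothetical variable holding the
	 * value read from DCZID_EL0 above:
	 *
	 *	block_size = 4u << (dczid & 0xfu);
	 *
	 * For example, a BS field of 4 means 2^4 = 16 words, i.e. a 64-byte
	 * zero block.
	 */
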
	/*
	 * It is not worth doing all the setup for a region smaller than a
	 * block, and this check protects against zeroing a whole block when
	 * the area to zero is smaller than that. Also, as the block size is
	 * assumed to be at least 16 bytes, it protects the initial aligning
	 * loops from trying to zero 16 bytes when length is less than 16.
	 */
	cmp	length, block_size
	b.lo	.Lzeromem_dczva_fallback_entry

	/*
	 * Calculate the bitmask of the block alignment. It will never
	 * underflow as the block size is between 4 bytes and 2kB.
	 * block_mask = block_size - 1
	 */
	sub	block_mask, block_size, #1

	/*
	 * The length alias should not be used after this point unless it is
	 * defined as a register other than block_mask's.
	 */
	.unreq	length

	/*
	 * If the start address is already aligned to the zero block size, go
	 * straight to the cache zeroing loop. This is safe because at this
	 * point, the length cannot be smaller than a block size.
	 */
	tst	cursor, block_mask
	b.eq	.Lzeromem_dczva_blocksize_aligned

	/*
	 * Calculate the first block-size-aligned address. It is assumed that
	 * the zero block size is at least 16 bytes. This address is the last
	 * address of this initial loop.
	 */
	orr	tmp1, cursor, block_mask
	add	tmp1, tmp1, #1

	/*
	 * If the addition overflows, skip the cache zeroing loops. This is
	 * quite unlikely however.
	 */
	cbz	tmp1, .Lzeromem_dczva_fallback_entry

	/*
	 * If the first block-size-aligned address is past the last address,
	 * fall back to the simpler code.
	 */
	cmp	tmp1, stop_address
	b.hi	.Lzeromem_dczva_fallback_entry

	/*
	 * If the start address is already aligned to 16 bytes, skip this loop.
	 * It is safe to do this because tmp1 (the stop address of the initial
	 * 16 bytes loop) will never be greater than the final stop address.
	 */
	tst	cursor, #0xf
	b.eq	.Lzeromem_dczva_initial_1byte_aligned_end

	/* Calculate the next address aligned to 16 bytes */
	orr	tmp2, cursor, #0xf
	add	tmp2, tmp2, #1
	/* If it overflows, fall back to the simple path (unlikely) */
	cbz	tmp2, .Lzeromem_dczva_fallback_entry
	/*
	 * The next aligned address cannot be after the stop address because
	 * the length cannot be smaller than 16 at this point.
	 */

	/* First loop: zero byte per byte */
1:
	strb	wzr, [cursor], #1
	cmp	cursor, tmp2
	b.ne	1b
.Lzeromem_dczva_initial_1byte_aligned_end:

	/*
	 * Second loop: we need to zero 16 bytes at a time from cursor to tmp1
	 * before being able to use the code that deals with block-size-aligned
	 * addresses.
	 */
	cmp	cursor, tmp1
	b.hs	2f
1:
	stp	xzr, xzr, [cursor], #16
	cmp	cursor, tmp1
	b.lo	1b
2:

	/*
	 * Third loop: zero a block at a time using the DC ZVA cache block
	 * zeroing instruction.
	 */
.Lzeromem_dczva_blocksize_aligned:
	/*
	 * Calculate the last block-size-aligned address. If the result equals
	 * the start address, the loop will exit immediately.
	 */
	bic	tmp1, stop_address, block_mask

	cmp	cursor, tmp1
	b.hs	2f
1:
	/* Zero the block containing the cursor */
	dc	zva, cursor
	/* Increment the cursor by the size of a block */
	add	cursor, cursor, block_size
	cmp	cursor, tmp1
	b.lo	1b
2:

	/*
	 * Fourth loop: zero 16 bytes at a time and then byte per byte the
	 * remaining area
	 */
.Lzeromem_dczva_final_16bytes_aligned:
	/*
	 * Calculate the last 16-byte aligned address. It is assumed that the
	 * block size will never be smaller than 16 bytes so that the current
	 * cursor is aligned to at least a 16-byte boundary.
	 */
	bic	tmp1, stop_address, #15

	cmp	cursor, tmp1
	b.hs	2f
1:
	stp	xzr, xzr, [cursor], #16
	cmp	cursor, tmp1
	b.lo	1b
2:

	/* Fifth and final loop: zero byte per byte */
.Lzeromem_dczva_final_1byte_aligned:
	cmp	cursor, stop_address
	b.eq	2f
1:
	strb	wzr, [cursor], #1
	cmp	cursor, stop_address
	b.ne	1b
2:
	ret

	/* Fallback for unaligned start addresses */
.Lzeromem_dczva_fallback_entry:
	/*
	 * If the start address is already aligned to 16 bytes, skip this loop.
	 */
	tst	cursor, #0xf
	b.eq	.Lzeromem_dczva_final_16bytes_aligned

	/* Calculate the next address aligned to 16 bytes */
	orr	tmp1, cursor, #15
	add	tmp1, tmp1, #1
	/* If it overflows, fall back to byte per byte zeroing */
	cbz	tmp1, .Lzeromem_dczva_final_1byte_aligned
	/* If the next aligned address is after the stop address, fall back */
	cmp	tmp1, stop_address
	b.hs	.Lzeromem_dczva_final_1byte_aligned

	/* Fallback entry loop: zero byte per byte */
1:
	strb	wzr, [cursor], #1
	cmp	cursor, tmp1
	b.ne	1b

	b	.Lzeromem_dczva_final_16bytes_aligned

	.unreq	cursor
	/*
	 * length is already unreq'ed to reuse the register for another
	 * variable.
	 */
	.unreq	stop_address
	.unreq	block_size
	.unreq	block_mask
	.unreq	tmp1
	.unreq	tmp2
endfunc zeromem_dczva
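
/* --------------------------------------------------------------------------
 * Illustrative only (not part of the build): a C model of the memcpy16
 * function below, making the 16-byte alignment requirement and the tail
 * handling explicit. The name memcpy16_c is hypothetical.
 *
 *	void memcpy16_c(void *dest, const void *src, unsigned int length)
 *	{
 *		uint64_t *d = dest;
 *		const uint64_t *s = src;
 *
 *		assert((((uintptr_t)dest | (uintptr_t)src) & 0xf) == 0);
 *
 *		while (length >= 16) {                // 16 bytes at a time
 *			*d++ = *s++;
 *			*d++ = *s++;
 *			length -= 16;
 *		}
 *
 *		unsigned char *db = (unsigned char *)d;
 *		const unsigned char *sb = (const unsigned char *)s;
 *
 *		for (; length != 0; length--)         // remaining bytes
 *			*db++ = *sb++;
 *	}
 * --------------------------------------------------------------------------
 */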

/* --------------------------------------------------------------------------
 * void memcpy16(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas must not overlap.
 * Destination and source addresses must be 16-byte aligned.
 * --------------------------------------------------------------------------
 */
func memcpy16
#if ENABLE_ASSERTIONS
	orr	x3, x0, x1
	tst	x3, #0xf
	ASM_ASSERT(eq)
#endif
/* copy 16 bytes at a time */
m_loop16:
	cmp	x2, #16
	b.lo	m_loop1
	ldp	x3, x4, [x1], #16
	stp	x3, x4, [x0], #16
	sub	x2, x2, #16
	b	m_loop16
/* copy byte per byte */
m_loop1:
	cbz	x2, m_end
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	subs	x2, x2, #1
	b.ne	m_loop1
m_end:
	ret
endfunc memcpy16

/* ---------------------------------------------------------------------------
 * Disable the MMU at EL3
 * ---------------------------------------------------------------------------
 */

func disable_mmu_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu_el3:
	mrs	x0, sctlr_el3
	bic	x0, x0, x1
	msr	sctlr_el3, x0
	isb	/* ensure MMU is off */
	dsb	sy
	ret
endfunc disable_mmu_el3


func disable_mmu_icache_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu_el3
endfunc disable_mmu_icache_el3

/* ---------------------------------------------------------------------------
 * Disable the MMU at EL1
 * ---------------------------------------------------------------------------
 */

func disable_mmu_el1
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu_el1:
	mrs	x0, sctlr_el1
	bic	x0, x0, x1
	msr	sctlr_el1, x0
	isb	/* ensure MMU is off */
	dsb	sy
	ret
endfunc disable_mmu_el1


func disable_mmu_icache_el1
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu_el1
endfunc disable_mmu_icache_el1

/* ---------------------------------------------------------------------------
 * Enable the use of VFP at EL3
 * ---------------------------------------------------------------------------
 */
#if SUPPORT_VFP
func enable_vfp
	mrs	x0, cpacr_el1
	orr	x0, x0, #CPACR_VFP_BITS
	msr	cpacr_el1, x0
	mrs	x0, cptr_el3
	mov	x1, #AARCH64_CPTR_TFP
	bic	x0, x0, x1
	msr	cptr_el3, x0
	isb
	ret
endfunc enable_vfp
#endif
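
/* ---------------------------------------------------------------------------
 * Illustrative only (not part of the build): a C sketch of the GOT part of
 * the fixup performed by fixup_gdt_reloc below. "diff" is
 * Diff(S) = Current Address - Compiled Address, fixup_start/fixup_limit are
 * the x0/x1 arguments, and got_start/got_end stand for the
 * __GOT_START__/__GOT_END__ linker symbols; all of these names are
 * hypothetical.
 *
 *	uint64_t *entry;
 *
 *	for (entry = got_start; entry < got_end; entry++) {
 *		if (*entry >= fixup_start && *entry < fixup_limit)
 *			*entry += diff;     // new_addr = old_addr + Diff(S)
 *	}
 * ---------------------------------------------------------------------------
 */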

/* ---------------------------------------------------------------------------
 * Helper to fixup the Global Offset Table (GOT) and dynamic relocations
 * (.rela.dyn) at runtime.
 *
 * This function is meant to be used when the firmware is compiled with -fpie
 * and linked with -pie options. We rely on the linker script exporting
 * appropriate markers for the start and end of each section. For the GOT, we
 * expect __GOT_START__ and __GOT_END__. Similarly for .rela.dyn, we expect
 * __RELA_START__ and __RELA_END__.
 *
 * The function takes the limits of the memory to apply fixups to as
 * arguments (which are usually the limits of the relocatable BL image).
 * x0 - the start of the fixup region
 * x1 - the limit of the fixup region
 * These addresses have to be page (4KB) aligned.
 * ---------------------------------------------------------------------------
 */
func fixup_gdt_reloc
	mov	x6, x0
	mov	x7, x1

	/* Test if the limits are 4K aligned */
#if ENABLE_ASSERTIONS
	orr	x0, x0, x1
	tst	x0, #(PAGE_SIZE - 1)
	ASM_ASSERT(eq)
#endif
	/*
	 * Calculate the offset based on the return address in x30.
	 * Assume that this function is called within a page of the start of
	 * the fixup region.
	 */
	and	x2, x30, #~(PAGE_SIZE - 1)
	sub	x0, x2, x6	/* Diff(S) = Current Address - Compiled Address */

	adrp	x1, __GOT_START__
	add	x1, x1, :lo12:__GOT_START__
	adrp	x2, __GOT_END__
	add	x2, x2, :lo12:__GOT_END__

	/*
	 * The GOT is an array of 64-bit addresses which must be fixed up as
	 * new_addr = old_addr + Diff(S).
	 * new_addr is the address the binary is currently executing from
	 * and old_addr is the address at compile time.
	 */
1:
	ldr	x3, [x1]
	/* Skip adding the offset if the address is < lower limit */
	cmp	x3, x6
	b.lo	2f
	/* Skip adding the offset if the address is >= upper limit */
	cmp	x3, x7
	b.ge	2f
	add	x3, x3, x0
	str	x3, [x1]
2:
	add	x1, x1, #8
	cmp	x1, x2
	b.lo	1b

	/* Starting dynamic relocations. Use adrp/add to get RELA_START and END */
	adrp	x1, __RELA_START__
	add	x1, x1, :lo12:__RELA_START__
	adrp	x2, __RELA_END__
	add	x2, x2, :lo12:__RELA_END__
	/*
	 * According to the ELF-64 specification, the RELA data structure is
	 * as follows:
	 *	typedef struct
	 *	{
	 *		Elf64_Addr r_offset;
	 *		Elf64_Xword r_info;
	 *		Elf64_Sxword r_addend;
	 *	} Elf64_Rela;
	 *
	 * r_offset is the address of the reference.
	 * r_info is the symbol index and type of relocation (in this case
	 * 0x403, which corresponds to R_AARCH64_RELATIVE).
	 * r_addend is the constant part of the expression.
	 *
	 * The size of the Elf64_Rela structure is 24 bytes.
	 * (An illustrative C sketch of this walk follows the function.)
	 */
1:
	/* Assert that the relocation type is R_AARCH64_RELATIVE */
#if ENABLE_ASSERTIONS
	ldr	x3, [x1, #8]
	cmp	x3, #0x403
	ASM_ASSERT(eq)
#endif
	ldr	x3, [x1]	/* r_offset */
	add	x3, x0, x3
	ldr	x4, [x1, #16]	/* r_addend */

	/* Skip adding the offset if r_addend is < lower limit */
	cmp	x4, x6
	b.lo	2f
	/* Skip adding the offset if r_addend is >= upper limit */
	cmp	x4, x7
	b.ge	2f

	add	x4, x0, x4	/* Diff(S) + r_addend */
	str	x4, [x3]

2:	add	x1, x1, #24
	cmp	x1, x2
	b.lo	1b

	ret
endfunc fixup_gdt_reloc
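
/* ---------------------------------------------------------------------------
 * Illustrative only (not part of the build): a C sketch of the .rela.dyn walk
 * performed by fixup_gdt_reloc above. rela_start/rela_end stand for the
 * __RELA_START__/__RELA_END__ linker symbols, "diff" is Diff(S) and
 * fixup_start/fixup_limit are the x0/x1 arguments; all names, including the
 * rela_entry_t type, are hypothetical.
 *
 *	typedef struct {
 *		uint64_t r_offset;
 *		uint64_t r_info;
 *		uint64_t r_addend;    // Elf64_Sxword in the ELF-64 spec
 *	} rela_entry_t;
 *
 *	const rela_entry_t *rela;
 *
 *	for (rela = rela_start; rela < rela_end; rela++) {
 *		assert(rela->r_info == 0x403);        // R_AARCH64_RELATIVE
 *		if (rela->r_addend >= fixup_start &&
 *		    rela->r_addend < fixup_limit)
 *			*(uint64_t *)(rela->r_offset + diff) =
 *				rela->r_addend + diff;
 *	}
 * ---------------------------------------------------------------------------
 */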