/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <xlat_tables_defs.h>

	.globl	get_afflvl_shift
	.globl	mpidr_mask_lower_afflvls
	.globl	eret
	.globl	smc

	.globl	zero_normalmem
	.globl	zeromem
	.globl	zeromem16
	.globl	memcpy16

	.globl	disable_mmu_el1
	.globl	disable_mmu_el3
	.globl	disable_mmu_icache_el1
	.globl	disable_mmu_icache_el3

	.globl	fixup_gdt_reloc

#if SUPPORT_VFP
	.globl	enable_vfp
#endif

func get_afflvl_shift
	cmp	x0, #3
	cinc	x0, x0, eq
	mov	x1, #MPIDR_AFFLVL_SHIFT
	lsl	x0, x0, x1
	ret
endfunc get_afflvl_shift

func mpidr_mask_lower_afflvls
	cmp	x1, #3
	cinc	x1, x1, eq
	mov	x2, #MPIDR_AFFLVL_SHIFT
	lsl	x2, x1, x2
	lsr	x0, x0, x2
	lsl	x0, x0, x2
	ret
endfunc mpidr_mask_lower_afflvls


func eret
	eret
endfunc eret


func smc
	smc	#0
endfunc smc

/* -----------------------------------------------------------------------
 * void zero_normalmem(void *mem, unsigned int length);
 *
 * Initialise a region in normal memory to 0. This function complies with the
 * AAPCS and can be called from C code.
 *
 * NOTE: MMU must be enabled when using this function as it can only operate
 *       on normal memory. It is intended to be mainly used from C code when
 *       the MMU is usually enabled.
 * -----------------------------------------------------------------------
 */
.equ	zero_normalmem, zeromem_dczva

/* -----------------------------------------------------------------------
 * void zeromem(void *mem, unsigned int length);
 *
 * Initialise a region of device memory to 0. This function complies with the
 * AAPCS and can be called from C code.
 *
 * NOTE: When data caches and MMU are enabled, zero_normalmem can usually be
 *       used instead for faster zeroing.
 *
 * -----------------------------------------------------------------------
 */
func zeromem
	/* x2 is the address past the last zeroed address */
	add	x2, x0, x1
	/*
	 * Uses the fallback path that does not use the DC ZVA instruction and
	 * therefore does not need the MMU to be enabled
	 */
	b	.Lzeromem_dczva_fallback_entry
endfunc zeromem
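/* -----------------------------------------------------------------------
 * Illustrative C usage sketch (not part of this build): how C code might
 * choose between the two zeroing helpers above. Only the two prototypes
 * come from the comments in this file; the buffer and the wrapper
 * function are hypothetical.
 *
 *   #include <stddef.h>
 *
 *   void zero_normalmem(void *mem, unsigned int length);
 *   void zeromem(void *mem, unsigned int length);
 *
 *   static unsigned char scratch[256];   // hypothetical buffer
 *
 *   void clear_scratch(int mmu_and_dcache_enabled)
 *   {
 *       if (mmu_and_dcache_enabled)
 *           zero_normalmem(scratch, sizeof(scratch)); // fast DC ZVA path
 *       else
 *           zeromem(scratch, sizeof(scratch));        // no-MMU-safe path
 *   }
 * -----------------------------------------------------------------------
 */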
/* -----------------------------------------------------------------------
 * void zeromem_dczva(void *mem, unsigned int length);
 *
 * Fill a region of normal memory of size "length" in bytes with null bytes.
 * The MMU must be enabled and the memory must be of normal type. This is
 * because this function internally uses the DC ZVA instruction, which
 * generates an Alignment fault if used on any type of Device memory (see
 * section D3.4.9 of the ARMv8 ARM, issue k). When the MMU is disabled, all
 * memory behaves like Device-nGnRnE memory (see section D4.2.8), hence the
 * requirement on the MMU being enabled.
 * NOTE: The code assumes that the block size as defined in the DCZID_EL0
 *       register is at least 16 bytes.
 *
 * -----------------------------------------------------------------------
 */
func zeromem_dczva

	/*
	 * The function consists of a series of loops that zero memory one byte
	 * at a time, 16 bytes at a time or using the DC ZVA instruction to
	 * zero aligned blocks of bytes, whose size is assumed to be at least
	 * 16. In the case where the DC ZVA instruction cannot be used or if
	 * the first 16 bytes loop would overflow, there is a fallback path
	 * that does not use DC ZVA.
	 * Note: The fallback path is also used by the zeromem function that
	 *       branches to it directly.
	 *
	 *              +---------+   zeromem_dczva
	 *              |  entry  |
	 *              +----+----+
	 *                   |
	 *                   v
	 *              +---------+
	 *              | checks  |>o-------+ (If any check fails, fallback)
	 *              +----+----+         |
	 *                   |              |---------------+
	 *                   v              | Fallback path |
	 *            +------+------+       |---------------+
	 *            | 1 byte loop |       |
	 *            +------+------+ .Lzeromem_dczva_initial_1byte_aligned_end
	 *                   |              |
	 *                   v              |
	 *           +-------+-------+      |
	 *           | 16 bytes loop |      |
	 *           +-------+-------+      |
	 *                   |              |
	 *                   v              |
	 *            +------+------+ .Lzeromem_dczva_blocksize_aligned
	 *            | DC ZVA loop |       |
	 *            +------+------+       |
	 *      +--------+   |              |
	 *      |        |   |              |
	 *      |        v   v              |
	 *      |  +-------+-------+ .Lzeromem_dczva_final_16bytes_aligned
	 *      |  | 16 bytes loop |        |
	 *      |  +-------+-------+        |
	 *      |          |                |
	 *      |          v                |
	 *      |   +------+------+ .Lzeromem_dczva_final_1byte_aligned
	 *      |   | 1 byte loop |         |
	 *      |   +-------------+         |
	 *      |          |                |
	 *      |          v                |
	 *      |      +---+--+             |
	 *      |      | exit |             |
	 *      |      +------+             |
	 *      |                           |
	 *      |          +----------------+   +------------------+ zeromem
	 *      |          |   +----------------| zeromem function |
	 *      |          |   |                +------------------+
	 *      |          v   v
	 *      |   +------+---+--+ .Lzeromem_dczva_fallback_entry
	 *      |   | 1 byte loop |
	 *      |   +------+------+
	 *      |          |
	 *      +----------+
	 */

	/*
	 * Readable names for registers
	 *
	 * Registers x0, x1 and x2 are also set by zeromem which
	 * branches into the fallback path directly, so cursor, length and
	 * stop_address should not be retargeted to other registers.
	 */
	cursor       .req x0 /* Start address and then current address */
	length       .req x1 /* Length in bytes of the region to zero out */
	/* Reusing x1 as length is never used after block_mask is set */
	block_mask   .req x1 /* Bitmask of the block size read in DCZID_EL0 */
	stop_address .req x2 /* Address past the last zeroed byte */
	block_size   .req x3 /* Size of a block in bytes as read in DCZID_EL0 */
	tmp1         .req x4
	tmp2         .req x5

#if ENABLE_ASSERTIONS
	/*
	 * Check for the M bit (MMU enabled) of the current SCTLR_EL(1|3)
	 * register value and panic if the MMU is disabled.
	 */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
	mrs	tmp1, sctlr_el3
#else
	mrs	tmp1, sctlr_el1
#endif

	tst	tmp1, #SCTLR_M_BIT
	ASM_ASSERT(ne)
#endif /* ENABLE_ASSERTIONS */

	/* stop_address is the address past the last byte to zero */
	add	stop_address, cursor, length

	/*
	 * Get the block size; the BS field of dczid_el0 encodes
	 * log2(<block size in words>) (see encoding of the dczid_el0 reg)
	 */
	mrs	block_size, dczid_el0

	/*
	 * Select the 4 lowest bits and convert the extracted log2(<block size
	 * in words>) to <block size in bytes>
	 */
	ubfx	block_size, block_size, #0, #4
	mov	tmp2, #(1 << 2)
	lsl	block_size, tmp2, block_size

#if ENABLE_ASSERTIONS
	/*
	 * Assumes the block size is at least 16 bytes to avoid manual
	 * realignment of the cursor at the end of the DCZVA loop.
	 */
	cmp	block_size, #16
	ASM_ASSERT(hs)
#endif
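/* -----------------------------------------------------------------------
 * Illustrative C sketch (not part of this build) of the DCZID_EL0 decode
 * performed above. DCZID_EL0.BS (bits [3:0]) holds log2 of the block
 * size in 4-byte words, so the size in bytes is 4 << BS. The helper name
 * is hypothetical; obtaining the raw register value requires an MRS.
 *
 *   #include <stdint.h>
 *
 *   static inline uint64_t dczva_block_size_bytes(uint64_t dczid_el0)
 *   {
 *       uint64_t bs = dczid_el0 & 0xf;  // ubfx block_size, ..., #0, #4
 *       return 4ULL << bs;              // lsl block_size, #(1 << 2), bs
 *   }
 *
 * For example, BS = 4 gives the common 64-byte block; the assertion
 * above additionally requires the result to be at least 16 bytes.
 * -----------------------------------------------------------------------
 */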
	/*
	 * Not worth doing all the setup for a region less than a block; this
	 * also protects against zeroing a whole block when the area to zero
	 * is smaller than that. Also, as it is assumed that the block size is
	 * at least 16 bytes, this protects the initial aligning loops from
	 * trying to zero 16 bytes when length is less than 16.
	 */
	cmp	length, block_size
	b.lo	.Lzeromem_dczva_fallback_entry

	/*
	 * Calculate the bitmask of the block alignment. It will never
	 * underflow as the block size is between 4 bytes and 2kB.
	 * block_mask = block_size - 1
	 */
	sub	block_mask, block_size, #1

	/*
	 * The length alias should not be used after this point unless it is
	 * defined as a register other than block_mask's.
	 */
	.unreq	length

	/*
	 * If the start address is already aligned to the zero block size, go
	 * straight to the cache zeroing loop. This is safe because at this
	 * point, the length cannot be smaller than a block size.
	 */
	tst	cursor, block_mask
	b.eq	.Lzeromem_dczva_blocksize_aligned

	/*
	 * Calculate the first block-size-aligned address. It is assumed that
	 * the zero block size is at least 16 bytes. This address is the last
	 * address of this initial loop.
	 */
	orr	tmp1, cursor, block_mask
	add	tmp1, tmp1, #1

	/*
	 * If the addition overflows, skip the cache zeroing loops. This is
	 * quite unlikely however.
	 */
	cbz	tmp1, .Lzeromem_dczva_fallback_entry

	/*
	 * If the first block-size-aligned address is past the last address,
	 * fall back to the simpler code.
	 */
	cmp	tmp1, stop_address
	b.hi	.Lzeromem_dczva_fallback_entry

	/*
	 * If the start address is already aligned to 16 bytes, skip this loop.
	 * It is safe to do this because tmp1 (the stop address of the initial
	 * 16 bytes loop) will never be greater than the final stop address.
	 */
	tst	cursor, #0xf
	b.eq	.Lzeromem_dczva_initial_1byte_aligned_end

	/* Calculate the next address aligned to 16 bytes */
	orr	tmp2, cursor, #0xf
	add	tmp2, tmp2, #1
	/* If it overflows, fall back to the simple path (unlikely) */
	cbz	tmp2, .Lzeromem_dczva_fallback_entry
	/*
	 * The next aligned address cannot be after the stop address because
	 * the length cannot be smaller than 16 at this point.
	 */

	/* First loop: zero byte per byte */
1:
	strb	wzr, [cursor], #1
	cmp	cursor, tmp2
	b.ne	1b
.Lzeromem_dczva_initial_1byte_aligned_end:

	/*
	 * Second loop: we need to zero 16 bytes at a time from cursor to tmp1
	 * before being able to use the code that deals with block-size-aligned
	 * addresses.
	 */
	cmp	cursor, tmp1
	b.hs	2f
1:
	stp	xzr, xzr, [cursor], #16
	cmp	cursor, tmp1
	b.lo	1b
2:

	/*
	 * Third loop: zero a block at a time using the DC ZVA cache block
	 * zeroing instruction.
	 */
.Lzeromem_dczva_blocksize_aligned:
	/*
	 * Calculate the last block-size-aligned address. If the result equals
	 * the start address, the loop will exit immediately.
	 */
	bic	tmp1, stop_address, block_mask

	cmp	cursor, tmp1
	b.hs	2f
1:
	/* Zero the block containing the cursor */
	dc	zva, cursor
	/* Increment the cursor by the size of a block */
	add	cursor, cursor, block_size
	cmp	cursor, tmp1
	b.lo	1b
2:
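/* -----------------------------------------------------------------------
 * Illustrative C sketch (not part of this build) of the alignment
 * arithmetic used by the loops above; the helper names are hypothetical.
 * Note that (addr | mask) + 1 returns addr + block size when addr is
 * already aligned, which is why the code tests alignment (tst/b.eq)
 * before using it, and that the result wraps to 0 for addresses in the
 * last block of the address space, which is what the cbz checks catch.
 *
 *   #include <stdint.h>
 *
 *   // mask == block_size - 1, with block_size a power of two
 *   static inline uint64_t align_up(uint64_t addr, uint64_t mask)
 *   {
 *       return (addr | mask) + 1;  // orr tmp, cursor, mask; add tmp, tmp, #1
 *   }
 *
 *   static inline uint64_t align_down(uint64_t addr, uint64_t mask)
 *   {
 *       return addr & ~mask;       // bic tmp, stop_address, block_mask
 *   }
 * -----------------------------------------------------------------------
 */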
	/*
	 * Fourth loop: zero 16 bytes at a time and then byte per byte the
	 * remaining area
	 */
.Lzeromem_dczva_final_16bytes_aligned:
	/*
	 * Calculate the last 16-byte aligned address. It is assumed that the
	 * block size will never be smaller than 16 bytes so that the current
	 * cursor is aligned to at least a 16-byte boundary.
	 */
	bic	tmp1, stop_address, #15

	cmp	cursor, tmp1
	b.hs	2f
1:
	stp	xzr, xzr, [cursor], #16
	cmp	cursor, tmp1
	b.lo	1b
2:

	/* Fifth and final loop: zero byte per byte */
.Lzeromem_dczva_final_1byte_aligned:
	cmp	cursor, stop_address
	b.eq	2f
1:
	strb	wzr, [cursor], #1
	cmp	cursor, stop_address
	b.ne	1b
2:
	ret

	/* Fallback for unaligned start addresses */
.Lzeromem_dczva_fallback_entry:
	/*
	 * If the start address is already aligned to 16 bytes, skip this loop.
	 */
	tst	cursor, #0xf
	b.eq	.Lzeromem_dczva_final_16bytes_aligned

	/* Calculate the next address aligned to 16 bytes */
	orr	tmp1, cursor, #15
	add	tmp1, tmp1, #1
	/* If it overflows, fall back to byte per byte zeroing */
	cbz	tmp1, .Lzeromem_dczva_final_1byte_aligned
	/* If the next aligned address is after the stop address, fall back */
	cmp	tmp1, stop_address
	b.hs	.Lzeromem_dczva_final_1byte_aligned

	/* Fallback entry loop: zero byte per byte */
1:
	strb	wzr, [cursor], #1
	cmp	cursor, tmp1
	b.ne	1b

	b	.Lzeromem_dczva_final_16bytes_aligned

	.unreq	cursor
	/*
	 * length is already unreq'ed to reuse the register for another
	 * variable.
	 */
	.unreq	stop_address
	.unreq	block_size
	.unreq	block_mask
	.unreq	tmp1
	.unreq	tmp2
endfunc zeromem_dczva
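/* -----------------------------------------------------------------------
 * Illustrative C reference model (not part of this build) of the staged
 * zeroing implemented by zeromem_dczva, under the same assumptions (MMU
 * on, normal memory, DC ZVA block size >= 16 and a power of two).
 * dc_zva() is a hypothetical stand-in for the DC ZVA instruction, and
 * the overflow guards of the assembly are elided for readability.
 *
 *   #include <stddef.h>
 *   #include <stdint.h>
 *
 *   extern void dc_zva(uintptr_t addr);    // zeroes one block at addr
 *
 *   static void store16_zero(uintptr_t p)  // stp xzr, xzr, [p]
 *   {
 *       *(volatile uint64_t *)p = 0;
 *       *(volatile uint64_t *)(p + 8) = 0;
 *   }
 *
 *   void zeromem_dczva_model(uintptr_t p, size_t len, size_t blk)
 *   {
 *       uintptr_t stop = p + len;
 *       uintptr_t mask = blk - 1;
 *
 *       if (len < blk) {                   // too small: fallback path
 *           for (; (p & 0xf) && p < stop; p++)
 *               *(volatile uint8_t *)p = 0;
 *           goto tail;
 *       }
 *       for (; p & 0xf; p++)               // 1st loop: to 16-byte align
 *           *(volatile uint8_t *)p = 0;
 *       for (; p & mask; p += 16)          // 2nd loop: to block align
 *           store16_zero(p);
 *       for (; p < (stop & ~mask); p += blk)   // 3rd loop: DC ZVA
 *           dc_zva(p);
 *   tail:
 *       for (; p + 16 <= stop; p += 16)    // 4th loop: 16 bytes at a time
 *           store16_zero(p);
 *       for (; p < stop; p++)              // 5th loop: byte per byte
 *           *(volatile uint8_t *)p = 0;
 *   }
 * -----------------------------------------------------------------------
 */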
/* --------------------------------------------------------------------------
 * void memcpy16(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 16-byte aligned.
 * --------------------------------------------------------------------------
 */
func memcpy16
#if ENABLE_ASSERTIONS
	orr	x3, x0, x1
	tst	x3, #0xf
	ASM_ASSERT(eq)
#endif
/* copy 16 bytes at a time */
m_loop16:
	cmp	x2, #16
	b.lo	m_loop1
	ldp	x3, x4, [x1], #16
	stp	x3, x4, [x0], #16
	sub	x2, x2, #16
	b	m_loop16
/* copy byte per byte */
m_loop1:
	cbz	x2, m_end
	ldrb	w3, [x1], #1
	strb	w3, [x0], #1
	subs	x2, x2, #1
	b.ne	m_loop1
m_end:
	ret
endfunc memcpy16
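/* --------------------------------------------------------------------------
 * Illustrative C usage sketch (not part of this build): calling memcpy16
 * from C. The 16-byte alignment of both buffers is the caller's
 * responsibility (asserted above when ENABLE_ASSERTIONS=1); the buffer
 * names and the wrapper are hypothetical.
 *
 *   void memcpy16(void *dest, const void *src, unsigned int length);
 *
 *   static char src_buf[64] __attribute__((aligned(16)));
 *   static char dst_buf[64] __attribute__((aligned(16)));
 *
 *   void copy_example(void)
 *   {
 *       memcpy16(dst_buf, src_buf, sizeof(src_buf));
 *   }
 *
 * Note that length itself need not be a multiple of 16: the trailing
 * bytes are copied by the byte-per-byte loop.
 * --------------------------------------------------------------------------
 */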
/* ---------------------------------------------------------------------------
 * Disable the MMU at EL3
 * ---------------------------------------------------------------------------
 */

func disable_mmu_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu_el3:
	mrs	x0, sctlr_el3
	bic	x0, x0, x1
	msr	sctlr_el3, x0
	isb	/* ensure MMU is off */
	dsb	sy
	ret
endfunc disable_mmu_el3


func disable_mmu_icache_el3
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu_el3
endfunc disable_mmu_icache_el3

/* ---------------------------------------------------------------------------
 * Disable the MMU at EL1
 * ---------------------------------------------------------------------------
 */

func disable_mmu_el1
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu_el1:
	mrs	x0, sctlr_el1
	bic	x0, x0, x1
	msr	sctlr_el1, x0
	isb	/* ensure MMU is off */
	dsb	sy
	ret
endfunc disable_mmu_el1


func disable_mmu_icache_el1
	mov	x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
	b	do_disable_mmu_el1
endfunc disable_mmu_icache_el1

/* ---------------------------------------------------------------------------
 * Enable the use of VFP at EL3
 * ---------------------------------------------------------------------------
 */
#if SUPPORT_VFP
func enable_vfp
	mrs	x0, cpacr_el1
	orr	x0, x0, #CPACR_VFP_BITS
	msr	cpacr_el1, x0
	mrs	x0, cptr_el3
	mov	x1, #AARCH64_CPTR_TFP
	bic	x0, x0, x1
	msr	cptr_el3, x0
	isb
	ret
endfunc enable_vfp
#endif

/* ---------------------------------------------------------------------------
 * Helper to fix up the Global Offset Table (GOT) and dynamic relocations
 * (.rela.dyn) at runtime.
 *
 * This function is meant to be used when the firmware is compiled with -fpie
 * and linked with -pie options. We rely on the linker script exporting
 * appropriate markers for the start and end of the sections. For the GOT, we
 * expect __GOT_START__ and __GOT_END__. Similarly for .rela.dyn, we expect
 * __RELA_START__ and __RELA_END__.
 *
 * The function takes the limits of the memory to apply fixups to as
 * arguments (which are usually the limits of the relocatable BL image):
 * x0 - the start of the fixup region
 * x1 - the limit of the fixup region
 * These addresses have to be page (4KB) aligned.
 * ---------------------------------------------------------------------------
 */
func fixup_gdt_reloc
	mov	x6, x0
	mov	x7, x1

	/* Test if the limits are 4K aligned */
#if ENABLE_ASSERTIONS
	orr	x0, x0, x1
	tst	x0, #(PAGE_SIZE - 1)
	ASM_ASSERT(eq)
#endif
	/*
	 * Calculate the offset based on the return address in x30.
	 * Assume that this function is called within a page of the start of
	 * the fixup region.
	 */
	and	x2, x30, #~(PAGE_SIZE - 1)
	sub	x0, x2, x6	/* Diff(S) = Current Address - Compiled Address */

	adrp	x1, __GOT_START__
	add	x1, x1, :lo12:__GOT_START__
	adrp	x2, __GOT_END__
	add	x2, x2, :lo12:__GOT_END__

	/*
	 * The GOT is an array of 64-bit addresses which must be fixed up as
	 * new_addr = old_addr + Diff(S), where new_addr is the address the
	 * binary is currently executing from and old_addr is the address at
	 * compile time.
	 */
1:
	ldr	x3, [x1]
	/* Skip adding the offset if the address is < lower limit */
	cmp	x3, x6
	b.lo	2f
	/* Skip adding the offset if the address is >= upper limit */
	cmp	x3, x7
	b.ge	2f
	add	x3, x3, x0
	str	x3, [x1]
2:
	add	x1, x1, #8
	cmp	x1, x2
	b.lo	1b

	/* Starting dynamic relocations. Use adrp/add to get RELA_START and END */
	adrp	x1, __RELA_START__
	add	x1, x1, :lo12:__RELA_START__
	adrp	x2, __RELA_END__
	add	x2, x2, :lo12:__RELA_END__
	/*
	 * According to the ELF-64 specification, the RELA data structure is
	 * as follows:
	 *	typedef struct
	 *	{
	 *		Elf64_Addr r_offset;
	 *		Elf64_Xword r_info;
	 *		Elf64_Sxword r_addend;
	 *	} Elf64_Rela;
	 *
	 * r_offset is the address of the reference.
	 * r_info is the symbol index and type of relocation (in this case
	 * 0x403, which corresponds to R_AARCH64_RELATIVE).
	 * r_addend is the constant part of the expression.
	 *
	 * The size of the Elf64_Rela structure is 24 bytes.
	 */
1:
	/* Assert that the relocation type is R_AARCH64_RELATIVE */
#if ENABLE_ASSERTIONS
	ldr	x3, [x1, #8]
	cmp	x3, #0x403
	ASM_ASSERT(eq)
#endif
	ldr	x3, [x1]	/* r_offset */
	add	x3, x0, x3
	ldr	x4, [x1, #16]	/* r_addend */

	/* Skip adding the offset if r_addend is < lower limit */
	cmp	x4, x6
	b.lo	2f
	/* Skip adding the offset if r_addend is >= upper limit */
	cmp	x4, x7
	b.ge	2f

	add	x4, x0, x4	/* Diff(S) + r_addend */
	str	x4, [x3]

2:	add	x1, x1, #24
	cmp	x1, x2
	b.lo	1b

	ret
endfunc fixup_gdt_reloc
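/* ---------------------------------------------------------------------------
 * Illustrative C sketch (not part of this build) of the fixup performed
 * above, assuming the same linker-exported markers. diff corresponds to
 * Diff(S) as derived from x30 above, and lo/hi to x6/x7. This is a
 * readable model of the two loops, not the implementation; the function
 * name is hypothetical.
 *
 *   #include <stdint.h>
 *
 *   typedef struct {
 *       uint64_t r_offset;   // Elf64_Addr
 *       uint64_t r_info;     // Elf64_Xword, 0x403 = R_AARCH64_RELATIVE
 *       int64_t  r_addend;   // Elf64_Sxword
 *   } Elf64_Rela;
 *
 *   extern uint64_t __GOT_START__[], __GOT_END__[];
 *   extern Elf64_Rela __RELA_START__[], __RELA_END__[];
 *
 *   static void fixup_model(uint64_t diff, uint64_t lo, uint64_t hi)
 *   {
 *       for (uint64_t *g = __GOT_START__; g < __GOT_END__; g++) {
 *           if (*g >= lo && *g < hi)    // only fix in-image addresses
 *               *g += diff;
 *       }
 *       for (Elf64_Rela *r = __RELA_START__; r < __RELA_END__; r++) {
 *           uint64_t addend = (uint64_t)r->r_addend;
 *           if (addend >= lo && addend < hi)
 *               *(uint64_t *)(uintptr_t)(r->r_offset + diff) =
 *                   addend + diff;
 *       }
 *   }
 * ---------------------------------------------------------------------------
 */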