/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, Linaro Limited
 * Copyright (c) 2021-2023, Arm Limited
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

.arch_extension sec

.section .data
.balign 4

#ifdef CFG_BOOT_SYNC_CPU
.equ SEM_CPU_READY, 1
#endif

#ifdef CFG_PL310
.section .rodata.init
panic_boot_file:
	.asciz __FILE__

/*
 * void __assert_flat_mapped_range(uint32_t vaddr, uint32_t line)
 */
LOCAL_FUNC __assert_flat_mapped_range , :
UNWIND(	.cantunwind)
	push	{ r4-r6, lr }
	mov	r4, r0
	mov	r5, r1
	bl	cpu_mmu_enabled
	cmp	r0, #0
	beq	1f
	mov	r0, r4
	bl	virt_to_phys
	cmp	r0, r4
	beq	1f
	/*
	 * This must match the generic panic routine:
	 * __do_panic(__FILE__, __LINE__, __func__, str)
	 */
	ldr	r0, =panic_boot_file
	mov	r1, r5
	mov	r2, #0
	mov	r3, #0
	bl	__do_panic
	b	.		/* should NOT return */
1:	pop	{ r4-r6, pc }
END_FUNC __assert_flat_mapped_range

	/* Panic if the MMU is enabled and vaddr != paddr (clobbers lr) */
	.macro assert_flat_mapped_range va, line
	ldr	r0, \va
	ldr	r1, =\line
	bl	__assert_flat_mapped_range
	.endm
#endif /* CFG_PL310 */

WEAK_FUNC plat_cpu_reset_early , :
	bx	lr
END_FUNC plat_cpu_reset_early
DECLARE_KEEP_PAGER plat_cpu_reset_early

	.section .identity_map, "ax"
	.align 5
LOCAL_FUNC reset_vect_table , : , .identity_map
	b	.
	b	.	/* Undef */
	b	.	/* Syscall */
	b	.	/* Prefetch abort */
	b	.	/* Data abort */
	b	.	/* Reserved */
	b	.	/* IRQ */
	b	.	/* FIQ */
END_FUNC reset_vect_table

	.macro cpu_is_ready
#ifdef CFG_BOOT_SYNC_CPU
	bl	__get_core_pos
	lsl	r0, r0, #2
	ldr	r1, =sem_cpu_sync
	ldr	r2, =SEM_CPU_READY
	str	r2, [r1, r0]
	dsb
	sev
#endif
	.endm

	.macro wait_primary
#ifdef CFG_BOOT_SYNC_CPU
	ldr	r0, =sem_cpu_sync
	mov	r2, #SEM_CPU_READY
	sev
1:
	ldr	r1, [r0]
	cmp	r1, r2
	wfene
	bne	1b
#endif
	.endm

	.macro wait_secondary
#ifdef CFG_BOOT_SYNC_CPU
	ldr	r0, =sem_cpu_sync
	mov	r3, #CFG_TEE_CORE_NB_CORE
	mov	r2, #SEM_CPU_READY
	sev
1:
	subs	r3, r3, #1
	beq	3f
	add	r0, r0, #4
2:
	ldr	r1, [r0]
	cmp	r1, r2
	wfene
	bne	2b
	b	1b
3:
#endif
	.endm
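
	/*
	 * For reference, a rough C sketch of the synchronization protocol
	 * implemented by the three macros above (sem_cpu_sync is assumed
	 * to be a uint32_t[CFG_TEE_CORE_NB_CORE] array, see the
	 * sem_cpu_sync_start/end markers further down):
	 *
	 *	// cpu_is_ready: flag this CPU as up
	 *	sem_cpu_sync[__get_core_pos()] = SEM_CPU_READY;
	 *	dsb();
	 *	sev();
	 *
	 *	// wait_primary: secondaries wait for the primary (core 0)
	 *	while (sem_cpu_sync[0] != SEM_CPU_READY)
	 *		wfe();
	 *
	 *	// wait_secondary: the primary waits for all other cores
	 *	for (size_t n = 1; n < CFG_TEE_CORE_NB_CORE; n++)
	 *		while (sem_cpu_sync[n] != SEM_CPU_READY)
	 *			wfe();
	 */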

	/*
	 * set_sctlr : Setup some core configuration in CP15 SCTLR
	 *
	 * Setup required by current implementation of the OP-TEE core:
	 * - Disable data and instruction cache.
	 * - MMU is expected to be off and exceptions are trapped in ARM state.
	 * - Enable or disable alignment checks upon platform configuration.
	 * - Optionally enable write-implies-execute-never.
	 * - Optionally enable round-robin strategy for cache replacement.
	 *
	 * Clobbers r0.
	 */
	.macro set_sctlr
	read_sctlr r0
	bic	r0, r0, #(SCTLR_M | SCTLR_C)
	bic	r0, r0, #SCTLR_I
	bic	r0, r0, #SCTLR_TE
	orr	r0, r0, #SCTLR_SPAN
#if defined(CFG_SCTLR_ALIGNMENT_CHECK)
	orr	r0, r0, #SCTLR_A
#else
	bic	r0, r0, #SCTLR_A
#endif
#if defined(CFG_HWSUPP_MEM_PERM_WXN) && defined(CFG_CORE_RWDATA_NOEXEC)
	orr	r0, r0, #(SCTLR_WXN | SCTLR_UWXN)
#endif
#if defined(CFG_ENABLE_SCTLR_RR)
	orr	r0, r0, #SCTLR_RR
#endif
	write_sctlr r0
	.endm

#if defined(CFG_CORE_SEL1_SPMC) && defined(CFG_WITH_ARM_TRUSTED_FW)
	/*
	 * With OP-TEE as SPMC at S-EL1 the SPMD (SPD_spmd) in TF-A passes
	 * the DTB in r0, the pageable part in r1, and the rest of the
	 * registers are unused.
	 *
	 * Save boot arguments passed
	 * entry r0, saved r6: device tree address
	 * entry r1, saved r4: pageable part address
	 * saved r5, r7: zero
	 */
	.macro bootargs_entry
	mov	r6, r0
	mov	r4, r1
	mov	r5, #0
	mov	r7, #0
	.endm
#else
	/*
	 * Save boot arguments
	 * entry r0, saved r4: pageable part address
	 * entry r1, saved r7: (ARMv7 standard bootarg #1)
	 * entry r2, saved r6: device tree address (ARMv7 standard bootarg #2)
	 * entry lr, saved r5: non-secure entry address (ARMv7 bootarg #0)
	 */
	.macro bootargs_entry
#if defined(CFG_NS_ENTRY_ADDR)
	ldr	r5, =CFG_NS_ENTRY_ADDR
#else
	mov	r5, lr
#endif
#if defined(CFG_PAGEABLE_ADDR)
	ldr	r4, =CFG_PAGEABLE_ADDR
#else
	mov	r4, r0
#endif
#if defined(CFG_DT_ADDR)
	ldr	r6, =CFG_DT_ADDR
#else
	mov	r6, r2
#endif
	mov	r7, r1
	.endm
#endif

	.macro maybe_init_spectre_workaround
#if !defined(CFG_WITH_ARM_TRUSTED_FW) && \
    (defined(CFG_CORE_WORKAROUND_SPECTRE_BP) || \
     defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC))
	read_midr r0
	ubfx	r1, r0, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
	cmp	r1, #MIDR_IMPLEMENTER_ARM
	bne	1f
	ubfx	r1, r0, #MIDR_PRIMARY_PART_NUM_SHIFT, \
		#MIDR_PRIMARY_PART_NUM_WIDTH

	movw	r2, #CORTEX_A8_PART_NUM
	cmp	r1, r2
	moveq	r2, #ACTLR_CA8_ENABLE_INVALIDATE_BTB
	beq	2f

	movw	r2, #CORTEX_A15_PART_NUM
	cmp	r1, r2
	moveq	r2, #ACTLR_CA15_ENABLE_INVALIDATE_BTB
	bne	1f	/* Skip it for all other CPUs */
2:
	read_actlr r0
	orr	r0, r0, r2
	write_actlr r0
	isb
1:
#endif
	.endm
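
	/*
	 * Roughly what the macro above does, as a C sketch. The helpers
	 * midr_implementer() and midr_part_num() are illustrative only;
	 * the MIDR_*, CORTEX_*_PART_NUM and ACTLR_* constants are the
	 * ones from arm.h used above:
	 *
	 *	uint32_t midr = read_midr();
	 *	uint32_t bit = 0;
	 *
	 *	if (midr_implementer(midr) == MIDR_IMPLEMENTER_ARM) {
	 *		if (midr_part_num(midr) == CORTEX_A8_PART_NUM)
	 *			bit = ACTLR_CA8_ENABLE_INVALIDATE_BTB;
	 *		else if (midr_part_num(midr) == CORTEX_A15_PART_NUM)
	 *			bit = ACTLR_CA15_ENABLE_INVALIDATE_BTB;
	 *		if (bit) {
	 *			write_actlr(read_actlr() | bit);
	 *			isb();
	 *		}
	 *	}
	 */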

FUNC _start , :
UNWIND(	.cantunwind)

	bootargs_entry

	/*
	 * The 32-bit entry point is expected to be executed in Supervisor
	 * mode, but some bootloaders may enter in either Supervisor or
	 * Monitor mode.
	 */
	cps	#CPSR_MODE_SVC

	/* Early ARM secure MP specific configuration */
	bl	plat_cpu_reset_early
	maybe_init_spectre_workaround

	set_sctlr
	isb

	ldr	r0, =reset_vect_table
	write_vbar r0

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	b	reset_primary
#else
	bl	__get_core_pos
	cmp	r0, #0
	beq	reset_primary
	b	reset_secondary
#endif
END_FUNC _start
DECLARE_KEEP_INIT _start

	/*
	 * Setup sp to point to the top of the tmp stack for the current CPU:
	 * sp is assigned:
	 *   stack_tmp + (cpu_id + 1) * stack_tmp_stride - STACK_TMP_GUARD
	 */
	.macro set_sp
	bl	__get_core_pos
	cmp	r0, #CFG_TEE_CORE_NB_CORE
	/* Unsupported CPU, park it before it breaks something */
	bge	unhandled_cpu
	add	r0, r0, #1

	/* r2 = stack_tmp - STACK_TMP_GUARD */
	adr	r3, stack_tmp_rel
	ldr	r2, [r3]
	add	r2, r2, r3

	/*
	 * stack_tmp_stride and stack_tmp_stride_rel are the
	 * equivalent of:
	 *   extern const u32 stack_tmp_stride;
	 *   u32 stack_tmp_stride_rel = (u32)&stack_tmp_stride -
	 *				(u32)&stack_tmp_stride_rel;
	 *
	 * To load the value of stack_tmp_stride we do the equivalent
	 * of:
	 *   *(u32 *)(stack_tmp_stride_rel + (u32)&stack_tmp_stride_rel)
	 */
	adr	r3, stack_tmp_stride_rel
	ldr	r1, [r3]
	ldr	r1, [r1, r3]

	/*
	 * r0 is core pos + 1
	 * r1 is value of stack_tmp_stride
	 * r2 is value of stack_tmp - STACK_TMP_GUARD
	 */
	mul	r1, r0, r1
	add	sp, r1, r2
	.endm
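
	/*
	 * The computation above, as a rough C sketch. stack_tmp,
	 * stack_tmp_stride and STACK_TMP_GUARD are defined elsewhere in
	 * the core; the PC-relative loads via stack_tmp_rel and
	 * stack_tmp_stride_rel only make the macro position-independent
	 * so it can run before the MMU is enabled:
	 *
	 *	size_t pos = __get_core_pos();
	 *
	 *	if (pos >= CFG_TEE_CORE_NB_CORE)
	 *		unhandled_cpu();	// park the CPU
	 *	sp = (vaddr_t)stack_tmp + (pos + 1) * stack_tmp_stride -
	 *	     STACK_TMP_GUARD;
	 */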

	/*
	 * Cache maintenance during entry: handle outer cache.
	 * End address is exclusive: first byte not to be changed.
	 * Note however arm_clX_inv/cleanbyva operate on full cache lines.
	 *
	 * Use an ANSI #define to capture the source line number for the
	 * PL310 assertion.
	 */
	.macro __inval_cache_vrange vbase, vend, line
#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
	assert_flat_mapped_range (\vbase), (\line)
	bl	pl310_base
	ldr	r1, \vbase
	ldr	r2, \vend
	bl	arm_cl2_invbypa
#endif
	ldr	r0, \vbase
	ldr	r1, \vend
	sub	r1, r1, r0
	bl	dcache_inv_range
	.endm

	.macro __flush_cache_vrange vbase, vend, line
#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
	assert_flat_mapped_range (\vbase), (\line)
	ldr	r0, \vbase
	ldr	r1, \vend
	sub	r1, r1, r0
	bl	dcache_clean_range
	bl	pl310_base
	ldr	r1, \vbase
	ldr	r2, \vend
	bl	arm_cl2_cleaninvbypa
#endif
	ldr	r0, \vbase
	ldr	r1, \vend
	sub	r1, r1, r0
	bl	dcache_cleaninv_range
	.endm

#define inval_cache_vrange(vbase, vend) \
		__inval_cache_vrange vbase, vend, __LINE__

#define flush_cache_vrange(vbase, vend) \
		__flush_cache_vrange vbase, vend, __LINE__

#ifdef CFG_BOOT_SYNC_CPU
#define flush_cpu_semaphores \
		flush_cache_vrange(sem_cpu_sync_start, sem_cpu_sync_end)
#else
#define flush_cpu_semaphores
#endif

LOCAL_FUNC reset_primary , : , .identity_map
UNWIND(	.cantunwind)

	/* Preserve r4-r7: bootargs */

#ifdef CFG_WITH_PAGER
	/*
	 * Move init code into correct location and move hashes to a
	 * temporary safe location until the heap is initialized.
	 *
	 * The binary is built as:
	 * [Pager code, rodata and data] : In correct location
	 * [Init code and rodata] : Should be copied to __init_start
	 * [struct boot_embdata + data] : Should be saved before
	 *	initializing pager, the first uint32_t tells the length
	 *	of the data
	 */
	ldr	r0, =__init_start	/* dst */
	ldr	r1, =__data_end		/* src */
	ldr	r2, =__init_end
	sub	r2, r2, r0		/* init len */
	ldr	r12, [r1, r2]		/* length of hashes etc */
	add	r2, r2, r12		/* length of init and hashes etc */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	r0, r0, r2		/* __init_start + len */
	add	r1, r1, r2		/* __data_end + len */
	str	r0, cached_mem_end
	ldr	r2, =__init_start
copy_init:
	ldmdb	r1!, {r3, r8-r12}
	stmdb	r0!, {r3, r8-r12}
	cmp	r0, r2
	bgt	copy_init
#else
	/*
	 * The binary is built as:
	 * [Core, rodata and data] : In correct location
	 * [struct boot_embdata + data] : Should be moved to __end, the
	 *	first uint32_t tells the length of the struct + data
	 */
	ldr	r0, =__end		/* dst */
	ldr	r1, =__data_end		/* src */
	ldr	r2, [r1]		/* struct boot_embdata::total_len */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	r0, r0, r2
	add	r1, r1, r2
	str	r0, cached_mem_end
	ldr	r2, =__end

copy_init:
	ldmdb	r1!, {r3, r8-r12}
	stmdb	r0!, {r3, r8-r12}
	cmp	r0, r2
	bgt	copy_init
#endif
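
	/*
	 * The loop above is in effect a memmove() done 24 bytes at a
	 * time (six registers per ldmdb/stmdb). Without the pager it
	 * corresponds roughly to this C sketch (the embedded data starts
	 * with its total length, as noted above):
	 *
	 *	uint32_t total_len = *(uint32_t *)__data_end;
	 *
	 *	memmove(__end, __data_end, total_len);
	 *	cached_mem_end = (vaddr_t)__end + total_len;
	 */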

	/*
	 * Clear .bss, this code obviously depends on the linker keeping
	 * the start/end of .bss at least 8 byte aligned.
	 */
	ldr	r0, =__bss_start
	ldr	r1, =__bss_end
	mov	r2, #0
	mov	r3, #0
clear_bss:
	stmia	r0!, {r2, r3}
	cmp	r0, r1
	bls	clear_bss

#ifdef CFG_NS_VIRTUALIZATION
	/*
	 * Clear .nex_bss, this code obviously depends on the linker
	 * keeping the start/end of .nex_bss at least 8 byte aligned.
	 */
	ldr	r0, =__nex_bss_start
	ldr	r1, =__nex_bss_end
	mov	r2, #0
	mov	r3, #0
clear_nex_bss:
	stmia	r0!, {r2, r3}
	cmp	r0, r1
	bls	clear_nex_bss
#endif

#ifdef CFG_CORE_SANITIZE_KADDRESS
	/* First initialize the entire shadow area with no access */
	ldr	r0, =__asan_shadow_start	/* start */
	ldr	r1, =__asan_shadow_end		/* limit */
	mov	r2, #ASAN_DATA_RED_ZONE
shadow_no_access:
	str	r2, [r0], #4
	cmp	r0, r1
	bls	shadow_no_access

	/* Mark the entire stack area as OK */
	ldr	r2, =CFG_ASAN_SHADOW_OFFSET
	ldr	r0, =__nozi_stack_start		/* start */
	lsr	r0, r0, #ASAN_BLOCK_SHIFT
	add	r0, r0, r2
	ldr	r1, =__nozi_stack_end		/* limit */
	lsr	r1, r1, #ASAN_BLOCK_SHIFT
	add	r1, r1, r2
	mov	r2, #0
shadow_stack_access_ok:
	strb	r2, [r0], #1
	cmp	r0, r1
	bls	shadow_stack_access_ok
#endif

	set_sp

	bl	thread_init_thread_core_local

	/* Complete ARM secure MP common configuration */
	bl	plat_primary_init_early

	/* Enable console */
	bl	console_init

#ifdef CFG_PL310
	bl	pl310_base
	bl	arm_cl2_config
#endif

	/*
	 * Invalidate dcache for all memory used during initialization to
	 * avoid nasty surprises when the cache is turned on. We must not
	 * invalidate memory not used by OP-TEE since we may invalidate
	 * entries used by, for instance, ARM Trusted Firmware.
	 */
	inval_cache_vrange(cached_mem_start, cached_mem_end)

#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
	/* Enable PL310 if not yet enabled */
	bl	pl310_base
	bl	arm_cl2_enable
#endif

#ifdef CFG_CORE_ASLR
	mov	r0, r6
	bl	get_aslr_seed
#else
	mov	r0, #0
#endif

	ldr	r1, =boot_mmu_config
	bl	core_init_mmu_map

#ifdef CFG_CORE_ASLR
	/*
	 * Process relocation information to update with the virtual map
	 * offset. We're doing this now, before the MMU is enabled, since
	 * some of the memory will become write protected.
	 */
	ldr	r0, =boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_MAP_OFFSET]
	/*
	 * Update the cached_mem_end address with the load offset since
	 * it was calculated before relocation.
	 */
	ldr	r2, cached_mem_end
	add	r2, r2, r0
	str	r2, cached_mem_end

	bl	relocate
#endif

	bl	__get_core_pos
	bl	enable_mmu
#ifdef CFG_CORE_ASLR
	/*
	 * Reinitialize console, since register_serial_console() has
	 * previously registered a PA and with ASLR the VA is different
	 * from the PA.
	 */
	bl	console_init
#endif

#ifdef CFG_NS_VIRTUALIZATION
	/*
	 * Initialize partition tables for each partition to
	 * default_partition which has now been relocated to a different VA.
	 */
	bl	core_mmu_set_default_prtn_tbl
#endif

	mov	r0, r4		/* pageable part address */
	mov	r1, r5		/* ns-entry address */
	bl	boot_init_primary_early
#ifndef CFG_NS_VIRTUALIZATION
	mov	r9, sp
	ldr	r0, =threads
	ldr	r0, [r0, #THREAD_CTX_STACK_VA_END]
	mov	sp, r0
	bl	thread_get_core_local
	mov	r8, r0
	mov	r0, #0
	str	r0, [r8, #THREAD_CORE_LOCAL_FLAGS]
#endif
	mov	r0, r6		/* DT address */
	mov	r1, #0		/* unused */
	bl	boot_init_primary_late
#ifndef CFG_NS_VIRTUALIZATION
	mov	r0, #THREAD_CLF_TMP
	str	r0, [r8, #THREAD_CORE_LOCAL_FLAGS]
	mov	sp, r9
#endif

#ifdef _CFG_CORE_STACK_PROTECTOR
	/* Update stack canary value */
	bl	plat_get_random_stack_canary
	ldr	r1, =__stack_chk_guard
	str	r0, [r1]
#endif

	/*
	 * In case we've touched memory that secondary CPUs will use before
	 * they have turned on their D-cache, clean and invalidate the
	 * D-cache before exiting to normal world.
	 */
	flush_cache_vrange(cached_mem_start, cached_mem_end)

	/* Release secondary boot cores and sync with them */
	cpu_is_ready
	flush_cpu_semaphores
	wait_secondary

#ifdef CFG_PL310_LOCKED
#ifdef CFG_PL310_SIP_PROTOCOL
#error "CFG_PL310_LOCKED must not be defined when CFG_PL310_SIP_PROTOCOL=y"
#endif
	/* Lock/invalidate all lines: pl310 behaves as if disabled */
	bl	pl310_base
	bl	arm_cl2_lockallways
	bl	pl310_base
	bl	arm_cl2_cleaninvbyway
#endif

	/*
	 * Clear the current thread id now to allow the thread to be reused
	 * on the next entry. Matches the thread_init_boot_thread() in
	 * boot.c.
	 */
#ifndef CFG_NS_VIRTUALIZATION
	bl	thread_clr_boot_thread
#endif

#ifdef CFG_CORE_FFA
	ldr	r0, =cpu_on_handler
	/*
	 * Compensate for the virtual map offset since cpu_on_handler() is
	 * called with the MMU off.
	 */
	ldr	r1, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
	sub	r0, r0, r1
	bl	thread_spmc_register_secondary_ep
	b	thread_ffa_msg_wait
#else /* CFG_CORE_FFA */

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	ldr	r0, =boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_MAP_OFFSET]
	ldr	r1, =thread_vector_table
	/* Pass the vector address returned from main_init */
	sub	r1, r1, r0
#else
	/* Relay standard bootarg #1 and #2 to the non-secure entry */
	mov	r4, #0
	mov	r3, r6		/* std bootarg #2 for register R2 */
	mov	r2, r7		/* std bootarg #1 for register R1 */
	mov	r1, #0
#endif /* CFG_WITH_ARM_TRUSTED_FW */

	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
#endif /* CFG_CORE_FFA */
END_FUNC reset_primary

#ifdef CFG_BOOT_SYNC_CPU
LOCAL_DATA sem_cpu_sync_start , :
	.word	sem_cpu_sync
END_DATA sem_cpu_sync_start

LOCAL_DATA sem_cpu_sync_end , :
	.word	sem_cpu_sync + (CFG_TEE_CORE_NB_CORE << 2)
END_DATA sem_cpu_sync_end
#endif

LOCAL_DATA cached_mem_start , :
	.word	__text_start
END_DATA cached_mem_start

LOCAL_DATA cached_mem_end , :
	.skip	4
END_DATA cached_mem_end

LOCAL_FUNC unhandled_cpu , :
	wfi
	b	unhandled_cpu
END_FUNC unhandled_cpu

#ifdef CFG_CORE_ASLR
LOCAL_FUNC relocate , :
	push	{r4-r5}
	/* r0 holds the load offset */
#ifdef CFG_WITH_PAGER
	ldr	r12, =__init_end
#else
	ldr	r12, =__end
#endif
	ldr	r2, [r12, #BOOT_EMBDATA_RELOC_OFFSET]
	ldr	r3, [r12, #BOOT_EMBDATA_RELOC_LEN]

	mov_imm	r1, TEE_LOAD_ADDR
	add	r2, r2, r12	/* start of relocations */
	add	r3, r3, r2	/* end of relocations */

	/*
	 * Relocations are not formatted as Rel32, instead they are in a
	 * compressed format created by get_reloc_bin() in
	 * scripts/gen_tee_bin.py.
	 *
	 * All the R_ARM_RELATIVE relocations are translated into a list
	 * of 32-bit offsets from TEE_LOAD_ADDR. At each such offset there
	 * is a 32-bit value which is increased with the load offset.
	 */

#ifdef CFG_WITH_PAGER
	/*
	 * With the pager enabled we can only relocate the pager and init
	 * parts, the rest has to be done when a page is populated.
	 */
	sub	r12, r12, r1
#endif

	b	2f
	/* Loop over the relocation addresses and process all entries */
1:	ldr	r4, [r2], #4
#ifdef CFG_WITH_PAGER
	/* Skip too large addresses */
	cmp	r4, r12
	bge	2f
#endif
	ldr	r5, [r4, r1]
	add	r5, r5, r0
	str	r5, [r4, r1]

2:	cmp	r2, r3
	bne	1b

	pop	{r4-r5}
	bx	lr
END_FUNC relocate
#endif
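
/*
 * A rough C model of the relocation loop above. The boot_embdata field
 * names are assumptions based on the BOOT_EMBDATA_* offsets used above;
 * embdata sits at __init_end with the pager, at __end without it:
 *
 *	uint32_t *rel = (uint32_t *)((vaddr_t)embdata + embdata->reloc_offset);
 *	uint32_t *rel_end = (uint32_t *)((vaddr_t)rel + embdata->reloc_len);
 *
 *	for (; rel < rel_end; rel++) {
 *		uint32_t *p = (uint32_t *)(TEE_LOAD_ADDR + *rel);
 *
 *		*p += load_offset;
 *	}
 */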

/*
 * void enable_mmu(unsigned long core_pos);
 *
 * This function depends on being mapped within the identity map, where
 * the physical and virtual addresses are the same. After the MMU has been
 * enabled, execution continues at the new offset: the stack pointer and
 * the return address are updated accordingly.
 */
LOCAL_FUNC enable_mmu , : , .identity_map
	/* r0 = core pos */
	adr	r1, boot_mmu_config

#ifdef CFG_WITH_LPAE
	ldm	r1!, {r2, r3}
	/*
	 * r2 = ttbcr
	 * r3 = mair0
	 */
	write_ttbcr r2
	write_mair0 r3

	ldm	r1!, {r2, r3}
	/*
	 * r2 = ttbr0_base
	 * r3 = ttbr0_core_offset
	 */

	/*
	 * ttbr0_el1 = ttbr0_base + ttbr0_core_offset * core_pos
	 */
	mla	r12, r0, r3, r2
	mov	r0, #0
	write_ttbr0_64bit r12, r0
	write_ttbr1_64bit r0, r0
#else
	ldm	r1!, {r2, r3}
	/*
	 * r2 = prrr
	 * r3 = nmrr
	 */
	write_prrr r2
	write_nmrr r3

	ldm	r1!, {r2, r3}
	/*
	 * r2 = dacr
	 * r3 = ttbcr
	 */
	write_dacr r2
	write_ttbcr r3

	ldm	r1!, {r2}
	/* r2 = ttbr */
	write_ttbr0 r2
	write_ttbr1 r2

	mov	r2, #0
	write_contextidr r2
#endif
	ldm	r1!, {r2}
	/* r2 = load_offset (always 0 if CFG_CORE_ASLR=n) */
	isb

	/* Invalidate TLB */
	write_tlbiall

	/*
	 * Make sure translation table writes have drained into memory and
	 * the TLB invalidation is complete.
	 */
	dsb	sy
	isb

	read_sctlr r0
	orr	r0, r0, #SCTLR_M
#ifndef CFG_WITH_LPAE
	/* Enable Access flag (simplified access permissions) and TEX remap */
	orr	r0, r0, #(SCTLR_AFE | SCTLR_TRE)
#endif
	write_sctlr r0
	isb

	/* Update vbar */
	read_vbar r1
	add	r1, r1, r2
	write_vbar r1
	isb

	/* Invalidate instruction cache and branch predictor */
	write_iciallu
	write_bpiall
	isb

	read_sctlr r0
	/* Enable I and D cache */
	orr	r0, r0, #SCTLR_I
	orr	r0, r0, #SCTLR_C
#if defined(CFG_ENABLE_SCTLR_Z)
	/*
	 * This is only needed on the ARMv7 architecture and is hence
	 * conditioned by the configuration directive CFG_ENABLE_SCTLR_Z.
	 * On later architectures, program flow prediction is automatically
	 * enabled upon MMU enablement.
	 */
	orr	r0, r0, #SCTLR_Z
#endif
	write_sctlr r0
	isb

	/* Adjust stack pointer and return address */
	add	sp, sp, r2
	add	lr, lr, r2

	bx	lr
END_FUNC enable_mmu

LOCAL_DATA stack_tmp_rel , :
	.word	stack_tmp - stack_tmp_rel - STACK_TMP_GUARD
END_DATA stack_tmp_rel

LOCAL_DATA stack_tmp_stride_rel , :
	.word	stack_tmp_stride - stack_tmp_stride_rel
END_DATA stack_tmp_stride_rel

DATA boot_mmu_config , :	/* struct core_mmu_config */
	.skip	CORE_MMU_CONFIG_SIZE
END_DATA boot_mmu_config
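
/*
 * enable_mmu() above consumes boot_mmu_config sequentially after it has
 * been filled in by core_init_mmu_map(). Judging only from the loads in
 * enable_mmu(), the layout is roughly the sketch below; the authoritative
 * definition is struct core_mmu_config in the MMU headers together with
 * the generated CORE_MMU_CONFIG_* offsets:
 *
 *	struct core_mmu_config {
 *	#ifdef CFG_WITH_LPAE
 *		uint32_t ttbcr;
 *		uint32_t mair0;
 *		uint32_t ttbr0_base;
 *		uint32_t ttbr0_core_offset;
 *	#else
 *		uint32_t prrr;
 *		uint32_t nmrr;
 *		uint32_t dacr;
 *		uint32_t ttbcr;
 *		uint32_t ttbr;
 *	#endif
 *		uint32_t map_offset;	// 0 unless CFG_CORE_ASLR
 *	};
 */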

#if defined(CFG_WITH_ARM_TRUSTED_FW)
FUNC cpu_on_handler , : , .identity_map
UNWIND(	.cantunwind)
	mov	r4, r0
	mov	r5, r1
	mov	r6, lr

	set_sctlr
	isb

	adr	r0, reset_vect_table
	write_vbar r0

	bl	__get_core_pos
	bl	enable_mmu

	set_sp

	mov	r0, r4
	mov	r1, r5
	bl	boot_cpu_on_handler
#ifdef CFG_CORE_FFA
	b	thread_ffa_msg_wait
#else
	bx	r6
#endif
END_FUNC cpu_on_handler
DECLARE_KEEP_PAGER cpu_on_handler

#else /* defined(CFG_WITH_ARM_TRUSTED_FW) */

LOCAL_FUNC reset_secondary , : , .identity_map
UNWIND(	.cantunwind)
	adr	r0, reset_vect_table
	write_vbar r0

	wait_primary

	set_sp

#if defined(CFG_BOOT_SECONDARY_REQUEST)
	/* If L1 was not invalidated earlier, do it here */
	mov	r0, #DCACHE_OP_INV
	bl	dcache_op_level1
#endif

	bl	__get_core_pos
	bl	enable_mmu

	cpu_is_ready

#if defined(CFG_BOOT_SECONDARY_REQUEST)
	/*
	 * boot_core_hpen() returns (in r0) the address of the ns entry
	 * context structure, see the sketch after this function.
	 */
	bl	boot_core_hpen
	ldm	r0, {r0, r6}
#else
	mov	r0, r5		/* ns-entry address */
	mov	r6, #0
#endif
	bl	boot_init_secondary

	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	mov	r1, r6
	mov	r2, #0
	mov	r3, #0
	mov	r4, #0
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC reset_secondary
DECLARE_KEEP_PAGER reset_secondary
#endif /* defined(CFG_WITH_ARM_TRUSTED_FW) */
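
/*
 * A sketch of the structure boot_core_hpen() is assumed to return a
 * pointer to, based only on the ldm in reset_secondary() above: two
 * consecutive 32-bit words, the non-secure entry address (passed to
 * boot_init_secondary()) and an opaque value relayed in r1 through the
 * TEESMC_OPTEED_RETURN_ENTRY_DONE SMC. The type and field names are
 * illustrative only:
 *
 *	struct ns_entry_context {	// illustrative name
 *		uint32_t entry_point;	// loaded into r0
 *		uint32_t context_id;	// loaded into r6, relayed in r1
 *	};
 */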