/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, Linaro Limited
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/cache_helpers.h>
#include <kernel/unwind.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

.arch_extension sec

.section .data
.balign 4

#ifdef CFG_BOOT_SYNC_CPU
.equ SEM_CPU_READY, 1
#endif

#ifdef CFG_PL310
.section .rodata.init
panic_boot_file:
	.asciz __FILE__

/*
 * void __assert_flat_mapped_range(uint32_t vaddr, uint32_t line)
 */
LOCAL_FUNC __assert_flat_mapped_range , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	push	{ r4-r6, lr }
	mov	r4, r0
	mov	r5, r1
	bl	cpu_mmu_enabled
	cmp	r0, #0
	beq	1f
	mov	r0, r4
	bl	virt_to_phys
	cmp	r0, r4
	beq	1f
	/*
	 * This must be compliant with the generic panic routine:
	 * __do_panic(__FILE__, __LINE__, __func__, str)
	 */
	ldr	r0, =panic_boot_file
	mov	r1, r5
	mov	r2, #0
	mov	r3, #0
	bl	__do_panic
	b	.		/* should NOT return */
1:	pop	{ r4-r6, pc }
UNWIND(	.fnend)
END_FUNC __assert_flat_mapped_range

	/* Panic if the MMU is enabled and vaddr != paddr (clobbers lr) */
	.macro assert_flat_mapped_range va, line
		ldr	r0, \va
		ldr	r1, =\line
		bl	__assert_flat_mapped_range
	.endm
#endif /* CFG_PL310 */

FUNC plat_cpu_reset_early , :
UNWIND(	.fnstart)
	bx	lr
UNWIND(	.fnend)
END_FUNC plat_cpu_reset_early
DECLARE_KEEP_PAGER plat_cpu_reset_early
.weak plat_cpu_reset_early

	.section .identity_map, "ax"
	.align 5
LOCAL_FUNC reset_vect_table , : , .identity_map
	b	.
	b	.	/* Undef */
	b	.	/* Syscall */
	b	.	/* Prefetch abort */
	b	.	/* Data abort */
	b	.	/* Reserved */
	b	.	/* IRQ */
	b	.	/* FIQ */
END_FUNC reset_vect_table

	.macro cpu_is_ready
#ifdef CFG_BOOT_SYNC_CPU
	bl	__get_core_pos
	lsl	r0, r0, #2
	ldr	r1, =sem_cpu_sync
	ldr	r2, =SEM_CPU_READY
	str	r2, [r1, r0]
	dsb
	sev
#endif
	.endm

	.macro wait_primary
#ifdef CFG_BOOT_SYNC_CPU
	ldr	r0, =sem_cpu_sync
	mov	r2, #SEM_CPU_READY
	sev
1:
	ldr	r1, [r0]
	cmp	r1, r2
	wfene
	bne	1b
#endif
	.endm

	.macro wait_secondary
#ifdef CFG_BOOT_SYNC_CPU
	ldr	r0, =sem_cpu_sync
	mov	r3, #CFG_TEE_CORE_NB_CORE
	mov	r2, #SEM_CPU_READY
	sev
1:
	subs	r3, r3, #1
	beq	3f
	add	r0, r0, #4
2:
	ldr	r1, [r0]
	cmp	r1, r2
	wfene
	bne	2b
	b	1b
3:
#endif
	.endm
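
	/*
	 * Informal sketch of the CFG_BOOT_SYNC_CPU handshake implemented
	 * by the three macros above (illustrative C only, not a definitive
	 * implementation; sem_cpu_sync holds one 32-bit word per core):
	 *
	 *   cpu_is_ready:   sem_cpu_sync[__get_core_pos()] = SEM_CPU_READY;
	 *                   dsb(); sev();
	 *   wait_primary:   while (sem_cpu_sync[0] != SEM_CPU_READY)
	 *                           wfe();
	 *   wait_secondary: for (n = 1; n < CFG_TEE_CORE_NB_CORE; n++)
	 *                           while (sem_cpu_sync[n] != SEM_CPU_READY)
	 *                                   wfe();
	 */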

	/*
	 * set_sctlr : Setup some core configuration in CP15 SCTLR
	 *
	 * Setup required by current implementation of the OP-TEE core:
	 * - Disable data and instruction cache.
	 * - MMU is expected off and exceptions trapped in ARM mode.
	 * - Enable or disable alignment checks based on platform configuration.
	 * - Optionally enable write-implies-execute-never.
	 * - Optionally enable round robin strategy for cache replacement.
	 *
	 * Clobbers r0.
	 */
	.macro set_sctlr
	read_sctlr r0
	bic	r0, r0, #(SCTLR_M | SCTLR_C)
	bic	r0, r0, #SCTLR_I
	bic	r0, r0, #SCTLR_TE
	orr	r0, r0, #SCTLR_SPAN
#if defined(CFG_SCTLR_ALIGNMENT_CHECK)
	orr	r0, r0, #SCTLR_A
#else
	bic	r0, r0, #SCTLR_A
#endif
#if defined(CFG_HWSUPP_MEM_PERM_WXN) && defined(CFG_CORE_RWDATA_NOEXEC)
	orr	r0, r0, #(SCTLR_WXN | SCTLR_UWXN)
#endif
#if defined(CFG_ENABLE_SCTLR_RR)
	orr	r0, r0, #SCTLR_RR
#endif
	write_sctlr r0
	.endm

	/*
	 * Save boot arguments
	 * entry r0, saved r4: pagestore
	 * entry r1, saved r7: (ARMv7 standard bootarg #1)
	 * entry r2, saved r6: device tree address, (ARMv7 standard bootarg #2)
	 * entry lr, saved r5: non-secure entry address (ARMv7 bootarg #0)
	 */
	.macro bootargs_entry
#if defined(CFG_NS_ENTRY_ADDR)
	ldr	r5, =CFG_NS_ENTRY_ADDR
#else
	mov	r5, lr
#endif
#if defined(CFG_PAGEABLE_ADDR)
	ldr	r4, =CFG_PAGEABLE_ADDR
#else
	mov	r4, r0
#endif
#if defined(CFG_DT_ADDR)
	ldr	r6, =CFG_DT_ADDR
#else
	mov	r6, r2
#endif
	mov	r7, r1
	.endm

	.macro maybe_init_spectre_workaround
#if !defined(CFG_WITH_ARM_TRUSTED_FW) && \
    (defined(CFG_CORE_WORKAROUND_SPECTRE_BP) || \
     defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC))
	read_midr r0
	ubfx	r1, r0, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
	cmp	r1, #MIDR_IMPLEMENTER_ARM
	bne	1f
	ubfx	r1, r0, #MIDR_PRIMARY_PART_NUM_SHIFT, \
			#MIDR_PRIMARY_PART_NUM_WIDTH

	movw	r2, #CORTEX_A8_PART_NUM
	cmp	r1, r2
	moveq	r2, #ACTLR_CA8_ENABLE_INVALIDATE_BTB
	beq	2f

	movw	r2, #CORTEX_A15_PART_NUM
	cmp	r1, r2
	moveq	r2, #ACTLR_CA15_ENABLE_INVALIDATE_BTB
	bne	1f	/* Skip it for all other CPUs */
2:
	read_actlr r0
	orr	r0, r0, r2
	write_actlr r0
	isb
1:
#endif
	.endm

FUNC _start , :
UNWIND(	.fnstart)
UNWIND(	.cantunwind)

	bootargs_entry

	/*
	 * The 32-bit entry is expected to execute in Supervisor mode;
	 * some bootloaders may enter in Supervisor or Monitor mode.
	 */
	cps	#CPSR_MODE_SVC

	/* Early ARM secure MP specific configuration */
	bl	plat_cpu_reset_early
	maybe_init_spectre_workaround

	set_sctlr
	isb

	ldr	r0, =reset_vect_table
	write_vbar r0

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	b	reset_primary
#else
	bl	__get_core_pos
	cmp	r0, #0
	beq	reset_primary
	b	reset_secondary
#endif
UNWIND(	.fnend)
END_FUNC _start
DECLARE_KEEP_INIT _start

	/*
	 * Setup sp to point to the top of the tmp stack for the current CPU:
	 * sp is assigned stack_tmp_export + cpu_id * stack_tmp_stride
	 */
	.macro set_sp
	bl	__get_core_pos
	cmp	r0, #CFG_TEE_CORE_NB_CORE
	/* Unsupported CPU, park it before it breaks something */
	bge	unhandled_cpu

	/*
	 * stack_tmp_stride and stack_tmp_stride_rel are the
	 * equivalent of:
	 * extern const u32 stack_tmp_stride;
	 * u32 stack_tmp_stride_rel = (u32)&stack_tmp_stride -
	 *			      (u32)&stack_tmp_stride_rel;
	 *
	 * To load the value of stack_tmp_stride we do the equivalent
	 * of:
	 * *(u32 *)(stack_tmp_stride_rel + (u32)&stack_tmp_stride_rel)
	 */
	adr	r3, stack_tmp_stride_rel
	ldr	r1, [r3]
	ldr	r1, [r1, r3]

	/* Same pattern as for stack_tmp_stride above */
	adr	r3, stack_tmp_export_rel
	ldr	r2, [r3]
	ldr	r2, [r2, r3]

	/*
	 * r0 is core pos
	 * r1 is value of stack_tmp_stride
	 * r2 is value of stack_tmp_export
	 */
	mul	r1, r0, r1
	add	sp, r1, r2
	.endm
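
	/*
	 * Rough C equivalent of set_sp above (illustrative only):
	 *
	 *   size_t pos = __get_core_pos();
	 *
	 *   if (pos >= CFG_TEE_CORE_NB_CORE)
	 *           unhandled_cpu();
	 *   sp = stack_tmp_export + pos * stack_tmp_stride;
	 *
	 * The *_rel indirection exists so both values can be read
	 * PC-relatively, which works regardless of whether the MMU (and
	 * any ASLR mapping) is enabled yet.
	 */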

	/*
	 * Cache maintenance during entry: handle outer cache.
	 * End address is exclusive: first byte not to be changed.
	 * Note however that arm_clX_inv/cleanbyva operate on full cache lines.
	 *
	 * Use an ANSI #define to capture the source file line number for
	 * the PL310 assertion.
	 */
	.macro __inval_cache_vrange vbase, vend, line
#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
	assert_flat_mapped_range (\vbase), (\line)
	bl	pl310_base
	ldr	r1, \vbase
	ldr	r2, \vend
	bl	arm_cl2_invbypa
#endif
	ldr	r0, \vbase
	ldr	r1, \vend
	sub	r1, r1, r0
	bl	dcache_inv_range
	.endm

	.macro __flush_cache_vrange vbase, vend, line
#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
	assert_flat_mapped_range (\vbase), (\line)
	ldr	r0, \vbase
	ldr	r1, \vend
	sub	r1, r1, r0
	bl	dcache_clean_range
	bl	pl310_base
	ldr	r1, \vbase
	ldr	r2, \vend
	bl	arm_cl2_cleaninvbypa
#endif
	ldr	r0, \vbase
	ldr	r1, \vend
	sub	r1, r1, r0
	bl	dcache_cleaninv_range
	.endm

#define inval_cache_vrange(vbase, vend) \
		__inval_cache_vrange vbase, vend, __LINE__

#define flush_cache_vrange(vbase, vend) \
		__flush_cache_vrange vbase, vend, __LINE__

#ifdef CFG_BOOT_SYNC_CPU
#define flush_cpu_semaphores \
		flush_cache_vrange(sem_cpu_sync_start, sem_cpu_sync_end)
#else
#define flush_cpu_semaphores
#endif

LOCAL_FUNC reset_primary , : , .identity_map
UNWIND(	.fnstart)
UNWIND(	.cantunwind)

	/* preserve r4-r7: bootargs */

#ifdef CFG_WITH_PAGER
	/*
	 * Move init code into correct location and move hashes to a
	 * temporary safe location until the heap is initialized.
	 *
	 * The binary is built as:
	 * [Pager code, rodata and data] : In correct location
	 * [Init code and rodata] : Should be copied to __init_start
	 * [struct boot_embdata + data] : Should be saved before
	 * initializing pager, first uint32_t tells the length of the data
	 */
	ldr	r0, =__init_start	/* dst */
	ldr	r1, =__data_end		/* src */
	ldr	r2, =__init_end
	sub	r2, r2, r0		/* init len */
	ldr	r12, [r1, r2]		/* length of hashes etc */
	add	r2, r2, r12		/* length of init and hashes etc */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	r0, r0, r2		/* __init_start + len */
	add	r1, r1, r2		/* __data_end + len */
	str	r0, cached_mem_end
	ldr	r2, =__init_start
copy_init:
	ldmdb	r1!, {r3, r8-r12}
	stmdb	r0!, {r3, r8-r12}
	cmp	r0, r2
	bgt	copy_init
#else
	/*
	 * The binary is built as:
	 * [Core, rodata and data] : In correct location
	 * [struct boot_embdata + data] : Should be moved to __end, first
	 * uint32_t tells the length of the struct + data
	 */
	ldr	r0, =__end	/* dst */
	ldr	r1, =__data_end	/* src */
	ldr	r2, [r1]	/* struct boot_embdata::total_len */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	r0, r0, r2
	add	r1, r1, r2
	str	r0, cached_mem_end
	ldr	r2, =__end

copy_init:
	ldmdb	r1!, {r3, r8-r12}
	stmdb	r0!, {r3, r8-r12}
	cmp	r0, r2
	bgt	copy_init
#endif
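
	/*
	 * Note on the copy loops above: ldmdb/stmdb decrement r1/r0 before
	 * each access and move six registers (24 bytes) per iteration, so
	 * the regions are copied from their last byte down to the
	 * destination base address held in r2.
	 */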

	/*
	 * Clear .bss, this code obviously depends on the linker keeping
	 * start/end of .bss at least 8 byte aligned.
	 */
	ldr	r0, =__bss_start
	ldr	r1, =__bss_end
	mov	r2, #0
	mov	r3, #0
clear_bss:
	stmia	r0!, {r2, r3}
	cmp	r0, r1
	bls	clear_bss

#ifdef CFG_VIRTUALIZATION
	/*
	 * Clear .nex_bss, this code obviously depends on the linker keeping
	 * start/end of .nex_bss at least 8 byte aligned.
	 */
	ldr	r0, =__nex_bss_start
	ldr	r1, =__nex_bss_end
	mov	r2, #0
	mov	r3, #0
clear_nex_bss:
	stmia	r0!, {r2, r3}
	cmp	r0, r1
	bls	clear_nex_bss
#endif

#ifdef CFG_CORE_SANITIZE_KADDRESS
	/* First initialize the entire shadow area with no access */
	ldr	r0, =__asan_shadow_start	/* start */
	ldr	r1, =__asan_shadow_end		/* limit */
	mov	r2, #ASAN_DATA_RED_ZONE
shadow_no_access:
	str	r2, [r0], #4
	cmp	r0, r1
	bls	shadow_no_access

	/* Mark the entire stack area as OK */
	ldr	r2, =CFG_ASAN_SHADOW_OFFSET
	ldr	r0, =__nozi_stack_start		/* start */
	lsr	r0, r0, #ASAN_BLOCK_SHIFT
	add	r0, r0, r2
	ldr	r1, =__nozi_stack_end		/* limit */
	lsr	r1, r1, #ASAN_BLOCK_SHIFT
	add	r1, r1, r2
	mov	r2, #0
shadow_stack_access_ok:
	strb	r2, [r0], #1
	cmp	r0, r1
	bls	shadow_stack_access_ok
#endif

	set_sp

	/* curr_thread needs to be -1 until threads are properly initialized */
	bl	thread_clr_thread_core_local

	/* complete ARM secure MP common configuration */
	bl	plat_primary_init_early

	/* Enable Console */
	bl	console_init

#ifdef CFG_PL310
	bl	pl310_base
	bl	arm_cl2_config
#endif

	/*
	 * Invalidate dcache for all memory used during initialization to
	 * avoid nasty surprises when the cache is turned on. We must not
	 * invalidate memory not used by OP-TEE since we may invalidate
	 * entries used by for instance ARM Trusted Firmware.
	 */
	inval_cache_vrange(cached_mem_start, cached_mem_end)

#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
	/* Enable PL310 if not yet enabled */
	bl	pl310_base
	bl	arm_cl2_enable
#endif

#ifdef CFG_CORE_ASLR
	mov	r0, r6
	bl	get_aslr_seed
#else
	mov	r0, #0
#endif

	ldr	r1, =boot_mmu_config
	bl	core_init_mmu_map

#ifdef CFG_CORE_ASLR
	/*
	 * Process relocation information for updating with the new offset.
	 * We're doing this now before MMU is enabled as some of the memory
	 * will become write protected.
	 */
	ldr	r0, =boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_LOAD_OFFSET]
	/*
	 * Update cached_mem_end address with load offset since it was
	 * calculated before relocation.
	 */
	ldr	r2, cached_mem_end
	add	r2, r2, r0
	str	r2, cached_mem_end

	bl	relocate
#endif

	bl	__get_core_pos
	bl	enable_mmu
#ifdef CFG_CORE_ASLR
	/*
	 * Reinitialize console, since register_serial_console() has
	 * previously registered a PA and with ASLR the VA is different
	 * from the PA.
	 */
	bl	console_init
#endif

	mov	r0, r4		/* pageable part address */
	mov	r1, r5		/* ns-entry address */
	mov	r2, r6		/* DT address */
	bl	boot_init_primary
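
	/*
	 * Note: cached_mem_start (__text_start, see the data definition
	 * further down) and cached_mem_end (stored when the embedded data
	 * was copied above and adjusted for the ASLR load offset) delimit
	 * the memory used during this early initialization. As described
	 * for the cache macros, the end address is exclusive.
	 */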

	/*
	 * In case we've touched memory that secondary CPUs will use before
	 * they have turned on their D-cache, clean and invalidate the
	 * D-cache before exiting to normal world.
	 */
	flush_cache_vrange(cached_mem_start, cached_mem_end)

	/* release secondary boot cores and sync with them */
	cpu_is_ready
	flush_cpu_semaphores
	wait_secondary

#ifdef CFG_PL310_LOCKED
#ifdef CFG_PL310_SIP_PROTOCOL
#error "CFG_PL310_LOCKED must not be defined when CFG_PL310_SIP_PROTOCOL=y"
#endif
	/* lock/invalidate all lines: pl310 behaves as if disabled */
	bl	pl310_base
	bl	arm_cl2_lockallways
	bl	pl310_base
	bl	arm_cl2_cleaninvbyway
#endif

	/*
	 * Clear current thread id now to allow the thread to be reused on
	 * next entry. Matches the thread_init_boot_thread() in
	 * boot.c.
	 */
	bl	thread_clr_boot_thread

#ifdef CFG_CORE_FFA
	ldr	r0, =cpu_on_handler
	/*
	 * Compensate for the load offset since cpu_on_handler() is
	 * called with MMU off.
	 */
	ldr	r1, boot_mmu_config + CORE_MMU_CONFIG_LOAD_OFFSET
	sub	r0, r0, r1
	bl	ffa_secondary_cpu_boot_req
	b	thread_ffa_msg_wait
#else /* CFG_CORE_FFA */

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	ldr	r0, =boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_LOAD_OFFSET]
	ldr	r1, =thread_vector_table
	/* Pass the vector address returned from main_init */
	sub	r1, r1, r0
#else
	/* relay standard bootargs #1 and #2 to the non-secure entry */
	mov	r4, #0
	mov	r3, r6		/* std bootarg #2 for register R2 */
	mov	r2, r7		/* std bootarg #1 for register R1 */
	mov	r1, #0
#endif /* CFG_WITH_ARM_TRUSTED_FW */

	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	smc	#0
	b	.	/* SMC should not return */
#endif /* CFG_CORE_FFA */
UNWIND(	.fnend)
END_FUNC reset_primary

#ifdef CFG_BOOT_SYNC_CPU
LOCAL_DATA sem_cpu_sync_start , :
	.word	sem_cpu_sync
END_DATA sem_cpu_sync_start

LOCAL_DATA sem_cpu_sync_end , :
	.word	sem_cpu_sync + (CFG_TEE_CORE_NB_CORE << 2)
END_DATA sem_cpu_sync_end
#endif

LOCAL_DATA cached_mem_start , :
	.word	__text_start
END_DATA cached_mem_start

LOCAL_DATA cached_mem_end , :
	.skip	4
END_DATA cached_mem_end

LOCAL_FUNC unhandled_cpu , :
UNWIND(	.fnstart)
	wfi
	b	unhandled_cpu
UNWIND(	.fnend)
END_FUNC unhandled_cpu

#ifdef CFG_CORE_ASLR
LOCAL_FUNC relocate , :
	push	{r4-r5}
	/* r0 holds load offset */
#ifdef CFG_WITH_PAGER
	ldr	r12, =__init_end
#else
	ldr	r12, =__end
#endif
	ldr	r2, [r12, #BOOT_EMBDATA_RELOC_OFFSET]
	ldr	r3, [r12, #BOOT_EMBDATA_RELOC_LEN]

	mov_imm	r1, TEE_RAM_START
	add	r2, r2, r12	/* start of relocations */
	add	r3, r3, r2	/* end of relocations */

	/*
	 * Relocations are not formatted as Rel32, instead they are in a
	 * compressed format created by get_reloc_bin() in
	 * scripts/gen_tee_bin.py.
	 *
	 * All the R_ARM_RELATIVE relocations are translated into a list
	 * of 32-bit offsets from TEE_RAM_START. At each such offset is a
	 * 32-bit value which is increased with the load offset.
	 */
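
	/*
	 * In other words, roughly (illustrative C only):
	 *
	 *   for (uint32_t *p = reloc_start; p < reloc_end; p++)
	 *           *(uint32_t *)(TEE_RAM_START + *p) += load_offset;
	 */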

#ifdef CFG_WITH_PAGER
	/*
	 * With pager enabled we can only relocate the pager and init
	 * parts, the rest has to be done when a page is populated.
	 */
	sub	r12, r12, r1
#endif

	b	2f
	/* Loop over the relocation addresses and process all entries */
1:	ldr	r4, [r2], #4
#ifdef CFG_WITH_PAGER
	/* Skip too large addresses */
	cmp	r4, r12
	bge	2f
#endif
	ldr	r5, [r4, r1]
	add	r5, r5, r0
	str	r5, [r4, r1]

2:	cmp	r2, r3
	bne	1b

	pop	{r4-r5}
	bx	lr
END_FUNC relocate
#endif

/*
 * void enable_mmu(unsigned long core_pos);
 *
 * This function depends on being mapped within the identity map, where
 * the physical and virtual addresses are the same. After the MMU has been
 * enabled the instruction pointer will be updated to execute at the new
 * offset instead. Stack pointer and return address are updated.
 */
LOCAL_FUNC enable_mmu , : , .identity_map
	/* r0 = core pos */
	adr	r1, boot_mmu_config

#ifdef CFG_WITH_LPAE
	ldm	r1!, {r2, r3}
	/*
	 * r2 = ttbcr
	 * r3 = mair0
	 */
	write_ttbcr r2
	write_mair0 r3

	ldm	r1!, {r2, r3}
	/*
	 * r2 = ttbr0_base
	 * r3 = ttbr0_core_offset
	 */

	/*
	 * ttbr0 = ttbr0_base + ttbr0_core_offset * core_pos
	 */
	mla	r12, r0, r3, r2
	mov	r0, #0
	write_ttbr0_64bit r12, r0
	write_ttbr1_64bit r0, r0
#else
	ldm	r1!, {r2, r3}
	/*
	 * r2 = prrr
	 * r3 = nmrr
	 */
	write_prrr r2
	write_nmrr r3

	ldm	r1!, {r2, r3}
	/*
	 * r2 = dacr
	 * r3 = ttbcr
	 */
	write_dacr r2
	write_ttbcr r3

	ldm	r1!, {r2}
	/* r2 = ttbr */
	write_ttbr0 r2
	write_ttbr1 r2

	mov	r2, #0
	write_contextidr r2
#endif
	ldm	r1!, {r2}
	/* r2 = load_offset (always 0 if CFG_CORE_ASLR=n) */
	isb

	/* Invalidate TLB */
	write_tlbiall

	/*
	 * Make sure translation table writes have drained into memory and
	 * the TLB invalidation is complete.
	 */
	dsb	sy
	isb

	read_sctlr r0
	orr	r0, r0, #SCTLR_M
#ifndef CFG_WITH_LPAE
	/* Enable Access flag (simplified access permissions) and TEX remap */
	orr	r0, r0, #(SCTLR_AFE | SCTLR_TRE)
#endif
	write_sctlr r0
	isb

	/* Update vbar */
	read_vbar r1
	add	r1, r1, r2
	write_vbar r1
	isb

	/* Invalidate instruction cache and branch predictor */
	write_iciallu
	write_bpiall
	isb

	read_sctlr r0
	/* Enable I and D cache */
	orr	r0, r0, #SCTLR_I
	orr	r0, r0, #SCTLR_C
#if defined(CFG_ENABLE_SCTLR_Z)
	/*
	 * This is only needed on ARMv7 architecture and hence conditioned
	 * by configuration directive CFG_ENABLE_SCTLR_Z. For recent
	 * architectures, the program flow prediction is automatically
	 * enabled upon MMU enablement.
	 */
	orr	r0, r0, #SCTLR_Z
#endif
	write_sctlr r0
	isb

	/* Adjust stack pointer and return address */
	add	sp, sp, r2
	add	lr, lr, r2

	bx	lr
END_FUNC enable_mmu
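
/*
 * The data objects below are read PC-relatively (adr) by code that may
 * run with the MMU still off: set_sp uses stack_tmp_export_rel and
 * stack_tmp_stride_rel (see the comment in that macro), while enable_mmu
 * and reset_secondary read boot_mmu_config, which is filled in by
 * core_init_mmu_map() called from reset_primary.
 */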

LOCAL_DATA stack_tmp_export_rel , :
	.word	stack_tmp_export - stack_tmp_export_rel
END_DATA stack_tmp_export_rel

LOCAL_DATA stack_tmp_stride_rel , :
	.word	stack_tmp_stride - stack_tmp_stride_rel
END_DATA stack_tmp_stride_rel

DATA boot_mmu_config , : /* struct core_mmu_config */
	.skip	CORE_MMU_CONFIG_SIZE
END_DATA boot_mmu_config

#if defined(CFG_WITH_ARM_TRUSTED_FW)
FUNC cpu_on_handler , : , .identity_map
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	mov	r4, r0
	mov	r5, r1
	mov	r6, lr

	set_sctlr
	isb

	adr	r0, reset_vect_table
	write_vbar r0

	bl	__get_core_pos
	bl	enable_mmu

	set_sp

	mov	r0, r4
	mov	r1, r5
	bl	boot_cpu_on_handler
#ifdef CFG_CORE_FFA
	b	thread_ffa_msg_wait
#else
	bx	r6
#endif
UNWIND(	.fnend)
END_FUNC cpu_on_handler
DECLARE_KEEP_PAGER cpu_on_handler

#else /* defined(CFG_WITH_ARM_TRUSTED_FW) */

LOCAL_FUNC reset_secondary , : , .identity_map
UNWIND(	.fnstart)
UNWIND(	.cantunwind)
	adr	r0, reset_vect_table
	write_vbar r0

	wait_primary

	set_sp
#ifdef CFG_CORE_ASLR
	/*
	 * stack_tmp_export which is used as base when initializing sp has
	 * been relocated to the new offset. Since the MMU isn't enabled on
	 * this CPU yet we need to restore the corresponding physical
	 * address.
	 */
	adr	r0, boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_LOAD_OFFSET]
	sub	sp, sp, r0
#endif

#if defined (CFG_BOOT_SECONDARY_REQUEST)
	/* if L1 is not invalidated before, do it here */
	mov	r0, #DCACHE_OP_INV
	bl	dcache_op_level1
#endif

	bl	__get_core_pos
	bl	enable_mmu

	cpu_is_ready

#if defined (CFG_BOOT_SECONDARY_REQUEST)
	/*
	 * boot_core_hpen() return value (r0) is the address of the
	 * ns entry context structure
	 */
	bl	boot_core_hpen
	ldm	r0, {r0, r6}
#else
	mov	r0, r5		/* ns-entry address */
	mov	r6, #0
#endif
	bl	boot_init_secondary

	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	mov	r1, r6
	mov	r2, #0
	mov	r3, #0
	mov	r4, #0
	smc	#0
	b	.	/* SMC should not return */
UNWIND(	.fnend)
END_FUNC reset_secondary
DECLARE_KEEP_PAGER reset_secondary
#endif /* defined(CFG_WITH_ARM_TRUSTED_FW) */
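
/*
 * Note on the exit from reset_secondary above: the SMC is issued with
 * r0 = TEESMC_OPTEED_RETURN_ENTRY_DONE and r1 holding either the second
 * word of the ns-entry context returned by boot_core_hpen() (when
 * CFG_BOOT_SECONDARY_REQUEST is enabled) or 0.
 */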