/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, Linaro Limited
 * Copyright (c) 2021-2023, Arm Limited
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>
#include <platform_config.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

.arch_extension sec

.section .data
.balign 4

#ifdef CFG_BOOT_SYNC_CPU
.equ SEM_CPU_READY, 1
#endif

#ifdef CFG_PL310
.section .rodata.init
panic_boot_file:
	.asciz __FILE__

/*
 * void __assert_flat_mapped_range(uint32_t vaddr, uint32_t line)
 */
LOCAL_FUNC __assert_flat_mapped_range , :
UNWIND(	.cantunwind)
	push	{ r4-r6, lr }
	mov	r4, r0
	mov	r5, r1
	bl	cpu_mmu_enabled
	cmp	r0, #0
	beq	1f
	mov	r0, r4
	bl	virt_to_phys
	cmp	r0, r4
	beq	1f
	/*
	 * This must be compliant with the generic panic routine:
	 * __do_panic(__FILE__, __LINE__, __func__, str)
	 */
	ldr	r0, =panic_boot_file
	mov	r1, r5
	mov	r2, #0
	mov	r3, #0
	bl	__do_panic
	b	.		/* should NOT return */
1:	pop	{ r4-r6, pc }
END_FUNC __assert_flat_mapped_range

	/* Panic if the MMU is enabled and vaddr != paddr (clobbers lr) */
	.macro assert_flat_mapped_range va, line
	ldr	r0, \va
	ldr	r1, =\line
	bl	__assert_flat_mapped_range
	.endm
#endif /* CFG_PL310 */

WEAK_FUNC plat_cpu_reset_early , :
	bx	lr
END_FUNC plat_cpu_reset_early
DECLARE_KEEP_PAGER plat_cpu_reset_early

	.section .identity_map, "ax"
	.align 5
LOCAL_FUNC reset_vect_table , : , .identity_map
	b	.
	b	.	/* Undef */
	b	.	/* Syscall */
	b	.	/* Prefetch abort */
	b	.	/* Data abort */
	b	.	/* Reserved */
	b	.	/* IRQ */
	b	.	/* FIQ */
END_FUNC reset_vect_table

	.macro cpu_is_ready
#ifdef CFG_BOOT_SYNC_CPU
	bl	__get_core_pos
	lsl	r0, r0, #2
	ldr	r1, =sem_cpu_sync
	ldr	r2, =SEM_CPU_READY
	str	r2, [r1, r0]
	dsb
	sev
#endif
	.endm

	.macro wait_primary
#ifdef CFG_BOOT_SYNC_CPU
	ldr	r0, =sem_cpu_sync
	mov	r2, #SEM_CPU_READY
	sev
1:
	ldr	r1, [r0]
	cmp	r1, r2
	wfene
	bne	1b
#endif
	.endm

	.macro wait_secondary
#ifdef CFG_BOOT_SYNC_CPU
	ldr	r0, =sem_cpu_sync
	mov	r3, #CFG_TEE_CORE_NB_CORE
	mov	r2, #SEM_CPU_READY
	sev
1:
	subs	r3, r3, #1
	beq	3f
	add	r0, r0, #4
2:
	ldr	r1, [r0]
	cmp	r1, r2
	wfene
	bne	2b
	b	1b
3:
#endif
	.endm
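
	/*
	 * For reference, the CFG_BOOT_SYNC_CPU handshake implemented by
	 * the three macros above is roughly equivalent to the following
	 * C, assuming sem_cpu_sync (defined elsewhere) is an array of
	 * one 32-bit word per core and "wait for event" stands for wfe:
	 *
	 *   cpu_is_ready:   sem_cpu_sync[get_core_pos()] = SEM_CPU_READY;
	 *   wait_primary:   while (sem_cpu_sync[0] != SEM_CPU_READY)
	 *                           wait for event;
	 *   wait_secondary: for (n = 1; n < CFG_TEE_CORE_NB_CORE; n++)
	 *                           while (sem_cpu_sync[n] != SEM_CPU_READY)
	 *                                   wait for event;
	 */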

	/*
	 * set_sctlr : Setup some core configuration in CP15 SCTLR
	 *
	 * Setup required by current implementation of the OP-TEE core:
	 * - Disable data and instruction caches.
	 * - MMU is expected off and exceptions trapped in ARM mode.
	 * - Enable or disable alignment checks depending on platform
	 *   configuration.
	 * - Optionally enable write-implies-execute-never.
	 * - Optionally enable round robin strategy for cache replacement.
	 *
	 * Clobbers r0.
	 */
	.macro set_sctlr
	read_sctlr r0
	bic	r0, r0, #(SCTLR_M | SCTLR_C)
	bic	r0, r0, #SCTLR_I
	bic	r0, r0, #SCTLR_TE
	orr	r0, r0, #SCTLR_SPAN
#if defined(CFG_SCTLR_ALIGNMENT_CHECK)
	orr	r0, r0, #SCTLR_A
#else
	bic	r0, r0, #SCTLR_A
#endif
#if defined(CFG_HWSUPP_MEM_PERM_WXN) && defined(CFG_CORE_RWDATA_NOEXEC)
	orr	r0, r0, #(SCTLR_WXN | SCTLR_UWXN)
#endif
#if defined(CFG_ENABLE_SCTLR_RR)
	orr	r0, r0, #SCTLR_RR
#endif
	write_sctlr r0
	.endm

	.macro maybe_init_spectre_workaround
#if !defined(CFG_WITH_ARM_TRUSTED_FW) && \
    (defined(CFG_CORE_WORKAROUND_SPECTRE_BP) || \
     defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC))
	read_midr r0
	ubfx	r1, r0, #MIDR_IMPLEMENTER_SHIFT, #MIDR_IMPLEMENTER_WIDTH
	cmp	r1, #MIDR_IMPLEMENTER_ARM
	bne	1f
	ubfx	r1, r0, #MIDR_PRIMARY_PART_NUM_SHIFT, \
		#MIDR_PRIMARY_PART_NUM_WIDTH

	movw	r2, #CORTEX_A8_PART_NUM
	cmp	r1, r2
	moveq	r2, #ACTLR_CA8_ENABLE_INVALIDATE_BTB
	beq	2f

	movw	r2, #CORTEX_A15_PART_NUM
	cmp	r1, r2
	moveq	r2, #ACTLR_CA15_ENABLE_INVALIDATE_BTB
	bne	1f	/* Skip it for all other CPUs */
2:
	read_actlr r0
	orr	r0, r0, r2
	write_actlr r0
	isb
1:
#endif
	.endm

FUNC _start , :
UNWIND(	.cantunwind)
	/*
	 * Temporary copy of boot argument registers, will be passed to
	 * boot_save_args() further down.
	 */
	mov	r4, r0
	mov	r5, r1
	mov	r6, r2
	mov	r7, r3
	mov	r8, lr

	/*
	 * 32-bit entry is expected to execute in Supervisor mode, but
	 * some bootloaders may enter in Supervisor or Monitor mode.
	 */
	cps	#CPSR_MODE_SVC

	/* Early ARM secure MP specific configuration */
	bl	plat_cpu_reset_early
	maybe_init_spectre_workaround

	set_sctlr
	isb

	ldr	r0, =reset_vect_table
	write_vbar r0

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	b	reset_primary
#else
	bl	__get_core_pos
	cmp	r0, #0
	beq	reset_primary
	b	reset_secondary
#endif
END_FUNC _start
DECLARE_KEEP_INIT _start

	/*
	 * Set up sp to point to the top of the tmp stack for the current
	 * CPU: sp is assigned
	 * stack_tmp + (cpu_id + 1) * stack_tmp_stride - STACK_TMP_GUARD
	 */
	.macro set_sp
	bl	__get_core_pos
	cmp	r0, #CFG_TEE_CORE_NB_CORE
	/* Unsupported CPU, park it before it breaks something */
	bge	unhandled_cpu
	add	r0, r0, #1

	/* r2 = stack_tmp - STACK_TMP_GUARD */
	adr	r3, stack_tmp_rel
	ldr	r2, [r3]
	add	r2, r2, r3

	/*
	 * stack_tmp_stride and stack_tmp_stride_rel are the
	 * equivalent of:
	 * extern const u32 stack_tmp_stride;
	 * u32 stack_tmp_stride_rel = (u32)&stack_tmp_stride -
	 *			      (u32)&stack_tmp_stride_rel;
	 *
	 * To load the value of stack_tmp_stride we do the equivalent
	 * of:
	 * *(u32 *)(stack_tmp_stride_rel + (u32)&stack_tmp_stride_rel)
	 */
	adr	r3, stack_tmp_stride_rel
	ldr	r1, [r3]
	ldr	r1, [r1, r3]

	/*
	 * r0 is core pos + 1
	 * r1 is value of stack_tmp_stride
	 * r2 is value of stack_tmp - STACK_TMP_GUARD
	 */
	mul	r1, r0, r1
	add	sp, r1, r2
	.endm
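
	/*
	 * In C-like terms the set_sp macro above computes, using only
	 * PC-relative accesses so that it does not rely on the image's
	 * link-time addresses:
	 *
	 *   sp = (vaddr_t)stack_tmp + (get_core_pos() + 1) *
	 *        stack_tmp_stride - STACK_TMP_GUARD;
	 */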

	/*
	 * Cache maintenance during entry: handle outer cache.
	 * End address is exclusive: first byte not to be changed.
	 * Note however arm_clX_inv/cleanbyva operate on full cache lines.
	 *
	 * Use ANSI #define to trap source file line number for PL310 assertion
	 */
	.macro __inval_cache_vrange vbase, vend, line
#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
	assert_flat_mapped_range (\vbase), (\line)
	bl	pl310_base
	ldr	r1, \vbase
	ldr	r2, \vend
	bl	arm_cl2_invbypa
#endif
	ldr	r0, \vbase
	ldr	r1, \vend
	sub	r1, r1, r0
	bl	dcache_inv_range
	.endm

	.macro __flush_cache_vrange vbase, vend, line
#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
	assert_flat_mapped_range (\vbase), (\line)
	ldr	r0, \vbase
	ldr	r1, \vend
	sub	r1, r1, r0
	bl	dcache_clean_range
	bl	pl310_base
	ldr	r1, \vbase
	ldr	r2, \vend
	bl	arm_cl2_cleaninvbypa
#endif
	ldr	r0, \vbase
	ldr	r1, \vend
	sub	r1, r1, r0
	bl	dcache_cleaninv_range
	.endm

#define inval_cache_vrange(vbase, vend) \
		__inval_cache_vrange vbase, vend, __LINE__

#define flush_cache_vrange(vbase, vend) \
		__flush_cache_vrange vbase, vend, __LINE__

#ifdef CFG_BOOT_SYNC_CPU
#define flush_cpu_semaphores \
		flush_cache_vrange(sem_cpu_sync_start, sem_cpu_sync_end)
#else
#define flush_cpu_semaphores
#endif

LOCAL_FUNC reset_primary , : , .identity_map
UNWIND(	.cantunwind)

	/* preserve r4-r8: bootargs */

#ifdef CFG_WITH_PAGER
	/*
	 * Move init code into correct location and move hashes to a
	 * temporary safe location until the heap is initialized.
	 *
	 * The binary is built as:
	 * [Pager code, rodata and data] : In correct location
	 * [Init code and rodata] : Should be copied to __init_start
	 * [struct boot_embdata + data] : Should be saved before
	 * initializing pager, first uint32_t tells the length of the data
	 */
	ldr	r0, =__init_start	/* dst */
	ldr	r1, =__data_end		/* src */
	ldr	r2, =__init_end
	sub	r2, r2, r0		/* init len */
	ldr	r12, [r1, r2]		/* length of hashes etc */
	add	r2, r2, r12		/* length of init and hashes etc */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	r0, r0, r2		/* __init_start + len */
	add	r1, r1, r2		/* __data_end + len */
	str	r0, cached_mem_end
	ldr	r2, =__init_start
copy_init:
	ldmdb	r1!, {r3, r9-r12}
	stmdb	r0!, {r3, r9-r12}
	cmp	r0, r2
	bgt	copy_init
#else
	/*
	 * The binary is built as:
	 * [Core, rodata and data] : In correct location
	 * [struct boot_embdata + data] : Should be moved to __end, first
	 * uint32_t tells the length of the struct + data
	 */
	ldr	r0, =__end		/* dst */
	ldr	r1, =__data_end		/* src */
	ldr	r2, [r1]		/* struct boot_embdata::total_len */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	r0, r0, r2
	add	r1, r1, r2
	str	r0, cached_mem_end
	ldr	r2, =__end

copy_init:
	ldmdb	r1!, {r3, r9-r12}
	stmdb	r0!, {r3, r9-r12}
	cmp	r0, r2
	bgt	copy_init
#endif
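
	/*
	 * For reference, the copy loop above (non-pager case) is roughly
	 * equivalent to the following C, copying backwards since source
	 * and destination may overlap:
	 *
	 *   len = ((struct boot_embdata *)__data_end)->total_len;
	 *   memmove(__end, __data_end, len);
	 *   cached_mem_end = (vaddr_t)__end + len;
	 */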

	/*
	 * Clear .bss, this code obviously depends on the linker keeping
	 * the start/end of .bss at least 8-byte aligned.
	 */
	ldr	r0, =__bss_start
	ldr	r1, =__bss_end
	mov	r2, #0
	mov	r3, #0
clear_bss:
	stmia	r0!, {r2, r3}
	cmp	r0, r1
	bls	clear_bss

#ifdef CFG_NS_VIRTUALIZATION
	/*
	 * Clear .nex_bss, this code obviously depends on the linker keeping
	 * the start/end of .nex_bss at least 8-byte aligned.
	 */
	ldr	r0, =__nex_bss_start
	ldr	r1, =__nex_bss_end
	mov	r2, #0
	mov	r3, #0
clear_nex_bss:
	stmia	r0!, {r2, r3}
	cmp	r0, r1
	bls	clear_nex_bss
#endif

#ifdef CFG_CORE_SANITIZE_KADDRESS
	/* First initialize the entire shadow area with no access */
	ldr	r0, =__asan_shadow_start	/* start */
	ldr	r1, =__asan_shadow_end		/* limit */
	mov	r2, #ASAN_DATA_RED_ZONE
shadow_no_access:
	str	r2, [r0], #4
	cmp	r0, r1
	bls	shadow_no_access

	/* Mark the entire stack area as OK */
	ldr	r2, =CFG_ASAN_SHADOW_OFFSET
	ldr	r0, =__nozi_stack_start	/* start */
	lsr	r0, r0, #ASAN_BLOCK_SHIFT
	add	r0, r0, r2
	ldr	r1, =__nozi_stack_end	/* limit */
	lsr	r1, r1, #ASAN_BLOCK_SHIFT
	add	r1, r1, r2
	mov	r2, #0
shadow_stack_access_ok:
	strb	r2, [r0], #1
	cmp	r0, r1
	bls	shadow_stack_access_ok
#endif
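
	/*
	 * For reference: with CFG_CORE_SANITIZE_KADDRESS the loops above
	 * first mark the whole ASAN shadow area as no-access and then
	 * mark the shadow of the __nozi_stack area as accessible. As in
	 * the code above, the shadow byte covering an address "va" is
	 * located at
	 *
	 *   (va >> ASAN_BLOCK_SHIFT) + CFG_ASAN_SHADOW_OFFSET
	 *
	 * and a shadow byte value of 0 means fully accessible.
	 */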

	set_sp

	bl	thread_init_thread_core_local

	/* Complete ARM secure MP common configuration */
	bl	plat_primary_init_early

	/* Enable console */
	bl	console_init

	mov	r0, r8
	mov	r1, #0
	push	{r0, r1}
	mov	r0, r4
	mov	r1, r5
	mov	r2, r6
	mov	r3, r7
	bl	boot_save_args
	add	sp, sp, #(2 * 4)

#ifdef CFG_PL310
	bl	pl310_base
	bl	arm_cl2_config
#endif

	/*
	 * Invalidate dcache for all memory used during initialization to
	 * avoid nasty surprises when the cache is turned on. We must not
	 * invalidate memory not used by OP-TEE since we may invalidate
	 * entries used by, for instance, ARM Trusted Firmware.
	 */
	inval_cache_vrange(cached_mem_start, cached_mem_end)

#if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL)
	/* Enable PL310 if not yet enabled */
	bl	pl310_base
	bl	arm_cl2_enable
#endif

#ifdef CFG_CORE_ASLR
	bl	get_aslr_seed
#ifdef CFG_CORE_ASLR_SEED
	mov_imm	r0, CFG_CORE_ASLR_SEED
#endif
#else
	mov	r0, #0
#endif

	ldr	r1, =boot_mmu_config
	bl	core_init_mmu_map

#ifdef CFG_CORE_ASLR
	/*
	 * Process relocation information to update with the virtual map
	 * offset. We're doing this now, before the MMU is enabled, since
	 * some of the memory will become write-protected.
	 */
	ldr	r0, =boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_MAP_OFFSET]
	/*
	 * Update the cached_mem_end address with the load offset since
	 * it was calculated before relocation.
	 */
	ldr	r2, cached_mem_end
	add	r2, r2, r0
	str	r2, cached_mem_end

	bl	relocate
#endif

	bl	__get_core_pos
	bl	enable_mmu
#ifdef CFG_CORE_ASLR
	/*
	 * Reinitialize the console, since register_serial_console() has
	 * previously registered a PA and with ASLR the VA is different
	 * from the PA.
	 */
	bl	console_init
#endif

#ifdef CFG_NS_VIRTUALIZATION
	/*
	 * Initialize partition tables for each partition to
	 * default_partition, which has now been relocated to a different VA.
	 */
	bl	core_mmu_set_default_prtn_tbl
#endif

	bl	boot_init_primary_early
#ifndef CFG_NS_VIRTUALIZATION
	mov	r9, sp
	ldr	r0, =threads
	ldr	r0, [r0, #THREAD_CTX_STACK_VA_END]
	mov	sp, r0
	bl	thread_get_core_local
	mov	r8, r0
	mov	r0, #0
	str	r0, [r8, #THREAD_CORE_LOCAL_FLAGS]
#endif
	bl	boot_init_primary_late
	bl	boot_init_primary_final
#ifndef CFG_NS_VIRTUALIZATION
	mov	r0, #THREAD_CLF_TMP
	str	r0, [r8, #THREAD_CORE_LOCAL_FLAGS]
	mov	sp, r9
#endif

#ifdef _CFG_CORE_STACK_PROTECTOR
	/* Update stack canary value */
	sub	sp, sp, #0x8
	mov	r0, sp
	mov	r1, #1
	mov	r2, #0x4
	bl	plat_get_random_stack_canaries
	ldr	r0, [sp]
	ldr	r1, =__stack_chk_guard
	str	r0, [r1]
	add	sp, sp, #0x8
#endif
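
	/*
	 * With _CFG_CORE_STACK_PROTECTOR, the canary update above
	 * corresponds roughly to the following C, assuming
	 * plat_get_random_stack_canaries(buf, ncan, size) fills "ncan"
	 * canaries of "size" bytes each into "buf":
	 *
	 *   uint32_t canary;
	 *
	 *   plat_get_random_stack_canaries(&canary, 1, sizeof(canary));
	 *   __stack_chk_guard = canary;
	 */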

	/*
	 * In case we've touched memory that secondary CPUs will use before
	 * they have turned on their D-cache, clean and invalidate the
	 * D-cache before exiting to normal world.
	 */
	flush_cache_vrange(cached_mem_start, cached_mem_end)

	/* Release secondary boot cores and sync with them */
	cpu_is_ready
	flush_cpu_semaphores
	wait_secondary

#ifdef CFG_PL310_LOCKED
#ifdef CFG_PL310_SIP_PROTOCOL
#error "CFG_PL310_LOCKED must not be defined when CFG_PL310_SIP_PROTOCOL=y"
#endif
	/* Lock/invalidate all lines: pl310 behaves as if disabled */
	bl	pl310_base
	bl	arm_cl2_lockallways
	bl	pl310_base
	bl	arm_cl2_cleaninvbyway
#endif

	/*
	 * Clear the current thread id now to allow the thread to be
	 * reused on the next entry. Matches thread_init_boot_thread()
	 * in boot.c.
	 */
#ifndef CFG_NS_VIRTUALIZATION
	bl	thread_clr_boot_thread
#endif

#ifdef CFG_CORE_FFA
	ldr	r0, =cpu_on_handler
	/*
	 * Compensate for the virtual map offset since cpu_on_handler() is
	 * called with the MMU off.
	 */
	ldr	r1, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET
	sub	r0, r0, r1
	bl	thread_spmc_register_secondary_ep
	b	thread_ffa_msg_wait
#else /* CFG_CORE_FFA */

#if defined(CFG_WITH_ARM_TRUSTED_FW)
	ldr	r0, =boot_mmu_config
	ldr	r0, [r0, #CORE_MMU_CONFIG_MAP_OFFSET]
	ldr	r1, =thread_vector_table
	/* Pass the vector address compensated for the virtual map offset */
	sub	r1, r1, r0
#else
	/* Relay standard bootarg #1 and #2 to non-secure entry */
	mov	r4, #0
	mov	r3, r6		/* std bootarg #2 for register R2 */
	mov	r2, r5		/* std bootarg #1 for register R1 */
	mov	r1, #0
#endif /* CFG_WITH_ARM_TRUSTED_FW */

	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
#endif /* CFG_CORE_FFA */
END_FUNC reset_primary

#ifdef CFG_BOOT_SYNC_CPU
LOCAL_DATA sem_cpu_sync_start , :
	.word	sem_cpu_sync
END_DATA sem_cpu_sync_start

LOCAL_DATA sem_cpu_sync_end , :
	.word	sem_cpu_sync + (CFG_TEE_CORE_NB_CORE << 2)
END_DATA sem_cpu_sync_end
#endif

LOCAL_DATA cached_mem_start , :
	.word	__text_start
END_DATA cached_mem_start

LOCAL_DATA cached_mem_end , :
	.skip	4
END_DATA cached_mem_end

LOCAL_FUNC unhandled_cpu , :
	wfi
	b	unhandled_cpu
END_FUNC unhandled_cpu

#ifdef CFG_CORE_ASLR
LOCAL_FUNC relocate , :
	push	{r4-r5}
	/* r0 holds load offset */
#ifdef CFG_WITH_PAGER
	ldr	r12, =__init_end
#else
	ldr	r12, =__end
#endif
	ldr	r2, [r12, #BOOT_EMBDATA_RELOC_OFFSET]
	ldr	r3, [r12, #BOOT_EMBDATA_RELOC_LEN]

	mov_imm	r1, TEE_LOAD_ADDR
	add	r2, r2, r12	/* start of relocations */
	add	r3, r3, r2	/* end of relocations */

	/*
	 * Relocations are not formatted as Rel32, instead they are in a
	 * compressed format created by get_reloc_bin() in
	 * scripts/gen_tee_bin.py.
	 *
	 * All the R_ARM_RELATIVE relocations are translated into a list
	 * of 32-bit offsets from TEE_LOAD_ADDR. Each such offset points
	 * at a 32-bit value which is increased with the load offset.
	 */

#ifdef CFG_WITH_PAGER
	/*
	 * With pager enabled we can only relocate the pager and init
	 * parts, the rest has to be done when a page is populated.
	 */
	sub	r12, r12, r1
#endif

	b	2f
	/* Loop over the relocation addresses and process all entries */
1:	ldr	r4, [r2], #4
#ifdef CFG_WITH_PAGER
	/* Skip too large addresses */
	cmp	r4, r12
	bge	2f
#endif
	ldr	r5, [r4, r1]
	add	r5, r5, r0
	str	r5, [r4, r1]

2:	cmp	r2, r3
	bne	1b

	pop	{r4-r5}
	bx	lr
END_FUNC relocate
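
/*
 * For reference, the relocation loop in relocate() above is roughly
 * equivalent to the following C, where reloc[] is the array of 32-bit
 * offsets described above and off is the load offset passed in r0:
 *
 *   for (n = 0; n < num_relocs; n++)
 *           *(uint32_t *)(TEE_LOAD_ADDR + reloc[n]) += off;
 *
 * (with the pager case skipping offsets beyond the init part)
 */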
#endif

/*
 * void enable_mmu(unsigned long core_pos);
 *
 * This function depends on being mapped within the identity map, where
 * the physical and virtual addresses are the same. After the MMU has
 * been enabled the instruction pointer will be updated to execute at
 * the new offset instead. The stack pointer and the return address are
 * updated accordingly.
 */
LOCAL_FUNC enable_mmu , : , .identity_map
	/* r0 = core pos */
	adr	r1, boot_mmu_config

#ifdef CFG_WITH_LPAE
	ldm	r1!, {r2, r3}
	/*
	 * r2 = ttbcr
	 * r3 = mair0
	 */
	write_ttbcr r2
	write_mair0 r3

	ldm	r1!, {r2, r3}
	/*
	 * r2 = ttbr0_base
	 * r3 = ttbr0_core_offset
	 */

	/*
	 * ttbr0 = ttbr0_base + ttbr0_core_offset * core_pos
	 */
	mla	r12, r0, r3, r2
	mov	r0, #0
	write_ttbr0_64bit r12, r0
	write_ttbr1_64bit r0, r0
#else
	ldm	r1!, {r2, r3}
	/*
	 * r2 = prrr
	 * r3 = nmrr
	 */
	write_prrr r2
	write_nmrr r3

	ldm	r1!, {r2, r3}
	/*
	 * r2 = dacr
	 * r3 = ttbcr
	 */
	write_dacr r2
	write_ttbcr r3

	ldm	r1!, {r2}
	/* r2 = ttbr */
	write_ttbr0 r2
	write_ttbr1 r2

	mov	r2, #0
	write_contextidr r2
#endif
	ldm	r1!, {r2}
	/* r2 = load_offset (always 0 if CFG_CORE_ASLR=n) */
	isb

	/* Invalidate TLB */
	write_tlbiall

	/*
	 * Make sure translation table writes have drained into memory and
	 * the TLB invalidation is complete.
	 */
	dsb	sy
	isb

	read_sctlr r0
	orr	r0, r0, #SCTLR_M
#ifndef CFG_WITH_LPAE
	/* Enable Access flag (simplified access permissions) and TEX remap */
	orr	r0, r0, #(SCTLR_AFE | SCTLR_TRE)
#endif
	write_sctlr r0
	isb

	/* Update vbar */
	read_vbar r1
	add	r1, r1, r2
	write_vbar r1
	isb

	/* Invalidate instruction cache and branch predictor */
	write_iciallu
	write_bpiall
	isb

	read_sctlr r0
	/* Enable I and D cache */
	orr	r0, r0, #SCTLR_I
	orr	r0, r0, #SCTLR_C
#if defined(CFG_ENABLE_SCTLR_Z)
	/*
	 * This is only needed on the ARMv7 architecture and is hence
	 * conditioned by the configuration directive CFG_ENABLE_SCTLR_Z.
	 * On more recent architectures, program flow prediction is
	 * automatically enabled when the MMU is enabled.
	 */
	orr	r0, r0, #SCTLR_Z
#endif
	write_sctlr r0
	isb

	/* Adjust stack pointer and return address */
	add	sp, sp, r2
	add	lr, lr, r2

	bx	lr
END_FUNC enable_mmu

LOCAL_DATA stack_tmp_rel , :
	.word	stack_tmp - stack_tmp_rel - STACK_TMP_GUARD
END_DATA stack_tmp_rel

LOCAL_DATA stack_tmp_stride_rel , :
	.word	stack_tmp_stride - stack_tmp_stride_rel
END_DATA stack_tmp_stride_rel

DATA boot_mmu_config , : /* struct core_mmu_config */
	.skip	CORE_MMU_CONFIG_SIZE
END_DATA boot_mmu_config
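
/*
 * Note: boot_mmu_config is filled in by core_init_mmu_map(), called
 * from reset_primary above. It is read by enable_mmu and is also used
 * in reset_primary to convert the virtual addresses of
 * thread_vector_table and cpu_on_handler back to physical addresses.
 */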

#if defined(CFG_WITH_ARM_TRUSTED_FW)
FUNC cpu_on_handler , : , .identity_map
UNWIND(	.cantunwind)
	mov	r4, r0
	mov	r5, r1
	mov	r6, lr

	set_sctlr
	isb

	adr	r0, reset_vect_table
	write_vbar r0

	bl	__get_core_pos
	bl	enable_mmu

	set_sp

	mov	r0, r4
	mov	r1, r5
	bl	boot_cpu_on_handler
#ifdef CFG_CORE_FFA
	b	thread_ffa_msg_wait
#else
	bx	r6
#endif
END_FUNC cpu_on_handler
DECLARE_KEEP_PAGER cpu_on_handler

#else /* defined(CFG_WITH_ARM_TRUSTED_FW) */

LOCAL_FUNC reset_secondary , : , .identity_map
UNWIND(	.cantunwind)
	adr	r0, reset_vect_table
	write_vbar r0

	wait_primary

	set_sp

#if defined (CFG_BOOT_SECONDARY_REQUEST)
	/* If L1 was not invalidated before, do it here */
	mov	r0, #DCACHE_OP_INV
	bl	dcache_op_level1
#endif

	bl	__get_core_pos
	bl	enable_mmu

	cpu_is_ready

#if defined (CFG_BOOT_SECONDARY_REQUEST)
	/*
	 * boot_core_hpen() return value (r0) is the address of the
	 * NS entry context structure
	 */
	bl	boot_core_hpen
	ldm	r0, {r0, r6}
#else
	mov	r0, r8	/* NS entry address */
	mov	r6, #0
#endif
	bl	boot_init_secondary

	mov	r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	mov	r1, r6
	mov	r2, #0
	mov	r3, #0
	mov	r4, #0
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
END_FUNC reset_secondary
DECLARE_KEEP_PAGER reset_secondary
#endif /* defined(CFG_WITH_ARM_TRUSTED_FW) */