/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015-2022, Linaro Limited
 * Copyright (c) 2021, Arm Limited
 */

#include <platform_config.h>

#include <arm64_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/thread_private.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

	/*
	 * Setup SP_EL0 and SP_EL1, SP will be set to SP_EL0.
	 * SP_EL0 is assigned:
	 * stack_tmp + (cpu_id + 1) * stack_tmp_stride - STACK_TMP_GUARD
	 * SP_EL1 is assigned thread_core_local[cpu_id]
	 */
	.macro set_sp
	bl	__get_core_pos
	cmp	x0, #CFG_TEE_CORE_NB_CORE
	/* Unsupported CPU, park it before it breaks something */
	b.ge	unhandled_cpu
	add	x0, x0, #1
	adr_l	x1, stack_tmp_stride
	ldr	w1, [x1]
	mul	x1, x0, x1

	/* x0 = stack_tmp - STACK_TMP_GUARD */
	adr_l	x2, stack_tmp_rel
	ldr	w0, [x2]
	add	x0, x0, x2

	msr	spsel, #0
	add	sp, x1, x0
	bl	thread_get_core_local
	msr	spsel, #1
	mov	sp, x0
	msr	spsel, #0
	.endm

	.macro read_feat_mte reg
	mrs	\reg, id_aa64pfr1_el1
	ubfx	\reg, \reg, #ID_AA64PFR1_EL1_MTE_SHIFT, #4
	.endm

	.macro set_sctlr_el1
	mrs	x0, sctlr_el1
	orr	x0, x0, #SCTLR_I
	orr	x0, x0, #SCTLR_SA
	orr	x0, x0, #SCTLR_SPAN
#if defined(CFG_CORE_RWDATA_NOEXEC)
	orr	x0, x0, #SCTLR_WXN
#endif
#if defined(CFG_SCTLR_ALIGNMENT_CHECK)
	orr	x0, x0, #SCTLR_A
#else
	bic	x0, x0, #SCTLR_A
#endif
#ifdef CFG_MEMTAG
	read_feat_mte x1
	cmp	w1, #1
	b.ls	111f
	orr	x0, x0, #(SCTLR_ATA | SCTLR_ATA0)
	bic	x0, x0, #SCTLR_TCF_MASK
	bic	x0, x0, #SCTLR_TCF0_MASK
111:
#endif
#if defined(CFG_TA_PAUTH) && defined(CFG_TA_BTI)
	orr	x0, x0, #SCTLR_BT0
#endif
#if defined(CFG_CORE_PAUTH) && defined(CFG_CORE_BTI)
	orr	x0, x0, #SCTLR_BT1
#endif
	msr	sctlr_el1, x0
	.endm

	.macro init_memtag_per_cpu
	read_feat_mte x0
	cmp	w0, #1
	b.ls	11f

#ifdef CFG_TEE_CORE_DEBUG
	/*
	 * This together with GCR_EL1.RRND = 0 will make the tags
	 * acquired with the irg instruction deterministic.
	 */
	mov_imm	x0, 0xcafe00
	msr	rgsr_el1, x0
	/* Avoid tag = 0x0 and 0xf */
	mov	x0, #0
#else
	/*
	 * Still avoid tag = 0x0 and 0xf as we use that tag for
	 * everything which isn't explicitly tagged. Setting
	 * GCR_EL1.RRND = 1 allows an implementation specific
	 * method of generating the tags.
	 */
	mov	x0, #GCR_EL1_RRND
#endif
	orr	x0, x0, #1
	orr	x0, x0, #(1 << 15)
	msr	gcr_el1, x0

	/*
	 * Enable the tag checks on the current CPU.
	 *
	 * Depends on boot_init_memtag() having cleared tags for
	 * TEE core memory. Well, not really, addresses with the
	 * tag value 0b0000 will use unchecked access due to
	 * TCR_TCMA0.
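	 * TCR_TBI0 is also set below so that the top byte of an
	 * address, which carries the logical tag, is ignored during
	 * address translation.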
	 */
	mrs	x0, tcr_el1
	orr	x0, x0, #TCR_TBI0
	orr	x0, x0, #TCR_TCMA0
	msr	tcr_el1, x0

	mrs	x0, sctlr_el1
	orr	x0, x0, #SCTLR_TCF_SYNC
	orr	x0, x0, #SCTLR_TCF0_SYNC
	msr	sctlr_el1, x0

	isb
11:
	.endm

	.macro init_pauth_per_cpu
	msr	spsel, #1
	ldp	x0, x1, [sp, #THREAD_CORE_LOCAL_KEYS]
	msr	spsel, #0
	write_apiakeyhi	x0
	write_apiakeylo	x1
	mrs	x0, sctlr_el1
	orr	x0, x0, #SCTLR_ENIA
	msr	sctlr_el1, x0
	.endm

FUNC _start , :
#if defined(CFG_CORE_SEL1_SPMC)
	/*
	 * With OP-TEE as SPMC at S-EL1 the SPMD (SPD_spmd) in TF-A passes
	 * the DTB in x0, the pageable part in x1 and the rest of the
	 * registers are unused
	 */
	mov	x19, x1		/* Save pageable part */
	mov	x20, x0		/* Save DT address */
#else
	mov	x19, x0		/* Save pageable part address */
#if defined(CFG_DT_ADDR)
	ldr	x20, =CFG_DT_ADDR
#else
	mov	x20, x2		/* Save DT address */
#endif
#endif

	adr	x0, reset_vect_table
	msr	vbar_el1, x0
	isb

	set_sctlr_el1
	isb

#ifdef CFG_WITH_PAGER
	/*
	 * Move init code into correct location and move hashes to a
	 * temporary safe location until the heap is initialized.
	 *
	 * The binary is built as:
	 * [Pager code, rodata and data] : In correct location
	 * [Init code and rodata] : Should be copied to __init_start
	 * [struct boot_embdata + data] : Should be saved before
	 * initializing pager, first uint32_t tells the length of the data
	 */
	adr	x0, __init_start	/* dst */
	adr	x1, __data_end		/* src */
	adr	x2, __init_end
	sub	x2, x2, x0		/* init len */
	ldr	w4, [x1, x2]		/* length of hashes etc */
	add	x2, x2, x4		/* length of init and hashes etc */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	x0, x0, x2		/* __init_start + len */
	add	x1, x1, x2		/* __data_end + len */
	adr	x3, cached_mem_end
	str	x0, [x3]
	adr	x2, __init_start
copy_init:
	ldp	x3, x4, [x1, #-16]!
	stp	x3, x4, [x0, #-16]!
	cmp	x0, x2
	b.gt	copy_init
#else
	/*
	 * The binary is built as:
	 * [Core, rodata and data] : In correct location
	 * [struct boot_embdata + data] : Should be moved to __end, first
	 * uint32_t tells the length of the struct + data
	 */
	adr_l	x0, __end	/* dst */
	adr_l	x1, __data_end	/* src */
	ldr	w2, [x1]	/* struct boot_embdata::total_len */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	x0, x0, x2
	add	x1, x1, x2
	adr	x3, cached_mem_end
	str	x0, [x3]
	adr_l	x2, __end

copy_init:
	ldp	x3, x4, [x1, #-16]!
	stp	x3, x4, [x0, #-16]!
	cmp	x0, x2
	b.gt	copy_init
#endif

	/*
	 * Clear .bss, this code obviously depends on the linker keeping
	 * start/end of .bss at least 8 byte aligned.
	 */
	adr_l	x0, __bss_start
	adr_l	x1, __bss_end
clear_bss:
	str	xzr, [x0], #8
	cmp	x0, x1
	b.lt	clear_bss

#ifdef CFG_VIRTUALIZATION
	/*
	 * Clear .nex_bss, this code obviously depends on the linker keeping
	 * start/end of .nex_bss at least 8 byte aligned.
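	 * .nex_bss holds the nexus data that is kept common to all
	 * partitions when CFG_VIRTUALIZATION is enabled.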
	 */
	adr	x0, __nex_bss_start
	adr	x1, __nex_bss_end
clear_nex_bss:
	str	xzr, [x0], #8
	cmp	x0, x1
	b.lt	clear_nex_bss
#endif

	/* Setup SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
	set_sp

	bl	thread_init_thread_core_local

	/* Enable aborts now that we can receive exceptions */
	msr	daifclr, #DAIFBIT_ABT

	/*
	 * Invalidate dcache for all memory used during initialization to
	 * avoid nasty surprises when the cache is turned on. We must not
	 * invalidate memory not used by OP-TEE since we may invalidate
	 * entries used by, for instance, ARM Trusted Firmware.
	 */
	adr_l	x0, __text_start
	ldr	x1, cached_mem_end
	sub	x1, x1, x0
	bl	dcache_cleaninv_range

	/* Enable Console */
	bl	console_init

#ifdef CFG_MEMTAG
	/*
	 * If FEAT_MTE2 is available, initialize the memtag callbacks.
	 * Tags for OP-TEE core memory are then cleared to make it safe to
	 * enable MEMTAG below.
	 */
	bl	boot_init_memtag
#endif

#ifdef CFG_CORE_ASLR
	mov	x0, x20
	bl	get_aslr_seed
#else
	mov	x0, #0
#endif

	adr	x1, boot_mmu_config
	bl	core_init_mmu_map

#ifdef CFG_CORE_ASLR
	/*
	 * Process relocation information again, updating for the new
	 * offset. We're doing this now, before the MMU is enabled, as
	 * some of the memory will become write protected.
	 */
	ldr	x0, boot_mmu_config + CORE_MMU_CONFIG_LOAD_OFFSET
	/*
	 * Update the cached_mem_end address with the load offset since
	 * it was calculated before relocation.
	 */
	adr	x5, cached_mem_end
	ldr	x6, [x5]
	add	x6, x6, x0
	str	x6, [x5]
	bl	relocate
#endif

	bl	__get_core_pos
	bl	enable_mmu
#ifdef CFG_CORE_ASLR
	/*
	 * Reinitialize console, since register_serial_console() has
	 * previously registered a PA and with ASLR the VA is different
	 * from the PA.
	 */
	bl	console_init
#endif

#ifdef CFG_VIRTUALIZATION
	/*
	 * Initialize partition tables for each partition to
	 * default_partition which has now been relocated to a different VA
	 */
	bl	core_mmu_set_default_prtn_tbl
#endif

	mov	x0, x19		/* pageable part address */
	mov	x1, #-1
	bl	boot_init_primary_early

#ifdef CFG_MEMTAG
	init_memtag_per_cpu
#endif

#ifndef CFG_VIRTUALIZATION
	mov	x21, sp
	adr_l	x0, threads
	ldr	x0, [x0, #THREAD_CTX_STACK_VA_END]
	mov	sp, x0
	bl	thread_get_core_local
	mov	x22, x0
	str	wzr, [x22, #THREAD_CORE_LOCAL_FLAGS]
#endif
	mov	x0, x20		/* DT address */
	bl	boot_init_primary_late
#ifdef CFG_CORE_PAUTH
	init_pauth_per_cpu
#endif

#ifndef CFG_VIRTUALIZATION
	mov	x0, #THREAD_CLF_TMP
	str	w0, [x22, #THREAD_CORE_LOCAL_FLAGS]
	mov	sp, x21
#endif

	/*
	 * In case we've touched memory that secondary CPUs will use before
	 * they have turned on their D-cache, clean and invalidate the
	 * D-cache before exiting to normal world.
	 */
	adr_l	x0, __text_start
	ldr	x1, cached_mem_end
	sub	x1, x1, x0
	bl	dcache_cleaninv_range

	/*
	 * Clear current thread id now to allow the thread to be reused on
	 * next entry. Matches the thread_init_boot_thread() in boot.c.
	 */
#ifndef CFG_VIRTUALIZATION
	bl	thread_clr_boot_thread
#endif

#ifdef CFG_CORE_FFA
	adr	x0, cpu_on_handler
	/*
	 * Compensate for the load offset since cpu_on_handler() is
	 * called with MMU off.
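	 * The registered entry point must therefore be a physical
	 * address.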
	 */
	ldr	x1, boot_mmu_config + CORE_MMU_CONFIG_LOAD_OFFSET
	sub	x0, x0, x1
	bl	thread_spmc_register_secondary_ep
	b	thread_ffa_msg_wait
#else
	/*
	 * Pass the vector address returned from main_init.
	 * Compensate for the load offset since cpu_on_handler() is
	 * called with MMU off.
	 */
	ldr	x0, boot_mmu_config + CORE_MMU_CONFIG_LOAD_OFFSET
	adr	x1, thread_vector_table
	sub	x1, x1, x0
	mov	x0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	smc	#0
	/* SMC should not return */
	panic_at_smc_return
#endif
END_FUNC _start
DECLARE_KEEP_INIT _start

	.section .identity_map.data
	.balign	8
LOCAL_DATA cached_mem_end , :
	.skip	8
END_DATA cached_mem_end

#ifdef CFG_CORE_ASLR
LOCAL_FUNC relocate , :
	/* x0 holds load offset */
#ifdef CFG_WITH_PAGER
	adr_l	x6, __init_end
#else
	adr_l	x6, __end
#endif
	ldp	w2, w3, [x6, #BOOT_EMBDATA_RELOC_OFFSET]

	mov_imm	x1, TEE_RAM_START
	add	x2, x2, x6	/* start of relocations */
	add	x3, x3, x2	/* end of relocations */

	/*
	 * Relocations are not formatted as Rela64, instead they are in a
	 * compressed format created by get_reloc_bin() in
	 * scripts/gen_tee_bin.py.
	 *
	 * All the R_AARCH64_RELATIVE relocations are translated into a
	 * list of 32-bit offsets from TEE_RAM_START. Each offset points
	 * out a 64-bit value which is increased with the load offset.
	 */

#ifdef CFG_WITH_PAGER
	/*
	 * With pager enabled we can only relocate the pager and init
	 * parts, the rest has to be done when a page is populated.
	 */
	sub	x6, x6, x1
#endif

	b	2f
	/* Loop over the relocation addresses and process all entries */
1:	ldr	w4, [x2], #4
#ifdef CFG_WITH_PAGER
	/* Skip too large addresses */
	cmp	x4, x6
	b.ge	2f
#endif
	add	x4, x4, x1
	ldr	x5, [x4]
	add	x5, x5, x0
	str	x5, [x4]

2:	cmp	x2, x3
	b.ne	1b

	ret
END_FUNC relocate
#endif

/*
 * void enable_mmu(unsigned long core_pos);
 *
 * This function depends on being mapped within the identity map, where
 * the physical address and the virtual address are the same. After the
 * MMU has been enabled the instruction pointer is updated to execute at
 * the new offset instead. Stack pointers and the return address are
 * updated as well.
 */
LOCAL_FUNC enable_mmu , : , .identity_map
	adr	x1, boot_mmu_config
	load_xregs x1, 0, 2, 6
	/*
	 * x0 = core_pos
	 * x2 = tcr_el1
	 * x3 = mair_el1
	 * x4 = ttbr0_el1_base
	 * x5 = ttbr0_core_offset
	 * x6 = load_offset
	 */
	msr	tcr_el1, x2
	msr	mair_el1, x3

	/*
	 * ttbr0_el1 = ttbr0_el1_base + ttbr0_core_offset * core_pos
	 */
	madd	x1, x5, x0, x4
	msr	ttbr0_el1, x1
	msr	ttbr1_el1, xzr
	isb

	/* Invalidate TLB */
	tlbi	vmalle1

	/*
	 * Make sure translation table writes have drained into memory and
	 * the TLB invalidation is complete.
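	 * This must happen before the MMU is enabled below.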
	 */
	dsb	sy
	isb

	/* Enable the MMU */
	mrs	x1, sctlr_el1
	orr	x1, x1, #SCTLR_M
	msr	sctlr_el1, x1
	isb

	/* Update vbar */
	mrs	x1, vbar_el1
	add	x1, x1, x6
	msr	vbar_el1, x1
	isb

	/* Invalidate instruction cache and branch predictor */
	ic	iallu
	isb

	/* Enable I and D cache */
	mrs	x1, sctlr_el1
	orr	x1, x1, #SCTLR_I
	orr	x1, x1, #SCTLR_C
	msr	sctlr_el1, x1
	isb

	/* Adjust stack pointers and return address */
	msr	spsel, #1
	add	sp, sp, x6
	msr	spsel, #0
	add	sp, sp, x6
	add	x30, x30, x6

	ret
END_FUNC enable_mmu

	.section .identity_map.data
	.balign	8
DATA boot_mmu_config , :	/* struct core_mmu_config */
	.skip	CORE_MMU_CONFIG_SIZE
END_DATA boot_mmu_config

FUNC cpu_on_handler , :
	mov	x19, x0
	mov	x20, x1
	mov	x21, x30

	adr	x0, reset_vect_table
	msr	vbar_el1, x0
	isb

	set_sctlr_el1
	isb

	/* Enable aborts now that we can receive exceptions */
	msr	daifclr, #DAIFBIT_ABT

	bl	__get_core_pos
	bl	enable_mmu

	/* Setup SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
	set_sp

#ifdef CFG_MEMTAG
	init_memtag_per_cpu
#endif
#ifdef CFG_CORE_PAUTH
	init_pauth_per_cpu
#endif

	mov	x0, x19
	mov	x1, x20
#ifdef CFG_CORE_FFA
	bl	boot_cpu_on_handler
	b	thread_ffa_msg_wait
#else
	mov	x30, x21
	b	boot_cpu_on_handler
#endif
END_FUNC cpu_on_handler
DECLARE_KEEP_PAGER cpu_on_handler

LOCAL_FUNC unhandled_cpu , :
	wfi
	b	unhandled_cpu
END_FUNC unhandled_cpu

LOCAL_DATA stack_tmp_rel , :
	.word	stack_tmp - stack_tmp_rel - STACK_TMP_GUARD
END_DATA stack_tmp_rel

	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be placed
	 * immediately after the last instruction in the vector and takes
	 * the vector entry point label as the parameter.
	 */
	.macro check_vector_size since
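	/* Each vector entry spans 0x80 bytes, i.e. 32 4-byte instructions */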
	.if (. - \since) > (32 * 4)
	.error "Vector exceeds 32 instructions"
	.endif
	.endm

	.section .identity_map, "ax", %progbits
	.align	11
LOCAL_FUNC reset_vect_table , :, .identity_map, , nobti
	/* -----------------------------------------------------
	 * Current EL with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
SynchronousExceptionSP0:
	b	SynchronousExceptionSP0
	check_vector_size SynchronousExceptionSP0

	.align	7
IrqSP0:
	b	IrqSP0
	check_vector_size IrqSP0

	.align	7
FiqSP0:
	b	FiqSP0
	check_vector_size FiqSP0

	.align	7
SErrorSP0:
	b	SErrorSP0
	check_vector_size SErrorSP0

	/* -----------------------------------------------------
	 * Current EL with SPx : 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionSPx:
	b	SynchronousExceptionSPx
	check_vector_size SynchronousExceptionSPx

	.align	7
IrqSPx:
	b	IrqSPx
	check_vector_size IrqSPx

	.align	7
FiqSPx:
	b	FiqSPx
	check_vector_size FiqSPx

	.align	7
SErrorSPx:
	b	SErrorSPx
	check_vector_size SErrorSPx

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionA64:
	b	SynchronousExceptionA64
	check_vector_size SynchronousExceptionA64

	.align	7
IrqA64:
	b	IrqA64
	check_vector_size IrqA64

	.align	7
FiqA64:
	b	FiqA64
	check_vector_size FiqA64

	.align	7
SErrorA64:
	b	SErrorA64
	check_vector_size SErrorA64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionA32:
	b	SynchronousExceptionA32
	check_vector_size SynchronousExceptionA32

	.align	7
IrqA32:
	b	IrqA32
	check_vector_size IrqA32

	.align	7
FiqA32:
	b	FiqA32
	check_vector_size FiqA32

	.align	7
SErrorA32:
	b	SErrorA32
	check_vector_size SErrorA32

END_FUNC reset_vect_table

BTI(emit_aarch64_feature_1_and GNU_PROPERTY_AARCH64_FEATURE_1_BTI)