/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2015, Linaro Limited
 * Copyright (c) 2021, Arm Limited
 */

#include <platform_config.h>

#include <arm64_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <sm/optee_smc.h>
#include <sm/teesmc_opteed.h>
#include <sm/teesmc_opteed_macros.h>

#include "thread_private.h"

	/*
	 * Set up SP_EL0 and SP_EL1, SP will be set to SP_EL0.
	 * SP_EL0 is assigned stack_tmp_export + cpu_id * stack_tmp_stride
	 * SP_EL1 is assigned thread_core_local[cpu_id]
	 */
	.macro set_sp
		bl	__get_core_pos
		cmp	x0, #CFG_TEE_CORE_NB_CORE
		/* Unsupported CPU, park it before it breaks something */
		b.ge	unhandled_cpu
		adr_l	x1, stack_tmp_stride
		ldr	w1, [x1]
		mul	x1, x0, x1
		adr_l	x0, stack_tmp_export
		ldr	x0, [x0]
		msr	spsel, #0
		add	sp, x1, x0
		bl	thread_get_core_local
		msr	spsel, #1
		mov	sp, x0
		msr	spsel, #0
	.endm

	.macro set_sctlr_el1
		mrs	x0, sctlr_el1
		orr	x0, x0, #SCTLR_I
		orr	x0, x0, #SCTLR_SA
		orr	x0, x0, #SCTLR_SPAN
#if defined(CFG_CORE_RWDATA_NOEXEC)
		orr	x0, x0, #SCTLR_WXN
#endif
#if defined(CFG_SCTLR_ALIGNMENT_CHECK)
		orr	x0, x0, #SCTLR_A
#else
		bic	x0, x0, #SCTLR_A
#endif
		msr	sctlr_el1, x0
	.endm

FUNC _start , :
#if defined(CFG_CORE_SEL1_SPMC)
	/*
	 * With OP-TEE as SPMC at S-EL1 the SPMD (SPD_spmd) in TF-A passes
	 * the DTB in x0 and the pageable part in x1, the rest of the
	 * registers are unused.
	 */
	mov	x19, x1		/* Save pageable part */
	mov	x20, x0		/* Save DT address */
#else
	mov	x19, x0		/* Save pageable part address */
#if defined(CFG_DT_ADDR)
	ldr	x20, =CFG_DT_ADDR
#else
	mov	x20, x2		/* Save DT address */
#endif
#endif

	adr	x0, reset_vect_table
	msr	vbar_el1, x0
	isb

	set_sctlr_el1
	isb

#ifdef CFG_WITH_PAGER
	/*
	 * Move init code into the correct location and move the hashes
	 * to a temporary safe location until the heap is initialized.
	 *
	 * The binary is built as:
	 * [Pager code, rodata and data] : In correct location
	 * [Init code and rodata] : Should be copied to __init_start
	 * [struct boot_embdata + data] : Should be saved before
	 * initializing pager, the first uint32_t tells the length of
	 * the data
	 */
	adr	x0, __init_start	/* dst */
	adr	x1, __data_end		/* src */
	adr	x2, __init_end
	sub	x2, x2, x0	/* init len */
	ldr	w4, [x1, x2]	/* length of hashes etc */
	add	x2, x2, x4	/* length of init and hashes etc */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	x0, x0, x2	/* __init_start + len */
	add	x1, x1, x2	/* __data_end + len */
	adr	x3, cached_mem_end
	str	x0, [x3]
	adr	x2, __init_start
copy_init:
	ldp	x3, x4, [x1, #-16]!
	stp	x3, x4, [x0, #-16]!
	cmp	x0, x2
	b.gt	copy_init
#else
	/*
	 * The binary is built as:
	 * [Core, rodata and data] : In correct location
	 * [struct boot_embdata + data] : Should be moved to __end, the
	 * first uint32_t tells the length of the struct + data
	 */
	adr_l	x0, __end	/* dst */
	adr_l	x1, __data_end	/* src */
	ldr	w2, [x1]	/* struct boot_embdata::total_len */
	/* Copy backwards (as memmove) in case we're overlapping */
	add	x0, x0, x2
	add	x1, x1, x2
	adr	x3, cached_mem_end
	str	x0, [x3]
	adr_l	x2, __end

copy_init:
	ldp	x3, x4, [x1, #-16]!
	stp	x3, x4, [x0, #-16]!
	cmp	x0, x2
	b.gt	copy_init
#endif
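	/*
	 * Illustrative only: the copy_init loop above is a 16-byte-at-a-
	 * time backwards copy, roughly the following pseudo-C
	 * (hypothetical names, not part of the build). Since dst is
	 * above src and the ranges may overlap, the copy must run
	 * backwards, as memmove() would:
	 *
	 *	while (dst_end > dst_start) {
	 *		src_end -= 16;
	 *		dst_end -= 16;
	 *		memcpy(dst_end, src_end, 16);
	 *	}
	 */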

	/*
	 * Clear .bss, this code obviously depends on the linker keeping
	 * the start/end of .bss at least 8 byte aligned.
	 */
	adr_l	x0, __bss_start
	adr_l	x1, __bss_end
clear_bss:
	str	xzr, [x0], #8
	cmp	x0, x1
	b.lt	clear_bss

#ifdef CFG_VIRTUALIZATION
	/*
	 * Clear .nex_bss, this code obviously depends on the linker
	 * keeping the start/end of .nex_bss at least 8 byte aligned.
	 */
	adr	x0, __nex_bss_start
	adr	x1, __nex_bss_end
clear_nex_bss:
	str	xzr, [x0], #8
	cmp	x0, x1
	b.lt	clear_nex_bss
#endif

	/* Set up SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
	set_sp

	bl	thread_init_thread_core_local

	/* Enable aborts now that we can receive exceptions */
	msr	daifclr, #DAIFBIT_ABT

	/*
	 * Invalidate the dcache for all memory used during
	 * initialization to avoid nasty surprises when the cache is
	 * turned on. We must not invalidate memory not used by OP-TEE
	 * since we may invalidate entries used by for instance ARM
	 * Trusted Firmware.
	 */
	adr_l	x0, __text_start
	ldr	x1, cached_mem_end
	sub	x1, x1, x0
	bl	dcache_cleaninv_range

	/* Enable console */
	bl	console_init

#ifdef CFG_CORE_ASLR
	mov	x0, x20
	bl	get_aslr_seed
#else
	mov	x0, #0
#endif

	adr	x1, boot_mmu_config
	bl	core_init_mmu_map

#ifdef CFG_CORE_ASLR
	/*
	 * Process the relocation information again, updating for the
	 * new offset. We're doing this now, before the MMU is enabled,
	 * as some of the memory will become write protected.
	 */
	ldr	x0, boot_mmu_config + CORE_MMU_CONFIG_LOAD_OFFSET
	/*
	 * Update the cached_mem_end address with the load offset since
	 * it was calculated before relocation.
	 */
	adr	x5, cached_mem_end
	ldr	x6, [x5]
	add	x6, x6, x0
	str	x6, [x5]
	bl	relocate
#endif

	bl	__get_core_pos
	bl	enable_mmu
#ifdef CFG_CORE_ASLR
	/*
	 * Reinitialize the console, since register_serial_console() has
	 * previously registered a PA and with ASLR the VA differs from
	 * the PA.
	 */
	bl	console_init
#endif

#ifdef CFG_VIRTUALIZATION
	/*
	 * Initialize the partition tables for each partition to
	 * default_partition, which by now has been relocated to a
	 * different VA.
	 */
	bl	core_mmu_set_default_prtn_tbl
#endif

	mov	x0, x19		/* pageable part address */
	mov	x1, #-1
	bl	boot_init_primary_early
#ifndef CFG_VIRTUALIZATION
	mov	x21, sp
	adr_l	x0, threads
	ldr	x0, [x0, #THREAD_CTX_STACK_VA_END]
	mov	sp, x0
	bl	thread_get_core_local
	mov	x22, x0
	str	wzr, [x22, #THREAD_CORE_LOCAL_FLAGS]
#endif
	mov	x0, x20		/* DT address */
	bl	boot_init_primary_late
#ifndef CFG_VIRTUALIZATION
	mov	x0, #THREAD_CLF_TMP
	str	w0, [x22, #THREAD_CORE_LOCAL_FLAGS]
	mov	sp, x21
#endif
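	/*
	 * Illustrative only: when CFG_VIRTUALIZATION is disabled, the
	 * stack switching around boot_init_primary_late() above is
	 * roughly the following pseudo-C (hypothetical names, not part
	 * of the build):
	 *
	 *	saved_sp = sp;			// temporary stack
	 *	sp = threads[0].stack_va_end;	// boot thread's stack
	 *	core_local->flags = 0;
	 *	boot_init_primary_late(dt_addr);
	 *	core_local->flags = THREAD_CLF_TMP;
	 *	sp = saved_sp;			// back on temporary stack
	 */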

	/*
	 * In case we've touched memory that secondary CPUs will use
	 * before they have turned on their D-cache, clean and invalidate
	 * the D-cache before exiting to normal world.
	 */
	adr_l	x0, __text_start
	ldr	x1, cached_mem_end
	sub	x1, x1, x0
	bl	dcache_cleaninv_range

	/*
	 * Clear the current thread id now to allow the thread to be
	 * reused on the next entry. Matches thread_init_boot_thread() in
	 * boot.c.
	 */
#ifndef CFG_VIRTUALIZATION
	bl	thread_clr_boot_thread
#endif

#ifdef CFG_CORE_FFA
	adr	x0, cpu_on_handler
	/*
	 * Compensate for the load offset since cpu_on_handler() is
	 * called with MMU off.
	 */
	ldr	x1, boot_mmu_config + CORE_MMU_CONFIG_LOAD_OFFSET
	sub	x0, x0, x1
	bl	ffa_secondary_cpu_ep_register
	b	thread_ffa_msg_wait
#else
	/*
	 * Pass the vector address returned from main_init.
	 * Compensate for the load offset since cpu_on_handler() is
	 * called with MMU off.
	 */
	ldr	x0, boot_mmu_config + CORE_MMU_CONFIG_LOAD_OFFSET
	adr	x1, thread_vector_table
	sub	x1, x1, x0
	mov	x0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
	smc	#0
	b	.		/* SMC should not return */
#endif
END_FUNC _start
DECLARE_KEEP_INIT _start

	.balign	8
LOCAL_DATA cached_mem_end , :
	.skip	8
END_DATA cached_mem_end

#ifdef CFG_CORE_ASLR
LOCAL_FUNC relocate , :
	/* x0 holds load offset */
#ifdef CFG_WITH_PAGER
	adr_l	x6, __init_end
#else
	adr_l	x6, __end
#endif
	ldp	w2, w3, [x6, #BOOT_EMBDATA_RELOC_OFFSET]

	mov_imm	x1, TEE_RAM_START
	add	x2, x2, x6	/* start of relocations */
	add	x3, x3, x2	/* end of relocations */

	/*
	 * The relocations are not formatted as Rela64, instead they are
	 * in a compressed format created by get_reloc_bin() in
	 * scripts/gen_tee_bin.py.
	 *
	 * All the R_AARCH64_RELATIVE relocations are translated into a
	 * list of 32-bit offsets from TEE_RAM_START. Each offset points
	 * at a 64-bit value which is increased by the load offset.
	 */

#ifdef CFG_WITH_PAGER
	/*
	 * With the pager enabled we can only relocate the pager and
	 * init parts, the rest has to be done when a page is populated.
	 */
	sub	x6, x6, x1
#endif

	b	2f
	/* Loop over the relocation addresses and process all entries */
1:	ldr	w4, [x2], #4
#ifdef CFG_WITH_PAGER
	/* Skip too large addresses */
	cmp	x4, x6
	b.ge	2f
#endif
	add	x4, x4, x1
	ldr	x5, [x4]
	add	x5, x5, x0
	str	x5, [x4]

2:	cmp	x2, x3
	b.ne	1b

	ret
END_FUNC relocate
#endif
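	/*
	 * Illustrative only: the loop in relocate() above corresponds
	 * roughly to the following C (hypothetical names, not part of
	 * the build):
	 *
	 *	uint32_t *reloc = start_of_relocations;
	 *
	 *	while (reloc < end_of_relocations) {
	 *		uint64_t *va = (uint64_t *)(TEE_RAM_START + *reloc++);
	 *		*va += load_offset;
	 *	}
	 */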

/*
 * void enable_mmu(unsigned long core_pos);
 *
 * This function depends on being mapped within the identity map, where
 * the physical and virtual addresses are the same. After the MMU has
 * been enabled the instruction pointer will be updated to execute at
 * the new offset instead. Stack pointers and the return address are
 * updated accordingly.
 */
LOCAL_FUNC enable_mmu , : , .identity_map
	adr	x1, boot_mmu_config
	load_xregs x1, 0, 2, 6
	/*
	 * x0 = core_pos
	 * x2 = tcr_el1
	 * x3 = mair_el1
	 * x4 = ttbr0_el1_base
	 * x5 = ttbr0_core_offset
	 * x6 = load_offset
	 */
	msr	tcr_el1, x2
	msr	mair_el1, x3

	/*
	 * ttbr0_el1 = ttbr0_el1_base + ttbr0_core_offset * core_pos
	 */
	madd	x1, x5, x0, x4
	msr	ttbr0_el1, x1
	msr	ttbr1_el1, xzr
	isb

	/* Invalidate TLB */
	tlbi	vmalle1

	/*
	 * Make sure translation table writes have drained into memory
	 * and the TLB invalidation is complete.
	 */
	dsb	sy
	isb

	/* Enable the MMU */
	mrs	x1, sctlr_el1
	orr	x1, x1, #SCTLR_M
	msr	sctlr_el1, x1
	isb

	/* Update vbar */
	mrs	x1, vbar_el1
	add	x1, x1, x6
	msr	vbar_el1, x1
	isb

	/* Invalidate instruction cache and branch predictor */
	ic	iallu
	isb

	/* Enable I and D cache */
	mrs	x1, sctlr_el1
	orr	x1, x1, #SCTLR_I
	orr	x1, x1, #SCTLR_C
	msr	sctlr_el1, x1
	isb

	/* Adjust stack pointers and return address */
	msr	spsel, #1
	add	sp, sp, x6
	msr	spsel, #0
	add	sp, sp, x6
	add	x30, x30, x6

	ret
END_FUNC enable_mmu

	.balign	8
DATA boot_mmu_config , :	/* struct core_mmu_config */
	.skip	CORE_MMU_CONFIG_SIZE
END_DATA boot_mmu_config

FUNC cpu_on_handler , :
	mov	x19, x0
	mov	x20, x1
	mov	x21, x30

	adr	x0, reset_vect_table
	msr	vbar_el1, x0
	isb

	set_sctlr_el1
	isb

	/* Enable aborts now that we can receive exceptions */
	msr	daifclr, #DAIFBIT_ABT

	bl	__get_core_pos
	bl	enable_mmu

	/* Set up SP_EL0 and SP_EL1, SP will be set to SP_EL0 */
	set_sp

	mov	x0, x19
	mov	x1, x20
#ifdef CFG_CORE_FFA
	bl	boot_cpu_on_handler
	b	thread_ffa_msg_wait
#else
	mov	x30, x21
	b	boot_cpu_on_handler
#endif
END_FUNC cpu_on_handler
DECLARE_KEEP_PAGER cpu_on_handler

LOCAL_FUNC unhandled_cpu , :
	wfi
	b	unhandled_cpu
END_FUNC unhandled_cpu
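	/*
	 * The vector table below follows the layout mandated by the
	 * AArch64 architecture: 16 entries of 0x80 bytes each (at most
	 * 32 instructions), grouped by the EL and stack pointer the
	 * exception is taken from:
	 *
	 *	0x000-0x180	Current EL with SP_EL0
	 *	0x200-0x380	Current EL with SP_ELx
	 *	0x400-0x580	Lower EL using AArch64
	 *	0x600-0x780	Lower EL using AArch32
	 *
	 * The base address (VBAR_EL1) must be 2 KiB aligned, hence the
	 * .align 11 below. The check_vector_size macro guards the
	 * 32-instruction limit at build time.
	 */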
	/*
	 * This macro verifies that a given vector doesn't exceed the
	 * architectural limit of 32 instructions. It is meant to be
	 * placed immediately after the last instruction in the vector
	 * and takes the vector entry point as the parameter.
	 */
	.macro check_vector_size since
	  .if (. - \since) > (32 * 4)
	    .error "Vector exceeds 32 instructions"
	  .endif
	.endm

	.section .identity_map, "ax", %progbits
	.align	11
LOCAL_FUNC reset_vect_table , :, .identity_map
	/* -----------------------------------------------------
	 * Current EL with SP0 : 0x0 - 0x180
	 * -----------------------------------------------------
	 */
SynchronousExceptionSP0:
	b	SynchronousExceptionSP0
	check_vector_size SynchronousExceptionSP0

	.align	7
IrqSP0:
	b	IrqSP0
	check_vector_size IrqSP0

	.align	7
FiqSP0:
	b	FiqSP0
	check_vector_size FiqSP0

	.align	7
SErrorSP0:
	b	SErrorSP0
	check_vector_size SErrorSP0

	/* -----------------------------------------------------
	 * Current EL with SPx : 0x200 - 0x380
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionSPx:
	b	SynchronousExceptionSPx
	check_vector_size SynchronousExceptionSPx

	.align	7
IrqSPx:
	b	IrqSPx
	check_vector_size IrqSPx

	.align	7
FiqSPx:
	b	FiqSPx
	check_vector_size FiqSPx

	.align	7
SErrorSPx:
	b	SErrorSPx
	check_vector_size SErrorSPx

	/* -----------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x580
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionA64:
	b	SynchronousExceptionA64
	check_vector_size SynchronousExceptionA64

	.align	7
IrqA64:
	b	IrqA64
	check_vector_size IrqA64

	.align	7
FiqA64:
	b	FiqA64
	check_vector_size FiqA64

	.align	7
SErrorA64:
	b	SErrorA64
	check_vector_size SErrorA64

	/* -----------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x780
	 * -----------------------------------------------------
	 */
	.align	7
SynchronousExceptionA32:
	b	SynchronousExceptionA32
	check_vector_size SynchronousExceptionA32

	.align	7
IrqA32:
	b	IrqA32
	check_vector_size IrqA32

	.align	7
FiqA32:
	b	FiqA32
	check_vector_size FiqA32

	.align	7
SErrorA32:
	b	SErrorA32
	check_vector_size SErrorA32

END_FUNC reset_vect_table