/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <platform_config.h>

#include <kernel/panic.h>
#include <kernel/thread.h>
#include <kernel/thread_defs.h>
#include "thread_private.h"
#include <sm/sm_defs.h>
#include <sm/sm.h>
#include <sm/teesmc.h>
#include <sm/teesmc_optee.h>
#include <arm.h>
#include <kernel/tz_proc_def.h>
#include <kernel/tz_proc.h>
#include <kernel/misc.h>
#include <mm/tee_mmu.h>
#include <mm/tee_mmu_defs.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <kernel/tee_ta_manager.h>
#include <util.h>
#include <trace.h>

#include <assert.h>

#ifdef ARM32
#define STACK_TMP_SIZE		1024
#define STACK_THREAD_SIZE	8192

#if TRACE_LEVEL > 0
#define STACK_ABT_SIZE		2048
#else
#define STACK_ABT_SIZE		1024
#endif

#endif /*ARM32*/

#ifdef ARM64
#define STACK_TMP_SIZE		2048
#define STACK_THREAD_SIZE	8192

#if TRACE_LEVEL > 0
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		1024
#endif
#endif /*ARM64*/

struct thread_ctx threads[CFG_NUM_THREADS];

static struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE];

#ifdef CFG_WITH_VFP
struct thread_vfp_state {
	bool ns_saved;
	bool sec_saved;
	bool sec_lazy_saved;
	struct vfp_state ns;
	struct vfp_state sec;
};

static struct thread_vfp_state thread_vfp_state;
#endif /*CFG_WITH_VFP*/

#ifdef CFG_WITH_STACK_CANARIES
#ifdef ARM32
#define STACK_CANARY_SIZE	(4 * sizeof(uint32_t))
#endif
#ifdef ARM64
#define STACK_CANARY_SIZE	(8 * sizeof(uint32_t))
#endif
#define START_CANARY_VALUE	0xdededede
#define END_CANARY_VALUE	0xabababab
#define GET_START_CANARY(name, stack_num) name[stack_num][0]
#define GET_END_CANARY(name, stack_num) \
	name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1]
#else
#define STACK_CANARY_SIZE	0
#endif

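/*
 * DECLARE_STACK() below reserves an array of per-core (or per-thread)
 * stacks in the ".nozi.stack" section, each entry rounded up to
 * STACK_ALIGNMENT and, when CFG_WITH_STACK_CANARIES is set, padded with
 * STACK_CANARY_SIZE bytes that hold the canary words written by
 * init_canaries(). GET_STACK() returns the initial (highest) stack
 * address, leaving half of the canary area, including the end canary,
 * above the stack top. Illustrative expansion (assuming ARM32 with
 * canaries enabled): DECLARE_STACK(stack_abt, 2, 1024) becomes roughly
 * "static uint32_t stack_abt[2][(1024 + 16) / 4]" with the section and
 * alignment attributes applied.
 */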
#define DECLARE_STACK(name, num_stacks, stack_size) \
	static uint32_t name[num_stacks][ \
		ROUNDUP(stack_size + STACK_CANARY_SIZE, STACK_ALIGNMENT) / \
		sizeof(uint32_t)] \
		__attribute__((section(".nozi.stack"), \
			       aligned(STACK_ALIGNMENT)))

#define GET_STACK(stack) \
	((vaddr_t)(stack) + sizeof(stack) - STACK_CANARY_SIZE / 2)

DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE);
DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE);
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
DECLARE_STACK(stack_sm, CFG_TEE_CORE_NB_CORE, SM_STACK_SIZE);
#endif
#ifndef CFG_WITH_PAGER
DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE);
#endif

const vaddr_t stack_tmp_top[CFG_TEE_CORE_NB_CORE] = {
	GET_STACK(stack_tmp[0]),
#if CFG_TEE_CORE_NB_CORE > 1
	GET_STACK(stack_tmp[1]),
#endif
#if CFG_TEE_CORE_NB_CORE > 2
	GET_STACK(stack_tmp[2]),
#endif
#if CFG_TEE_CORE_NB_CORE > 3
	GET_STACK(stack_tmp[3]),
#endif
#if CFG_TEE_CORE_NB_CORE > 4
	GET_STACK(stack_tmp[4]),
#endif
#if CFG_TEE_CORE_NB_CORE > 5
	GET_STACK(stack_tmp[5]),
#endif
#if CFG_TEE_CORE_NB_CORE > 6
	GET_STACK(stack_tmp[6]),
#endif
#if CFG_TEE_CORE_NB_CORE > 7
	GET_STACK(stack_tmp[7]),
#endif
#if CFG_TEE_CORE_NB_CORE > 8
#error "Top of tmp stacks aren't defined for more than 8 CPUS"
#endif
};

thread_smc_handler_t thread_std_smc_handler_ptr;
static thread_smc_handler_t thread_fast_smc_handler_ptr;
thread_fiq_handler_t thread_fiq_handler_ptr;
thread_svc_handler_t thread_svc_handler_ptr;
static thread_abort_handler_t thread_abort_handler_ptr;
thread_pm_handler_t thread_cpu_on_handler_ptr;
thread_pm_handler_t thread_cpu_off_handler_ptr;
thread_pm_handler_t thread_cpu_suspend_handler_ptr;
thread_pm_handler_t thread_cpu_resume_handler_ptr;
thread_pm_handler_t thread_system_off_handler_ptr;
thread_pm_handler_t thread_system_reset_handler_ptr;


static unsigned int thread_global_lock = SPINLOCK_UNLOCK;

static void init_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;
#define INIT_CANARY(name)						\
	for (n = 0; n < ARRAY_SIZE(name); n++) {			\
		uint32_t *start_canary = &GET_START_CANARY(name, n);	\
		uint32_t *end_canary = &GET_END_CANARY(name, n);	\
									\
		*start_canary = START_CANARY_VALUE;			\
		*end_canary = END_CANARY_VALUE;				\
		DMSG("#Stack canaries for %s[%zu] with top at %p\n",	\
			#name, n, (void *)(end_canary - 1));		\
		DMSG("watch *%p\n", (void *)end_canary);		\
	}

	INIT_CANARY(stack_tmp);
	INIT_CANARY(stack_abt);
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	INIT_CANARY(stack_sm);
#endif
#ifndef CFG_WITH_PAGER
	INIT_CANARY(stack_thread);
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

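/*
 * thread_check_canaries() verifies that the canary words written by
 * init_canaries() are still intact; a mismatch means a stack has over- or
 * underflowed. It is called from the SMC entry paths below, so corruption
 * is caught on the next entry to secure world.
 */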
void thread_check_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;

	for (n = 0; n < ARRAY_SIZE(stack_tmp); n++) {
		assert(GET_START_CANARY(stack_tmp, n) == START_CANARY_VALUE);
		assert(GET_END_CANARY(stack_tmp, n) == END_CANARY_VALUE);
	}

	for (n = 0; n < ARRAY_SIZE(stack_abt); n++) {
		assert(GET_START_CANARY(stack_abt, n) == START_CANARY_VALUE);
		assert(GET_END_CANARY(stack_abt, n) == END_CANARY_VALUE);
	}
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	for (n = 0; n < ARRAY_SIZE(stack_sm); n++) {
		assert(GET_START_CANARY(stack_sm, n) == START_CANARY_VALUE);
		assert(GET_END_CANARY(stack_sm, n) == END_CANARY_VALUE);
	}
#endif
#ifndef CFG_WITH_PAGER
	for (n = 0; n < ARRAY_SIZE(stack_thread); n++) {
		assert(GET_START_CANARY(stack_thread, n) ==
		       START_CANARY_VALUE);
		assert(GET_END_CANARY(stack_thread, n) == END_CANARY_VALUE);
	}
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

static void lock_global(void)
{
	cpu_spin_lock(&thread_global_lock);
}

static void unlock_global(void)
{
	cpu_spin_unlock(&thread_global_lock);
}

#ifdef ARM32
uint32_t thread_get_exceptions(void)
{
	uint32_t cpsr = read_cpsr();

	return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t cpsr = read_cpsr();

	cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
	cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);
	write_cpsr(cpsr);
}
#endif /*ARM32*/

#ifdef ARM64
uint32_t thread_get_exceptions(void)
{
	uint32_t daif = read_daif();

	return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t daif = read_daif();

	daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
	daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT);
	write_daif(daif);
}
#endif /*ARM64*/

uint32_t thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}


struct thread_core_local *thread_get_core_local(void)
{
	uint32_t cpu_id = get_core_pos();

	/*
	 * IRQs must be disabled before playing with core_local since
	 * we otherwise may be rescheduled to a different core in the
	 * middle of this function.
	 */
	assert(thread_get_exceptions() & THREAD_EXCP_IRQ);

	assert(cpu_id < CFG_TEE_CORE_NB_CORE);
	return &thread_core_local[cpu_id];
}

static bool have_one_active_thread(void)
{
	size_t n;

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_ACTIVE)
			return true;
	}

	return false;
}

static bool have_one_preempted_thread(void)
{
	size_t n;

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_SUSPENDED &&
		    (threads[n].flags & THREAD_FLAGS_EXIT_ON_IRQ))
			return true;
	}

	return false;
}

static void thread_lazy_save_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	thread_vfp_state.ns_saved = false;
#if defined(ARM64) && defined(CFG_WITH_ARM_TRUSTED_FW)
	/*
	 * ARM TF saves and restores CPACR_EL1, so we must assume NS world
	 * uses VFP and always preserve the register file when secure world
	 * is about to use it
	 */
	thread_vfp_state.ns.force_save = true;
#endif
	vfp_lazy_save_state_init(&thread_vfp_state.ns);
#endif /*CFG_WITH_VFP*/
}

static void thread_lazy_restore_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	assert(!thread_vfp_state.sec_lazy_saved && !thread_vfp_state.sec_saved);
	vfp_lazy_restore_state(&thread_vfp_state.ns, thread_vfp_state.ns_saved);
	thread_vfp_state.ns_saved = false;
#endif /*CFG_WITH_VFP*/
}

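/*
 * init_regs() primes the register context of a freshly allocated thread so
 * that thread_resume() starts executing thread_std_smc_entry with the SMC
 * arguments in r0-r7 (ARM32) or x0-x7 (ARM64). One variant per
 * architecture follows.
 */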
#ifdef ARM32
static void init_regs(struct thread_ctx *thread,
		struct thread_smc_args *args)
{
	thread->regs.pc = (uint32_t)thread_std_smc_entry;

	/*
	 * Stdcalls start in SVC mode with masked IRQ, masked asynchronous
	 * abort and unmasked FIQ.
	 */
	thread->regs.cpsr = CPSR_MODE_SVC | CPSR_I | CPSR_A;
	/* Enable thumb mode if it's a thumb instruction */
	if (thread->regs.pc & 1)
		thread->regs.cpsr |= CPSR_T;
	/* Reinitialize stack pointer */
	thread->regs.svc_sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in r0-r7 when thread is started.
	 */
	thread->regs.r0 = args->a0;
	thread->regs.r1 = args->a1;
	thread->regs.r2 = args->a2;
	thread->regs.r3 = args->a3;
	thread->regs.r4 = args->a4;
	thread->regs.r5 = args->a5;
	thread->regs.r6 = args->a6;
	thread->regs.r7 = args->a7;
}
#endif /*ARM32*/

#ifdef ARM64
static void init_regs(struct thread_ctx *thread,
		struct thread_smc_args *args)
{
	thread->regs.pc = (uint64_t)thread_std_smc_entry;

	/*
	 * Stdcalls start in SVC mode with masked IRQ, masked asynchronous
	 * abort and unmasked FIQ.
	 */
	thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
				    DAIFBIT_IRQ | DAIFBIT_ABT);
	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in x0-x7 when thread is started.
	 */
	thread->regs.x[0] = args->a0;
	thread->regs.x[1] = args->a1;
	thread->regs.x[2] = args->a2;
	thread->regs.x[3] = args->a3;
	thread->regs.x[4] = args->a4;
	thread->regs.x[5] = args->a5;
	thread->regs.x[6] = args->a6;
	thread->regs.x[7] = args->a7;
}
#endif /*ARM64*/

static void thread_alloc_and_run(struct thread_smc_args *args)
{
	size_t n;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == -1);

	lock_global();

	if (!have_one_active_thread() && !have_one_preempted_thread()) {
		for (n = 0; n < CFG_NUM_THREADS; n++) {
			if (threads[n].state == THREAD_STATE_FREE) {
				threads[n].state = THREAD_STATE_ACTIVE;
				found_thread = true;
				break;
			}
		}
	}

	unlock_global();

	if (!found_thread) {
		args->a0 = TEESMC_RETURN_EBUSY;
		return;
	}

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, args);

	/* Save Hypervisor Client ID */
	threads[n].hyp_clnt_id = args->a7;

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}

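/*
 * copy_a0_to_a3() forwards the first four SMC arguments of a
 * return-from-RPC call into the suspended thread's context, so they show
 * up as the RPC result in r0-r3/x0-x3 when the thread is resumed. Only
 * a0-a3 are copied; the remaining arguments from normal world are ignored.
 */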
#ifdef ARM32
static void copy_a0_to_a3(struct thread_ctx_regs *regs,
		struct thread_smc_args *args)
{
	/*
	 * Update returned values from RPC, values will appear in
	 * r0-r3 when thread is resumed.
	 */
	regs->r0 = args->a0;
	regs->r1 = args->a1;
	regs->r2 = args->a2;
	regs->r3 = args->a3;
}
#endif /*ARM32*/

#ifdef ARM64
static void copy_a0_to_a3(struct thread_ctx_regs *regs,
		struct thread_smc_args *args)
{
	/*
	 * Update returned values from RPC, values will appear in
	 * x0-x3 when thread is resumed.
	 */
	regs->x[0] = args->a0;
	regs->x[1] = args->a1;
	regs->x[2] = args->a2;
	regs->x[3] = args->a3;
}
#endif /*ARM64*/

static void thread_resume_from_rpc(struct thread_smc_args *args)
{
	size_t n = args->a3; /* thread id */
	struct thread_core_local *l = thread_get_core_local();
	uint32_t rv = 0;

	assert(l->curr_thread == -1);

	lock_global();

	if (have_one_active_thread()) {
		rv = TEESMC_RETURN_EBUSY;
	} else if (n < CFG_NUM_THREADS &&
		   threads[n].state == THREAD_STATE_SUSPENDED &&
		   args->a7 == threads[n].hyp_clnt_id) {
		/*
		 * If there's one preempted thread it has to be the one
		 * we're resuming.
		 */
		if (have_one_preempted_thread()) {
			if (threads[n].flags & THREAD_FLAGS_EXIT_ON_IRQ) {
				threads[n].flags &= ~THREAD_FLAGS_EXIT_ON_IRQ;
				threads[n].state = THREAD_STATE_ACTIVE;
			} else {
				rv = TEESMC_RETURN_EBUSY;
			}
		} else {
			threads[n].state = THREAD_STATE_ACTIVE;
		}
	} else {
		rv = TEESMC_RETURN_ERESUME;
	}

	unlock_global();

	if (rv) {
		args->a0 = rv;
		return;
	}

	l->curr_thread = n;

	if (threads[n].have_user_map)
		core_mmu_set_user_map(&threads[n].user_map);

	/*
	 * Return from RPC to request service of an IRQ must not
	 * get parameters from non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a3(&threads[n].regs, args);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}

void thread_handle_fast_smc(struct thread_smc_args *args)
{
	thread_check_canaries();
	thread_fast_smc_handler_ptr(args);
	/* Fast handlers must not unmask any exceptions */
	assert(thread_get_exceptions() == THREAD_EXCP_ALL);
}

void thread_handle_std_smc(struct thread_smc_args *args)
{
	thread_check_canaries();

	if (args->a0 == TEESMC32_CALL_RETURN_FROM_RPC)
		thread_resume_from_rpc(args);
	else
		thread_alloc_and_run(args);
}

void thread_handle_abort(uint32_t abort_type, struct thread_abort_regs *regs)
{
#ifdef CFG_WITH_VFP
	if (vfp_is_enabled()) {
		vfp_lazy_save_state_init(&thread_vfp_state.sec);
		thread_vfp_state.sec_lazy_saved = true;
	}
#endif

	thread_abort_handler_ptr(abort_type, regs);

#ifdef CFG_WITH_VFP
	assert(!vfp_is_enabled());
	if (thread_vfp_state.sec_lazy_saved) {
		vfp_lazy_restore_state(&thread_vfp_state.sec,
				       thread_vfp_state.sec_saved);
		thread_vfp_state.sec_saved = false;
		thread_vfp_state.sec_lazy_saved = false;
	}
#endif
}

void *thread_get_tmp_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (void *)l->tmp_stack_va_end;
}

#ifdef ARM64
vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);
	return threads[ct].kern_sp;
}
#endif /*ARM64*/

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);

	thread_lazy_restore_ns_vfp();

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = -1;

	unlock_global();
}

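/*
 * thread_state_suspend() parks the current thread so that it can later be
 * picked up by thread_resume_from_rpc(): the flags, CPSR and PC to resume
 * at are recorded, any active user mapping is saved and cleared, and the
 * thread id is returned so it can be handed back to normal world.
 */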
int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);

	thread_check_canaries();

	thread_lazy_restore_ns_vfp();

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.cpsr = cpsr;
	threads[ct].regs.pc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = -1;

	unlock_global();

	return ct;
}

#ifdef ARM32
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->tmp_stack_va_end = sp;
	thread_set_irq_sp(sp);
	thread_set_fiq_sp(sp);
}

static void set_abt_stack(struct thread_core_local *l __unused, vaddr_t sp)
{
	thread_set_abt_sp(sp);
}
#endif /*ARM32*/

#ifdef ARM64
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	/*
	 * We're already using the tmp stack when this function is called
	 * so there's no need to assign it to any stack pointer. However,
	 * we'll need to restore it at different times so store it here.
	 */
	l->tmp_stack_va_end = sp;
}

static void set_abt_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->abt_stack_va_end = sp;
}
#endif /*ARM64*/

bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
{
	if (thread_id >= CFG_NUM_THREADS)
		return false;
	if (threads[thread_id].state != THREAD_STATE_FREE)
		return false;

	threads[thread_id].stack_va_end = sp;
	return true;
}

uint32_t thread_get_id(void)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;
	int ct;

	l = thread_get_core_local();
	ct = l->curr_thread;
	assert((ct >= 0) && (ct < CFG_NUM_THREADS));

	thread_unmask_exceptions(exceptions);
	return ct;
}

static void init_handlers(const struct thread_handlers *handlers)
{
	thread_std_smc_handler_ptr = handlers->std_smc;
	thread_fast_smc_handler_ptr = handlers->fast_smc;
	thread_fiq_handler_ptr = handlers->fiq;
	thread_svc_handler_ptr = handlers->svc;
	thread_abort_handler_ptr = handlers->abort;
	thread_cpu_on_handler_ptr = handlers->cpu_on;
	thread_cpu_off_handler_ptr = handlers->cpu_off;
	thread_cpu_suspend_handler_ptr = handlers->cpu_suspend;
	thread_cpu_resume_handler_ptr = handlers->cpu_resume;
	thread_system_off_handler_ptr = handlers->system_off;
	thread_system_reset_handler_ptr = handlers->system_reset;
}

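/*
 * With CFG_WITH_PAGER the thread stacks are paged: each stack gets its own
 * virtual region preceded by a one-page protection gap that is not
 * registered as a pager area, so an overflow faults instead of silently
 * corrupting the neighbouring stack. Without the pager the stacks simply
 * come from the statically allocated stack_thread[] array declared above.
 */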
#ifdef CFG_WITH_PAGER
static void init_thread_stacks(void)
{
	size_t n;

	/*
	 * Allocate virtual memory for thread stacks.
	 */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		tee_mm_entry_t *mm;
		vaddr_t sp;

		/* Find vmem for thread stack and its protection gap */
		mm = tee_mm_alloc(&tee_mm_vcore,
				  SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
		TEE_ASSERT(mm);

		/* Claim eventual physical page */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);

		/* Realloc both protection vmem and stack vmem separately */
		sp = tee_mm_get_smem(mm);
		tee_mm_free(mm);
		mm = tee_mm_alloc2(&tee_mm_vcore, sp, SMALL_PAGE_SIZE);
		TEE_ASSERT(mm);
		mm = tee_mm_alloc2(&tee_mm_vcore, sp + SMALL_PAGE_SIZE,
				   STACK_THREAD_SIZE);
		TEE_ASSERT(mm);

		/* init effective stack */
		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
		if (!thread_init_stack(n, sp))
			panic();

		/* Add the area to the pager */
		tee_pager_add_area(mm, TEE_PAGER_AREA_RW, NULL, NULL);
	}
}
#else
static void init_thread_stacks(void)
{
	size_t n;

	/* Assign the thread stacks */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (!thread_init_stack(n, GET_STACK(stack_thread[n])))
			panic();
	}
}
#endif /*CFG_WITH_PAGER*/

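/*
 * thread_init_primary() is expected to run once, before any other
 * thread_*() function: it records the handler table, writes the stack
 * canaries and assigns the thread stacks. Per-CPU state (secure monitor,
 * temporary and abort stacks, vector base) is set up separately by
 * thread_init_per_cpu().
 */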
void thread_init_primary(const struct thread_handlers *handlers)
{
	/*
	 * The COMPILE_TIME_ASSERT only works in function context. These
	 * checks verify that the offsets used in assembly code match
	 * what's used in C code.
	 */
#ifdef ARM32
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r0) ==
			    THREAD_SVC_REG_R0_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r1) ==
			    THREAD_SVC_REG_R1_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r2) ==
			    THREAD_SVC_REG_R2_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r3) ==
			    THREAD_SVC_REG_R3_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r4) ==
			    THREAD_SVC_REG_R4_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r5) ==
			    THREAD_SVC_REG_R5_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r6) ==
			    THREAD_SVC_REG_R6_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r7) ==
			    THREAD_SVC_REG_R7_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, lr) ==
			    THREAD_SVC_REG_LR_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, spsr) ==
			    THREAD_SVC_REG_SPSR_OFFS);
#endif /*ARM32*/
#ifdef ARM64
	/* struct thread_abort_regs */
	COMPILE_TIME_ASSERT(offsetof(struct thread_abort_regs, x22) ==
			    THREAD_ABT_REG_X_OFFS(22));
	COMPILE_TIME_ASSERT(offsetof(struct thread_abort_regs, elr) ==
			    THREAD_ABT_REG_ELR_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_abort_regs, spsr) ==
			    THREAD_ABT_REG_SPSR_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_abort_regs, sp_el0) ==
			    THREAD_ABT_REG_SP_EL0_OFFS);
	COMPILE_TIME_ASSERT(sizeof(struct thread_abort_regs) ==
			    THREAD_ABT_REGS_SIZE);

	/* struct thread_ctx */
	COMPILE_TIME_ASSERT(offsetof(struct thread_ctx, kern_sp) ==
			    THREAD_CTX_KERN_SP_OFFSET);
	COMPILE_TIME_ASSERT(sizeof(struct thread_ctx) == THREAD_CTX_SIZE);

	/* struct thread_ctx_regs */
	COMPILE_TIME_ASSERT(offsetof(struct thread_ctx_regs, sp) ==
			    THREAD_CTX_REGS_SP_OFFSET);
	COMPILE_TIME_ASSERT(offsetof(struct thread_ctx_regs, pc) ==
			    THREAD_CTX_REGS_PC_OFFSET);
	COMPILE_TIME_ASSERT(offsetof(struct thread_ctx_regs, cpsr) ==
			    THREAD_CTX_REGS_SPSR_OFFSET);
	COMPILE_TIME_ASSERT(offsetof(struct thread_ctx_regs, x[23]) ==
			    THREAD_CTX_REGS_X_OFFSET(23));
	COMPILE_TIME_ASSERT(sizeof(struct thread_ctx_regs) ==
			    THREAD_CTX_REGS_SIZE);

	/* struct thread_user_mode_rec */
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_user_mode_rec, exit_status0_ptr) ==
		THREAD_USER_MODE_REC_EXIT_STATUS0_PTR_OFFSET);
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_user_mode_rec, exit_status1_ptr) ==
		THREAD_USER_MODE_REC_EXIT_STATUS1_PTR_OFFSET);
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_user_mode_rec, x[1]) ==
		THREAD_USER_MODE_REC_X_OFFSET(20));
	COMPILE_TIME_ASSERT(sizeof(struct thread_user_mode_rec) ==
			    THREAD_USER_MODE_REC_SIZE);

	/* struct thread_core_local */
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_core_local, tmp_stack_va_end) ==
		THREAD_CORE_LOCAL_TMP_STACK_VA_END_OFFSET);
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_core_local, curr_thread) ==
		THREAD_CORE_LOCAL_CURR_THREAD_OFFSET);
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_core_local, flags) ==
		THREAD_CORE_LOCAL_FLAGS_OFFSET);
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_core_local, abt_stack_va_end) ==
		THREAD_CORE_LOCAL_ABT_STACK_VA_END_OFFSET);
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_core_local, x[3]) ==
		THREAD_CORE_LOCAL_X_OFFSET(3));
	COMPILE_TIME_ASSERT(sizeof(struct thread_core_local) ==
			    THREAD_CORE_LOCAL_SIZE);

#endif /*ARM64*/

	init_handlers(handlers);

	/* Initialize canaries around the stacks */
	init_canaries();

	init_thread_stacks();
}

static void init_sec_mon(size_t __unused pos)
{
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	/* Initialize secure monitor */
	sm_init(GET_STACK(stack_sm[pos]));
	sm_set_entry_vector(thread_vector_table);
#endif
}

void thread_init_per_cpu(void)
{
	size_t pos = get_core_pos();
	struct thread_core_local *l = thread_get_core_local();

	init_sec_mon(pos);

	l->curr_thread = -1;
	set_tmp_stack(l, GET_STACK(stack_tmp[pos]));
	set_abt_stack(l, GET_STACK(stack_abt[pos]));

	thread_init_vbar();
}

void thread_set_tsd(void *tsd)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;
	int ct;

	l = thread_get_core_local();
	ct = l->curr_thread;

	assert(ct != -1);
	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].tsd = tsd;

	thread_unmask_exceptions(exceptions);
}

void *thread_get_tsd(void)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;
	int ct;
	void *tsd;

	l = thread_get_core_local();
	ct = l->curr_thread;

	if (ct == -1 || threads[ct].state != THREAD_STATE_ACTIVE)
		tsd = NULL;
	else
		tsd = threads[ct].tsd;

	thread_unmask_exceptions(exceptions);
	return tsd;
}

struct thread_ctx_regs *thread_get_ctx_regs(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != -1);
	return &threads[l->curr_thread].regs;
}

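/*
 * thread_set_irq()/thread_restore_irq() keep the THREAD_FLAGS_IRQ_ENABLE
 * flag of the current thread in sync with the IRQ mask: enabling records
 * the flag and unmasks IRQ, while disabling only clears the flag since IRQ
 * is already masked while these functions run. thread_restore_irq()
 * unmasks IRQ again if the flag says the thread wants IRQ enabled.
 */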
void thread_set_irq(bool enable)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (enable) {
		threads[l->curr_thread].flags |= THREAD_FLAGS_IRQ_ENABLE;
		thread_set_exceptions(exceptions & ~THREAD_EXCP_IRQ);
	} else {
		/*
		 * No need to disable IRQ here since it's already disabled
		 * above.
		 */
		threads[l->curr_thread].flags &= ~THREAD_FLAGS_IRQ_ENABLE;
	}
}

void thread_restore_irq(void)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (threads[l->curr_thread].flags & THREAD_FLAGS_IRQ_ENABLE)
		thread_set_exceptions(exceptions & ~THREAD_EXCP_IRQ);
}

#ifdef CFG_WITH_VFP
uint32_t thread_kernel_enable_vfp(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);

	assert(!vfp_is_enabled());

	if (!thread_vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thread_vfp_state.ns);
		thread_vfp_state.ns_saved = true;
	} else if (thread_vfp_state.sec_lazy_saved &&
		   !thread_vfp_state.sec_saved) {
		vfp_lazy_save_state_final(&thread_vfp_state.sec);
		thread_vfp_state.sec_saved = true;
	}

	vfp_enable();
	return exceptions;
}

void thread_kernel_disable_vfp(uint32_t state)
{
	uint32_t exceptions;

	assert(vfp_is_enabled());

	vfp_disable();
	exceptions = thread_get_exceptions();
	assert(exceptions & THREAD_EXCP_IRQ);
	exceptions &= ~THREAD_EXCP_IRQ;
	exceptions |= state & THREAD_EXCP_IRQ;
	thread_set_exceptions(exceptions);
}
#endif /*CFG_WITH_VFP*/


paddr_t thread_rpc_alloc_arg(size_t size)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		TEESMC_RETURN_RPC_ALLOC_ARG, size};

	thread_rpc(rpc_args);
	return rpc_args[1];
}

paddr_t thread_rpc_alloc_payload(size_t size)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		TEESMC_RETURN_RPC_ALLOC_PAYLOAD, size};

	thread_rpc(rpc_args);
	return rpc_args[1];
}

void thread_rpc_free_arg(paddr_t arg)
{
	if (arg) {
		uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
			TEESMC_RETURN_RPC_FREE_ARG, arg};

		thread_rpc(rpc_args);
	}
}

void thread_rpc_free_payload(paddr_t payload)
{
	if (payload) {
		uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
			TEESMC_RETURN_RPC_FREE_PAYLOAD, payload};

		thread_rpc(rpc_args);
	}
}

void thread_rpc_cmd(paddr_t arg)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {TEESMC_RETURN_RPC_CMD, arg};

	thread_rpc(rpc_args);
}

void thread_optee_rpc_alloc_payload(size_t size, paddr_t *payload,
				    paddr_t *cookie)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		TEESMC_RETURN_OPTEE_RPC_ALLOC_PAYLOAD, size};

	thread_rpc(rpc_args);
	if (payload)
		*payload = rpc_args[1];
	if (cookie)
		*cookie = rpc_args[2];
}

void thread_optee_rpc_free_payload(paddr_t cookie)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		TEESMC_RETURN_OPTEE_RPC_FREE_PAYLOAD, cookie};

	thread_rpc(rpc_args);
}