/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <platform_config.h>

#include <kernel/panic.h>
#include <kernel/thread.h>
#include <kernel/thread_defs.h>
#include "thread_private.h"
#include <sm/sm_defs.h>
#include <sm/sm.h>
#include <sm/teesmc.h>
#include <sm/teesmc_optee.h>
#include <arm.h>
#include <kernel/tz_proc_def.h>
#include <kernel/tz_proc.h>
#include <kernel/misc.h>
#include <mm/tee_mmu.h>
#include <mm/tee_mmu_defs.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <kernel/tee_ta_manager.h>
#include <util.h>
#include <trace.h>

#include <assert.h>

#ifdef ARM32
#define STACK_TMP_SIZE		1024
#define STACK_THREAD_SIZE	8192

#if TRACE_LEVEL > 0
#define STACK_ABT_SIZE		2048
#else
#define STACK_ABT_SIZE		1024
#endif

#endif /*ARM32*/

#ifdef ARM64
#define STACK_TMP_SIZE		2048
#define STACK_THREAD_SIZE	8192

#if TRACE_LEVEL > 0
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		1024
#endif
#endif /*ARM64*/

#define RPC_MAX_PARAMS		2

struct thread_ctx threads[CFG_NUM_THREADS];

static struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE];

#ifdef CFG_WITH_VFP
struct thread_vfp_state {
	bool ns_saved;
	bool sec_saved;
	bool sec_lazy_saved;
	struct vfp_state ns;
	struct vfp_state sec;
};

static struct thread_vfp_state thread_vfp_state;
#endif /*CFG_WITH_VFP*/

#ifdef CFG_WITH_STACK_CANARIES
#ifdef ARM32
#define STACK_CANARY_SIZE	(4 * sizeof(uint32_t))
#endif
#ifdef ARM64
#define STACK_CANARY_SIZE	(8 * sizeof(uint32_t))
#endif
#define START_CANARY_VALUE	0xdededede
#define END_CANARY_VALUE	0xabababab
#define GET_START_CANARY(name, stack_num) name[stack_num][0]
#define GET_END_CANARY(name, stack_num) \
	name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1]
#else
#define STACK_CANARY_SIZE	0
#endif

#define DECLARE_STACK(name, num_stacks, stack_size) \
	static uint32_t name[num_stacks][ \
		ROUNDUP(stack_size + STACK_CANARY_SIZE, STACK_ALIGNMENT) / \
		sizeof(uint32_t)] \
		__attribute__((section(".nozi.stack"), \
			       aligned(STACK_ALIGNMENT)))

#define GET_STACK(stack) \
	((vaddr_t)(stack) + sizeof(stack) - STACK_CANARY_SIZE / 2)

DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE);
DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE);
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
DECLARE_STACK(stack_sm, CFG_TEE_CORE_NB_CORE, SM_STACK_SIZE);
#endif
#ifndef CFG_WITH_PAGER
DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE);
#endif

const vaddr_t stack_tmp_top[CFG_TEE_CORE_NB_CORE] = {
	GET_STACK(stack_tmp[0]),
#if CFG_TEE_CORE_NB_CORE > 1
	GET_STACK(stack_tmp[1]),
#endif
#if CFG_TEE_CORE_NB_CORE > 2
	GET_STACK(stack_tmp[2]),
#endif
#if CFG_TEE_CORE_NB_CORE > 3
	GET_STACK(stack_tmp[3]),
#endif
#if CFG_TEE_CORE_NB_CORE > 4
	GET_STACK(stack_tmp[4]),
#endif
#if CFG_TEE_CORE_NB_CORE > 5
	GET_STACK(stack_tmp[5]),
#endif
#if CFG_TEE_CORE_NB_CORE > 6
	GET_STACK(stack_tmp[6]),
#endif
#if CFG_TEE_CORE_NB_CORE > 7
	GET_STACK(stack_tmp[7]),
#endif
#if CFG_TEE_CORE_NB_CORE > 8
#error "Top of tmp stacks aren't defined for more than 8 CPUS"
#endif
};

thread_smc_handler_t thread_std_smc_handler_ptr;
static thread_smc_handler_t thread_fast_smc_handler_ptr;
thread_fiq_handler_t thread_fiq_handler_ptr;
thread_svc_handler_t thread_svc_handler_ptr;
static thread_abort_handler_t thread_abort_handler_ptr;
thread_pm_handler_t thread_cpu_on_handler_ptr;
thread_pm_handler_t thread_cpu_off_handler_ptr;
thread_pm_handler_t thread_cpu_suspend_handler_ptr;
thread_pm_handler_t thread_cpu_resume_handler_ptr;
thread_pm_handler_t thread_system_off_handler_ptr;
thread_pm_handler_t thread_system_reset_handler_ptr;

static unsigned int thread_global_lock = SPINLOCK_UNLOCK;

static void init_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;
#define INIT_CANARY(name) \
	for (n = 0; n < ARRAY_SIZE(name); n++) { \
		uint32_t *start_canary = &GET_START_CANARY(name, n); \
		uint32_t *end_canary = &GET_END_CANARY(name, n); \
		\
		*start_canary = START_CANARY_VALUE; \
		*end_canary = END_CANARY_VALUE; \
		DMSG("#Stack canaries for %s[%zu] with top at %p\n", \
		     #name, n, (void *)(end_canary - 1)); \
		DMSG("watch *%p\n", (void *)end_canary); \
	}

	INIT_CANARY(stack_tmp);
	INIT_CANARY(stack_abt);
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	INIT_CANARY(stack_sm);
#endif
#ifndef CFG_WITH_PAGER
	INIT_CANARY(stack_thread);
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

void thread_check_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;

	for (n = 0; n < ARRAY_SIZE(stack_tmp); n++) {
		assert(GET_START_CANARY(stack_tmp, n) == START_CANARY_VALUE);
		assert(GET_END_CANARY(stack_tmp, n) == END_CANARY_VALUE);
	}

	for (n = 0; n < ARRAY_SIZE(stack_abt); n++) {
		assert(GET_START_CANARY(stack_abt, n) == START_CANARY_VALUE);
		assert(GET_END_CANARY(stack_abt, n) == END_CANARY_VALUE);
	}
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	for (n = 0; n < ARRAY_SIZE(stack_sm); n++) {
		assert(GET_START_CANARY(stack_sm, n) == START_CANARY_VALUE);
		assert(GET_END_CANARY(stack_sm, n) == END_CANARY_VALUE);
	}
#endif
#ifndef CFG_WITH_PAGER
	for (n = 0; n < ARRAY_SIZE(stack_thread); n++) {
		assert(GET_START_CANARY(stack_thread, n) ==
		       START_CANARY_VALUE);
		assert(GET_END_CANARY(stack_thread, n) == END_CANARY_VALUE);
	}
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

static void lock_global(void)
{
	cpu_spin_lock(&thread_global_lock);
}

static void unlock_global(void)
{
	cpu_spin_unlock(&thread_global_lock);
}

#ifdef ARM32
uint32_t thread_get_exceptions(void)
{
	uint32_t cpsr = read_cpsr();

	return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t cpsr = read_cpsr();

	cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
	cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);
	write_cpsr(cpsr);
}
#endif /*ARM32*/

#ifdef ARM64
uint32_t thread_get_exceptions(void)
{
	uint32_t daif = read_daif();

	return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t daif = read_daif();

	daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
	daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT);
	write_daif(daif);
}
#endif /*ARM64*/

uint32_t thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}

struct thread_core_local *thread_get_core_local(void)
{
	uint32_t cpu_id = get_core_pos();

	/*
	 * IRQs must be disabled before playing with core_local since
	 * we otherwise may be rescheduled to a different core in the
	 * middle of this function.
	 */
	assert(thread_get_exceptions() & THREAD_EXCP_IRQ);

	assert(cpu_id < CFG_TEE_CORE_NB_CORE);
	return &thread_core_local[cpu_id];
}

static bool have_one_active_thread(void)
{
	size_t n;

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_ACTIVE)
			return true;
	}

	return false;
}

static bool have_one_preempted_thread(void)
{
	size_t n;

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_SUSPENDED &&
		    (threads[n].flags & THREAD_FLAGS_EXIT_ON_IRQ))
			return true;
	}

	return false;
}

static void thread_lazy_save_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	thread_vfp_state.ns_saved = false;
#if defined(ARM64) && defined(CFG_WITH_ARM_TRUSTED_FW)
	/*
	 * ARM TF saves and restores CPACR_EL1, so we must assume NS world
	 * uses VFP and always preserve the register file when secure world
	 * is about to use it
	 */
	thread_vfp_state.ns.force_save = true;
#endif
	vfp_lazy_save_state_init(&thread_vfp_state.ns);
#endif /*CFG_WITH_VFP*/
}

static void thread_lazy_restore_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	assert(!thread_vfp_state.sec_lazy_saved && !thread_vfp_state.sec_saved);
	vfp_lazy_restore_state(&thread_vfp_state.ns, thread_vfp_state.ns_saved);
	thread_vfp_state.ns_saved = false;
#endif /*CFG_WITH_VFP*/
}

#ifdef ARM32
static void init_regs(struct thread_ctx *thread,
		      struct thread_smc_args *args)
{
	thread->regs.pc = (uint32_t)thread_std_smc_entry;

	/*
	 * Stdcalls start in SVC mode with masked IRQ, masked asynchronous
	 * abort and unmasked FIQ.
	 */
	thread->regs.cpsr = read_cpsr() & ARM32_CPSR_E;
	thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_I | CPSR_A;
	/* Enable thumb mode if it's a thumb instruction */
	if (thread->regs.pc & 1)
		thread->regs.cpsr |= CPSR_T;
	/* Reinitialize stack pointer */
	thread->regs.svc_sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in r0-r7 when thread is started.
	 */
	thread->regs.r0 = args->a0;
	thread->regs.r1 = args->a1;
	thread->regs.r2 = args->a2;
	thread->regs.r3 = args->a3;
	thread->regs.r4 = args->a4;
	thread->regs.r5 = args->a5;
	thread->regs.r6 = args->a6;
	thread->regs.r7 = args->a7;
}
#endif /*ARM32*/

#ifdef ARM64
static void init_regs(struct thread_ctx *thread,
		      struct thread_smc_args *args)
{
	thread->regs.pc = (uint64_t)thread_std_smc_entry;

	/*
	 * Stdcalls start in EL1 with masked IRQ, masked asynchronous
	 * abort and unmasked FIQ.
	 */
	thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
				    DAIFBIT_IRQ | DAIFBIT_ABT);
	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in x0-x7 when thread is started.
	 */
	thread->regs.x[0] = args->a0;
	thread->regs.x[1] = args->a1;
	thread->regs.x[2] = args->a2;
	thread->regs.x[3] = args->a3;
	thread->regs.x[4] = args->a4;
	thread->regs.x[5] = args->a5;
	thread->regs.x[6] = args->a6;
	thread->regs.x[7] = args->a7;
}
#endif /*ARM64*/

static void thread_alloc_and_run(struct thread_smc_args *args)
{
	size_t n;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == -1);

	lock_global();

	if (!have_one_active_thread() && !have_one_preempted_thread()) {
		for (n = 0; n < CFG_NUM_THREADS; n++) {
			if (threads[n].state == THREAD_STATE_FREE) {
				threads[n].state = THREAD_STATE_ACTIVE;
				found_thread = true;
				break;
			}
		}
	}

	unlock_global();

	if (!found_thread) {
		args->a0 = TEESMC_RETURN_EBUSY;
		return;
	}

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, args);

	/* Save Hypervisor Client ID */
	threads[n].hyp_clnt_id = args->a7;

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}

#ifdef ARM32
static void copy_a0_to_a3(struct thread_ctx_regs *regs,
			  struct thread_smc_args *args)
{
	/*
	 * Update returned values from RPC, values will appear in
	 * r0-r3 when thread is resumed.
	 */
	regs->r0 = args->a0;
	regs->r1 = args->a1;
	regs->r2 = args->a2;
	regs->r3 = args->a3;
}
#endif /*ARM32*/

#ifdef ARM64
static void copy_a0_to_a3(struct thread_ctx_regs *regs,
			  struct thread_smc_args *args)
{
	/*
	 * Update returned values from RPC, values will appear in
	 * x0-x3 when thread is resumed.
	 */
	regs->x[0] = args->a0;
	regs->x[1] = args->a1;
	regs->x[2] = args->a2;
	regs->x[3] = args->a3;
}
#endif /*ARM64*/

static void thread_resume_from_rpc(struct thread_smc_args *args)
{
	size_t n = args->a3; /* thread id */
	struct thread_core_local *l = thread_get_core_local();
	uint32_t rv = 0;

	assert(l->curr_thread == -1);

	lock_global();

	if (have_one_active_thread()) {
		rv = TEESMC_RETURN_EBUSY;
	} else if (n < CFG_NUM_THREADS &&
		   threads[n].state == THREAD_STATE_SUSPENDED &&
		   args->a7 == threads[n].hyp_clnt_id) {
		/*
		 * If there's one preempted thread it has to be the one
		 * we're resuming.
		 */
		if (have_one_preempted_thread()) {
			if (threads[n].flags & THREAD_FLAGS_EXIT_ON_IRQ) {
				threads[n].flags &= ~THREAD_FLAGS_EXIT_ON_IRQ;
				threads[n].state = THREAD_STATE_ACTIVE;
			} else {
				rv = TEESMC_RETURN_EBUSY;
			}
		} else {
			threads[n].state = THREAD_STATE_ACTIVE;
		}
	} else {
		rv = TEESMC_RETURN_ERESUME;
	}

	unlock_global();

	if (rv) {
		args->a0 = rv;
		return;
	}

	l->curr_thread = n;

	if (threads[n].have_user_map)
		core_mmu_set_user_map(&threads[n].user_map);

	/*
	 * Return from RPC to request service of an IRQ must not
	 * get parameters from non-secure world.
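	 * Only a thread suspended with THREAD_FLAGS_COPY_ARGS_ON_RETURN
	 * set, i.e. one returning from a regular RPC, picks up a0-a3
	 * from the caller below.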
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a3(&threads[n].regs, args);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}

void thread_handle_fast_smc(struct thread_smc_args *args)
{
	thread_check_canaries();
	thread_fast_smc_handler_ptr(args);
	/* Fast handlers must not unmask any exceptions */
	assert(thread_get_exceptions() == THREAD_EXCP_ALL);
}

void thread_handle_std_smc(struct thread_smc_args *args)
{
	thread_check_canaries();

	if (args->a0 == TEESMC32_CALL_RETURN_FROM_RPC)
		thread_resume_from_rpc(args);
	else
		thread_alloc_and_run(args);
}

/* Helper routine for the assembly function thread_std_smc_entry() */
void __thread_std_smc_entry(struct thread_smc_args *args)
{
	struct thread_ctx *thr = threads + thread_get_id();

	if (!thr->rpc_arg) {
		paddr_t parg;
		void *arg;

		parg = thread_rpc_alloc_arg(
				TEESMC32_GET_ARG_SIZE(RPC_MAX_PARAMS));
		if (!parg || !TEE_ALIGNMENT_IS_OK(parg, struct teesmc32_arg) ||
		    core_pa2va(parg, &arg)) {
			thread_rpc_free_arg(parg);
			args->a0 = TEESMC_RETURN_ENOMEM;
			return;
		}

		thr->rpc_arg = arg;
		thr->rpc_parg = parg;
	}

	thread_std_smc_handler_ptr(args);
}

void thread_handle_abort(uint32_t abort_type, struct thread_abort_regs *regs)
{
#ifdef CFG_WITH_VFP
	if (vfp_is_enabled()) {
		vfp_lazy_save_state_init(&thread_vfp_state.sec);
		thread_vfp_state.sec_lazy_saved = true;
	}
#endif

	thread_abort_handler_ptr(abort_type, regs);

#ifdef CFG_WITH_VFP
	assert(!vfp_is_enabled());
	if (thread_vfp_state.sec_lazy_saved) {
		vfp_lazy_restore_state(&thread_vfp_state.sec,
				       thread_vfp_state.sec_saved);
		thread_vfp_state.sec_saved = false;
		thread_vfp_state.sec_lazy_saved = false;
	}
#endif
}

void *thread_get_tmp_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (void *)l->tmp_stack_va_end;
}

#ifdef ARM64
vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);
	return threads[ct].kern_sp;
}
#endif /*ARM64*/

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);

	thread_lazy_restore_ns_vfp();

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = -1;

	unlock_global();
}

int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);

	thread_check_canaries();

	thread_lazy_restore_ns_vfp();

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.cpsr = cpsr;
	threads[ct].regs.pc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = -1;

	unlock_global();

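	/*
	 * The thread id is handed back to normal world and returns in
	 * a3 when the thread is resumed via thread_resume_from_rpc().
	 */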
	return ct;
}

#ifdef ARM32
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->tmp_stack_va_end = sp;
	thread_set_irq_sp(sp);
	thread_set_fiq_sp(sp);
}

static void set_abt_stack(struct thread_core_local *l __unused, vaddr_t sp)
{
	thread_set_abt_sp(sp);
}
#endif /*ARM32*/

#ifdef ARM64
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	/*
	 * We're already using the tmp stack when this function is called
	 * so there's no need to assign it to any stack pointer. However,
	 * we'll need to restore it at different times so store it here.
	 */
	l->tmp_stack_va_end = sp;
}

static void set_abt_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->abt_stack_va_end = sp;
}
#endif /*ARM64*/

bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
{
	if (thread_id >= CFG_NUM_THREADS)
		return false;
	if (threads[thread_id].state != THREAD_STATE_FREE)
		return false;

	threads[thread_id].stack_va_end = sp;
	return true;
}

uint32_t thread_get_id(void)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;
	int ct;

	l = thread_get_core_local();
	ct = l->curr_thread;
	assert((ct >= 0) && (ct < CFG_NUM_THREADS));

	thread_unmask_exceptions(exceptions);
	return ct;
}

static void init_handlers(const struct thread_handlers *handlers)
{
	thread_std_smc_handler_ptr = handlers->std_smc;
	thread_fast_smc_handler_ptr = handlers->fast_smc;
	thread_fiq_handler_ptr = handlers->fiq;
	thread_svc_handler_ptr = handlers->svc;
	thread_abort_handler_ptr = handlers->abort;
	thread_cpu_on_handler_ptr = handlers->cpu_on;
	thread_cpu_off_handler_ptr = handlers->cpu_off;
	thread_cpu_suspend_handler_ptr = handlers->cpu_suspend;
	thread_cpu_resume_handler_ptr = handlers->cpu_resume;
	thread_system_off_handler_ptr = handlers->system_off;
	thread_system_reset_handler_ptr = handlers->system_reset;
}

#ifdef CFG_WITH_PAGER
static void init_thread_stacks(void)
{
	size_t n;

	/*
	 * Allocate virtual memory for thread stacks.
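	 * Each stack is preceded by a small-page-sized protection gap:
	 * the gap is reserved in vmem but never added to the pager, so
	 * a stack overflow faults instead of corrupting other data.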
	 */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		tee_mm_entry_t *mm;
		vaddr_t sp;

		/* Find vmem for thread stack and its protection gap */
		mm = tee_mm_alloc(&tee_mm_vcore,
				  SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
		TEE_ASSERT(mm);

		/* Claim eventual physical page */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);

		/* Realloc both protection vmem and stack vmem separately */
		sp = tee_mm_get_smem(mm);
		tee_mm_free(mm);
		mm = tee_mm_alloc2(&tee_mm_vcore, sp, SMALL_PAGE_SIZE);
		TEE_ASSERT(mm);
		mm = tee_mm_alloc2(&tee_mm_vcore, sp + SMALL_PAGE_SIZE,
				   STACK_THREAD_SIZE);
		TEE_ASSERT(mm);

		/* init effective stack */
		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
		if (!thread_init_stack(n, sp))
			panic();

		/* Add the area to the pager */
		tee_pager_add_area(mm, TEE_PAGER_AREA_RW, NULL, NULL);
	}
}
#else
static void init_thread_stacks(void)
{
	size_t n;

	/* Assign the thread stacks */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (!thread_init_stack(n, GET_STACK(stack_thread[n])))
			panic();
	}
}
#endif /*CFG_WITH_PAGER*/

void thread_init_primary(const struct thread_handlers *handlers)
{
	/*
	 * The COMPILE_TIME_ASSERT only works in function context. These
	 * checks verify that the offsets used in assembly code match
	 * what's used in C code.
	 */
#ifdef ARM32
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r0) ==
			    THREAD_SVC_REG_R0_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r1) ==
			    THREAD_SVC_REG_R1_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r2) ==
			    THREAD_SVC_REG_R2_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r3) ==
			    THREAD_SVC_REG_R3_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r4) ==
			    THREAD_SVC_REG_R4_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r5) ==
			    THREAD_SVC_REG_R5_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r6) ==
			    THREAD_SVC_REG_R6_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r7) ==
			    THREAD_SVC_REG_R7_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, lr) ==
			    THREAD_SVC_REG_LR_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, spsr) ==
			    THREAD_SVC_REG_SPSR_OFFS);
#endif /*ARM32*/
#ifdef ARM64
	/* struct thread_abort_regs */
	COMPILE_TIME_ASSERT(offsetof(struct thread_abort_regs, x22) ==
			    THREAD_ABT_REG_X_OFFS(22));
	COMPILE_TIME_ASSERT(offsetof(struct thread_abort_regs, elr) ==
			    THREAD_ABT_REG_ELR_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_abort_regs, spsr) ==
			    THREAD_ABT_REG_SPSR_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_abort_regs, sp_el0) ==
			    THREAD_ABT_REG_SP_EL0_OFFS);
	COMPILE_TIME_ASSERT(sizeof(struct thread_abort_regs) ==
			    THREAD_ABT_REGS_SIZE);

	/* struct thread_ctx */
	COMPILE_TIME_ASSERT(offsetof(struct thread_ctx, kern_sp) ==
			    THREAD_CTX_KERN_SP_OFFSET);
	COMPILE_TIME_ASSERT(sizeof(struct thread_ctx) == THREAD_CTX_SIZE);

	/* struct thread_ctx_regs */
	COMPILE_TIME_ASSERT(offsetof(struct thread_ctx_regs, sp) ==
			    THREAD_CTX_REGS_SP_OFFSET);
	COMPILE_TIME_ASSERT(offsetof(struct thread_ctx_regs, pc) ==
			    THREAD_CTX_REGS_PC_OFFSET);
	COMPILE_TIME_ASSERT(offsetof(struct thread_ctx_regs, cpsr) ==
			    THREAD_CTX_REGS_SPSR_OFFSET);
	COMPILE_TIME_ASSERT(offsetof(struct thread_ctx_regs, x[23]) ==
			    THREAD_CTX_REGS_X_OFFSET(23));
	COMPILE_TIME_ASSERT(sizeof(struct thread_ctx_regs) ==
			    THREAD_CTX_REGS_SIZE);

	/* struct thread_user_mode_rec */
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_user_mode_rec, exit_status0_ptr) ==
		THREAD_USER_MODE_REC_EXIT_STATUS0_PTR_OFFSET);
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_user_mode_rec, exit_status1_ptr) ==
		THREAD_USER_MODE_REC_EXIT_STATUS1_PTR_OFFSET);
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_user_mode_rec, x[1]) ==
		THREAD_USER_MODE_REC_X_OFFSET(20));
	COMPILE_TIME_ASSERT(sizeof(struct thread_user_mode_rec) ==
			    THREAD_USER_MODE_REC_SIZE);

	/* struct thread_core_local */
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_core_local, tmp_stack_va_end) ==
		THREAD_CORE_LOCAL_TMP_STACK_VA_END_OFFSET);
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_core_local, curr_thread) ==
		THREAD_CORE_LOCAL_CURR_THREAD_OFFSET);
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_core_local, flags) ==
		THREAD_CORE_LOCAL_FLAGS_OFFSET);
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_core_local, abt_stack_va_end) ==
		THREAD_CORE_LOCAL_ABT_STACK_VA_END_OFFSET);
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_core_local, x[3]) ==
		THREAD_CORE_LOCAL_X_OFFSET(3));
	COMPILE_TIME_ASSERT(sizeof(struct thread_core_local) ==
			    THREAD_CORE_LOCAL_SIZE);

#endif /*ARM64*/

	init_handlers(handlers);

	/* Initialize canaries around the stacks */
	init_canaries();

	init_thread_stacks();
}

static void init_sec_mon(size_t __unused pos)
{
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	/* Initialize secure monitor */
	sm_init(GET_STACK(stack_sm[pos]));
	sm_set_entry_vector(thread_vector_table);
#endif
}

void thread_init_per_cpu(void)
{
	size_t pos = get_core_pos();
	struct thread_core_local *l = thread_get_core_local();

	init_sec_mon(pos);

	l->curr_thread = -1;
	set_tmp_stack(l, GET_STACK(stack_tmp[pos]));
	set_abt_stack(l, GET_STACK(stack_abt[pos]));

	thread_init_vbar();
}

void thread_set_tsd(void *tsd)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;
	int ct;

	l = thread_get_core_local();
	ct = l->curr_thread;

	assert(ct != -1);
	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].tsd = tsd;

	thread_unmask_exceptions(exceptions);
}

void *thread_get_tsd(void)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;
	int ct;
	void *tsd;

	l = thread_get_core_local();
	ct = l->curr_thread;

	if (ct == -1 || threads[ct].state != THREAD_STATE_ACTIVE)
		tsd = NULL;
	else
		tsd = threads[ct].tsd;

	thread_unmask_exceptions(exceptions);
	return tsd;
}

struct thread_ctx_regs *thread_get_ctx_regs(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != -1);
	return &threads[l->curr_thread].regs;
}

void thread_set_irq(bool enable)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

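	/*
	 * Record the requested IRQ state in the thread flags so that
	 * thread_restore_irq() can restore it after an RPC; unmask IRQ
	 * immediately when enabling.
	 */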
	if (enable) {
		threads[l->curr_thread].flags |= THREAD_FLAGS_IRQ_ENABLE;
		thread_set_exceptions(exceptions & ~THREAD_EXCP_IRQ);
	} else {
		/*
		 * No need to disable IRQ here since it's already disabled
		 * above.
		 */
		threads[l->curr_thread].flags &= ~THREAD_FLAGS_IRQ_ENABLE;
	}
}

void thread_restore_irq(void)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (threads[l->curr_thread].flags & THREAD_FLAGS_IRQ_ENABLE)
		thread_set_exceptions(exceptions & ~THREAD_EXCP_IRQ);
}

#ifdef CFG_WITH_VFP
uint32_t thread_kernel_enable_vfp(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);

	assert(!vfp_is_enabled());

	if (!thread_vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thread_vfp_state.ns);
		thread_vfp_state.ns_saved = true;
	} else if (thread_vfp_state.sec_lazy_saved &&
		   !thread_vfp_state.sec_saved) {
		vfp_lazy_save_state_final(&thread_vfp_state.sec);
		thread_vfp_state.sec_saved = true;
	}

	vfp_enable();
	return exceptions;
}

void thread_kernel_disable_vfp(uint32_t state)
{
	uint32_t exceptions;

	assert(vfp_is_enabled());

	vfp_disable();
	exceptions = thread_get_exceptions();
	assert(exceptions & THREAD_EXCP_IRQ);
	exceptions &= ~THREAD_EXCP_IRQ;
	exceptions |= state & THREAD_EXCP_IRQ;
	thread_set_exceptions(exceptions);
}
#endif /*CFG_WITH_VFP*/

paddr_t thread_rpc_alloc_arg(size_t size)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		TEESMC_RETURN_RPC_ALLOC_ARG, size};

	thread_rpc(rpc_args);
	return rpc_args[1];
}

paddr_t thread_rpc_alloc_payload(size_t size)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		TEESMC_RETURN_RPC_ALLOC_PAYLOAD, size};

	thread_rpc(rpc_args);
	return rpc_args[1];
}

void thread_rpc_free_arg(paddr_t arg)
{
	if (arg) {
		uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
			TEESMC_RETURN_RPC_FREE_ARG, arg};

		thread_rpc(rpc_args);
	}
}

void thread_rpc_free_payload(paddr_t payload)
{
	if (payload) {
		uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
			TEESMC_RETURN_RPC_FREE_PAYLOAD, payload};

		thread_rpc(rpc_args);
	}
}

uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
			struct teesmc32_param *params)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { 0 };
	struct thread_ctx *thr = threads + thread_get_id();
	struct teesmc32_arg *arg = thr->rpc_arg;
	paddr_t parg = thr->rpc_parg;
	const size_t params_size = sizeof(struct teesmc32_param) * num_params;
	size_t n;

	TEE_ASSERT(arg && parg && num_params <= RPC_MAX_PARAMS);

	memset(arg, 0, TEESMC32_GET_ARG_SIZE(RPC_MAX_PARAMS));
	arg->cmd = cmd;
	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */
	arg->num_params = num_params;
	memcpy(TEESMC32_GET_PARAMS(arg), params, params_size);

	rpc_args[0] = TEESMC_RETURN_RPC_CMD;
	rpc_args[1] = parg;
	thread_rpc(rpc_args);

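	/*
	 * Copy the values updated by normal world back to the caller
	 * for output and inout parameters.
	 */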
	for (n = 0; n < num_params; n++) {
		switch (params[n].attr & TEESMC_ATTR_TYPE_MASK) {
		case TEESMC_ATTR_TYPE_VALUE_OUTPUT:
		case TEESMC_ATTR_TYPE_VALUE_INOUT:
		case TEESMC_ATTR_TYPE_MEMREF_OUTPUT:
		case TEESMC_ATTR_TYPE_MEMREF_INOUT:
			memcpy(params + n, TEESMC32_GET_PARAMS(arg) + n,
			       sizeof(struct teesmc32_param));
			break;
		default:
			break;
		}
	}

	return arg->ret;
}

void thread_optee_rpc_alloc_payload(size_t size, paddr_t *payload,
				    paddr_t *cookie)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		TEESMC_RETURN_OPTEE_RPC_ALLOC_PAYLOAD, size};

	thread_rpc(rpc_args);
	if (payload)
		*payload = rpc_args[1];
	if (cookie)
		*cookie = rpc_args[2];
}

void thread_optee_rpc_free_payload(paddr_t cookie)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		TEESMC_RETURN_OPTEE_RPC_FREE_PAYLOAD, cookie};

	thread_rpc(rpc_args);
}