/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <platform_config.h>

#include <kernel/panic.h>
#include <kernel/thread.h>
#include <kernel/thread_defs.h>
#include "thread_private.h"
#include <sm/sm_defs.h>
#include <sm/sm.h>
#include <sm/teesmc.h>
#include <sm/teesmc_optee.h>
#include <arm.h>
#include <kernel/tz_proc_def.h>
#include <kernel/tz_proc.h>
#include <kernel/misc.h>
#include <mm/tee_mmu.h>
#include <mm/tee_mmu_defs.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <kernel/tee_ta_manager.h>
#include <util.h>
#include <trace.h>

#include <assert.h>

#ifdef ARM32
#define STACK_TMP_SIZE		1024
#define STACK_THREAD_SIZE	8192

#if TRACE_LEVEL > 0
#define STACK_ABT_SIZE		2048
#else
#define STACK_ABT_SIZE		1024
#endif

#endif /*ARM32*/

#ifdef ARM64
#define STACK_TMP_SIZE		2048
#define STACK_THREAD_SIZE	8192

#if TRACE_LEVEL > 0
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		1024
#endif
#endif /*ARM64*/

struct thread_ctx threads[CFG_NUM_THREADS];

static struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE];

#ifdef CFG_WITH_VFP
struct thread_vfp_state {
	bool ns_saved;
	bool sec_saved;
	bool sec_lazy_saved;
	struct vfp_state ns;
	struct vfp_state sec;
};

static struct thread_vfp_state thread_vfp_state;
#endif /*CFG_WITH_VFP*/

#ifdef CFG_WITH_STACK_CANARIES
#ifdef ARM32
#define STACK_CANARY_SIZE	(4 * sizeof(uint32_t))
#endif
#ifdef ARM64
#define STACK_CANARY_SIZE	(8 * sizeof(uint32_t))
#endif
#define START_CANARY_VALUE	0xdededede
#define END_CANARY_VALUE	0xabababab
#define GET_START_CANARY(name, stack_num) name[stack_num][0]
#define GET_END_CANARY(name, stack_num) \
	name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1]
#else
#define STACK_CANARY_SIZE	0
#endif
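/*
 * Informal sketch of the canary layout (derived from the macros above and
 * the DECLARE_STACK()/GET_STACK() helpers below): each declared stack
 * reserves STACK_CANARY_SIZE extra bytes.  The first word of the array
 * holds START_CANARY_VALUE and the last word END_CANARY_VALUE, and
 * GET_STACK() returns the array end minus half the canary area, so the
 * end marker sits above anything the stack ever writes:
 *
 *   low addr [start canary][... stack, grows downwards ...][end canary] high
 *                                                   GET_STACK() ^
 *
 * An overflow past the bottom of a stack tramples the start marker, which
 * thread_check_canaries() then catches.  With CFG_WITH_STACK_CANARIES
 * disabled, STACK_CANARY_SIZE is 0 and none of this applies.
 */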
#define DECLARE_STACK(name, num_stacks, stack_size) \
	static uint32_t name[num_stacks][ \
		ROUNDUP(stack_size + STACK_CANARY_SIZE, STACK_ALIGNMENT) / \
		sizeof(uint32_t)] \
		__attribute__((section(".nozi.stack"), \
			       aligned(STACK_ALIGNMENT)))

#define GET_STACK(stack) \
	((vaddr_t)(stack) + sizeof(stack) - STACK_CANARY_SIZE / 2)

DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE);
DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE);
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
DECLARE_STACK(stack_sm, CFG_TEE_CORE_NB_CORE, SM_STACK_SIZE);
#endif
#ifndef CFG_WITH_PAGER
DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE);
#endif

const vaddr_t stack_tmp_top[CFG_TEE_CORE_NB_CORE] = {
	GET_STACK(stack_tmp[0]),
#if CFG_TEE_CORE_NB_CORE > 1
	GET_STACK(stack_tmp[1]),
#endif
#if CFG_TEE_CORE_NB_CORE > 2
	GET_STACK(stack_tmp[2]),
#endif
#if CFG_TEE_CORE_NB_CORE > 3
	GET_STACK(stack_tmp[3]),
#endif
#if CFG_TEE_CORE_NB_CORE > 4
	GET_STACK(stack_tmp[4]),
#endif
#if CFG_TEE_CORE_NB_CORE > 5
	GET_STACK(stack_tmp[5]),
#endif
#if CFG_TEE_CORE_NB_CORE > 6
	GET_STACK(stack_tmp[6]),
#endif
#if CFG_TEE_CORE_NB_CORE > 7
	GET_STACK(stack_tmp[7]),
#endif
#if CFG_TEE_CORE_NB_CORE > 8
#error "Top of tmp stacks isn't defined for more than 8 CPUs"
#endif
};

thread_smc_handler_t thread_std_smc_handler_ptr;
static thread_smc_handler_t thread_fast_smc_handler_ptr;
thread_fiq_handler_t thread_fiq_handler_ptr;
thread_svc_handler_t thread_svc_handler_ptr;
static thread_abort_handler_t thread_abort_handler_ptr;
thread_pm_handler_t thread_cpu_on_handler_ptr;
thread_pm_handler_t thread_cpu_off_handler_ptr;
thread_pm_handler_t thread_cpu_suspend_handler_ptr;
thread_pm_handler_t thread_cpu_resume_handler_ptr;
thread_pm_handler_t thread_system_off_handler_ptr;
thread_pm_handler_t thread_system_reset_handler_ptr;

static unsigned int thread_global_lock = SPINLOCK_UNLOCK;

static void init_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;
#define INIT_CANARY(name)						\
	for (n = 0; n < ARRAY_SIZE(name); n++) {			\
		uint32_t *start_canary = &GET_START_CANARY(name, n);	\
		uint32_t *end_canary = &GET_END_CANARY(name, n);	\
									\
		*start_canary = START_CANARY_VALUE;			\
		*end_canary = END_CANARY_VALUE;				\
		DMSG("#Stack canaries for %s[%zu] with top at %p\n",	\
			#name, n, (void *)(end_canary - 1));		\
		DMSG("watch *%p\n", (void *)end_canary);		\
	}

	INIT_CANARY(stack_tmp);
	INIT_CANARY(stack_abt);
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	INIT_CANARY(stack_sm);
#endif
#ifndef CFG_WITH_PAGER
	INIT_CANARY(stack_thread);
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}
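/*
 * Note (based on how this file uses it): thread_check_canaries() is cheap,
 * a few loads and asserts per stack, so it is called from the SMC entry
 * paths below (thread_handle_fast_smc() and thread_handle_std_smc()) and
 * from thread_state_suspend(), which catches a stack overflow reasonably
 * close to where it happened.
 */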
void thread_check_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;

	for (n = 0; n < ARRAY_SIZE(stack_tmp); n++) {
		assert(GET_START_CANARY(stack_tmp, n) == START_CANARY_VALUE);
		assert(GET_END_CANARY(stack_tmp, n) == END_CANARY_VALUE);
	}

	for (n = 0; n < ARRAY_SIZE(stack_abt); n++) {
		assert(GET_START_CANARY(stack_abt, n) == START_CANARY_VALUE);
		assert(GET_END_CANARY(stack_abt, n) == END_CANARY_VALUE);
	}
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	for (n = 0; n < ARRAY_SIZE(stack_sm); n++) {
		assert(GET_START_CANARY(stack_sm, n) == START_CANARY_VALUE);
		assert(GET_END_CANARY(stack_sm, n) == END_CANARY_VALUE);
	}
#endif
#ifndef CFG_WITH_PAGER
	for (n = 0; n < ARRAY_SIZE(stack_thread); n++) {
		assert(GET_START_CANARY(stack_thread, n) ==
		       START_CANARY_VALUE);
		assert(GET_END_CANARY(stack_thread, n) == END_CANARY_VALUE);
	}
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

static void lock_global(void)
{
	cpu_spin_lock(&thread_global_lock);
}

static void unlock_global(void)
{
	cpu_spin_unlock(&thread_global_lock);
}

#ifdef ARM32
uint32_t thread_get_exceptions(void)
{
	uint32_t cpsr = read_cpsr();

	return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t cpsr = read_cpsr();

	cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
	cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);
	write_cpsr(cpsr);
}
#endif /*ARM32*/

#ifdef ARM64
uint32_t thread_get_exceptions(void)
{
	uint32_t daif = read_daif();

	return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t daif = read_daif();

	daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
	daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT);
	write_daif(daif);
}
#endif /*ARM64*/

uint32_t thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}

struct thread_core_local *thread_get_core_local(void)
{
	uint32_t cpu_id = get_core_pos();

	/*
	 * IRQs must be disabled before playing with core_local since
	 * we otherwise may be rescheduled to a different core in the
	 * middle of this function.
	 */
	assert(thread_get_exceptions() & THREAD_EXCP_IRQ);

	assert(cpu_id < CFG_TEE_CORE_NB_CORE);
	return &thread_core_local[cpu_id];
}

static bool have_one_active_thread(void)
{
	size_t n;

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_ACTIVE)
			return true;
	}

	return false;
}

static bool have_one_preempted_thread(void)
{
	size_t n;

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_SUSPENDED &&
		    (threads[n].flags & THREAD_FLAGS_EXIT_ON_IRQ))
			return true;
	}

	return false;
}

static void thread_lazy_save_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	thread_vfp_state.ns_saved = false;
	vfp_lazy_save_state_init(&thread_vfp_state.ns);
#endif /*CFG_WITH_VFP*/
}

static void thread_lazy_restore_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	assert(!thread_vfp_state.sec_lazy_saved && !thread_vfp_state.sec_saved);
	vfp_lazy_restore_state(&thread_vfp_state.ns, thread_vfp_state.ns_saved);
	thread_vfp_state.ns_saved = false;
#endif /*CFG_WITH_VFP*/
}
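/*
 * Rough sketch of the lazy VFP handling above, based on how the helpers are
 * used in this file: thread_lazy_save_ns_vfp() only marks the non-secure
 * VFP state for a deferred save when a thread is entered; the actual
 * register dump is postponed until secure code first needs VFP and calls
 * thread_kernel_enable_vfp() further down.  thread_lazy_restore_ns_vfp()
 * undoes this on the exit paths (thread_state_free()/thread_state_suspend()),
 * restoring the non-secure state only if it was really saved (ns_saved).
 */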
#ifdef ARM32
static void init_regs(struct thread_ctx *thread,
		struct thread_smc_args *args)
{
	thread->regs.pc = (uint32_t)thread_std_smc_entry;

	/*
	 * Stdcalls start in SVC mode with masked IRQ, masked asynchronous
	 * abort and unmasked FIQ.
	 */
	thread->regs.cpsr = CPSR_MODE_SVC | CPSR_I | CPSR_A;
	/* Enable thumb mode if it's a thumb instruction */
	if (thread->regs.pc & 1)
		thread->regs.cpsr |= CPSR_T;
	/* Reinitialize stack pointer */
	thread->regs.svc_sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in r0-r7 when thread is started.
	 */
	thread->regs.r0 = args->a0;
	thread->regs.r1 = args->a1;
	thread->regs.r2 = args->a2;
	thread->regs.r3 = args->a3;
	thread->regs.r4 = args->a4;
	thread->regs.r5 = args->a5;
	thread->regs.r6 = args->a6;
	thread->regs.r7 = args->a7;
}
#endif /*ARM32*/

#ifdef ARM64
static void init_regs(struct thread_ctx *thread,
		struct thread_smc_args *args)
{
	thread->regs.pc = (uint64_t)thread_std_smc_entry;

	/*
	 * Stdcalls start in EL1 with masked IRQ, masked asynchronous
	 * abort and unmasked FIQ.
	 */
	thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
				DAIFBIT_IRQ | DAIFBIT_ABT);
	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in x0-x7 when thread is started.
	 */
	thread->regs.x[0] = args->a0;
	thread->regs.x[1] = args->a1;
	thread->regs.x[2] = args->a2;
	thread->regs.x[3] = args->a3;
	thread->regs.x[4] = args->a4;
	thread->regs.x[5] = args->a5;
	thread->regs.x[6] = args->a6;
	thread->regs.x[7] = args->a7;
}
#endif /*ARM64*/

static void thread_alloc_and_run(struct thread_smc_args *args)
{
	size_t n;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == -1);

	lock_global();

	if (!have_one_active_thread() && !have_one_preempted_thread()) {
		for (n = 0; n < CFG_NUM_THREADS; n++) {
			if (threads[n].state == THREAD_STATE_FREE) {
				threads[n].state = THREAD_STATE_ACTIVE;
				found_thread = true;
				break;
			}
		}
	}

	unlock_global();

	if (!found_thread) {
		args->a0 = TEESMC_RETURN_EBUSY;
		return;
	}

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, args);

	/* Save Hypervisor Client ID */
	threads[n].hyp_clnt_id = args->a7;

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}

#ifdef ARM32
static void copy_a0_to_a3(struct thread_ctx_regs *regs,
		struct thread_smc_args *args)
{
	/*
	 * Update returned values from RPC, values will appear in
	 * r0-r3 when thread is resumed.
	 */
	regs->r0 = args->a0;
	regs->r1 = args->a1;
	regs->r2 = args->a2;
	regs->r3 = args->a3;
}
#endif /*ARM32*/

#ifdef ARM64
static void copy_a0_to_a3(struct thread_ctx_regs *regs,
		struct thread_smc_args *args)
{
	/*
	 * Update returned values from RPC, values will appear in
	 * x0-x3 when thread is resumed.
	 */
	regs->x[0] = args->a0;
	regs->x[1] = args->a1;
	regs->x[2] = args->a2;
	regs->x[3] = args->a3;
}
#endif /*ARM64*/
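/*
 * Informal note on the resume path that follows: a suspended standard call
 * is resumed when the non-secure world issues TEESMC32_CALL_RETURN_FROM_RPC
 * with the thread id (as handed out when the thread was suspended) in a3
 * and the hypervisor client id in a7.  Both are checked below before the
 * thread is moved back to THREAD_STATE_ACTIVE.
 */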
static void thread_resume_from_rpc(struct thread_smc_args *args)
{
	size_t n = args->a3; /* thread id */
	struct thread_core_local *l = thread_get_core_local();
	uint32_t rv = 0;

	assert(l->curr_thread == -1);

	lock_global();

	if (have_one_active_thread()) {
		rv = TEESMC_RETURN_EBUSY;
	} else if (n < CFG_NUM_THREADS &&
		   threads[n].state == THREAD_STATE_SUSPENDED &&
		   args->a7 == threads[n].hyp_clnt_id) {
		/*
		 * If there's one preempted thread it has to be the one
		 * we're resuming.
		 */
		if (have_one_preempted_thread()) {
			if (threads[n].flags & THREAD_FLAGS_EXIT_ON_IRQ) {
				threads[n].flags &= ~THREAD_FLAGS_EXIT_ON_IRQ;
				threads[n].state = THREAD_STATE_ACTIVE;
			} else {
				rv = TEESMC_RETURN_EBUSY;
			}
		} else {
			threads[n].state = THREAD_STATE_ACTIVE;
		}
	} else {
		rv = TEESMC_RETURN_ERESUME;
	}

	unlock_global();

	if (rv) {
		args->a0 = rv;
		return;
	}

	l->curr_thread = n;

	if (threads[n].have_user_map)
		core_mmu_set_user_map(&threads[n].user_map);

	/*
	 * A return from an RPC that requested service of an IRQ must not
	 * pick up parameters from the non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a3(&threads[n].regs, args);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}

void thread_handle_fast_smc(struct thread_smc_args *args)
{
	thread_check_canaries();
	thread_fast_smc_handler_ptr(args);
	/* Fast handlers must not unmask any exceptions */
	assert(thread_get_exceptions() == THREAD_EXCP_ALL);
}

void thread_handle_std_smc(struct thread_smc_args *args)
{
	thread_check_canaries();

	if (args->a0 == TEESMC32_CALL_RETURN_FROM_RPC)
		thread_resume_from_rpc(args);
	else
		thread_alloc_and_run(args);
}

void thread_handle_abort(uint32_t abort_type, struct thread_abort_regs *regs)
{
#ifdef CFG_WITH_VFP
	if (vfp_is_enabled()) {
		vfp_lazy_save_state_init(&thread_vfp_state.sec);
		thread_vfp_state.sec_lazy_saved = true;
	}
#endif

	thread_abort_handler_ptr(abort_type, regs);

#ifdef CFG_WITH_VFP
	assert(!vfp_is_enabled());
	if (thread_vfp_state.sec_lazy_saved) {
		vfp_lazy_restore_state(&thread_vfp_state.sec,
				       thread_vfp_state.sec_saved);
		thread_vfp_state.sec_saved = false;
		thread_vfp_state.sec_lazy_saved = false;
	}
#endif
}

void *thread_get_tmp_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (void *)l->tmp_stack_va_end;
}

#ifdef ARM64
vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);
	return threads[ct].kern_sp;
}
#endif /*ARM64*/

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);

	thread_lazy_restore_ns_vfp();

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = -1;

	unlock_global();
}
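/*
 * thread_state_suspend() below parks the current thread and gives the core
 * back to the normal world.  The returned thread id is what the caller is
 * expected to report to the non-secure side so that the call can later be
 * resumed through thread_resume_from_rpc() (a sketch of the contract,
 * inferred from how the id is matched against args->a3 there).
 */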
int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);

	thread_check_canaries();

	thread_lazy_restore_ns_vfp();

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.cpsr = cpsr;
	threads[ct].regs.pc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = -1;

	unlock_global();

	return ct;
}

#ifdef ARM32
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->tmp_stack_va_end = sp;
	thread_set_irq_sp(sp);
	thread_set_fiq_sp(sp);
}

static void set_abt_stack(struct thread_core_local *l __unused, vaddr_t sp)
{
	thread_set_abt_sp(sp);
}
#endif /*ARM32*/

#ifdef ARM64
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	/*
	 * We're already using the tmp stack when this function is called
	 * so there's no need to assign it to any stack pointer. However,
	 * we'll need to restore it at different times so store it here.
	 */
	l->tmp_stack_va_end = sp;
}

static void set_abt_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->abt_stack_va_end = sp;
}
#endif /*ARM64*/

bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
{
	if (thread_id >= CFG_NUM_THREADS)
		return false;
	if (threads[thread_id].state != THREAD_STATE_FREE)
		return false;

	threads[thread_id].stack_va_end = sp;
	return true;
}

uint32_t thread_get_id(void)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;
	int ct;

	l = thread_get_core_local();
	ct = l->curr_thread;
	assert((ct >= 0) && (ct < CFG_NUM_THREADS));

	thread_unmask_exceptions(exceptions);
	return ct;
}

static void init_handlers(const struct thread_handlers *handlers)
{
	thread_std_smc_handler_ptr = handlers->std_smc;
	thread_fast_smc_handler_ptr = handlers->fast_smc;
	thread_fiq_handler_ptr = handlers->fiq;
	thread_svc_handler_ptr = handlers->svc;
	thread_abort_handler_ptr = handlers->abort;
	thread_cpu_on_handler_ptr = handlers->cpu_on;
	thread_cpu_off_handler_ptr = handlers->cpu_off;
	thread_cpu_suspend_handler_ptr = handlers->cpu_suspend;
	thread_cpu_resume_handler_ptr = handlers->cpu_resume;
	thread_system_off_handler_ptr = handlers->system_off;
	thread_system_reset_handler_ptr = handlers->system_reset;
}
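/*
 * With CFG_WITH_PAGER the thread stacks are carved out of the pager's
 * virtual core memory instead of the static stack_thread[] array.  A rough
 * reading of the code below: each stack gets an extra SMALL_PAGE_SIZE of
 * reserved but unmapped address space just below it (the "protection gap"),
 * so running off the bottom of a stack faults instead of silently
 * corrupting its neighbour.
 */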
#ifdef CFG_WITH_PAGER
static void init_thread_stacks(void)
{
	size_t n;

	/*
	 * Allocate virtual memory for thread stacks.
	 */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		tee_mm_entry_t *mm;
		vaddr_t sp;

		/* Find vmem for thread stack and its protection gap */
		mm = tee_mm_alloc(&tee_mm_vcore,
				  SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
		TEE_ASSERT(mm);

		/* Claim any physical pages for the range */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);

		/* Reallocate the protection vmem and stack vmem separately */
		sp = tee_mm_get_smem(mm);
		tee_mm_free(mm);
		mm = tee_mm_alloc2(&tee_mm_vcore, sp, SMALL_PAGE_SIZE);
		TEE_ASSERT(mm);
		mm = tee_mm_alloc2(&tee_mm_vcore, sp + SMALL_PAGE_SIZE,
				   STACK_THREAD_SIZE);
		TEE_ASSERT(mm);

		/* Init effective stack */
		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
		if (!thread_init_stack(n, sp))
			panic();

		/* Add the area to the pager */
		tee_pager_add_area(mm, TEE_PAGER_AREA_RW, NULL, NULL);
	}
}
#else
static void init_thread_stacks(void)
{
	size_t n;

	/* Assign the thread stacks */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (!thread_init_stack(n, GET_STACK(stack_thread[n])))
			panic();
	}
}
#endif /*CFG_WITH_PAGER*/
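/*
 * Initialization order, as used in this file: thread_init_primary() is
 * meant to run once on the boot core (it installs the handler table, writes
 * the stack canaries and assigns the thread stacks), while
 * thread_init_per_cpu() further down runs on every core to set up the
 * secure monitor, the per-core tmp/abort stacks and the vector base.
 */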
void thread_init_primary(const struct thread_handlers *handlers)
{
	/*
	 * COMPILE_TIME_ASSERT() only works in function context. These
	 * checks verify that the offsets used in assembly code match
	 * what's used in C code.
	 */
#ifdef ARM32
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r0) ==
				THREAD_SVC_REG_R0_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r1) ==
				THREAD_SVC_REG_R1_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r2) ==
				THREAD_SVC_REG_R2_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r3) ==
				THREAD_SVC_REG_R3_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r4) ==
				THREAD_SVC_REG_R4_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r5) ==
				THREAD_SVC_REG_R5_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r6) ==
				THREAD_SVC_REG_R6_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r7) ==
				THREAD_SVC_REG_R7_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, lr) ==
				THREAD_SVC_REG_LR_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, spsr) ==
				THREAD_SVC_REG_SPSR_OFFS);
#endif /*ARM32*/
#ifdef ARM64
	/* struct thread_abort_regs */
	COMPILE_TIME_ASSERT(offsetof(struct thread_abort_regs, x22) ==
				THREAD_ABT_REG_X_OFFS(22));
	COMPILE_TIME_ASSERT(offsetof(struct thread_abort_regs, elr) ==
				THREAD_ABT_REG_ELR_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_abort_regs, spsr) ==
				THREAD_ABT_REG_SPSR_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_abort_regs, sp_el0) ==
				THREAD_ABT_REG_SP_EL0_OFFS);
	COMPILE_TIME_ASSERT(sizeof(struct thread_abort_regs) ==
				THREAD_ABT_REGS_SIZE);

	/* struct thread_ctx */
	COMPILE_TIME_ASSERT(offsetof(struct thread_ctx, kern_sp) ==
				THREAD_CTX_KERN_SP_OFFSET);
	COMPILE_TIME_ASSERT(sizeof(struct thread_ctx) == THREAD_CTX_SIZE);

	/* struct thread_ctx_regs */
	COMPILE_TIME_ASSERT(offsetof(struct thread_ctx_regs, sp) ==
				THREAD_CTX_REGS_SP_OFFSET);
	COMPILE_TIME_ASSERT(offsetof(struct thread_ctx_regs, pc) ==
				THREAD_CTX_REGS_PC_OFFSET);
	COMPILE_TIME_ASSERT(offsetof(struct thread_ctx_regs, cpsr) ==
				THREAD_CTX_REGS_SPSR_OFFSET);
	COMPILE_TIME_ASSERT(offsetof(struct thread_ctx_regs, x[23]) ==
				THREAD_CTX_REGS_X_OFFSET(23));
	COMPILE_TIME_ASSERT(sizeof(struct thread_ctx_regs) ==
				THREAD_CTX_REGS_SIZE);

	/* struct thread_user_mode_rec */
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_user_mode_rec, exit_status0_ptr) ==
			THREAD_USER_MODE_REC_EXIT_STATUS0_PTR_OFFSET);
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_user_mode_rec, exit_status1_ptr) ==
			THREAD_USER_MODE_REC_EXIT_STATUS1_PTR_OFFSET);
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_user_mode_rec, x[1]) ==
			THREAD_USER_MODE_REC_X_OFFSET(20));
	COMPILE_TIME_ASSERT(sizeof(struct thread_user_mode_rec) ==
				THREAD_USER_MODE_REC_SIZE);

	/* struct thread_core_local */
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_core_local, tmp_stack_va_end) ==
			THREAD_CORE_LOCAL_TMP_STACK_VA_END_OFFSET);
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_core_local, curr_thread) ==
			THREAD_CORE_LOCAL_CURR_THREAD_OFFSET);
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_core_local, flags) ==
			THREAD_CORE_LOCAL_FLAGS_OFFSET);
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_core_local, abt_stack_va_end) ==
			THREAD_CORE_LOCAL_ABT_STACK_VA_END_OFFSET);
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_core_local, x[3]) ==
			THREAD_CORE_LOCAL_X_OFFSET(3));
	COMPILE_TIME_ASSERT(sizeof(struct thread_core_local) ==
				THREAD_CORE_LOCAL_SIZE);

#endif /*ARM64*/

	init_handlers(handlers);

	/* Initialize canaries around the stacks */
	init_canaries();

	init_thread_stacks();
}

static void init_sec_mon(size_t __unused pos)
{
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	/* Initialize secure monitor */
	sm_init(GET_STACK(stack_sm[pos]));
	sm_set_entry_vector(thread_vector_table);
#endif
}

void thread_init_per_cpu(void)
{
	size_t pos = get_core_pos();
	struct thread_core_local *l = thread_get_core_local();

	init_sec_mon(pos);

	l->curr_thread = -1;
	set_tmp_stack(l, GET_STACK(stack_tmp[pos]));
	set_abt_stack(l, GET_STACK(stack_abt[pos]));

	thread_init_vbar();
}

void thread_set_tsd(void *tsd)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;
	int ct;

	l = thread_get_core_local();
	ct = l->curr_thread;

	assert(ct != -1);
	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].tsd = tsd;

	thread_unmask_exceptions(exceptions);
}

void *thread_get_tsd(void)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;
	int ct;
	void *tsd;

	l = thread_get_core_local();
	ct = l->curr_thread;

	if (ct == -1 || threads[ct].state != THREAD_STATE_ACTIVE)
		tsd = NULL;
	else
		tsd = threads[ct].tsd;

	thread_unmask_exceptions(exceptions);
	return tsd;
}

struct thread_ctx_regs *thread_get_ctx_regs(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != -1);
	return &threads[l->curr_thread].regs;
}
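/*
 * A note on the two helpers that follow (an informal reading of this file):
 * thread_set_irq() both changes the current IRQ mask and records the choice
 * in THREAD_FLAGS_IRQ_ENABLE, whereas thread_restore_irq() only re-applies
 * whatever was recorded, for paths where the mask may have been changed
 * behind the thread's back.
 */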
void thread_set_irq(bool enable)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (enable) {
		threads[l->curr_thread].flags |= THREAD_FLAGS_IRQ_ENABLE;
		thread_set_exceptions(exceptions & ~THREAD_EXCP_IRQ);
	} else {
		/*
		 * No need to disable IRQ here since it's already disabled
		 * above.
		 */
		threads[l->curr_thread].flags &= ~THREAD_FLAGS_IRQ_ENABLE;
	}
}

void thread_restore_irq(void)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (threads[l->curr_thread].flags & THREAD_FLAGS_IRQ_ENABLE)
		thread_set_exceptions(exceptions & ~THREAD_EXCP_IRQ);
}

#ifdef CFG_WITH_VFP
uint32_t thread_kernel_enable_vfp(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);

	assert(!vfp_is_enabled());

	if (!thread_vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thread_vfp_state.ns);
		thread_vfp_state.ns_saved = true;
	} else if (thread_vfp_state.sec_lazy_saved &&
		   !thread_vfp_state.sec_saved) {
		vfp_lazy_save_state_final(&thread_vfp_state.sec);
		thread_vfp_state.sec_saved = true;
	}

	vfp_enable();
	return exceptions;
}

void thread_kernel_disable_vfp(uint32_t state)
{
	uint32_t exceptions;

	assert(vfp_is_enabled());

	vfp_disable();
	exceptions = thread_get_exceptions();
	assert(exceptions & THREAD_EXCP_IRQ);
	exceptions &= ~THREAD_EXCP_IRQ;
	exceptions |= state & THREAD_EXCP_IRQ;
	thread_set_exceptions(exceptions);
}
#endif /*CFG_WITH_VFP*/

paddr_t thread_rpc_alloc_arg(size_t size)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		TEESMC_RETURN_RPC_ALLOC_ARG, size};

	thread_rpc(rpc_args);
	return rpc_args[1];
}

paddr_t thread_rpc_alloc_payload(size_t size)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		TEESMC_RETURN_RPC_ALLOC_PAYLOAD, size};

	thread_rpc(rpc_args);
	return rpc_args[1];
}

void thread_rpc_free_arg(paddr_t arg)
{
	if (arg) {
		uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
			TEESMC_RETURN_RPC_FREE_ARG, arg};

		thread_rpc(rpc_args);
	}
}

void thread_rpc_free_payload(paddr_t payload)
{
	if (payload) {
		uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
			TEESMC_RETURN_RPC_FREE_PAYLOAD, payload};

		thread_rpc(rpc_args);
	}
}

void thread_rpc_cmd(paddr_t arg)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {TEESMC_RETURN_RPC_CMD, arg};

	thread_rpc(rpc_args);
}

void thread_optee_rpc_alloc_payload(size_t size, paddr_t *payload,
		paddr_t *cookie)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		TEESMC_RETURN_OPTEE_RPC_ALLOC_PAYLOAD, size};

	thread_rpc(rpc_args);
	if (payload)
		*payload = rpc_args[1];
	if (cookie)
		*cookie = rpc_args[2];
}

void thread_optee_rpc_free_payload(paddr_t cookie)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		TEESMC_RETURN_OPTEE_RPC_FREE_PAYLOAD, cookie};

	thread_rpc(rpc_args);
}
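/*
 * Illustrative (non-normative) use of the RPC helpers above from secure
 * code that needs a buffer allocated by the normal world; the error
 * handling shown is only a sketch and up to the caller:
 *
 *	paddr_t pa = thread_rpc_alloc_payload(size);
 *
 *	if (!pa)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *	...use the buffer shared with the non-secure side...
 *	thread_rpc_free_payload(pa);
 */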