/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <platform_config.h>

#include <kernel/panic.h>
#include <kernel/thread.h>
#include <kernel/thread_defs.h>
#include "thread_private.h"
#include <sm/sm_defs.h>
#include <sm/sm.h>
#include <sm/teesmc.h>
#include <sm/teesmc_optee.h>
#include <arm.h>
#include <kernel/tz_proc_def.h>
#include <kernel/tz_proc.h>
#include <kernel/misc.h>
#include <mm/tee_mmu.h>
#include <mm/tee_mmu_defs.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <kernel/tee_ta_manager.h>
#include <util.h>
#include <trace.h>

#include <assert.h>

#ifdef ARM32
#define STACK_TMP_SIZE		1024
#define STACK_THREAD_SIZE	8192

#if TRACE_LEVEL > 0
#define STACK_ABT_SIZE		2048
#else
#define STACK_ABT_SIZE		1024
#endif

#endif /*ARM32*/

#ifdef ARM64
#define STACK_TMP_SIZE		2048
#define STACK_THREAD_SIZE	8192

#if TRACE_LEVEL > 0
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		1024
#endif
#endif /*ARM64*/

struct thread_ctx threads[CFG_NUM_THREADS];

static struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE];

#ifdef CFG_WITH_VFP
struct thread_vfp_state {
	bool ns_saved;
	bool sec_saved;
	bool sec_lazy_saved;
	struct vfp_state ns;
	struct vfp_state sec;
};

static struct thread_vfp_state thread_vfp_state;
#endif /*CFG_WITH_VFP*/

#ifdef CFG_WITH_STACK_CANARIES
#ifdef ARM32
#define STACK_CANARY_SIZE	(4 * sizeof(uint32_t))
#endif
#ifdef ARM64
#define STACK_CANARY_SIZE	(8 * sizeof(uint32_t))
#endif
#define START_CANARY_VALUE	0xdededede
#define END_CANARY_VALUE	0xabababab
#define GET_START_CANARY(name, stack_num) name[stack_num][0]
#define GET_END_CANARY(name, stack_num) \
	name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1]
#else
#define STACK_CANARY_SIZE	0
#endif

#define DECLARE_STACK(name, num_stacks, stack_size) \
	static uint32_t name[num_stacks][ \
		ROUNDUP(stack_size + STACK_CANARY_SIZE, STACK_ALIGNMENT) / \
		sizeof(uint32_t)] \
		__attribute__((section(".nozi.stack"), \
			       aligned(STACK_ALIGNMENT)))

#define GET_STACK(stack) \
	((vaddr_t)(stack) + sizeof(stack) - STACK_CANARY_SIZE / 2)

DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE);
DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE);
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
DECLARE_STACK(stack_sm, CFG_TEE_CORE_NB_CORE, SM_STACK_SIZE);
#endif
#ifndef CFG_WITH_PAGER
DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE);
#endif

const vaddr_t stack_tmp_top[CFG_TEE_CORE_NB_CORE] = {
	GET_STACK(stack_tmp[0]),
#if CFG_TEE_CORE_NB_CORE > 1
	GET_STACK(stack_tmp[1]),
#endif
#if CFG_TEE_CORE_NB_CORE > 2
	GET_STACK(stack_tmp[2]),
#endif
#if CFG_TEE_CORE_NB_CORE > 3
	GET_STACK(stack_tmp[3]),
#endif
#if CFG_TEE_CORE_NB_CORE > 4
	GET_STACK(stack_tmp[4]),
#endif
#if CFG_TEE_CORE_NB_CORE > 5
	GET_STACK(stack_tmp[5]),
#endif
#if CFG_TEE_CORE_NB_CORE > 6
	GET_STACK(stack_tmp[6]),
#endif
#if CFG_TEE_CORE_NB_CORE > 7
	GET_STACK(stack_tmp[7]),
#endif
#if CFG_TEE_CORE_NB_CORE > 8
#error "Top of tmp stacks isn't defined for more than 8 CPUs"
#endif
};

thread_smc_handler_t thread_std_smc_handler_ptr;
static thread_smc_handler_t thread_fast_smc_handler_ptr;
thread_fiq_handler_t thread_fiq_handler_ptr;
thread_svc_handler_t thread_svc_handler_ptr;
static thread_abort_handler_t thread_abort_handler_ptr;
thread_pm_handler_t thread_cpu_on_handler_ptr;
thread_pm_handler_t thread_cpu_off_handler_ptr;
thread_pm_handler_t thread_cpu_suspend_handler_ptr;
thread_pm_handler_t thread_cpu_resume_handler_ptr;
thread_pm_handler_t thread_system_off_handler_ptr;
thread_pm_handler_t thread_system_reset_handler_ptr;

static unsigned int thread_global_lock = SPINLOCK_UNLOCK;

static void init_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;
#define INIT_CANARY(name) \
	for (n = 0; n < ARRAY_SIZE(name); n++) { \
		uint32_t *start_canary = &GET_START_CANARY(name, n); \
		uint32_t *end_canary = &GET_END_CANARY(name, n); \
		\
		*start_canary = START_CANARY_VALUE; \
		*end_canary = END_CANARY_VALUE; \
		DMSG("#Stack canaries for %s[%zu] with top at %p\n", \
		     #name, n, (void *)(end_canary - 1)); \
		DMSG("watch *%p\n", (void *)end_canary); \
	}

	INIT_CANARY(stack_tmp);
	INIT_CANARY(stack_abt);
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	INIT_CANARY(stack_sm);
#endif
#ifndef CFG_WITH_PAGER
	INIT_CANARY(stack_thread);
#endif
#endif /*CFG_WITH_STACK_CANARIES*/
}

void thread_check_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;

	for (n = 0; n < ARRAY_SIZE(stack_tmp); n++) {
		assert(GET_START_CANARY(stack_tmp, n) == START_CANARY_VALUE);
		assert(GET_END_CANARY(stack_tmp, n) == END_CANARY_VALUE);
	}

	for (n = 0; n < ARRAY_SIZE(stack_abt); n++) {
		assert(GET_START_CANARY(stack_abt, n) == START_CANARY_VALUE);
		assert(GET_END_CANARY(stack_abt, n) == END_CANARY_VALUE);
	}
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	for (n = 0; n < ARRAY_SIZE(stack_sm); n++) {
		assert(GET_START_CANARY(stack_sm, n) == START_CANARY_VALUE);
		assert(GET_END_CANARY(stack_sm, n) == END_CANARY_VALUE);
	}
#endif
#ifndef CFG_WITH_PAGER
	for (n = 0; n < ARRAY_SIZE(stack_thread); n++) {
		assert(GET_START_CANARY(stack_thread, n) ==
		       START_CANARY_VALUE);
		assert(GET_END_CANARY(stack_thread, n) == END_CANARY_VALUE);
	}
#endif
#endif /*CFG_WITH_STACK_CANARIES*/
}

static void lock_global(void)
{
	cpu_spin_lock(&thread_global_lock);
}

static void unlock_global(void)
{
	cpu_spin_unlock(&thread_global_lock);
}

#ifdef ARM32
uint32_t thread_get_exceptions(void)
{
	uint32_t cpsr = read_cpsr();

	return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t cpsr = read_cpsr();

	cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
	cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);
	write_cpsr(cpsr);
}
#endif /*ARM32*/

#ifdef ARM64
uint32_t thread_get_exceptions(void)
{
	uint32_t daif = read_daif();

	return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t daif = read_daif();

	daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
	daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT);
	write_daif(daif);
}
#endif /*ARM64*/

uint32_t thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}

struct thread_core_local *thread_get_core_local(void)
{
	uint32_t cpu_id = get_core_pos();

	/*
	 * IRQs must be disabled before playing with core_local since
	 * we otherwise may be rescheduled to a different core in the
	 * middle of this function.
	 */
	assert(thread_get_exceptions() & THREAD_EXCP_IRQ);

	assert(cpu_id < CFG_TEE_CORE_NB_CORE);
	return &thread_core_local[cpu_id];
}

static bool have_one_active_thread(void)
{
	size_t n;

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_ACTIVE)
			return true;
	}

	return false;
}

static bool have_one_preempted_thread(void)
{
	size_t n;

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_SUSPENDED &&
		    (threads[n].flags & THREAD_FLAGS_EXIT_ON_IRQ))
			return true;
	}

	return false;
}

static void thread_lazy_save_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	thread_vfp_state.ns_saved = false;
	vfp_lazy_save_state_init(&thread_vfp_state.ns);
#endif /*CFG_WITH_VFP*/
}

static void thread_lazy_restore_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	assert(!thread_vfp_state.sec_lazy_saved && !thread_vfp_state.sec_saved);
	vfp_lazy_restore_state(&thread_vfp_state.ns, thread_vfp_state.ns_saved);
	thread_vfp_state.ns_saved = false;
#endif /*CFG_WITH_VFP*/
}

#ifdef ARM32
static void init_regs(struct thread_ctx *thread,
		      struct thread_smc_args *args)
{
	thread->regs.pc = (uint32_t)thread_std_smc_entry;

	/*
	 * Stdcalls start in SVC mode with IRQ and asynchronous abort
	 * masked and FIQ unmasked.
	 */
	thread->regs.cpsr = CPSR_MODE_SVC | CPSR_I | CPSR_A;
	/* Enable Thumb mode if the entry address has the Thumb bit set */
	if (thread->regs.pc & 1)
		thread->regs.cpsr |= CPSR_T;
	/* Reinitialize stack pointer */
	thread->regs.svc_sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in r0-r7 when thread is started.
	 */
	thread->regs.r0 = args->a0;
	thread->regs.r1 = args->a1;
	thread->regs.r2 = args->a2;
	thread->regs.r3 = args->a3;
	thread->regs.r4 = args->a4;
	thread->regs.r5 = args->a5;
	thread->regs.r6 = args->a6;
	thread->regs.r7 = args->a7;
}
#endif /*ARM32*/

#ifdef ARM64
static void init_regs(struct thread_ctx *thread,
		      struct thread_smc_args *args)
{
	thread->regs.pc = (uint64_t)thread_std_smc_entry;

	/*
	 * Stdcalls start in EL1 with IRQ and asynchronous abort
	 * masked and FIQ unmasked.
	 */
	thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
				    DAIFBIT_IRQ | DAIFBIT_ABT);
	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in x0-x7 when thread is started.
	 */
	thread->regs.x[0] = args->a0;
	thread->regs.x[1] = args->a1;
	thread->regs.x[2] = args->a2;
	thread->regs.x[3] = args->a3;
	thread->regs.x[4] = args->a4;
	thread->regs.x[5] = args->a5;
	thread->regs.x[6] = args->a6;
	thread->regs.x[7] = args->a7;
}
#endif /*ARM64*/

/*
 * Allocate a free thread context, initialize it from the SMC arguments and
 * jump to it. If no context is free, TEESMC_RETURN_EBUSY is returned in a0.
 */
static void thread_alloc_and_run(struct thread_smc_args *args)
{
	size_t n;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == -1);

	lock_global();

	if (!have_one_active_thread() && !have_one_preempted_thread()) {
		for (n = 0; n < CFG_NUM_THREADS; n++) {
			if (threads[n].state == THREAD_STATE_FREE) {
				threads[n].state = THREAD_STATE_ACTIVE;
				found_thread = true;
				break;
			}
		}
	}

	unlock_global();

	if (!found_thread) {
		args->a0 = TEESMC_RETURN_EBUSY;
		return;
	}

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, args);

	/* Save Hypervisor Client ID */
	threads[n].hyp_clnt_id = args->a7;

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}

#ifdef ARM32
static void copy_a0_to_a3(struct thread_ctx_regs *regs,
			  struct thread_smc_args *args)
{
	/*
	 * Update returned values from RPC, values will appear in
	 * r0-r3 when thread is resumed.
	 */
	regs->r0 = args->a0;
	regs->r1 = args->a1;
	regs->r2 = args->a2;
	regs->r3 = args->a3;
}
#endif /*ARM32*/

#ifdef ARM64
static void copy_a0_to_a3(struct thread_ctx_regs *regs,
			  struct thread_smc_args *args)
{
	/*
	 * Update returned values from RPC, values will appear in
	 * x0-x3 when thread is resumed.
	 */
	regs->x[0] = args->a0;
	regs->x[1] = args->a1;
	regs->x[2] = args->a2;
	regs->x[3] = args->a3;
}
#endif /*ARM64*/

/* Resume a previously suspended thread, identified by the id in args->a3 */
static void thread_resume_from_rpc(struct thread_smc_args *args)
{
	size_t n = args->a3; /* thread id */
	struct thread_core_local *l = thread_get_core_local();
	uint32_t rv = 0;

	assert(l->curr_thread == -1);

	lock_global();

	if (have_one_active_thread()) {
		rv = TEESMC_RETURN_EBUSY;
	} else if (n < CFG_NUM_THREADS &&
		   threads[n].state == THREAD_STATE_SUSPENDED &&
		   args->a7 == threads[n].hyp_clnt_id) {
		/*
		 * If there's one preempted thread it has to be the one
		 * we're resuming.
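		 * A preempted thread has THREAD_FLAGS_EXIT_ON_IRQ set; as
		 * long as such a thread exists, resuming any other thread
		 * is refused with TEESMC_RETURN_EBUSY.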
		 */
		if (have_one_preempted_thread()) {
			if (threads[n].flags & THREAD_FLAGS_EXIT_ON_IRQ) {
				threads[n].flags &= ~THREAD_FLAGS_EXIT_ON_IRQ;
				threads[n].state = THREAD_STATE_ACTIVE;
			} else {
				rv = TEESMC_RETURN_EBUSY;
			}
		} else {
			threads[n].state = THREAD_STATE_ACTIVE;
		}
	} else {
		rv = TEESMC_RETURN_ERESUME;
	}

	unlock_global();

	if (rv) {
		args->a0 = rv;
		return;
	}

	l->curr_thread = n;

	if (threads[n].have_user_map)
		core_mmu_set_user_map(&threads[n].user_map);

	/*
	 * A return from an RPC that requested service of an IRQ must not
	 * pick up parameters from the non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a3(&threads[n].regs, args);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}

void thread_handle_fast_smc(struct thread_smc_args *args)
{
	thread_check_canaries();
	thread_fast_smc_handler_ptr(args);
	/* Fast handlers must not unmask any exceptions */
	assert(thread_get_exceptions() == THREAD_EXCP_ALL);
}

void thread_handle_std_smc(struct thread_smc_args *args)
{
	thread_check_canaries();

	if (args->a0 == TEESMC32_CALL_RETURN_FROM_RPC)
		thread_resume_from_rpc(args);
	else
		thread_alloc_and_run(args);
}

void thread_handle_abort(uint32_t abort_type, struct thread_abort_regs *regs)
{
#ifdef CFG_WITH_VFP
	if (vfp_is_enabled()) {
		vfp_lazy_save_state_init(&thread_vfp_state.sec);
		thread_vfp_state.sec_lazy_saved = true;
	}
#endif

	thread_abort_handler_ptr(abort_type, regs);

#ifdef CFG_WITH_VFP
	assert(!vfp_is_enabled());
	if (thread_vfp_state.sec_lazy_saved) {
		vfp_lazy_restore_state(&thread_vfp_state.sec,
				       thread_vfp_state.sec_saved);
		thread_vfp_state.sec_saved = false;
		thread_vfp_state.sec_lazy_saved = false;
	}
#endif
}

void *thread_get_tmp_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (void *)l->tmp_stack_va_end;
}

#ifdef ARM64
vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);
	return threads[ct].kern_sp;
}
#endif /*ARM64*/

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);

	thread_lazy_restore_ns_vfp();

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = -1;

	unlock_global();
}

int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);

	thread_check_canaries();

	thread_lazy_restore_ns_vfp();

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.cpsr = cpsr;
	threads[ct].regs.pc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = -1;

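	/*
	 * All state belonging to the thread is now saved in its context
	 * and this core no longer has a current thread.
	 */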
	unlock_global();

	return ct;
}

#ifdef ARM32
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->tmp_stack_va_end = sp;
	thread_set_irq_sp(sp);
	thread_set_fiq_sp(sp);
}

static void set_abt_stack(struct thread_core_local *l __unused, vaddr_t sp)
{
	thread_set_abt_sp(sp);
}
#endif /*ARM32*/

#ifdef ARM64
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	/*
	 * We're already using the tmp stack when this function is called
	 * so there's no need to assign it to any stack pointer. However,
	 * we'll need to restore it at different times so store it here.
	 */
	l->tmp_stack_va_end = sp;
}

static void set_abt_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->abt_stack_va_end = sp;
}
#endif /*ARM64*/

bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
{
	if (thread_id >= CFG_NUM_THREADS)
		return false;
	if (threads[thread_id].state != THREAD_STATE_FREE)
		return false;

	threads[thread_id].stack_va_end = sp;
	return true;
}

uint32_t thread_get_id(void)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;
	int ct;

	l = thread_get_core_local();
	ct = l->curr_thread;
	assert((ct >= 0) && (ct < CFG_NUM_THREADS));

	thread_unmask_exceptions(exceptions);
	return ct;
}

static void init_handlers(const struct thread_handlers *handlers)
{
	thread_std_smc_handler_ptr = handlers->std_smc;
	thread_fast_smc_handler_ptr = handlers->fast_smc;
	thread_fiq_handler_ptr = handlers->fiq;
	thread_svc_handler_ptr = handlers->svc;
	thread_abort_handler_ptr = handlers->abort;
	thread_cpu_on_handler_ptr = handlers->cpu_on;
	thread_cpu_off_handler_ptr = handlers->cpu_off;
	thread_cpu_suspend_handler_ptr = handlers->cpu_suspend;
	thread_cpu_resume_handler_ptr = handlers->cpu_resume;
	thread_system_off_handler_ptr = handlers->system_off;
	thread_system_reset_handler_ptr = handlers->system_reset;
}

#ifdef CFG_WITH_PAGER
static void init_thread_stacks(void)
{
	size_t n;

	/*
	 * Allocate virtual memory for thread stacks.
	 */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		tee_mm_entry_t *mm;
		vaddr_t sp;

		/* Get unmapped page at bottom of stack */
		mm = tee_mm_alloc(&tee_mm_vcore, SMALL_PAGE_SIZE);
		TEE_ASSERT(mm);
		/* Claim any physical pages */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);

		/* Allocate the actual stack */
		mm = tee_mm_alloc(&tee_mm_vcore, STACK_THREAD_SIZE);
		TEE_ASSERT(mm);
		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
		if (!thread_init_stack(n, sp))
			panic();
		/* Claim any physical pages */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);
		/* Add the area to the pager */
		tee_pager_add_area(mm, TEE_PAGER_AREA_RW, NULL, NULL);
	}
}
#else
static void init_thread_stacks(void)
{
	size_t n;

	/* Assign the thread stacks */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (!thread_init_stack(n, GET_STACK(stack_thread[n])))
			panic();
	}
}
#endif /*CFG_WITH_PAGER*/

void thread_init_primary(const struct thread_handlers *handlers)
{
	/*
	 * The COMPILE_TIME_ASSERT only works in function context. These
	 * checks verify that the offsets used in assembly code match
	 * what's used in C code.
	 */
#ifdef ARM32
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r0) ==
			THREAD_SVC_REG_R0_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r1) ==
			THREAD_SVC_REG_R1_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r2) ==
			THREAD_SVC_REG_R2_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r3) ==
			THREAD_SVC_REG_R3_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r4) ==
			THREAD_SVC_REG_R4_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r5) ==
			THREAD_SVC_REG_R5_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r6) ==
			THREAD_SVC_REG_R6_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r7) ==
			THREAD_SVC_REG_R7_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, lr) ==
			THREAD_SVC_REG_LR_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, spsr) ==
			THREAD_SVC_REG_SPSR_OFFS);
#endif /*ARM32*/
#ifdef ARM64
	/* struct thread_abort_regs */
	COMPILE_TIME_ASSERT(offsetof(struct thread_abort_regs, x22) ==
			THREAD_ABT_REG_X_OFFS(22));
	COMPILE_TIME_ASSERT(offsetof(struct thread_abort_regs, elr) ==
			THREAD_ABT_REG_ELR_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_abort_regs, spsr) ==
			THREAD_ABT_REG_SPSR_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_abort_regs, sp_el0) ==
			THREAD_ABT_REG_SP_EL0_OFFS);
	COMPILE_TIME_ASSERT(sizeof(struct thread_abort_regs) ==
			THREAD_ABT_REGS_SIZE);

	/* struct thread_ctx */
	COMPILE_TIME_ASSERT(offsetof(struct thread_ctx, kern_sp) ==
			THREAD_CTX_KERN_SP_OFFSET);
	COMPILE_TIME_ASSERT(sizeof(struct thread_ctx) == THREAD_CTX_SIZE);

	/* struct thread_ctx_regs */
	COMPILE_TIME_ASSERT(offsetof(struct thread_ctx_regs, sp) ==
			THREAD_CTX_REGS_SP_OFFSET);
	COMPILE_TIME_ASSERT(offsetof(struct thread_ctx_regs, pc) ==
			THREAD_CTX_REGS_PC_OFFSET);
	COMPILE_TIME_ASSERT(offsetof(struct thread_ctx_regs, cpsr) ==
			THREAD_CTX_REGS_SPSR_OFFSET);
	COMPILE_TIME_ASSERT(offsetof(struct thread_ctx_regs, x[23]) ==
			THREAD_CTX_REGS_X_OFFSET(23));
	COMPILE_TIME_ASSERT(sizeof(struct thread_ctx_regs) ==
			THREAD_CTX_REGS_SIZE);

	/* struct thread_user_mode_rec */
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_user_mode_rec, exit_status0_ptr) ==
		THREAD_USER_MODE_REC_EXIT_STATUS0_PTR_OFFSET);
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_user_mode_rec, exit_status1_ptr) ==
		THREAD_USER_MODE_REC_EXIT_STATUS1_PTR_OFFSET);
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_user_mode_rec, x[1]) ==
		THREAD_USER_MODE_REC_X_OFFSET(20));
	COMPILE_TIME_ASSERT(sizeof(struct thread_user_mode_rec) ==
			THREAD_USER_MODE_REC_SIZE);

	/* struct thread_core_local */
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_core_local, tmp_stack_va_end) ==
		THREAD_CORE_LOCAL_TMP_STACK_VA_END_OFFSET);
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_core_local, curr_thread) ==
		THREAD_CORE_LOCAL_CURR_THREAD_OFFSET);
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_core_local, flags) ==
		THREAD_CORE_LOCAL_FLAGS_OFFSET);
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_core_local, abt_stack_va_end) ==
		THREAD_CORE_LOCAL_ABT_STACK_VA_END_OFFSET);
	COMPILE_TIME_ASSERT(
		offsetof(struct thread_core_local, x[3]) ==
		THREAD_CORE_LOCAL_X_OFFSET(3));
	COMPILE_TIME_ASSERT(sizeof(struct thread_core_local) ==
			THREAD_CORE_LOCAL_SIZE);

#endif /*ARM64*/

	init_handlers(handlers);

	/* Initialize canaries around the stacks */
	init_canaries();

	init_thread_stacks();
}

static void init_sec_mon(size_t __unused pos)
{
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	/* Initialize secure monitor */
	sm_init(GET_STACK(stack_sm[pos]));
	sm_set_entry_vector(thread_vector_table);
#endif
}

void thread_init_per_cpu(void)
{
	size_t pos = get_core_pos();
	struct thread_core_local *l = thread_get_core_local();

	init_sec_mon(pos);

	l->curr_thread = -1;
	set_tmp_stack(l, GET_STACK(stack_tmp[pos]));
	set_abt_stack(l, GET_STACK(stack_abt[pos]));

	thread_init_vbar();
}

void thread_set_tsd(void *tsd)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;
	int ct;

	l = thread_get_core_local();
	ct = l->curr_thread;

	assert(ct != -1);
	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].tsd = tsd;

	thread_unmask_exceptions(exceptions);
}

void *thread_get_tsd(void)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;
	int ct;
	void *tsd;

	l = thread_get_core_local();
	ct = l->curr_thread;

	if (ct == -1 || threads[ct].state != THREAD_STATE_ACTIVE)
		tsd = NULL;
	else
		tsd = threads[ct].tsd;

	thread_unmask_exceptions(exceptions);
	return tsd;
}

struct thread_ctx_regs *thread_get_ctx_regs(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != -1);
	return &threads[l->curr_thread].regs;
}

void thread_set_irq(bool enable)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (enable) {
		threads[l->curr_thread].flags |= THREAD_FLAGS_IRQ_ENABLE;
		thread_set_exceptions(exceptions & ~THREAD_EXCP_IRQ);
	} else {
		/*
		 * No need to disable IRQ here since it's already disabled
		 * above.
		 */
		threads[l->curr_thread].flags &= ~THREAD_FLAGS_IRQ_ENABLE;
	}
}

void thread_restore_irq(void)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (threads[l->curr_thread].flags & THREAD_FLAGS_IRQ_ENABLE)
		thread_set_exceptions(exceptions & ~THREAD_EXCP_IRQ);
}

#ifdef CFG_WITH_VFP
uint32_t thread_kernel_enable_vfp(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);

	assert(!vfp_is_enabled());

	if (!thread_vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thread_vfp_state.ns);
		thread_vfp_state.ns_saved = true;
	} else if (thread_vfp_state.sec_lazy_saved &&
		   !thread_vfp_state.sec_saved) {
		vfp_lazy_save_state_final(&thread_vfp_state.sec);
		thread_vfp_state.sec_saved = true;
	}

	vfp_enable();
	return exceptions;
}

void thread_kernel_disable_vfp(uint32_t state)
{
	uint32_t exceptions;

	assert(vfp_is_enabled());

	vfp_disable();
	exceptions = thread_get_exceptions();
	assert(exceptions & THREAD_EXCP_IRQ);
	exceptions &= ~THREAD_EXCP_IRQ;
	exceptions |= state & THREAD_EXCP_IRQ;
	thread_set_exceptions(exceptions);
}
#endif /*CFG_WITH_VFP*/

paddr_t thread_rpc_alloc_arg(size_t size)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		TEESMC_RETURN_RPC_ALLOC_ARG, size};

	thread_rpc(rpc_args);
	return rpc_args[1];
}

paddr_t thread_rpc_alloc_payload(size_t size)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		TEESMC_RETURN_RPC_ALLOC_PAYLOAD, size};

	thread_rpc(rpc_args);
	return rpc_args[1];
}

void thread_rpc_free_arg(paddr_t arg)
{
	if (arg) {
		uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
			TEESMC_RETURN_RPC_FREE_ARG, arg};

		thread_rpc(rpc_args);
	}
}

void thread_rpc_free_payload(paddr_t payload)
{
	if (payload) {
		uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
			TEESMC_RETURN_RPC_FREE_PAYLOAD, payload};

		thread_rpc(rpc_args);
	}
}

void thread_rpc_cmd(paddr_t arg)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {TEESMC_RETURN_RPC_CMD, arg};

	thread_rpc(rpc_args);
}

void thread_optee_rpc_alloc_payload(size_t size, paddr_t *payload,
				    paddr_t *cookie)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		TEESMC_RETURN_OPTEE_RPC_ALLOC_PAYLOAD, size};

	thread_rpc(rpc_args);
	if (payload)
		*payload = rpc_args[1];
	if (cookie)
		*cookie = rpc_args[2];
}

void thread_optee_rpc_free_payload(paddr_t cookie)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		TEESMC_RETURN_OPTEE_RPC_FREE_PAYLOAD, cookie};

	thread_rpc(rpc_args);
}