/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <platform_config.h>

#include <kernel/panic.h>
#include <kernel/thread.h>
#include <kernel/thread_defs.h>
#include "thread_private.h"
#include <sm/sm_defs.h>
#include <sm/sm.h>
#include <sm/teesmc.h>
#include <sm/teesmc_optee.h>
#include <arm.h>
#include <kernel/tz_proc_def.h>
#include <kernel/tz_proc.h>
#include <kernel/misc.h>
#include <mm/tee_mmu.h>
#include <mm/tee_mmu_defs.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <kernel/tee_ta_manager.h>
#include <util.h>
#include <trace.h>

#include <assert.h>

#ifdef ARM32
#define STACK_TMP_SIZE		1024
#define STACK_THREAD_SIZE	8192

#if TRACE_LEVEL > 0
#define STACK_ABT_SIZE		2048
#else
#define STACK_ABT_SIZE		1024
#endif

#endif /*ARM32*/

static struct thread_ctx threads[CFG_NUM_THREADS];

static struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE];

#ifdef CFG_WITH_VFP
struct thread_vfp_state {
	bool ns_saved;
	bool sec_saved;
	bool sec_lazy_saved;
	struct vfp_state ns;
	struct vfp_state sec;
};

static struct thread_vfp_state thread_vfp_state;
#endif /*CFG_WITH_VFP*/

#ifdef CFG_WITH_STACK_CANARIES
#ifdef ARM32
#define STACK_CANARY_SIZE	(4 * sizeof(uint32_t))
#endif
#define START_CANARY_VALUE	0xdededede
#define END_CANARY_VALUE	0xabababab
#define GET_START_CANARY(name, stack_num) name[stack_num][0]
#define GET_END_CANARY(name, stack_num) \
	name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1]
#else
#define STACK_CANARY_SIZE	0
#endif

#define DECLARE_STACK(name, num_stacks, stack_size) \
	static uint32_t name[num_stacks][ \
		ROUNDUP(stack_size + STACK_CANARY_SIZE, STACK_ALIGNMENT) / \
		sizeof(uint32_t)] \
		__attribute__((section(".nozi.stack"), \
			       aligned(STACK_ALIGNMENT)))

#define GET_STACK(stack) \
	((vaddr_t)(stack) + sizeof(stack) - STACK_CANARY_SIZE / 2)

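/*
 * Layout note for the two macros above: init_canaries() below writes
 * START_CANARY_VALUE into the first 32-bit word and END_CANARY_VALUE into
 * the last 32-bit word of each stack declared with DECLARE_STACK().
 * GET_STACK() yields the initial (full descending) stack pointer,
 * STACK_CANARY_SIZE / 2 bytes below the end of the array, so the end canary
 * sits above the initial stack pointer. thread_check_canaries() asserts
 * that both canary values are still intact.
 */
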
DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE);
DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE);
#if defined(CFG_WITH_SEC_MON)
DECLARE_STACK(stack_sm, CFG_TEE_CORE_NB_CORE, SM_STACK_SIZE);
#endif
#ifndef CFG_WITH_PAGER
DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE);
#endif

const vaddr_t stack_tmp_top[CFG_TEE_CORE_NB_CORE] = {
	GET_STACK(stack_tmp[0]),
#if CFG_TEE_CORE_NB_CORE > 1
	GET_STACK(stack_tmp[1]),
#endif
#if CFG_TEE_CORE_NB_CORE > 2
	GET_STACK(stack_tmp[2]),
#endif
#if CFG_TEE_CORE_NB_CORE > 3
	GET_STACK(stack_tmp[3]),
#endif
#if CFG_TEE_CORE_NB_CORE > 4
	GET_STACK(stack_tmp[4]),
#endif
#if CFG_TEE_CORE_NB_CORE > 5
	GET_STACK(stack_tmp[5]),
#endif
#if CFG_TEE_CORE_NB_CORE > 6
	GET_STACK(stack_tmp[6]),
#endif
#if CFG_TEE_CORE_NB_CORE > 7
	GET_STACK(stack_tmp[7]),
#endif
#if CFG_TEE_CORE_NB_CORE > 8
#error "Top of tmp stacks aren't defined for more than 8 CPUS"
#endif
};

thread_smc_handler_t thread_std_smc_handler_ptr;
static thread_smc_handler_t thread_fast_smc_handler_ptr;
thread_fiq_handler_t thread_fiq_handler_ptr;
thread_svc_handler_t thread_svc_handler_ptr;
static thread_abort_handler_t thread_abort_handler_ptr;
thread_pm_handler_t thread_cpu_on_handler_ptr;
thread_pm_handler_t thread_cpu_off_handler_ptr;
thread_pm_handler_t thread_cpu_suspend_handler_ptr;
thread_pm_handler_t thread_cpu_resume_handler_ptr;
thread_pm_handler_t thread_system_off_handler_ptr;
thread_pm_handler_t thread_system_reset_handler_ptr;

static unsigned int thread_global_lock = SPINLOCK_UNLOCK;

static void init_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;
#define INIT_CANARY(name)						\
	for (n = 0; n < ARRAY_SIZE(name); n++) {			\
		uint32_t *start_canary = &GET_START_CANARY(name, n);	\
		uint32_t *end_canary = &GET_END_CANARY(name, n);	\
									\
		*start_canary = START_CANARY_VALUE;			\
		*end_canary = END_CANARY_VALUE;				\
		DMSG("#Stack canaries for %s[%zu] with top at %p\n",	\
			#name, n, (void *)(end_canary - 1));		\
		DMSG("watch *%p\n", (void *)end_canary);		\
	}

	INIT_CANARY(stack_tmp);
	INIT_CANARY(stack_abt);
#ifdef CFG_WITH_SEC_MON
	INIT_CANARY(stack_sm);
#endif
#ifndef CFG_WITH_PAGER
	INIT_CANARY(stack_thread);
#endif
#endif /*CFG_WITH_STACK_CANARIES*/
}

void thread_check_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;

	for (n = 0; n < ARRAY_SIZE(stack_tmp); n++) {
		assert(GET_START_CANARY(stack_tmp, n) == START_CANARY_VALUE);
		assert(GET_END_CANARY(stack_tmp, n) == END_CANARY_VALUE);
	}

	for (n = 0; n < ARRAY_SIZE(stack_abt); n++) {
		assert(GET_START_CANARY(stack_abt, n) == START_CANARY_VALUE);
		assert(GET_END_CANARY(stack_abt, n) == END_CANARY_VALUE);
	}
#ifdef CFG_WITH_SEC_MON
	for (n = 0; n < ARRAY_SIZE(stack_sm); n++) {
		assert(GET_START_CANARY(stack_sm, n) == START_CANARY_VALUE);
		assert(GET_END_CANARY(stack_sm, n) == END_CANARY_VALUE);
	}
#endif
#ifndef CFG_WITH_PAGER
	for (n = 0; n < ARRAY_SIZE(stack_thread); n++) {
		assert(GET_START_CANARY(stack_thread, n) ==
		       START_CANARY_VALUE);
		assert(GET_END_CANARY(stack_thread, n) == END_CANARY_VALUE);
	}
#endif
#endif /*CFG_WITH_STACK_CANARIES*/
}

static void lock_global(void)
{
	cpu_spin_lock(&thread_global_lock);
}

static void unlock_global(void)
{
	cpu_spin_unlock(&thread_global_lock);
}

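/*
 * The exception mask helpers below operate on the CPSR exception mask bits
 * (FIQ/IRQ/asynchronous abort). Code that needs IRQs masked around access
 * to per-core or per-thread state pairs them as in, for example,
 * thread_get_id() further down:
 *
 *	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
 *	...
 *	thread_unmask_exceptions(exceptions);
 */
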
#ifdef ARM32
uint32_t thread_get_exceptions(void)
{
	uint32_t cpsr = read_cpsr();

	return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t cpsr = read_cpsr();

	cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
	cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);
	write_cpsr(cpsr);
}
#endif /*ARM32*/

uint32_t thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}

struct thread_core_local *thread_get_core_local(void)
{
	uint32_t cpu_id = get_core_pos();

	/*
	 * IRQs must be disabled before playing with core_local since
	 * we otherwise may be rescheduled to a different core in the
	 * middle of this function.
	 */
	assert(thread_get_exceptions() & THREAD_EXCP_IRQ);

	assert(cpu_id < CFG_TEE_CORE_NB_CORE);
	return &thread_core_local[cpu_id];
}

static bool have_one_active_thread(void)
{
	size_t n;

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_ACTIVE)
			return true;
	}

	return false;
}

static bool have_one_preempted_thread(void)
{
	size_t n;

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_SUSPENDED &&
		    (threads[n].flags & THREAD_FLAGS_EXIT_ON_IRQ))
			return true;
	}

	return false;
}

static void thread_lazy_save_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	thread_vfp_state.ns_saved = false;
	vfp_lazy_save_state_init(&thread_vfp_state.ns);
#endif /*CFG_WITH_VFP*/
}

static void thread_lazy_restore_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	assert(!thread_vfp_state.sec_lazy_saved && !thread_vfp_state.sec_saved);
	vfp_lazy_restore_state(&thread_vfp_state.ns, thread_vfp_state.ns_saved);
	thread_vfp_state.ns_saved = false;
#endif /*CFG_WITH_VFP*/
}

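/*
 * Thread state transitions handled by the functions below:
 *
 *	THREAD_STATE_FREE      -> ACTIVE     in thread_alloc_and_run()
 *	THREAD_STATE_ACTIVE    -> SUSPENDED  in thread_state_suspend()
 *	THREAD_STATE_SUSPENDED -> ACTIVE     in thread_resume_from_rpc()
 *	THREAD_STATE_ACTIVE    -> FREE       in thread_state_free()
 *
 * At most one thread is active at a time (see have_one_active_thread()),
 * and all transitions are made with the global spinlock held.
 */
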
#ifdef ARM32
static void init_regs(struct thread_ctx *thread,
		struct thread_smc_args *args)
{
	thread->regs.pc = (uint32_t)thread_std_smc_entry;

	/*
	 * Stdcalls start in SVC mode with masked IRQ, masked asynchronous
	 * abort and unmasked FIQ.
	 */
	thread->regs.cpsr = CPSR_MODE_SVC | CPSR_I | CPSR_A;
	/* Enable thumb mode if it's a thumb instruction */
	if (thread->regs.pc & 1)
		thread->regs.cpsr |= CPSR_T;
	/* Reinitialize stack pointer */
	thread->regs.svc_sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in r0-r7 when the thread is started.
	 */
	thread->regs.r0 = args->a0;
	thread->regs.r1 = args->a1;
	thread->regs.r2 = args->a2;
	thread->regs.r3 = args->a3;
	thread->regs.r4 = args->a4;
	thread->regs.r5 = args->a5;
	thread->regs.r6 = args->a6;
	thread->regs.r7 = args->a7;
}
#endif /*ARM32*/

static void thread_alloc_and_run(struct thread_smc_args *args)
{
	size_t n;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == -1);

	lock_global();

	if (!have_one_active_thread() && !have_one_preempted_thread()) {
		for (n = 0; n < CFG_NUM_THREADS; n++) {
			if (threads[n].state == THREAD_STATE_FREE) {
				threads[n].state = THREAD_STATE_ACTIVE;
				found_thread = true;
				break;
			}
		}
	}

	unlock_global();

	if (!found_thread) {
		args->a0 = TEESMC_RETURN_EBUSY;
		return;
	}

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, args);

	/* Save Hypervisor Client ID */
	threads[n].hyp_clnt_id = args->a7;

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}

#ifdef ARM32
static void copy_a0_to_a3(struct thread_ctx_regs *regs,
		struct thread_smc_args *args)
{
	/*
	 * Update returned values from RPC, values will appear in
	 * r0-r3 when the thread is resumed.
	 */
	regs->r0 = args->a0;
	regs->r1 = args->a1;
	regs->r2 = args->a2;
	regs->r3 = args->a3;
}
#endif /*ARM32*/

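/*
 * Resume a thread that was suspended while doing an RPC or to let the
 * normal world service an IRQ. The normal world passes the thread id in a3
 * and its hypervisor client id in a7; both are checked against the
 * suspended thread. TEESMC_RETURN_EBUSY is returned if another thread is
 * already active, TEESMC_RETURN_ERESUME if the requested thread can't be
 * resumed.
 */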
static void thread_resume_from_rpc(struct thread_smc_args *args)
{
	size_t n = args->a3; /* thread id */
	struct thread_core_local *l = thread_get_core_local();
	uint32_t rv = 0;

	assert(l->curr_thread == -1);

	lock_global();

	if (have_one_active_thread()) {
		rv = TEESMC_RETURN_EBUSY;
	} else if (n < CFG_NUM_THREADS &&
		   threads[n].state == THREAD_STATE_SUSPENDED &&
		   args->a7 == threads[n].hyp_clnt_id) {
		/*
		 * If there's one preempted thread it has to be the one
		 * we're resuming.
		 */
		if (have_one_preempted_thread()) {
			if (threads[n].flags & THREAD_FLAGS_EXIT_ON_IRQ) {
				threads[n].flags &= ~THREAD_FLAGS_EXIT_ON_IRQ;
				threads[n].state = THREAD_STATE_ACTIVE;
			} else {
				rv = TEESMC_RETURN_EBUSY;
			}
		} else {
			threads[n].state = THREAD_STATE_ACTIVE;
		}
	} else {
		rv = TEESMC_RETURN_ERESUME;
	}

	unlock_global();

	if (rv) {
		args->a0 = rv;
		return;
	}

	l->curr_thread = n;

	if (threads[n].have_user_map)
		core_mmu_set_user_map(&threads[n].user_map);

	/*
	 * A return from an RPC requesting IRQ service must not pick up
	 * parameters from the non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a3(&threads[n].regs, args);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}

void thread_handle_fast_smc(struct thread_smc_args *args)
{
	thread_check_canaries();
	thread_fast_smc_handler_ptr(args);
	/* Fast handlers must not unmask any exceptions */
	assert(thread_get_exceptions() == THREAD_EXCP_ALL);
}

void thread_handle_std_smc(struct thread_smc_args *args)
{
	thread_check_canaries();

	if (args->a0 == TEESMC32_CALL_RETURN_FROM_RPC)
		thread_resume_from_rpc(args);
	else
		thread_alloc_and_run(args);
}

void thread_handle_abort(uint32_t abort_type, struct thread_abort_regs *regs)
{
#ifdef CFG_WITH_VFP
	if (vfp_is_enabled()) {
		vfp_lazy_save_state_init(&thread_vfp_state.sec);
		thread_vfp_state.sec_lazy_saved = true;
	}
#endif

	thread_abort_handler_ptr(abort_type, regs);

#ifdef CFG_WITH_VFP
	assert(!vfp_is_enabled());
	if (thread_vfp_state.sec_lazy_saved) {
		vfp_lazy_restore_state(&thread_vfp_state.sec,
				       thread_vfp_state.sec_saved);
		thread_vfp_state.sec_saved = false;
		thread_vfp_state.sec_lazy_saved = false;
	}
#endif
}

void *thread_get_tmp_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (void *)l->tmp_stack_va_end;
}

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);

	thread_lazy_restore_ns_vfp();

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = -1;

	unlock_global();
}

int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);

	thread_check_canaries();

	thread_lazy_restore_ns_vfp();

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.cpsr = cpsr;
	threads[ct].regs.pc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = -1;

	unlock_global();

	return ct;
}

#ifdef ARM32
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->tmp_stack_va_end = sp;
	thread_set_irq_sp(sp);
	thread_set_fiq_sp(sp);
}

static void set_abt_stack(struct thread_core_local *l __unused, vaddr_t sp)
{
	thread_set_abt_sp(sp);
}
#endif /*ARM32*/

bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
{
	if (thread_id >= CFG_NUM_THREADS)
		return false;
	if (threads[thread_id].state != THREAD_STATE_FREE)
		return false;

	threads[thread_id].stack_va_end = sp;
	return true;
}

uint32_t thread_get_id(void)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;
	int ct;

	l = thread_get_core_local();
	ct = l->curr_thread;
	assert((ct >= 0) && (ct < CFG_NUM_THREADS));

	thread_unmask_exceptions(exceptions);
	return ct;
}

static void init_handlers(const struct thread_handlers *handlers)
{
	thread_std_smc_handler_ptr = handlers->std_smc;
	thread_fast_smc_handler_ptr = handlers->fast_smc;
	thread_fiq_handler_ptr = handlers->fiq;
	thread_svc_handler_ptr = handlers->svc;
	thread_abort_handler_ptr = handlers->abort;
	thread_cpu_on_handler_ptr = handlers->cpu_on;
	thread_cpu_off_handler_ptr = handlers->cpu_off;
	thread_cpu_suspend_handler_ptr = handlers->cpu_suspend;
	thread_cpu_resume_handler_ptr = handlers->cpu_resume;
	thread_system_off_handler_ptr = handlers->system_off;
	thread_system_reset_handler_ptr = handlers->system_reset;
}

#ifdef CFG_WITH_PAGER
static void init_thread_stacks(void)
{
	size_t n;

	/*
	 * Allocate virtual memory for thread stacks.
	 */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		tee_mm_entry_t *mm;
		vaddr_t sp;

		/* Get unmapped page at bottom of stack */
		mm = tee_mm_alloc(&tee_mm_vcore, SMALL_PAGE_SIZE);
		TEE_ASSERT(mm);
		/* Claim eventual physical page */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);

		/* Allocate the actual stack */
		mm = tee_mm_alloc(&tee_mm_vcore, STACK_THREAD_SIZE);
		TEE_ASSERT(mm);
		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
		if (!thread_init_stack(n, sp))
			panic();
		/* Claim eventual physical page */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);
		/* Add the area to the pager */
		tee_pager_add_area(mm, TEE_PAGER_AREA_RW, NULL, NULL);
	}
}
#else
static void init_thread_stacks(void)
{
	size_t n;

	/* Assign the thread stacks */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (!thread_init_stack(n, GET_STACK(stack_thread[n])))
			panic();
	}
}
#endif /*CFG_WITH_PAGER*/

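/*
 * Note on initialization order (implied by the functions below):
 * thread_init_primary() is expected to run once during boot to register the
 * handlers and set up stack canaries and thread stacks, and
 * thread_init_per_cpu() is then run on each core to set up the secure
 * monitor, the per-core temporary and abort stacks and the exception
 * vector (thread_init_vbar()).
 */
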
void thread_init_primary(const struct thread_handlers *handlers)
{
	/*
	 * The COMPILE_TIME_ASSERT only works in function context. These
	 * checks verify that the offsets used in assembly code match
	 * what's used in C code.
	 */
#ifdef ARM32
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r0) ==
				THREAD_SVC_REG_R0_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r1) ==
				THREAD_SVC_REG_R1_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r2) ==
				THREAD_SVC_REG_R2_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r3) ==
				THREAD_SVC_REG_R3_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r4) ==
				THREAD_SVC_REG_R4_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r5) ==
				THREAD_SVC_REG_R5_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r6) ==
				THREAD_SVC_REG_R6_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r7) ==
				THREAD_SVC_REG_R7_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, lr) ==
				THREAD_SVC_REG_LR_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, spsr) ==
				THREAD_SVC_REG_SPSR_OFFS);
#endif /*ARM32*/

	init_handlers(handlers);

	/* Initialize canaries around the stacks */
	init_canaries();

	init_thread_stacks();
}

static void init_sec_mon(size_t __unused pos)
{
#if defined(CFG_WITH_SEC_MON)
	/* Initialize secure monitor */
	sm_init(GET_STACK(stack_sm[pos]));
	sm_set_entry_vector(thread_vector_table);
#endif /*CFG_WITH_SEC_MON*/
}

void thread_init_per_cpu(void)
{
	size_t pos = get_core_pos();
	struct thread_core_local *l = thread_get_core_local();

	init_sec_mon(pos);

	l->curr_thread = -1;
	set_tmp_stack(l, GET_STACK(stack_tmp[pos]));
	set_abt_stack(l, GET_STACK(stack_abt[pos]));

	thread_init_vbar();
}

void thread_set_tsd(void *tsd)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;
	int ct;

	l = thread_get_core_local();
	ct = l->curr_thread;

	assert(ct != -1);
	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].tsd = tsd;

	thread_unmask_exceptions(exceptions);
}

void *thread_get_tsd(void)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;
	int ct;
	void *tsd;

	l = thread_get_core_local();
	ct = l->curr_thread;

	if (ct == -1 || threads[ct].state != THREAD_STATE_ACTIVE)
		tsd = NULL;
	else
		tsd = threads[ct].tsd;

	thread_unmask_exceptions(exceptions);
	return tsd;
}

struct thread_ctx_regs *thread_get_ctx_regs(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != -1);
	return &threads[l->curr_thread].regs;
}

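/*
 * thread_set_irq() and thread_restore_irq() below maintain the per-thread
 * THREAD_FLAGS_IRQ_ENABLE flag: it records whether the current thread wants
 * IRQ exceptions unmasked while it runs, and thread_restore_irq() only
 * unmasks IRQ again if the flag is set.
 */
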
void thread_set_irq(bool enable)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (enable) {
		threads[l->curr_thread].flags |= THREAD_FLAGS_IRQ_ENABLE;
		thread_set_exceptions(exceptions & ~THREAD_EXCP_IRQ);
	} else {
		/*
		 * No need to disable IRQ here since it's already disabled
		 * above.
		 */
		threads[l->curr_thread].flags &= ~THREAD_FLAGS_IRQ_ENABLE;
	}
}

void thread_restore_irq(void)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (threads[l->curr_thread].flags & THREAD_FLAGS_IRQ_ENABLE)
		thread_set_exceptions(exceptions & ~THREAD_EXCP_IRQ);
}

#ifdef CFG_WITH_VFP
uint32_t thread_kernel_enable_vfp(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);

	assert(!vfp_is_enabled());

	if (!thread_vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thread_vfp_state.ns);
		thread_vfp_state.ns_saved = true;
	} else if (thread_vfp_state.sec_lazy_saved &&
		   !thread_vfp_state.sec_saved) {
		vfp_lazy_save_state_final(&thread_vfp_state.sec);
		thread_vfp_state.sec_saved = true;
	}

	vfp_enable();
	return exceptions;
}

void thread_kernel_disable_vfp(uint32_t state)
{
	uint32_t exceptions;

	assert(vfp_is_enabled());

	vfp_disable();
	exceptions = thread_get_exceptions();
	assert(exceptions & THREAD_EXCP_IRQ);
	exceptions &= ~THREAD_EXCP_IRQ;
	exceptions |= state & THREAD_EXCP_IRQ;
	thread_set_exceptions(exceptions);
}
#endif /*CFG_WITH_VFP*/

paddr_t thread_rpc_alloc_arg(size_t size)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		TEESMC_RETURN_RPC_ALLOC_ARG, size};

	thread_rpc(rpc_args);
	return rpc_args[1];
}

paddr_t thread_rpc_alloc_payload(size_t size)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		TEESMC_RETURN_RPC_ALLOC_PAYLOAD, size};

	thread_rpc(rpc_args);
	return rpc_args[1];
}

void thread_rpc_free_arg(paddr_t arg)
{
	if (arg) {
		uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
			TEESMC_RETURN_RPC_FREE_ARG, arg};

		thread_rpc(rpc_args);
	}
}

void thread_rpc_free_payload(paddr_t payload)
{
	if (payload) {
		uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
			TEESMC_RETURN_RPC_FREE_PAYLOAD, payload};

		thread_rpc(rpc_args);
	}
}

void thread_rpc_cmd(paddr_t arg)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {TEESMC_RETURN_RPC_CMD, arg};

	thread_rpc(rpc_args);
}

void thread_optee_rpc_alloc_payload(size_t size, paddr_t *payload,
				    paddr_t *cookie)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		TEESMC_RETURN_OPTEE_RPC_ALLOC_PAYLOAD, size};

	thread_rpc(rpc_args);
	if (payload)
		*payload = rpc_args[1];
	if (cookie)
		*cookie = rpc_args[2];
}

void thread_optee_rpc_free_payload(paddr_t cookie)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		TEESMC_RETURN_OPTEE_RPC_FREE_PAYLOAD, cookie};

	thread_rpc(rpc_args);
}
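
/*
 * Illustrative use of the RPC helpers above (a sketch, not code from this
 * file): callers that need a buffer shared with the normal world typically
 * pair allocation and freeing, e.g.:
 *
 *	paddr_t pa = thread_rpc_alloc_payload(size);
 *
 *	if (pa) {
 *		... fill the buffer and issue thread_rpc_cmd() ...
 *		thread_rpc_free_payload(pa);
 *	}
 */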