/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <platform_config.h>

#include <kernel/panic.h>
#include <kernel/thread.h>
#include <kernel/thread_defs.h>
#include "thread_private.h"
#include <sm/sm_defs.h>
#include <sm/sm.h>
#include <sm/teesmc.h>
#include <sm/teesmc_optee.h>
#include <arm.h>
#include <kernel/tz_proc_def.h>
#include <kernel/tz_proc.h>
#include <kernel/misc.h>
#include <mm/tee_mmu.h>
#include <mm/tee_mmu_defs.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <kernel/tee_ta_manager.h>
#include <util.h>
#include <trace.h>

#include <assert.h>

#ifdef ARM32
#define STACK_TMP_SIZE		1024
#define STACK_THREAD_SIZE	8192

#if TRACE_LEVEL > 0
#define STACK_ABT_SIZE		2048
#else
#define STACK_ABT_SIZE		1024
#endif

#endif /*ARM32*/

static struct thread_ctx threads[CFG_NUM_THREADS];

static struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE];

#ifdef CFG_WITH_VFP
struct thread_vfp_state {
	bool ns_saved;
	bool sec_saved;
	bool sec_lazy_saved;
	struct vfp_state ns;
	struct vfp_state sec;
};

static struct thread_vfp_state thread_vfp_state;
#endif /*CFG_WITH_VFP*/

#ifdef CFG_WITH_STACK_CANARIES
#ifdef ARM32
#define STACK_CANARY_SIZE	(4 * sizeof(uint32_t))
#endif
#define START_CANARY_VALUE	0xdededede
#define END_CANARY_VALUE	0xabababab
#define GET_START_CANARY(name, stack_num) name[stack_num][0]
#define GET_END_CANARY(name, stack_num) \
	name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1]
#else
#define STACK_CANARY_SIZE	0
#endif

#define DECLARE_STACK(name, num_stacks, stack_size) \
	static uint32_t name[num_stacks][ \
		ROUNDUP(stack_size + STACK_CANARY_SIZE, STACK_ALIGNMENT) / \
		sizeof(uint32_t)] \
		__attribute__((section(".nozi.stack"), \
			       aligned(STACK_ALIGNMENT)))

#define GET_STACK(stack) \
	((vaddr_t)(stack) + sizeof(stack) - STACK_CANARY_SIZE / 2)
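/*
 * Each stack declared with DECLARE_STACK() is an array of uint32_t placed
 * in the ".nozi.stack" section. With CFG_WITH_STACK_CANARIES the first word
 * (lowest address) holds START_CANARY_VALUE and the last word (highest
 * address) holds END_CANARY_VALUE, written by init_canaries(). GET_STACK()
 * returns an address STACK_CANARY_SIZE / 2 below the end of the array; this
 * is used as the initial (full-descending) stack pointer so normal stack use
 * stays clear of the canary words, and thread_check_canaries() asserts that
 * both words are still intact to catch overflow and underflow.
 */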
DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE);
DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE);
#if defined(CFG_WITH_SEC_MON)
DECLARE_STACK(stack_sm, CFG_TEE_CORE_NB_CORE, SM_STACK_SIZE);
#endif
#ifndef CFG_WITH_PAGER
DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE);
#endif

const vaddr_t stack_tmp_top[CFG_TEE_CORE_NB_CORE] = {
	GET_STACK(stack_tmp[0]),
#if CFG_TEE_CORE_NB_CORE > 1
	GET_STACK(stack_tmp[1]),
#endif
#if CFG_TEE_CORE_NB_CORE > 2
	GET_STACK(stack_tmp[2]),
#endif
#if CFG_TEE_CORE_NB_CORE > 3
	GET_STACK(stack_tmp[3]),
#endif
#if CFG_TEE_CORE_NB_CORE > 4
	GET_STACK(stack_tmp[4]),
#endif
#if CFG_TEE_CORE_NB_CORE > 5
	GET_STACK(stack_tmp[5]),
#endif
#if CFG_TEE_CORE_NB_CORE > 6
	GET_STACK(stack_tmp[6]),
#endif
#if CFG_TEE_CORE_NB_CORE > 7
	GET_STACK(stack_tmp[7]),
#endif
#if CFG_TEE_CORE_NB_CORE > 8
#error "Top of tmp stacks aren't defined for more than 8 CPUS"
#endif
};

thread_smc_handler_t thread_std_smc_handler_ptr;
static thread_smc_handler_t thread_fast_smc_handler_ptr;
thread_fiq_handler_t thread_fiq_handler_ptr;
thread_svc_handler_t thread_svc_handler_ptr;
static thread_abort_handler_t thread_abort_handler_ptr;
thread_pm_handler_t thread_cpu_on_handler_ptr;
thread_pm_handler_t thread_cpu_off_handler_ptr;
thread_pm_handler_t thread_cpu_suspend_handler_ptr;
thread_pm_handler_t thread_cpu_resume_handler_ptr;
thread_pm_handler_t thread_system_off_handler_ptr;
thread_pm_handler_t thread_system_reset_handler_ptr;

static unsigned int thread_global_lock = SPINLOCK_UNLOCK;

static void init_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;
#define INIT_CANARY(name)						\
	for (n = 0; n < ARRAY_SIZE(name); n++) {			\
		uint32_t *start_canary = &GET_START_CANARY(name, n);	\
		uint32_t *end_canary = &GET_END_CANARY(name, n);	\
									\
		*start_canary = START_CANARY_VALUE;			\
		*end_canary = END_CANARY_VALUE;				\
		DMSG("#Stack canaries for %s[%zu] with top at %p\n",	\
			#name, n, (void *)(end_canary - 1));		\
		DMSG("watch *%p\n", (void *)end_canary);		\
	}

	INIT_CANARY(stack_tmp);
	INIT_CANARY(stack_abt);
#ifdef CFG_WITH_SEC_MON
	INIT_CANARY(stack_sm);
#endif
#ifndef CFG_WITH_PAGER
	INIT_CANARY(stack_thread);
#endif
#endif /*CFG_WITH_STACK_CANARIES*/
}

void thread_check_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;

	for (n = 0; n < ARRAY_SIZE(stack_tmp); n++) {
		assert(GET_START_CANARY(stack_tmp, n) == START_CANARY_VALUE);
		assert(GET_END_CANARY(stack_tmp, n) == END_CANARY_VALUE);
	}

	for (n = 0; n < ARRAY_SIZE(stack_abt); n++) {
		assert(GET_START_CANARY(stack_abt, n) == START_CANARY_VALUE);
		assert(GET_END_CANARY(stack_abt, n) == END_CANARY_VALUE);
	}
#ifdef CFG_WITH_SEC_MON
	for (n = 0; n < ARRAY_SIZE(stack_sm); n++) {
		assert(GET_START_CANARY(stack_sm, n) == START_CANARY_VALUE);
		assert(GET_END_CANARY(stack_sm, n) == END_CANARY_VALUE);
	}
#endif
#ifndef CFG_WITH_PAGER
	for (n = 0; n < ARRAY_SIZE(stack_thread); n++) {
		assert(GET_START_CANARY(stack_thread, n) == START_CANARY_VALUE);
		assert(GET_END_CANARY(stack_thread, n) == END_CANARY_VALUE);
	}
#endif
#endif /*CFG_WITH_STACK_CANARIES*/
}

static void lock_global(void)
{
	cpu_spin_lock(&thread_global_lock);
}

static void unlock_global(void)
{
	cpu_spin_unlock(&thread_global_lock);
}
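/*
 * The helpers below read and modify the CPSR exception mask bits
 * (THREAD_EXCP_*). A minimal usage sketch, mirroring the pattern used
 * throughout this file:
 *
 *	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
 *
 *	... touch per-core or per-thread state with IRQ masked ...
 *
 *	thread_unmask_exceptions(exceptions);
 */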
#ifdef ARM32
uint32_t thread_get_exceptions(void)
{
	uint32_t cpsr = read_cpsr();

	return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t cpsr = read_cpsr();

	/* Clear the current mask before applying the requested one */
	cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
	cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);
	write_cpsr(cpsr);
}
#endif /*ARM32*/

uint32_t thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}

struct thread_core_local *thread_get_core_local(void)
{
	uint32_t cpu_id = get_core_pos();

	/*
	 * IRQs must be disabled before playing with core_local since
	 * we otherwise may be rescheduled to a different core in the
	 * middle of this function.
	 */
	assert(thread_get_exceptions() & THREAD_EXCP_IRQ);

	assert(cpu_id < CFG_TEE_CORE_NB_CORE);
	return &thread_core_local[cpu_id];
}

static bool have_one_active_thread(void)
{
	size_t n;

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_ACTIVE)
			return true;
	}

	return false;
}

static bool have_one_preempted_thread(void)
{
	size_t n;

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_SUSPENDED &&
		    (threads[n].flags & THREAD_FLAGS_EXIT_ON_IRQ))
			return true;
	}

	return false;
}

static void thread_lazy_save_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	thread_vfp_state.ns_saved = false;
	vfp_lazy_save_state_init(&thread_vfp_state.ns);
#endif /*CFG_WITH_VFP*/
}

static void thread_lazy_restore_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	assert(!thread_vfp_state.sec_lazy_saved && !thread_vfp_state.sec_saved);
	vfp_lazy_restore_state(&thread_vfp_state.ns, thread_vfp_state.ns_saved);
	thread_vfp_state.ns_saved = false;
#endif /*CFG_WITH_VFP*/
}

#ifdef ARM32
static void init_regs(struct thread_ctx *thread,
		struct thread_smc_args *args)
{
	thread->regs.pc = (uint32_t)thread_std_smc_entry;

	/*
	 * Stdcalls start in SVC mode with masked IRQ, masked asynchronous
	 * abort and unmasked FIQ.
	 */
	thread->regs.cpsr = CPSR_MODE_SVC | CPSR_I | CPSR_A;
	/* Enable thumb mode if it's a thumb instruction */
	if (thread->regs.pc & 1)
		thread->regs.cpsr |= CPSR_T;
	/* Reinitialize stack pointer */
	thread->regs.svc_sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in r0-r7 when thread is started.
	 */
	thread->regs.r0 = args->a0;
	thread->regs.r1 = args->a1;
	thread->regs.r2 = args->a2;
	thread->regs.r3 = args->a3;
	thread->regs.r4 = args->a4;
	thread->regs.r5 = args->a5;
	thread->regs.r6 = args->a6;
	thread->regs.r7 = args->a7;
}
#endif /*ARM32*/

static void thread_alloc_and_run(struct thread_smc_args *args)
{
	size_t n;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == -1);

	lock_global();

	if (!have_one_active_thread() && !have_one_preempted_thread()) {
		for (n = 0; n < CFG_NUM_THREADS; n++) {
			if (threads[n].state == THREAD_STATE_FREE) {
				threads[n].state = THREAD_STATE_ACTIVE;
				found_thread = true;
				break;
			}
		}
	}

	unlock_global();

	if (!found_thread) {
		args->a0 = TEESMC_RETURN_EBUSY;
		return;
	}

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, args);

	/* Save Hypervisor Client ID */
	threads[n].hyp_clnt_id = args->a7;

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}

#ifdef ARM32
static void copy_a0_to_a3(struct thread_ctx_regs *regs,
		struct thread_smc_args *args)
{
	/*
	 * Update returned values from RPC, values will appear in
	 * r0-r3 when thread is resumed.
	 */
	regs->r0 = args->a0;
	regs->r1 = args->a1;
	regs->r2 = args->a2;
	regs->r3 = args->a3;
}
#endif /*ARM32*/

static void thread_resume_from_rpc(struct thread_smc_args *args)
{
	size_t n = args->a3; /* thread id */
	struct thread_core_local *l = thread_get_core_local();
	uint32_t rv = 0;

	assert(l->curr_thread == -1);

	lock_global();

	if (have_one_active_thread()) {
		rv = TEESMC_RETURN_EBUSY;
	} else if (n < CFG_NUM_THREADS &&
		   threads[n].state == THREAD_STATE_SUSPENDED &&
		   args->a7 == threads[n].hyp_clnt_id) {
		/*
		 * If there's one preempted thread it has to be the one
		 * we're resuming.
		 */
		if (have_one_preempted_thread()) {
			if (threads[n].flags & THREAD_FLAGS_EXIT_ON_IRQ) {
				threads[n].flags &= ~THREAD_FLAGS_EXIT_ON_IRQ;
				threads[n].state = THREAD_STATE_ACTIVE;
			} else {
				rv = TEESMC_RETURN_EBUSY;
			}
		} else {
			threads[n].state = THREAD_STATE_ACTIVE;
		}
	} else {
		rv = TEESMC_RETURN_ERESUME;
	}

	unlock_global();

	if (rv) {
		args->a0 = rv;
		return;
	}

	l->curr_thread = n;

	if (threads[n].have_user_map)
		core_mmu_set_user_map(&threads[n].user_map);

	/*
	 * Return from RPC to request service of an IRQ must not
	 * get parameters from non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a3(&threads[n].regs, args);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}

void thread_handle_fast_smc(struct thread_smc_args *args)
{
	thread_check_canaries();
	thread_fast_smc_handler_ptr(args);
	/* Fast handlers must not unmask any exceptions */
	assert(thread_get_exceptions() == THREAD_EXCP_ALL);
}

void thread_handle_std_smc(struct thread_smc_args *args)
{
	thread_check_canaries();

	if (args->a0 == TEESMC32_CALL_RETURN_FROM_RPC)
		thread_resume_from_rpc(args);
	else
		thread_alloc_and_run(args);
}

void thread_handle_abort(uint32_t abort_type, struct thread_abort_regs *regs)
{
#ifdef CFG_WITH_VFP
	if (vfp_is_enabled()) {
		vfp_lazy_save_state_init(&thread_vfp_state.sec);
		thread_vfp_state.sec_lazy_saved = true;
	}
#endif

	thread_abort_handler_ptr(abort_type, regs);

#ifdef CFG_WITH_VFP
	assert(!vfp_is_enabled());
	if (thread_vfp_state.sec_lazy_saved) {
		vfp_lazy_restore_state(&thread_vfp_state.sec,
				       thread_vfp_state.sec_saved);
		thread_vfp_state.sec_saved = false;
		thread_vfp_state.sec_lazy_saved = false;
	}
#endif
}

void *thread_get_tmp_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (void *)l->tmp_stack_va_end;
}

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);

	thread_lazy_restore_ns_vfp();

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = -1;

	unlock_global();
}

int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);

	thread_check_canaries();

	thread_lazy_restore_ns_vfp();

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.cpsr = cpsr;
	threads[ct].regs.pc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = -1;

	unlock_global();

	return ct;
}
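/*
 * Each core uses two small private stacks besides the thread stacks: the
 * "tmp" stack, shared as initial stack for IRQ and FIQ mode and reported by
 * thread_get_tmp_sp(), and the abort stack used in abort mode. The setters
 * below register these per-core stacks during thread_init_per_cpu().
 */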
#ifdef ARM32
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->tmp_stack_va_end = sp;
	thread_set_irq_sp(sp);
	thread_set_fiq_sp(sp);
}

static void set_abt_stack(struct thread_core_local *l __unused, vaddr_t sp)
{
	thread_set_abt_sp(sp);
}
#endif /*ARM32*/

bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
{
	if (thread_id >= CFG_NUM_THREADS)
		return false;
	if (threads[thread_id].state != THREAD_STATE_FREE)
		return false;

	threads[thread_id].stack_va_end = sp;
	return true;
}

uint32_t thread_get_id(void)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;
	int ct;

	l = thread_get_core_local();
	ct = l->curr_thread;

	thread_unmask_exceptions(exceptions);
	return ct;
}

static void init_handlers(const struct thread_handlers *handlers)
{
	thread_std_smc_handler_ptr = handlers->std_smc;
	thread_fast_smc_handler_ptr = handlers->fast_smc;
	thread_fiq_handler_ptr = handlers->fiq;
	thread_svc_handler_ptr = handlers->svc;
	thread_abort_handler_ptr = handlers->abort;
	thread_cpu_on_handler_ptr = handlers->cpu_on;
	thread_cpu_off_handler_ptr = handlers->cpu_off;
	thread_cpu_suspend_handler_ptr = handlers->cpu_suspend;
	thread_cpu_resume_handler_ptr = handlers->cpu_resume;
	thread_system_off_handler_ptr = handlers->system_off;
	thread_system_reset_handler_ptr = handlers->system_reset;
}

#ifdef CFG_WITH_PAGER
static void init_thread_stacks(void)
{
	size_t n;

	/*
	 * Allocate virtual memory for thread stacks.
	 */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		tee_mm_entry_t *mm;
		vaddr_t sp;

		/* Get unmapped page at bottom of stack */
		mm = tee_mm_alloc(&tee_mm_vcore, SMALL_PAGE_SIZE);
		TEE_ASSERT(mm);
		/* Claim eventual physical page */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);

		/* Allocate the actual stack */
		mm = tee_mm_alloc(&tee_mm_vcore, STACK_THREAD_SIZE);
		TEE_ASSERT(mm);
		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
		if (!thread_init_stack(n, sp))
			panic();
		/* Claim eventual physical page */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);
		/* Add the area to the pager */
		tee_pager_add_area(mm, TEE_PAGER_AREA_RW, NULL, NULL);
	}
}
#else
static void init_thread_stacks(void)
{
	size_t n;

	/* Assign the thread stacks */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (!thread_init_stack(n, GET_STACK(stack_thread[n])))
			panic();
	}
}
#endif /*CFG_WITH_PAGER*/

void thread_init_primary(const struct thread_handlers *handlers)
{
	/*
	 * The COMPILE_TIME_ASSERT only works in function context. These
	 * checks verify that the offsets used in assembly code match
	 * what's used in C code.
	 */
#ifdef ARM32
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r0) ==
				THREAD_SVC_REG_R0_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r1) ==
				THREAD_SVC_REG_R1_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r2) ==
				THREAD_SVC_REG_R2_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r3) ==
				THREAD_SVC_REG_R3_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r4) ==
				THREAD_SVC_REG_R4_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r5) ==
				THREAD_SVC_REG_R5_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r6) ==
				THREAD_SVC_REG_R6_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, r7) ==
				THREAD_SVC_REG_R7_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, lr) ==
				THREAD_SVC_REG_LR_OFFS);
	COMPILE_TIME_ASSERT(offsetof(struct thread_svc_regs, spsr) ==
				THREAD_SVC_REG_SPSR_OFFS);
#endif /*ARM32*/

	init_handlers(handlers);

	/* Initialize canaries around the stacks */
	init_canaries();

	init_thread_stacks();
}

static void init_sec_mon(size_t __unused pos)
{
#if defined(CFG_WITH_SEC_MON)
	/* Initialize secure monitor */
	sm_init(GET_STACK(stack_sm[pos]));
	sm_set_entry_vector(thread_vector_table);
#endif /*CFG_WITH_SEC_MON*/
}

void thread_init_per_cpu(void)
{
	size_t pos = get_core_pos();
	struct thread_core_local *l = thread_get_core_local();

	init_sec_mon(pos);

	l->curr_thread = -1;
	set_tmp_stack(l, GET_STACK(stack_tmp[pos]));
	set_abt_stack(l, GET_STACK(stack_abt[pos]));

	thread_init_vbar();
}

void thread_set_tsd(void *tsd)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;
	int ct;

	l = thread_get_core_local();
	ct = l->curr_thread;

	assert(ct != -1);
	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].tsd = tsd;

	thread_unmask_exceptions(exceptions);
}

void *thread_get_tsd(void)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;
	int ct;
	void *tsd;

	l = thread_get_core_local();
	ct = l->curr_thread;

	if (ct == -1 || threads[ct].state != THREAD_STATE_ACTIVE)
		tsd = NULL;
	else
		tsd = threads[ct].tsd;

	thread_unmask_exceptions(exceptions);
	return tsd;
}

struct thread_ctx_regs *thread_get_ctx_regs(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != -1);
	return &threads[l->curr_thread].regs;
}

void thread_set_irq(bool enable)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (enable) {
		threads[l->curr_thread].flags |= THREAD_FLAGS_IRQ_ENABLE;
		thread_set_exceptions(exceptions & ~THREAD_EXCP_IRQ);
	} else {
		/*
		 * No need to disable IRQ here since it's already disabled
		 * above.
		 */
		threads[l->curr_thread].flags &= ~THREAD_FLAGS_IRQ_ENABLE;
	}
}

void thread_restore_irq(void)
{
	/* thread_get_core_local() requires IRQs to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (threads[l->curr_thread].flags & THREAD_FLAGS_IRQ_ENABLE)
		thread_set_exceptions(exceptions & ~THREAD_EXCP_IRQ);
}

#ifdef CFG_WITH_VFP
uint32_t thread_kernel_enable_vfp(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);

	assert(!vfp_is_enabled());

	if (!thread_vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thread_vfp_state.ns);
		thread_vfp_state.ns_saved = true;
	} else if (thread_vfp_state.sec_lazy_saved &&
		   !thread_vfp_state.sec_saved) {
		vfp_lazy_save_state_final(&thread_vfp_state.sec);
		thread_vfp_state.sec_saved = true;
	}

	vfp_enable();
	return exceptions;
}

void thread_kernel_disable_vfp(uint32_t state)
{
	uint32_t exceptions;

	assert(vfp_is_enabled());

	vfp_disable();
	exceptions = thread_get_exceptions();
	assert(exceptions & THREAD_EXCP_IRQ);
	exceptions &= ~THREAD_EXCP_IRQ;
	exceptions |= state & THREAD_EXCP_IRQ;
	thread_set_exceptions(exceptions);
}
#endif /*CFG_WITH_VFP*/

paddr_t thread_rpc_alloc_arg(size_t size)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		TEESMC_RETURN_RPC_ALLOC_ARG, size};

	thread_rpc(rpc_args);
	return rpc_args[1];
}

paddr_t thread_rpc_alloc_payload(size_t size)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		TEESMC_RETURN_RPC_ALLOC_PAYLOAD, size};

	thread_rpc(rpc_args);
	return rpc_args[1];
}

void thread_rpc_free_arg(paddr_t arg)
{
	if (arg) {
		uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
			TEESMC_RETURN_RPC_FREE_ARG, arg};

		thread_rpc(rpc_args);
	}
}

void thread_rpc_free_payload(paddr_t payload)
{
	if (payload) {
		uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
			TEESMC_RETURN_RPC_FREE_PAYLOAD, payload};

		thread_rpc(rpc_args);
	}
}

void thread_rpc_cmd(paddr_t arg)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {TEESMC_RETURN_RPC_CMD, arg};

	thread_rpc(rpc_args);
}

void thread_optee_rpc_alloc_payload(size_t size, paddr_t *payload,
				    paddr_t *cookie)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		TEESMC_RETURN_OPTEE_RPC_ALLOC_PAYLOAD, size};

	thread_rpc(rpc_args);
	if (payload)
		*payload = rpc_args[1];
	if (cookie)
		*cookie = rpc_args[2];
}

void thread_optee_rpc_free_payload(paddr_t cookie)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		TEESMC_RETURN_OPTEE_RPC_FREE_PAYLOAD, cookie};

	thread_rpc(rpc_args);
}
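/*
 * A minimal usage sketch of the RPC helpers above (an assumed caller, not
 * part of this file): a thread that needs normal-world assistance allocates
 * an argument buffer, asks normal world to process it and frees it again.
 * The buffer size and the way the buffer is filled in are placeholders.
 *
 *	paddr_t arg = thread_rpc_alloc_arg(arg_size);
 *
 *	if (arg) {
 *		... map the buffer and fill in the request ...
 *		thread_rpc_cmd(arg);
 *		... read back the result ...
 *		thread_rpc_free_arg(arg);
 *	}
 */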