/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright 2022-2023 NXP
 */

#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu.h>
#include <riscv.h>
#include <riscv_macros.S>

/* Compute the address of threads[curr_thread] into \res, clobbering \tmp0 */
.macro get_thread_ctx res, tmp0
	lw	\tmp0, THREAD_CORE_LOCAL_CURR_THREAD(tp)
	la	\res, threads
1:
	beqz	\tmp0, 2f
	addi	\res, \res, THREAD_CTX_SIZE
	addi	\tmp0, \tmp0, -1
	bnez	\tmp0, 1b
2:
.endm

.macro save_regs, mode
	addi	sp, sp, -THREAD_TRAP_REGS_SIZE
.if \mode == TRAP_MODE_USER

	/* Save user thread pointer and load kernel thread pointer */
	store_xregs	sp, THREAD_TRAP_REG_TP, REG_TP
	load_xregs	sp, (THREAD_TRAP_REGS_SIZE - RISCV_XLEN_BYTES), REG_TP

	store_xregs	sp, THREAD_TRAP_REG_GP, REG_GP

	/*
	 * Set the scratch register to 0 so that in case of a recursive
	 * exception thread_trap_vect() knows that it was raised from the
	 * kernel.
	 */
	csrrw	gp, CSR_XSCRATCH, zero
	store_xregs	sp, THREAD_TRAP_REG_SP, REG_GP
.option push
.option norelax
	la	gp, __global_pointer$
.option pop
.endif
	store_xregs	sp, THREAD_TRAP_REG_T3, REG_T3, REG_T6
	store_xregs	sp, THREAD_TRAP_REG_T0, REG_T0, REG_T2
	store_xregs	sp, THREAD_TRAP_REG_A0, REG_A0, REG_A7
	store_xregs	sp, THREAD_TRAP_REG_RA, REG_RA
#if defined(CFG_UNWIND)
	/* To unwind the stack we need s0, which is the frame pointer. */
	store_xregs	sp, THREAD_TRAP_REG_S0, REG_S0
#endif

	csrr	t0, CSR_XSTATUS
	store_xregs	sp, THREAD_TRAP_REG_STATUS, REG_T0

	csrr	a0, CSR_XCAUSE
	csrr	a1, CSR_XEPC

	store_xregs	sp, THREAD_TRAP_REG_EPC, REG_A1

	mv	a2, sp

	/*
	 * a0 = cause
	 * a1 = epc
	 * a2 = sp
	 * a3 = user
	 * thread_trap_handler(cause, epc, sp, user)
	 */
.endm

.macro restore_regs, mode
	load_xregs	sp, THREAD_TRAP_REG_EPC, REG_T0
	csrw	CSR_XEPC, t0

	load_xregs	sp, THREAD_TRAP_REG_STATUS, REG_T0
	csrw	CSR_XSTATUS, t0

	load_xregs	sp, THREAD_TRAP_REG_RA, REG_RA
	load_xregs	sp, THREAD_TRAP_REG_A0, REG_A0, REG_A7
	load_xregs	sp, THREAD_TRAP_REG_T0, REG_T0, REG_T2
	load_xregs	sp, THREAD_TRAP_REG_T3, REG_T3, REG_T6
#if defined(CFG_UNWIND)
	/* To unwind the stack we need s0, which is the frame pointer. */
	load_xregs	sp, THREAD_TRAP_REG_S0, REG_S0
#endif

.if \mode == TRAP_MODE_USER
	addi	gp, sp, THREAD_TRAP_REGS_SIZE

	store_xregs	gp, REGOFF(-1), REG_TP
	csrw	CSR_XSCRATCH, gp

	load_xregs	sp, THREAD_TRAP_REG_TP, REG_TP
	load_xregs	sp, THREAD_TRAP_REG_GP, REG_GP
	load_xregs	sp, THREAD_TRAP_REG_SP, REG_SP

.else
	addi	sp, sp, THREAD_TRAP_REGS_SIZE
.endif
.endm
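
/*
 * Convention implemented by the macros above and by thread_trap_vect()
 * below: while executing in the kernel xSCRATCH is zero; while executing
 * in user mode xSCRATCH holds the kernel stack pointer, with the kernel
 * thread pointer stored in the XLEN-sized slot just below it at
 * REGOFF(-1). This is how the trap vector tells which mode a trap came
 * from and how it finds a valid kernel stack and tp again.
 */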

/* size_t __get_core_pos(void); */
FUNC __get_core_pos , : , .identity_map
	lw	a0, THREAD_CORE_LOCAL_HART_ID(tp)
	ret
END_FUNC __get_core_pos

FUNC thread_trap_vect , :
	csrrw	sp, CSR_XSCRATCH, sp
	bnez	sp, 0f
	csrrw	sp, CSR_XSCRATCH, sp
	j	trap_from_kernel
0:
	j	trap_from_user
thread_trap_vect_end:
END_FUNC thread_trap_vect

LOCAL_FUNC trap_from_kernel, :
	save_regs TRAP_MODE_KERNEL
	li	a3, 0
	jal	thread_trap_handler
	restore_regs TRAP_MODE_KERNEL
	XRET
END_FUNC trap_from_kernel

LOCAL_FUNC trap_from_user, :
	save_regs TRAP_MODE_USER
	li	a3, 1
	jal	thread_trap_handler
	restore_regs TRAP_MODE_USER
	XRET
END_FUNC trap_from_user

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *				uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :

	/* Store the exit status */
	load_xregs	sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, REG_A3, REG_A5
	sw	a1, (a4)
	sw	a2, (a5)

	/* Save user callee regs */
	store_xregs	a3, THREAD_CTX_REG_S0, REG_S0, REG_S1
	store_xregs	a3, THREAD_CTX_REG_S2, REG_S2, REG_S11
	store_xregs	a3, THREAD_CTX_REG_SP, REG_SP, REG_TP

	/* Restore kernel callee regs */
	mv	a1, sp

	load_xregs	a1, THREAD_USER_MODE_REC_X1, REG_RA, REG_TP
	load_xregs	a1, THREAD_USER_MODE_REC_X8, REG_S0, REG_S1
	load_xregs	a1, THREAD_USER_MODE_REC_X18, REG_S2, REG_S11

	add	sp, sp, THREAD_USER_MODE_REC_SIZE

	/*
	 * Zeroize xSCRATCH to indicate to thread_trap_vect()
	 * that we are executing in the kernel.
	 */
	csrw	CSR_XSCRATCH, zero

	/* Return from the call of thread_enter_user_mode() */
	ret
END_FUNC thread_unwind_user_mode

/*
 * void thread_exit_user_mode(unsigned long a0, unsigned long a1,
 *			      unsigned long a2, unsigned long a3,
 *			      unsigned long sp, unsigned long pc,
 *			      unsigned long status);
 */
FUNC thread_exit_user_mode , :
	/* Set kernel stack pointer */
	mv	sp, a4

	/* Set xSTATUS */
	csrw	CSR_XSTATUS, a6

	/* Set return address to thread_unwind_user_mode() */
	mv	ra, a5
	ret
END_FUNC thread_exit_user_mode
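
/*
 * __thread_enter_user_mode() below and thread_unwind_user_mode() above
 * work as a pair: the former pushes a struct thread_user_mode_rec with the
 * kernel callee-saved registers onto the kernel stack before dropping to
 * user mode, the latter pops it again and returns as if from the original
 * call to thread_enter_user_mode(). thread_exit_user_mode() is the glue
 * used when leaving user mode: it switches back to the saved kernel stack
 * pointer and "returns" to the supplied pc, normally
 * thread_unwind_user_mode(), with a0-a3 passed through unchanged.
 */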

/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 */
FUNC __thread_enter_user_mode , :
	/*
	 * Create and fill in the struct thread_user_mode_rec
	 */
	addi	sp, sp, -THREAD_USER_MODE_REC_SIZE
	store_xregs	sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, REG_A0, REG_A2
	store_xregs	sp, THREAD_USER_MODE_REC_X1, REG_RA, REG_TP
	store_xregs	sp, THREAD_USER_MODE_REC_X8, REG_S0, REG_S1
	store_xregs	sp, THREAD_USER_MODE_REC_X18, REG_S2, REG_S11

	/*
	 * Save the kernel stack pointer in the thread context
	 */

	/* Get pointer to current thread context */
	get_thread_ctx s0, s1

	/*
	 * Save the kernel stack pointer to ensure that
	 * thread_exit_user_mode() uses the correct stack pointer.
	 */

	store_xregs	s0, THREAD_CTX_KERN_SP, REG_SP
	/*
	 * Save the kernel stack pointer in xSCRATCH to ensure that
	 * thread_trap_vect() uses the correct stack pointer.
	 */
	csrw	CSR_XSCRATCH, sp

	/*
	 * Save the kernel thread pointer just below the kernel stack pointer
	 * to ensure that thread_trap_vect() uses the correct tp when traps
	 * come from user mode.
	 */
	store_xregs	sp, REGOFF(-1), REG_TP

	/* Set user status */
	load_xregs	a0, THREAD_CTX_REG_STATUS, REG_S0
	csrw	CSR_XSTATUS, s0

	/*
	 * Save the values for a1 and a2 in struct thread_core_local to be
	 * restored later just before the xRET.
	 */
	store_xregs	tp, THREAD_CORE_LOCAL_X10, REG_A1, REG_A2

	/* Load the rest of the general purpose registers */
	load_xregs	a0, THREAD_CTX_REG_RA, REG_RA, REG_TP
	load_xregs	a0, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs	a0, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs	a0, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs	a0, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs	a0, THREAD_CTX_REG_A0, REG_A0, REG_A7

	/* Set exception program counter */
	csrw	CSR_XEPC, ra

	/* Jump into user mode */
	XRET
END_FUNC __thread_enter_user_mode

/*
 * To be implemented based on the transport method used to communicate
 * between the untrusted domain and the trusted domain. It could be
 * SBI/ECALL-based, calling into a security monitor running in M-mode and
 * panicking if the call returns, or messaging-based across domains, where
 * we return to a messaging callback which parses and handles messages.
 */
LOCAL_FUNC thread_return_from_nsec_call , :
	/* Implement */
	j	.
END_FUNC thread_return_from_nsec_call
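
/*
 * Illustrative sketch only: with the SBI/ECALL transport mentioned above,
 * thread_return_from_nsec_call() could look roughly like the commented-out
 * sequence below. The extension and function IDs are hypothetical
 * placeholders, not constants defined in this code base; a0-a4 already
 * hold the values set up by the callers.
 *
 *	li	a7, SBI_EXT_TEE_HYPOTHETICAL	(SBI extension ID in a7)
 *	li	a6, 0				(SBI function ID in a6)
 *	ecall					(trap to the M-mode monitor)
 *	j	.				(panic/hang if it ever returns)
 */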

FUNC thread_std_smc_entry , :
	jal	__thread_std_smc_entry

	/* Save return value */
	mv	s0, a0

	/* Disable all interrupts */
	csrc	CSR_XSTATUS, CSR_XSTATUS_IE

	/* Switch to temporary stack */
	jal	thread_get_tmp_sp
	mv	sp, a0

	/*
	 * We are returning from thread_alloc_and_run(),
	 * so set the thread state as free.
	 */
	jal	thread_state_free

	/* Restore __thread_std_smc_entry() return value */
	mv	a1, s0
	li	a2, 0
	li	a3, 0
	li	a4, 0
	li	a0, TEESMC_OPTEED_RETURN_CALL_DONE

	/* Return to untrusted domain */
	jal	thread_return_from_nsec_call
END_FUNC thread_std_smc_entry

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
	/*
	 * Restore all registers assuming that GP
	 * and TP were not changed.
	 */
	load_xregs	a0, THREAD_CTX_REG_RA, REG_RA, REG_SP
	load_xregs	a0, THREAD_CTX_REG_T0, REG_T0, REG_T2
	load_xregs	a0, THREAD_CTX_REG_S0, REG_S0, REG_S1
	load_xregs	a0, THREAD_CTX_REG_S2, REG_S2, REG_S11
	load_xregs	a0, THREAD_CTX_REG_T3, REG_T3, REG_T6
	load_xregs	a0, THREAD_CTX_REG_A0, REG_A0, REG_A7
	store_xregs	tp, THREAD_CORE_LOCAL_X10, REG_A0, REG_A1
	ret
END_FUNC thread_resume

/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
	/*
	 * Use the stack for temporary storage: reserve four XLEN-sized,
	 * ABI-aligned slots (three are used below).
	 */
	addi	sp, sp, -REGOFF(4)

	/* Read xSTATUS */
	csrr	a1, CSR_XSTATUS

	/* Save return address, xSTATUS and pointer to rv */
	STR	a0, REGOFF(0)(sp)
	STR	a1, REGOFF(1)(sp)
	STR	ra, REGOFF(2)(sp)

	/* Save thread state */
	jal	thread_get_ctx_regs
	store_xregs	a0, THREAD_CTX_REG_RA, REG_RA, REG_SP
	store_xregs	a0, THREAD_CTX_REG_S0, REG_S0, REG_S1
	store_xregs	a0, THREAD_CTX_REG_S2, REG_S2, REG_S11

	/* Get the tmp stack pointer */
	jal	thread_get_tmp_sp

	/* Get pointer to rv */
	LDR	s0, REGOFF(0)(sp)

	/* xSTATUS to restore */
	LDR	a1, REGOFF(1)(sp)
	/* Switch to tmp stack */
	mv	sp, a0

	/* Early load rv[] into s1-s3 */
	lw	s1, 0(s0)
	lw	s2, 4(s0)
	lw	s3, 8(s0)

	li	a0, THREAD_FLAGS_COPY_ARGS_ON_RETURN
	la	a2, .thread_rpc_return
	jal	thread_state_suspend

	mv	a4, a0	/* thread index */
	mv	a1, s1	/* rv[0] */
	mv	a2, s2	/* rv[1] */
	mv	a3, s3	/* rv[2] */
	li	a0, TEESMC_OPTEED_RETURN_CALL_DONE

	/* Return to untrusted domain */
	jal	thread_return_from_nsec_call
.thread_rpc_return:
	/*
	 * Jumps here from thread_resume() above when the RPC has returned.
	 * At this point the stack pointer has been restored to the value
	 * stored in THREAD_CTX above.
	 */

	/* Get pointer to rv[] */
	LDR	a4, REGOFF(0)(sp)

	/* Store a0-a3 into rv[] */
	sw	a0, 0(a4)
	sw	a1, 4(a4)
	sw	a2, 8(a4)
	sw	a3, 12(a4)

	/* Pop return address from stack */
	LDR	ra, REGOFF(2)(sp)

	addi	sp, sp, REGOFF(4)
	ret
END_FUNC thread_rpc
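
/*
 * Note on the RPC round trip above: thread_rpc() suspends the thread with
 * .thread_rpc_return as the resume address and hands a0 =
 * TEESMC_OPTEED_RETURN_CALL_DONE, a1-a3 = rv[0..2] and a4 = thread index
 * over to thread_return_from_nsec_call(). When the untrusted domain later
 * resumes the thread, thread_resume() reloads the registers saved in the
 * thread context and execution continues at .thread_rpc_return, where
 * a0-a3 (expected to carry the RPC results, as suggested by
 * THREAD_FLAGS_COPY_ARGS_ON_RETURN) are copied back into rv[].
 */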