/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2020-2021, Arm Limited
 */

#ifndef __KERNEL_THREAD_H
#define __KERNEL_THREAD_H

#ifndef __ASSEMBLER__
#include <types_ext.h>
#include <compiler.h>
#include <mm/pgt_cache.h>
#endif
#include <util.h>
#include <kernel/thread_arch.h>

#define THREAD_FLAGS_COPY_ARGS_ON_RETURN	BIT(0)
#define THREAD_FLAGS_FOREIGN_INTR_ENABLE	BIT(1)
#define THREAD_FLAGS_EXIT_ON_FOREIGN_INTR	BIT(2)
#define THREAD_FLAGS_FFA_ONLY			BIT(3)

#define THREAD_ID_0		0
#define THREAD_ID_INVALID	-1

#define THREAD_RPC_MAX_NUM_PARAMS	U(4)

#ifndef __ASSEMBLER__

struct thread_specific_data {
	TAILQ_HEAD(, ts_session) sess_stack;
	struct ts_ctx *ctx;
#ifdef CFG_CORE_FFA
	uint32_t rpc_target_info;
#endif
	uint32_t abort_type;
	uint32_t abort_descr;
	vaddr_t abort_va;
	unsigned int abort_core;
	struct thread_abort_regs abort_regs;
#ifdef CFG_CORE_DEBUG_CHECK_STACKS
	bool stackcheck_recursion;
#endif
	unsigned int syscall_recursion;
#ifdef CFG_FAULT_MITIGATION
	struct ftmn_func_arg *ftmn_arg;
#endif
};

void thread_init_canaries(void);
void thread_init_primary(void);
void thread_init_per_cpu(void);

#if defined(CFG_WITH_STACK_CANARIES)
void thread_update_canaries(void);
#else
static inline void thread_update_canaries(void) { }
#endif

struct thread_core_local *thread_get_core_local(void);

/*
 * Initializes thread contexts. Called in thread_init_boot_thread() if
 * virtualization is disabled. Otherwise the virtualization subsystem calls
 * it for every new guest.
 */
void thread_init_threads(void);

vaddr_t thread_get_abt_stack(void);

/*
 * thread_init_thread_core_local() - Initialize thread_core_local
 * @core_count: Number of cores in the system
 *
 * Called by the init CPU. Sets temporary stack mode for all CPUs
 * (curr_thread = -1 and THREAD_CLF_TMP) and sets the temporary stack limit
 * for the init CPU. @core_count must be equal to CFG_TEE_CORE_NB_CORE.
 */
void thread_init_thread_core_local(size_t core_count);
void thread_init_core_local_stacks(void);

#if defined(CFG_CORE_PAUTH)
void thread_init_thread_pauth_keys(void);
void thread_init_core_local_pauth_keys(void);
#else
static inline void thread_init_thread_pauth_keys(void) { }
static inline void thread_init_core_local_pauth_keys(void) { }
#endif

/*
 * Initializes a thread to be used during boot
 */
void thread_init_boot_thread(void);

/*
 * Clears the current thread id.
 * Only supposed to be used during initialization.
 */
void thread_clr_boot_thread(void);

/*
 * Returns the current thread id.
 */
short int thread_get_id(void);

/*
 * Returns the current thread id, or -1 on failure.
 */
short int thread_get_id_may_fail(void);

/* Returns the Thread Specific Data (TSD) pointer. */
struct thread_specific_data *thread_get_tsd(void);
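
/*
 * Illustrative sketch (not part of the API): code running in a thread
 * context can reach its per-thread data through thread_get_tsd(), for
 * example to look at the currently active trusted service context:
 *
 *	struct thread_specific_data *tsd = thread_get_tsd();
 *	struct ts_ctx *ctx = tsd->ctx;
 *
 * Whether ctx may be NULL at a given point is an assumption the caller has
 * to check; this sketch only shows how the structure is reached.
 */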
/*
 * Sets the foreign interrupt status for the current thread, must only be
 * called from an active thread context.
 *
 * enable == true  -> enable foreign interrupts
 * enable == false -> disable foreign interrupts
 */
void thread_set_foreign_intr(bool enable);

/*
 * Restores the foreign interrupt status (in CPSR) for the current thread,
 * must only be called from an active thread context.
 */
void thread_restore_foreign_intr(void);

/*
 * thread_get_exceptions() - return current exception mask
 */
uint32_t thread_get_exceptions(void);

/*
 * thread_set_exceptions() - set exception mask
 * @exceptions: exception mask to set
 *
 * Any previous exception mask is replaced by this exception mask, that is,
 * old bits are cleared and replaced by these.
 */
void thread_set_exceptions(uint32_t exceptions);

/*
 * thread_mask_exceptions() - Masks (disables) specified asynchronous exceptions
 * @exceptions	exceptions to mask
 * @returns old exception state
 */
uint32_t thread_mask_exceptions(uint32_t exceptions);

/*
 * thread_unmask_exceptions() - Unmasks asynchronous exceptions
 * @state	Old asynchronous exception state to restore (returned by
 *		thread_mask_exceptions())
 */
void thread_unmask_exceptions(uint32_t state);
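
/*
 * Illustrative sketch (assumed usage, not part of the API): the typical
 * pattern is to save the old mask returned by thread_mask_exceptions() and
 * restore it with thread_unmask_exceptions() once the critical section is
 * done, for instance to keep foreign interrupts masked:
 *
 *	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
 *
 *	// ... code that must not be preempted by foreign interrupts ...
 *
 *	thread_unmask_exceptions(exceptions);
 */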
static inline bool __nostackcheck thread_foreign_intr_disabled(void)
{
	return !!(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
}

/*
 * thread_enter_user_mode() - Enters user mode
 * @a0: Passed in r/x0 for @entry_func
 * @a1: Passed in r/x1 for @entry_func
 * @a2: Passed in r/x2 for @entry_func
 * @a3: Passed in r/x3 for @entry_func
 * @user_sp: Assigned sp value in user mode
 * @entry_func: Function to execute in user mode
 * @is_32bit: True if TA should execute in Aarch32, false if Aarch64
 * @exit_status0: Pointer to opaque exit status 0
 * @exit_status1: Pointer to opaque exit status 1
 *
 * This function enters user mode with the arguments described above.
 * @exit_status0 and @exit_status1 are filled in by thread_unwind_user_mode()
 * when returning back to the caller of this function through an exception
 * handler.
 *
 * @Returns what's passed in "ret" to thread_unwind_user_mode()
 */
uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
		unsigned long a2, unsigned long a3, unsigned long user_sp,
		unsigned long entry_func, bool is_32bit,
		uint32_t *exit_status0, uint32_t *exit_status1);

/*
 * thread_unwind_user_mode() - Unwinds kernel stack from user entry
 * @ret: Value to return from thread_enter_user_mode()
 * @exit_status0: Exit status 0
 * @exit_status1: Exit status 1
 *
 * This is the function that exception handlers can return into
 * to resume execution in kernel mode instead of user mode.
 *
 * This function is closely coupled with thread_enter_user_mode() since it
 * needs to restore registers saved by thread_enter_user_mode() and, when it
 * returns, make it look like thread_enter_user_mode() just returned. It is
 * expected that the stack pointer is where thread_enter_user_mode() left
 * it. The stack will be unwound and the function will return to where
 * thread_enter_user_mode() was called from. @exit_status0 and @exit_status1
 * are written to the corresponding pointers supplied to
 * thread_enter_user_mode().
 */
void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
		uint32_t exit_status1);

/*
 * Returns the start address (bottom) of the stack for the current thread,
 * zero if there is no current thread.
 */
vaddr_t thread_stack_start(void);

/* Returns the stack size for the current thread */
size_t thread_stack_size(void);

/*
 * Returns the start (top, lowest address) and end (bottom, highest address)
 * of the current stack (thread, temporary or abort stack).
 * When CFG_CORE_DEBUG_CHECK_STACKS=y, the @hard parameter tells whether the
 * hard or soft limits are queried. The difference between soft and hard is
 * that for the latter the stack start includes some additional space, so
 * that a function overflowing the soft limit can still print a stack dump.
 */
bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard);

static inline bool __nostackcheck get_stack_soft_limits(vaddr_t *start,
							vaddr_t *end)
{
	return get_stack_limits(start, end, false);
}

static inline bool __nostackcheck get_stack_hard_limits(vaddr_t *start,
							vaddr_t *end)
{
	return get_stack_limits(start, end, true);
}

bool thread_is_in_normal_mode(void);

/*
 * Returns true if the previous exception also was in abort mode.
 *
 * Note: it's only valid to call this function from an abort exception
 * handler before interrupts have been re-enabled.
 */
bool thread_is_from_abort_mode(void);

/**
 * Allocates data for payload buffers shared with a non-secure user space
 * application. Ensure consistency with the enumeration
 * THREAD_SHM_TYPE_APPLICATION.
 *
 * @size: size in bytes of payload buffer
 *
 * @returns mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_payload(size_t size);

/**
 * Free physical memory previously allocated with thread_rpc_alloc_payload()
 *
 * @mobj: mobj that describes the buffer
 */
void thread_rpc_free_payload(struct mobj *mobj);

/**
 * Allocate data for payload buffers shared with the non-secure kernel.
 * Ensure consistency with the enumeration THREAD_SHM_TYPE_KERNEL_PRIVATE.
 *
 * @size: size in bytes of payload buffer
 *
 * @returns mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_kernel_payload(size_t size);

/**
 * Free physical memory previously allocated with
 * thread_rpc_alloc_kernel_payload()
 *
 * @mobj: mobj that describes the buffer
 */
void thread_rpc_free_kernel_payload(struct mobj *mobj);

struct thread_param_memref {
	size_t offs;
	size_t size;
	struct mobj *mobj;
};

struct thread_param_value {
	uint64_t a;
	uint64_t b;
	uint64_t c;
};

/*
 * Note that some arithmetic is done on the value, so it's important to keep
 * the IN, OUT, INOUT order.
 */
enum thread_param_attr {
	THREAD_PARAM_ATTR_NONE = 0,
	THREAD_PARAM_ATTR_VALUE_IN,
	THREAD_PARAM_ATTR_VALUE_OUT,
	THREAD_PARAM_ATTR_VALUE_INOUT,
	THREAD_PARAM_ATTR_MEMREF_IN,
	THREAD_PARAM_ATTR_MEMREF_OUT,
	THREAD_PARAM_ATTR_MEMREF_INOUT,
};

struct thread_param {
	enum thread_param_attr attr;
	union {
		struct thread_param_memref memref;
		struct thread_param_value value;
	} u;
};

#define THREAD_PARAM_MEMREF(_direction, _mobj, _offs, _size) \
	(struct thread_param){ \
		.attr = THREAD_PARAM_ATTR_MEMREF_ ## _direction, .u.memref = { \
			.mobj = (_mobj), .offs = (_offs), .size = (_size) } \
	}

#define THREAD_PARAM_VALUE(_direction, _a, _b, _c) \
	(struct thread_param){ \
		.attr = THREAD_PARAM_ATTR_VALUE_ ## _direction, .u.value = { \
			.a = (_a), .b = (_b), .c = (_c) } \
	}

/**
 * Does an RPC using a preallocated argument buffer
 * @cmd: RPC cmd
 * @num_params: number of parameters
 * @params: RPC parameters
 * @returns RPC return value
 */
uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
			struct thread_param *params);
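
/*
 * Illustrative sketch (assumed command ID and values, not part of the API):
 * parameters are built with the THREAD_PARAM_VALUE()/THREAD_PARAM_MEMREF()
 * macros and passed to thread_rpc_cmd() as an array:
 *
 *	struct thread_param params[2] = {
 *		THREAD_PARAM_VALUE(IN, 0, 0, 0),
 *		THREAD_PARAM_MEMREF(INOUT, mobj, 0, size),
 *	};
 *	uint32_t res = thread_rpc_cmd(OPTEE_RPC_CMD_EXAMPLE, 2, params);
 *
 * OPTEE_RPC_CMD_EXAMPLE, mobj and size above are placeholders; the number
 * of parameters must not exceed THREAD_RPC_MAX_NUM_PARAMS.
 */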
/**
 * Allocate data for payload buffers shared with both user space applications
 * and the non-secure kernel. Ensure consistency with the enumeration
 * THREAD_SHM_TYPE_GLOBAL.
 *
 * @size: size in bytes of payload buffer
 *
 * @returns mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_global_payload(size_t size);

/**
 * Free physical memory previously allocated with
 * thread_rpc_alloc_global_payload()
 *
 * @mobj: mobj that describes the buffer
 */
void thread_rpc_free_global_payload(struct mobj *mobj);

/*
 * enum thread_shm_type - type of non-secure shared memory
 * @THREAD_SHM_TYPE_APPLICATION - user space application shared memory
 * @THREAD_SHM_TYPE_KERNEL_PRIVATE - kernel private shared memory
 * @THREAD_SHM_TYPE_GLOBAL - user space and kernel shared memory
 */
enum thread_shm_type {
	THREAD_SHM_TYPE_APPLICATION,
	THREAD_SHM_TYPE_KERNEL_PRIVATE,
	THREAD_SHM_TYPE_GLOBAL,
};

/*
 * enum thread_shm_cache_user - user of a cache allocation
 * @THREAD_SHM_CACHE_USER_SOCKET - socket communication
 * @THREAD_SHM_CACHE_USER_FS - filesystem access
 * @THREAD_SHM_CACHE_USER_I2C - I2C communication
 * @THREAD_SHM_CACHE_USER_RPMB - RPMB communication
 *
 * To ensure that users of the shared memory cache don't interfere with each
 * other, each user has a unique ID.
 */
enum thread_shm_cache_user {
	THREAD_SHM_CACHE_USER_SOCKET,
	THREAD_SHM_CACHE_USER_FS,
	THREAD_SHM_CACHE_USER_I2C,
	THREAD_SHM_CACHE_USER_RPMB,
};

/*
 * Returns a pointer to the cached RPC memory. Each thread and @user tuple
 * has a unique cache. The pointer is guaranteed to point to a large enough
 * area or to be NULL.
 */
void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user,
				 enum thread_shm_type shm_type,
				 size_t size, struct mobj **mobj);

#endif /*__ASSEMBLER__*/

#endif /*__KERNEL_THREAD_H*/