/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2020-2021, Arm Limited
 */

#ifndef __KERNEL_THREAD_H
#define __KERNEL_THREAD_H

#ifndef __ASSEMBLER__
#include <types_ext.h>
#include <compiler.h>
#include <mm/pgt_cache.h>
#endif
#include <util.h>
#include <kernel/thread_arch.h>

#define THREAD_FLAGS_COPY_ARGS_ON_RETURN	BIT(0)
#define THREAD_FLAGS_FOREIGN_INTR_ENABLE	BIT(1)
#define THREAD_FLAGS_EXIT_ON_FOREIGN_INTR	BIT(2)
#define THREAD_FLAGS_FFA_ONLY			BIT(3)

#define THREAD_ID_0		0
#define THREAD_ID_INVALID	-1

#define THREAD_RPC_MAX_NUM_PARAMS	U(4)

#ifndef __ASSEMBLER__

struct thread_specific_data {
	TAILQ_HEAD(, ts_session) sess_stack;
	struct ts_ctx *ctx;
#ifdef CFG_CORE_FFA
	uint32_t rpc_target_info;
#endif
	uint32_t abort_type;
	uint32_t abort_descr;
	vaddr_t abort_va;
	unsigned int abort_core;
	struct thread_abort_regs abort_regs;
#ifdef CFG_CORE_DEBUG_CHECK_STACKS
	bool stackcheck_recursion;
#endif
	unsigned int syscall_recursion;
#ifdef CFG_FAULT_MITIGATION
	struct ftmn_func_arg *ftmn_arg;
#endif
};

void thread_init_canaries(void);
void thread_init_primary(void);
void thread_init_per_cpu(void);

#if defined(CFG_WITH_STACK_CANARIES)
void thread_update_canaries(void);
#else
static inline void thread_update_canaries(void) { }
#endif

struct thread_core_local *thread_get_core_local(void);

/*
 * Initializes thread contexts. Called in thread_init_boot_thread() if
 * virtualization is disabled. Virtualization subsystem calls it for
 * every new guest otherwise.
 */
void thread_init_threads(void);

/*
 * Called by the init CPU. Sets temporary stack mode for all CPUs
 * (curr_thread = -1 and THREAD_CLF_TMP) and sets the temporary stack limit for
 * the init CPU.
 */
void thread_init_thread_core_local(void);
void thread_init_core_local_stacks(void);

#if defined(CFG_CORE_PAUTH)
void thread_init_thread_pauth_keys(void);
void thread_init_core_local_pauth_keys(void);
#else
static inline void thread_init_thread_pauth_keys(void) { }
static inline void thread_init_core_local_pauth_keys(void) { }
#endif

/*
 * Initializes a thread to be used during boot
 */
void thread_init_boot_thread(void);

/*
 * Clears the current thread id.
 * Only supposed to be used during initialization.
 */
void thread_clr_boot_thread(void);

/*
 * Returns current thread id.
 */
short int thread_get_id(void);

/*
 * Returns current thread id, or -1 on failure.
 */
short int thread_get_id_may_fail(void);

/* Returns Thread Specific Data (TSD) pointer. */
struct thread_specific_data *thread_get_tsd(void);

/*
 * Sets foreign interrupts status for current thread, must only be called
 * from an active thread context.
 *
 * enable == true  -> enable foreign interrupts
 * enable == false -> disable foreign interrupts
 */
void thread_set_foreign_intr(bool enable);

/*
 * Restores the foreign interrupts status (in CPSR) for current thread, must
 * only be called from an active thread context.
 */
void thread_restore_foreign_intr(void);
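
/*
 * Illustrative sketch, not part of this interface: a thread about to do
 * long-running work may allow foreign interrupt delivery around that work
 * and disable it again afterwards. The elided work in the middle is
 * hypothetical.
 *
 *	thread_set_foreign_intr(true);
 *	...long-running work, foreign interrupts may now be delivered...
 *	thread_set_foreign_intr(false);
 *
 * thread_restore_foreign_intr() re-applies the status last configured with
 * thread_set_foreign_intr(), for example after an exception handler has
 * temporarily masked interrupts.
 */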

/*
 * thread_get_exceptions() - return current exception mask
 */
uint32_t thread_get_exceptions(void);

/*
 * thread_set_exceptions() - set exception mask
 * @exceptions: exception mask to set
 *
 * Any previous exception mask is replaced by this exception mask, that is,
 * old bits are cleared and replaced by these.
 */
void thread_set_exceptions(uint32_t exceptions);

/*
 * thread_mask_exceptions() - Masks (disables) specified asynchronous exceptions
 * @exceptions	exceptions to mask
 * @returns old exception state
 */
uint32_t thread_mask_exceptions(uint32_t exceptions);

/*
 * thread_unmask_exceptions() - Unmasks asynchronous exceptions
 * @state	Old asynchronous exception state to restore (returned by
 *		thread_mask_exceptions())
 */
void thread_unmask_exceptions(uint32_t state);
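
/*
 * Illustrative sketch, not part of this interface: the usual save/restore
 * pattern built from the two functions above. THREAD_EXCP_FOREIGN_INTR
 * comes from <kernel/thread_arch.h>, included at the top of this header.
 *
 *	uint32_t exceptions = 0;
 *
 *	exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
 *	...critical section, no foreign interrupts are delivered here...
 *	thread_unmask_exceptions(exceptions);
 */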

static inline bool __nostackcheck thread_foreign_intr_disabled(void)
{
	return !!(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
}

/*
 * thread_enter_user_mode() - Enters user mode
 * @a0: Passed in r/x0 for entry_func
 * @a1: Passed in r/x1 for entry_func
 * @a2: Passed in r/x2 for entry_func
 * @a3: Passed in r/x3 for entry_func
 * @user_sp: Assigned sp value in user mode
 * @entry_func: Function to execute in user mode
 * @is_32bit: True if TA should execute in AArch32, false if AArch64
 * @exit_status0: Pointer to opaque exit status 0
 * @exit_status1: Pointer to opaque exit status 1
 *
 * This function enters user mode with the arguments described above.
 * @exit_status0 and @exit_status1 are filled in by thread_unwind_user_mode()
 * when returning back to the caller of this function through an exception
 * handler.
 *
 * @Returns what's passed in "ret" to thread_unwind_user_mode()
 */
uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
		unsigned long a2, unsigned long a3, unsigned long user_sp,
		unsigned long entry_func, bool is_32bit,
		uint32_t *exit_status0, uint32_t *exit_status1);

/*
 * thread_unwind_user_mode() - Unwinds kernel stack from user entry
 * @ret: Value to return from thread_enter_user_mode()
 * @exit_status0: Exit status 0
 * @exit_status1: Exit status 1
 *
 * This is the function that exception handlers can return into
 * to resume execution in kernel mode instead of user mode.
 *
 * This function is closely coupled with thread_enter_user_mode() since it
 * needs to restore registers saved by thread_enter_user_mode() and when it
 * returns make it look like thread_enter_user_mode() just returned. It is
 * expected that the stack pointer is where thread_enter_user_mode() left
 * it. The stack will be unwound and the function will return to where
 * thread_enter_user_mode() was called from. @exit_status0 and @exit_status1
 * are filled in via the corresponding pointers supplied to
 * thread_enter_user_mode().
 */
void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
		uint32_t exit_status1);

/*
 * Returns the start address (bottom) of the stack for the current thread,
 * zero if there is no current thread.
 */
vaddr_t thread_stack_start(void);

/* Returns the stack size for the current thread */
size_t thread_stack_size(void);

/*
 * Returns the start (top, lowest address) and end (bottom, highest address) of
 * the current stack (thread, temporary or abort stack).
 * When CFG_CORE_DEBUG_CHECK_STACKS=y, the @hard parameter tells if the hard or
 * soft limits are queried. The difference between soft and hard is that the
 * hard limit includes some additional space at the stack start, letting a
 * function overflow the soft limit and still be able to print a stack dump.
 */
bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard);

static inline bool __nostackcheck get_stack_soft_limits(vaddr_t *start,
							vaddr_t *end)
{
	return get_stack_limits(start, end, false);
}

static inline bool __nostackcheck get_stack_hard_limits(vaddr_t *start,
							vaddr_t *end)
{
	return get_stack_limits(start, end, true);
}

bool thread_is_in_normal_mode(void);

/*
 * Returns true if the previous exception also was taken in abort mode.
 *
 * Note: it's only valid to call this function from an abort exception
 * handler before interrupts have been re-enabled.
 */
bool thread_is_from_abort_mode(void);

/**
 * Allocates data for payload buffers shared with a non-secure user space
 * application. Ensure consistency with the enumeration
 * THREAD_SHM_TYPE_APPLICATION.
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_payload(size_t size);

/**
 * Free physical memory previously allocated with thread_rpc_alloc_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_payload(struct mobj *mobj);

/**
 * Allocate data for payload buffers shared with the non-secure kernel.
 * Ensure consistency with the enumeration THREAD_SHM_TYPE_KERNEL_PRIVATE.
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_kernel_payload(size_t size);

/**
 * Free physical memory previously allocated with
 * thread_rpc_alloc_kernel_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_kernel_payload(struct mobj *mobj);

struct thread_param_memref {
	size_t offs;
	size_t size;
	struct mobj *mobj;
};

struct thread_param_value {
	uint64_t a;
	uint64_t b;
	uint64_t c;
};

/*
 * Note that some arithmetic is done on the value, so it's important
 * to keep the IN, OUT, INOUT order.
 */
enum thread_param_attr {
	THREAD_PARAM_ATTR_NONE = 0,
	THREAD_PARAM_ATTR_VALUE_IN,
	THREAD_PARAM_ATTR_VALUE_OUT,
	THREAD_PARAM_ATTR_VALUE_INOUT,
	THREAD_PARAM_ATTR_MEMREF_IN,
	THREAD_PARAM_ATTR_MEMREF_OUT,
	THREAD_PARAM_ATTR_MEMREF_INOUT,
};

struct thread_param {
	enum thread_param_attr attr;
	union {
		struct thread_param_memref memref;
		struct thread_param_value value;
	} u;
};

#define THREAD_PARAM_MEMREF(_direction, _mobj, _offs, _size) \
	(struct thread_param){ \
		.attr = THREAD_PARAM_ATTR_MEMREF_ ## _direction, .u.memref = { \
		.mobj = (_mobj), .offs = (_offs), .size = (_size) } \
	}

#define THREAD_PARAM_VALUE(_direction, _a, _b, _c) \
	(struct thread_param){ \
		.attr = THREAD_PARAM_ATTR_VALUE_ ## _direction, .u.value = { \
		.a = (_a), .b = (_b), .c = (_c) } \
	}

/**
 * Does an RPC using a preallocated argument buffer
 * @cmd:	RPC cmd
 * @num_params:	number of parameters
 * @params:	RPC parameters
 * @returns	RPC return value
 */
uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
			struct thread_param *params);
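
/*
 * Illustrative sketch, not part of this interface: an RPC with one value
 * parameter and one memref parameter backed by a freshly allocated shared
 * buffer. The variables cmd and size, and the surrounding error handling,
 * are hypothetical; ARRAY_SIZE() comes from <util.h> and
 * TEE_ERROR_OUT_OF_MEMORY from the GlobalPlatform API definitions.
 *
 *	struct mobj *mobj = thread_rpc_alloc_payload(size);
 *
 *	if (!mobj)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *
 *	struct thread_param params[2] = {
 *		THREAD_PARAM_VALUE(IN, 0, 0, 0),
 *		THREAD_PARAM_MEMREF(INOUT, mobj, 0, size),
 *	};
 *	uint32_t res = thread_rpc_cmd(cmd, ARRAY_SIZE(params), params);
 *
 *	thread_rpc_free_payload(mobj);
 */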

/**
 * Allocate data for payload buffers shared with both user space applications
 * and the non-secure kernel. Ensure consistency with the enumeration
 * THREAD_SHM_TYPE_GLOBAL.
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_global_payload(size_t size);

/**
 * Free physical memory previously allocated with
 * thread_rpc_alloc_global_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_global_payload(struct mobj *mobj);

/*
 * enum thread_shm_type - type of non-secure shared memory
 * @THREAD_SHM_TYPE_APPLICATION - user space application shared memory
 * @THREAD_SHM_TYPE_KERNEL_PRIVATE - kernel private shared memory
 * @THREAD_SHM_TYPE_GLOBAL - user space and kernel shared memory
 */
enum thread_shm_type {
	THREAD_SHM_TYPE_APPLICATION,
	THREAD_SHM_TYPE_KERNEL_PRIVATE,
	THREAD_SHM_TYPE_GLOBAL,
};

/*
 * enum thread_shm_cache_user - user of a cache allocation
 * @THREAD_SHM_CACHE_USER_SOCKET - socket communication
 * @THREAD_SHM_CACHE_USER_FS - filesystem access
 * @THREAD_SHM_CACHE_USER_I2C - I2C communication
 * @THREAD_SHM_CACHE_USER_RPMB - RPMB communication
 *
 * To ensure that users of the shared memory cache don't interfere with
 * each other, a unique ID per user is used.
 */
enum thread_shm_cache_user {
	THREAD_SHM_CACHE_USER_SOCKET,
	THREAD_SHM_CACHE_USER_FS,
	THREAD_SHM_CACHE_USER_I2C,
	THREAD_SHM_CACHE_USER_RPMB,
};

/*
 * Returns a pointer to the cached RPC memory. Each (thread, @user) tuple
 * has a unique cache. The pointer is guaranteed to point to a large enough
 * area or to be NULL.
 */
void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user,
				 enum thread_shm_type shm_type,
				 size_t size, struct mobj **mobj);

#endif /*__ASSEMBLER__*/

#endif /*__KERNEL_THREAD_H*/