/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2016-2017, Linaro Limited
 * Copyright (c) 2020-2021, Arm Limited
 */

#ifndef __KERNEL_THREAD_H
#define __KERNEL_THREAD_H

#ifndef __ASSEMBLER__
#include <types_ext.h>
#include <compiler.h>
#include <mm/pgt_cache.h>
#endif
#include <util.h>
#include <kernel/thread_arch.h>

#define THREAD_FLAGS_COPY_ARGS_ON_RETURN	BIT(0)
#define THREAD_FLAGS_FOREIGN_INTR_ENABLE	BIT(1)
#define THREAD_FLAGS_EXIT_ON_FOREIGN_INTR	BIT(2)
#define THREAD_FLAGS_FFA_ONLY			BIT(3)

#define THREAD_ID_0		0
#define THREAD_ID_INVALID	-1

#define THREAD_RPC_MAX_NUM_PARAMS	U(4)

#ifndef __ASSEMBLER__

struct thread_specific_data {
	TAILQ_HEAD(, ts_session) sess_stack;
	struct ts_ctx *ctx;
#ifdef CFG_CORE_FFA
	uint32_t rpc_target_info;
#endif
	uint32_t abort_type;
	uint32_t abort_descr;
	vaddr_t abort_va;
	unsigned int abort_core;
	struct thread_abort_regs abort_regs;
#ifdef CFG_CORE_DEBUG_CHECK_STACKS
	bool stackcheck_recursion;
#endif
	unsigned int syscall_recursion;
#ifdef CFG_FAULT_MITIGATION
	struct ftmn_func_arg *ftmn_arg;
#endif
};

void thread_init_canaries(void);
void thread_init_primary(void);
void thread_init_per_cpu(void);

#if defined(CFG_WITH_STACK_CANARIES)
void thread_update_canaries(void);
#else
static inline void thread_update_canaries(void) { }
#endif

struct thread_core_local *thread_get_core_local(void);

/*
 * Initializes thread contexts. Called in thread_init_boot_thread() if
 * virtualization is disabled. Virtualization subsystem calls it for
 * every new guest otherwise.
 */
void thread_init_threads(void);

vaddr_t thread_get_abt_stack(void);

/*
 * thread_init_thread_core_local() - Initialize thread_core_local
 * @core_count:	Number of cores in the system
 *
 * Called by the init CPU. Sets temporary stack mode for all CPUs
 * (curr_thread = -1 and THREAD_CLF_TMP) and sets the temporary stack limit
 * for the init CPU. @core_count must be <= CFG_TEE_CORE_NB_CORE. With
 * CFG_DYN_CONFIG=y the number of supported cores is set to @core_count,
 * otherwise @core_count must equal CFG_TEE_CORE_NB_CORE.
 */
void thread_init_thread_core_local(size_t core_count);
void thread_init_core_local_stacks(void);

#if defined(CFG_CORE_PAUTH)
void thread_init_thread_pauth_keys(void);
void thread_init_core_local_pauth_keys(void);
#else
static inline void thread_init_thread_pauth_keys(void) { }
static inline void thread_init_core_local_pauth_keys(void) { }
#endif

/*
 * Initializes a thread to be used during boot
 */
void thread_init_boot_thread(void);

/*
 * Clears the current thread id
 * Only supposed to be used during initialization.
 */
void thread_clr_boot_thread(void);

/*
 * Returns current thread id.
 */
short int thread_get_id(void);

/*
 * Returns current thread id, or -1 on failure.
 */
short int thread_get_id_may_fail(void);

/* Returns Thread Specific Data (TSD) pointer. */
struct thread_specific_data *thread_get_tsd(void);
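
/*
 * Example (illustrative sketch, not taken from existing code): code that
 * can run both early at boot and from a thread context may use
 * thread_get_id_may_fail() to check whether thread specific data is
 * available before dereferencing it. The helper name record_abort_type()
 * is hypothetical.
 *
 *	static void record_abort_type(uint32_t abort_type)
 *	{
 *		struct thread_specific_data *tsd = NULL;
 *
 *		if (thread_get_id_may_fail() == THREAD_ID_INVALID)
 *			return;	// No current thread, e.g. boot or interrupt
 *
 *		tsd = thread_get_tsd();
 *		tsd->abort_type = abort_type;
 *	}
 */
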
/*
 * Sets the foreign interrupt status for the current thread, must only be
 * called from an active thread context.
 *
 * enable == true  -> enable foreign interrupts
 * enable == false -> disable foreign interrupts
 */
void thread_set_foreign_intr(bool enable);

/*
 * Restores the foreign interrupt status (in CPSR) for the current thread,
 * must only be called from an active thread context.
 */
void thread_restore_foreign_intr(void);

/*
 * thread_get_exceptions() - return current exception mask
 */
uint32_t thread_get_exceptions(void);

/*
 * thread_set_exceptions() - set exception mask
 * @exceptions: exception mask to set
 *
 * Any previous exception mask is replaced by this exception mask, that is,
 * old bits are cleared and replaced by these.
 */
void thread_set_exceptions(uint32_t exceptions);

/*
 * thread_mask_exceptions() - Masks (disables) specified asynchronous exceptions
 * @exceptions	exceptions to mask
 * @returns old exception state
 */
uint32_t thread_mask_exceptions(uint32_t exceptions);

/*
 * thread_unmask_exceptions() - Unmasks asynchronous exceptions
 * @state	Old asynchronous exception state to restore (returned by
 *		thread_mask_exceptions())
 */
void thread_unmask_exceptions(uint32_t state);

static inline bool __nostackcheck thread_foreign_intr_disabled(void)
{
	return !!(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
}
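
/*
 * Example (illustrative sketch): temporarily masking foreign interrupts
 * around a short critical section and restoring the previous state
 * afterwards. do_critical_work() is a hypothetical placeholder.
 *
 *	uint32_t exceptions = 0;
 *
 *	exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
 *	do_critical_work();
 *	thread_unmask_exceptions(exceptions);
 */
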
/*
 * thread_enter_user_mode() - Enters user mode
 * @a0:		Passed in r/x0 for entry_func
 * @a1:		Passed in r/x1 for entry_func
 * @a2:		Passed in r/x2 for entry_func
 * @a3:		Passed in r/x3 for entry_func
 * @user_sp:	Assigned sp value in user mode
 * @entry_func:	Function to execute in user mode
 * @is_32bit:	True if TA should execute in AArch32, false if AArch64
 * @exit_status0: Pointer to opaque exit status 0
 * @exit_status1: Pointer to opaque exit status 1
 *
 * This function enters user mode with the arguments described above,
 * @exit_status0 and @exit_status1 are filled in by thread_unwind_user_mode()
 * when returning back to the caller of this function through an exception
 * handler.
 *
 * @Returns what's passed in "ret" to thread_unwind_user_mode()
 */
uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
				unsigned long a2, unsigned long a3,
				unsigned long user_sp,
				unsigned long entry_func, bool is_32bit,
				uint32_t *exit_status0, uint32_t *exit_status1);

/*
 * thread_unwind_user_mode() - Unwinds kernel stack from user entry
 * @ret:		Value to return from thread_enter_user_mode()
 * @exit_status0:	Exit status 0
 * @exit_status1:	Exit status 1
 *
 * This is the function that exception handlers can return into
 * to resume execution in kernel mode instead of user mode.
 *
 * This function is closely coupled with thread_enter_user_mode() since it
 * needs to restore registers saved by thread_enter_user_mode() and when it
 * returns make it look like thread_enter_user_mode() just returned. It is
 * expected that the stack pointer is where thread_enter_user_mode() left
 * it. The stack will be unwound and the function will return to where
 * thread_enter_user_mode() was called from. @exit_status0 and @exit_status1
 * are filled in via the corresponding pointers supplied to
 * thread_enter_user_mode().
 */
void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
			     uint32_t exit_status1);
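
/*
 * Example (illustrative sketch, not the actual TA entry code): a kernel
 * mode caller transfers control to user mode and later receives the exit
 * status words when an exception handler returns through
 * thread_unwind_user_mode(). All variable names below are hypothetical.
 *
 *	uint32_t exit_status0 = 0;
 *	uint32_t exit_status1 = 0;
 *	uint32_t ret = 0;
 *
 *	ret = thread_enter_user_mode(a0, a1, a2, a3, user_sp, entry_func,
 *				     is_32bit, &exit_status0, &exit_status1);
 *	// ret holds whatever was passed as "ret" to thread_unwind_user_mode()
 */
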
/*
 * Returns the start address (bottom) of the stack for the current thread,
 * zero if there is no current thread.
 */
vaddr_t thread_stack_start(void);

/* Returns the stack size for the current thread */
size_t thread_stack_size(void);

/*
 * Returns the start (top, lowest address) and end (bottom, highest address) of
 * the current stack (thread, temporary or abort stack).
 * When CFG_CORE_DEBUG_CHECK_STACKS=y, the @hard parameter tells if the hard or
 * soft limits are queried. The difference between soft and hard is that for
 * the latter, the stack start includes some additional space to let any
 * function overflow the soft limit and still be able to print a stack dump in
 * this case.
 */
bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard);

static inline bool __nostackcheck get_stack_soft_limits(vaddr_t *start,
							vaddr_t *end)
{
	return get_stack_limits(start, end, false);
}

static inline bool __nostackcheck get_stack_hard_limits(vaddr_t *start,
							vaddr_t *end)
{
	return get_stack_limits(start, end, true);
}
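
/*
 * Example (illustrative sketch, assuming the boolean return value of
 * get_stack_limits() indicates that valid limits could be determined):
 * checking whether an address lies within the currently active stack.
 * Note that @start is the lowest and @end the highest address.
 *
 *	vaddr_t start = 0;
 *	vaddr_t end = 0;
 *
 *	if (get_stack_soft_limits(&start, &end) &&
 *	    va >= start && va < end) {
 *		// va points into the current stack
 *	}
 */
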
bool thread_is_in_normal_mode(void);

/*
 * Returns true if the previous exception also was in abort mode.
 *
 * Note: it's only valid to call this function from an abort exception
 * handler before interrupts have been re-enabled.
 */
bool thread_is_from_abort_mode(void);

/**
 * Allocates data for payload buffers shared with a non-secure user space
 * application. Ensure consistency with the enumeration
 * THREAD_SHM_TYPE_APPLICATION.
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_payload(size_t size);

/**
 * Free physical memory previously allocated with thread_rpc_alloc_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_payload(struct mobj *mobj);

/**
 * Allocates data for payload buffers shared with the non-secure kernel.
 * Ensure consistency with the enumeration THREAD_SHM_TYPE_KERNEL_PRIVATE.
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_kernel_payload(size_t size);

/**
 * Free physical memory previously allocated with
 * thread_rpc_alloc_kernel_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_kernel_payload(struct mobj *mobj);

struct thread_param_memref {
	size_t offs;
	size_t size;
	struct mobj *mobj;
};

struct thread_param_value {
	uint64_t a;
	uint64_t b;
	uint64_t c;
};

/*
 * Note that some arithmetic is done on the attribute values, so it's
 * important to keep the IN, OUT, INOUT order.
 */
enum thread_param_attr {
	THREAD_PARAM_ATTR_NONE = 0,
	THREAD_PARAM_ATTR_VALUE_IN,
	THREAD_PARAM_ATTR_VALUE_OUT,
	THREAD_PARAM_ATTR_VALUE_INOUT,
	THREAD_PARAM_ATTR_MEMREF_IN,
	THREAD_PARAM_ATTR_MEMREF_OUT,
	THREAD_PARAM_ATTR_MEMREF_INOUT,
};

struct thread_param {
	enum thread_param_attr attr;
	union {
		struct thread_param_memref memref;
		struct thread_param_value value;
	} u;
};

#define THREAD_PARAM_MEMREF(_direction, _mobj, _offs, _size) \
	(struct thread_param){ \
		.attr = THREAD_PARAM_ATTR_MEMREF_ ## _direction, .u.memref = { \
		.mobj = (_mobj), .offs = (_offs), .size = (_size) } \
	}

#define THREAD_PARAM_VALUE(_direction, _a, _b, _c) \
	(struct thread_param){ \
		.attr = THREAD_PARAM_ATTR_VALUE_ ## _direction, .u.value = { \
		.a = (_a), .b = (_b), .c = (_c) } \
	}

/**
 * Does an RPC using a preallocated argument buffer
 * @cmd:	RPC cmd
 * @num_params:	number of parameters
 * @params:	RPC parameters
 * @returns	RPC return value
 */
uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
			struct thread_param *params);
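
/*
 * Example (illustrative sketch): issuing an RPC with one value parameter
 * and one memory reference backed by an RPC-allocated payload buffer.
 * OPTEE_RPC_CMD_EXAMPLE is a hypothetical command identifier and the
 * snippet is assumed to live in a function returning a uint32_t result.
 *
 *	struct mobj *mobj = thread_rpc_alloc_payload(size);
 *	struct thread_param params[2] = { };
 *	uint32_t res = 0;
 *
 *	if (!mobj)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *
 *	params[0] = THREAD_PARAM_VALUE(INOUT, 0, 0, 0);
 *	params[1] = THREAD_PARAM_MEMREF(INOUT, mobj, 0, size);
 *
 *	res = thread_rpc_cmd(OPTEE_RPC_CMD_EXAMPLE, 2, params);
 *
 *	thread_rpc_free_payload(mobj);
 *	return res;
 */
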
/**
 * Allocates data for payload buffers shared with both user space applications
 * and the non-secure kernel. Ensure consistency with the enumeration
 * THREAD_SHM_TYPE_GLOBAL.
 *
 * @size:	size in bytes of payload buffer
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
struct mobj *thread_rpc_alloc_global_payload(size_t size);

/**
 * Free physical memory previously allocated with
 * thread_rpc_alloc_global_payload()
 *
 * @mobj:	mobj that describes the buffer
 */
void thread_rpc_free_global_payload(struct mobj *mobj);

/*
 * enum thread_shm_type - type of non-secure shared memory
 * @THREAD_SHM_TYPE_APPLICATION - user space application shared memory
 * @THREAD_SHM_TYPE_KERNEL_PRIVATE - kernel private shared memory
 * @THREAD_SHM_TYPE_GLOBAL - user space and kernel shared memory
 */
enum thread_shm_type {
	THREAD_SHM_TYPE_APPLICATION,
	THREAD_SHM_TYPE_KERNEL_PRIVATE,
	THREAD_SHM_TYPE_GLOBAL,
};

/*
 * enum thread_shm_cache_user - user of a cache allocation
 * @THREAD_SHM_CACHE_USER_SOCKET - socket communication
 * @THREAD_SHM_CACHE_USER_FS - filesystem access
 * @THREAD_SHM_CACHE_USER_I2C - I2C communication
 * @THREAD_SHM_CACHE_USER_RPMB - RPMB communication
 *
 * To ensure that the users of the shared memory cache don't interfere with
 * each other, a unique ID per user is used.
 */
enum thread_shm_cache_user {
	THREAD_SHM_CACHE_USER_SOCKET,
	THREAD_SHM_CACHE_USER_FS,
	THREAD_SHM_CACHE_USER_I2C,
	THREAD_SHM_CACHE_USER_RPMB,
};

/*
 * Returns a pointer to the cached RPC memory. Each thread and @user tuple
 * has a unique cache. The pointer is guaranteed to point to a large enough
 * area or to be NULL.
 */
void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user,
				 enum thread_shm_type shm_type,
				 size_t size, struct mobj **mobj);

#endif /*__ASSEMBLER__*/

#endif /*__KERNEL_THREAD_H*/