/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright 2022-2023 NXP
 */

#ifndef __KERNEL_THREAD_ARCH_H
#define __KERNEL_THREAD_ARCH_H

#ifndef __ASSEMBLER__
#include <compiler.h>
#include <types_ext.h>
#endif

#include <platform_config.h>
#include <riscv.h>

/*
 * Each RISC-V platform must define its own values.
 * See core/arch/riscv/plat-virt/platform_config.h for an example.
 */
#define THREAD_EXCP_FOREIGN_INTR	PLAT_THREAD_EXCP_FOREIGN_INTR
#define THREAD_EXCP_NATIVE_INTR		PLAT_THREAD_EXCP_NATIVE_INTR

#define THREAD_EXCP_ALL			(THREAD_EXCP_FOREIGN_INTR | \
					 THREAD_EXCP_NATIVE_INTR)
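
/*
 * Illustrative sketch only: a platform would typically build these masks
 * from xie/xip interrupt bits, e.g. routing external, timer and software
 * interrupts to the REE as foreign interrupts and keeping no native ones.
 * The CSR_XIE_* names below are assumptions for illustration, not taken
 * from an actual platform_config.h:
 *
 *	#define PLAT_THREAD_EXCP_FOREIGN_INTR	(CSR_XIE_EIE | CSR_XIE_TIE | \
 *						 CSR_XIE_SIE)
 *	#define PLAT_THREAD_EXCP_NATIVE_INTR	0
 */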

#ifndef __ASSEMBLER__

#define THREAD_CORE_LOCAL_ALIGNED	__aligned(16)

/* Pointer authentication is an Arm feature; empty placeholder on RISC-V */
struct thread_pauth_keys {
};

struct thread_core_local {
	unsigned long x[4];
	uint32_t hart_id;		/* Hardware thread (hart) ID */
	uint32_t hart_index;		/* Index used for per-hart data */
	vaddr_t tmp_stack_va_end;	/* End VA of the per-core temp stack */
	short int curr_thread;		/* Current thread ID, or -1 if none */
	uint32_t flags;
	vaddr_t abt_stack_va_end;	/* End VA of the per-core abort stack */
#ifdef CFG_TEE_CORE_DEBUG
	unsigned int locked_count;	/* Number of spinlocks held */
#endif
#ifdef CFG_CORE_DEBUG_CHECK_STACKS
	bool stackcheck_recursion;
#endif
#ifdef CFG_FAULT_MITIGATION
	struct ftmn_func_arg *ftmn_arg;
#endif
} THREAD_CORE_LOCAL_ALIGNED;

/* Empty placeholder: no user FPU (VFP) state is tracked here on RISC-V */
struct thread_user_vfp_state {
};

struct thread_abi_args {
	unsigned long a0;	/* ABI function ID */
	unsigned long a1;	/* Parameter */
	unsigned long a2;	/* Parameter */
	unsigned long a3;	/* Thread ID when returning from RPC */
	unsigned long a4;	/* Not used */
	unsigned long a5;	/* Not used */
	unsigned long a6;	/* Not used */
	unsigned long a7;	/* Hypervisor Client ID */
};
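
/*
 * Illustrative sketch only: an ABI dispatcher would typically select a
 * handler based on the function ID in a0 and read its parameters from
 * a1/a2, for example:
 *
 *	static void dispatch_abi_call(struct thread_abi_args *args)
 *	{
 *		switch (args->a0) {
 *		case SOME_ABI_FUNC_ID:		// hypothetical ID
 *			args->a0 = handle_some_call(args->a1, args->a2);
 *			break;
 *		default:
 *			args->a0 = SOME_ERROR_CODE;	// hypothetical code
 *		}
 *	}
 *
 * SOME_ABI_FUNC_ID, SOME_ERROR_CODE and handle_some_call() are made-up
 * names used only to show how the register layout above is consumed.
 */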

struct thread_abort_regs {
	unsigned long ra;
	unsigned long sp;
	unsigned long gp;
	unsigned long tp;
	unsigned long t0;
	unsigned long t1;
	unsigned long t2;
	unsigned long s0;
	unsigned long s1;
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
	unsigned long s2;
	unsigned long s3;
	unsigned long s4;
	unsigned long s5;
	unsigned long s6;
	unsigned long s7;
	unsigned long s8;
	unsigned long s9;
	unsigned long s10;
	unsigned long s11;
	unsigned long t3;
	unsigned long t4;
	unsigned long t5;
	unsigned long t6;
	unsigned long epc;
	unsigned long status;
	unsigned long ie;
	unsigned long cause;
	unsigned long tval;
} __aligned(16);

struct thread_scall_regs {
	unsigned long ra;
	unsigned long sp;
	unsigned long gp;
	unsigned long tp;
	unsigned long t0;
	unsigned long t1;
	unsigned long t2;
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
	unsigned long t3;
	unsigned long t4;
	unsigned long t5;
	unsigned long t6;
	unsigned long epc;
	unsigned long status;
	unsigned long ie;
} __aligned(16);

struct thread_ctx_regs {
	unsigned long ra;
	unsigned long sp;
	unsigned long gp;
	unsigned long tp;
	unsigned long t0;
	unsigned long t1;
	unsigned long t2;
	unsigned long s0;
	unsigned long s1;
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
	unsigned long s2;
	unsigned long s3;
	unsigned long s4;
	unsigned long s5;
	unsigned long s6;
	unsigned long s7;
	unsigned long s8;
	unsigned long s9;
	unsigned long s10;
	unsigned long s11;
	unsigned long t3;
	unsigned long t4;
	unsigned long t5;
	unsigned long t6;
	unsigned long epc;
	unsigned long status;
	unsigned long ie;
};

struct mobj;
struct user_mode_ctx;

#ifdef CFG_WITH_VFP
uint32_t thread_kernel_enable_vfp(void);
void thread_kernel_disable_vfp(uint32_t state);
void thread_kernel_save_vfp(void);
void thread_kernel_restore_vfp(void);
void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp);
#else /*CFG_WITH_VFP*/
static inline void thread_kernel_save_vfp(void)
{
}

static inline void thread_kernel_restore_vfp(void)
{
}
#endif /*CFG_WITH_VFP*/
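
/*
 * Usage sketch (an assumption based on the enable/disable pairing above,
 * not code from this tree): a kernel-mode user of the FPU would bracket
 * the FPU-using region with the returned state token, e.g.
 *
 *	uint32_t vfp_state = thread_kernel_enable_vfp();
 *
 *	// ... code that may touch FPU registers ...
 *
 *	thread_kernel_disable_vfp(vfp_state);
 */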
#ifdef CFG_WITH_VFP
void thread_user_save_vfp(void);
#else
static inline void thread_user_save_vfp(void)
{
}
#endif
#ifdef CFG_WITH_VFP
void thread_user_clear_vfp(struct user_mode_ctx *uctx);
#else
static inline void thread_user_clear_vfp(struct user_mode_ctx *uctx __unused)
{
}
#endif

vaddr_t thread_get_saved_thread_sp(void);
uint32_t thread_get_hartid_by_hartindex(uint32_t hartidx);

/*
 * No kernel code or data is mapped into user space on RISC-V, so these
 * report an empty mapping.
 */
static inline void thread_get_user_kcode(struct mobj **mobj, size_t *offset,
					 vaddr_t *va, size_t *sz)
{
	*mobj = NULL;
	*offset = 0;
	*va = 0;
	*sz = 0;
}

static inline void thread_get_user_kdata(struct mobj **mobj, size_t *offset,
					 vaddr_t *va, size_t *sz)
{
	*mobj = NULL;
	*offset = 0;
	*va = 0;
	*sz = 0;
}

bool thread_disable_prealloc_rpc_cache(uint64_t *cookie);
bool thread_enable_prealloc_rpc_cache(void);

#endif /*__ASSEMBLER__*/
#endif /*__KERNEL_THREAD_ARCH_H*/