/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2016-2022, Linaro Limited
 * Copyright (c) 2020-2021, Arm Limited
 */

#ifndef __KERNEL_THREAD_ARCH_H
#define __KERNEL_THREAD_ARCH_H

#ifndef __ASSEMBLER__
#include <arm.h>
#include <compiler.h>
#include <kernel/vfp.h>
#include <types_ext.h>
#endif

#ifndef __ASSEMBLER__

#ifdef ARM64
/*
 * struct thread_core_local needs to have alignment suitable for a stack
 * pointer since SP_EL1 points to this
 */
#define THREAD_CORE_LOCAL_ALIGNED __aligned(16)
#else
#define THREAD_CORE_LOCAL_ALIGNED __aligned(8)
#endif

struct mobj;

/*
 * Storage of keys used for pointer authentication. FEAT_PAuth supports a
 * number of keys of which only the APIA key is currently used, depending on
 * configuration.
 */
struct thread_pauth_keys {
	uint64_t apia_hi;
	uint64_t apia_lo;
};

struct thread_core_local {
#ifdef ARM32
	uint32_t r[2];
	paddr_t sm_pm_ctx_phys;
#endif
#ifdef ARM64
	uint64_t x[4];
#endif
#ifdef CFG_CORE_PAUTH
	struct thread_pauth_keys keys;
#endif
	vaddr_t tmp_stack_va_end;
#ifdef ARM32
	unsigned long tmp_stack_pa_end;
#endif
	long kcode_offset;
	short int curr_thread;
	uint32_t flags;
	vaddr_t abt_stack_va_end;
#ifdef CFG_TEE_CORE_DEBUG
	unsigned int locked_count; /* Number of spinlocks held */
#endif
#if defined(ARM64) && defined(CFG_CORE_FFA)
	/* Function ID to use for a direct response, 32-bit vs 64-bit */
	uint32_t direct_resp_fid;
#endif
#if defined(ARM64) && defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC)
	uint8_t bhb_loop_count;
#endif
#ifdef CFG_CORE_DEBUG_CHECK_STACKS
	bool stackcheck_recursion;
#endif
#ifdef CFG_FAULT_MITIGATION
	struct ftmn_func_arg *ftmn_arg;
#endif
} THREAD_CORE_LOCAL_ALIGNED;

struct thread_vector_table {
	uint32_t std_smc_entry;
	uint32_t fast_smc_entry;
	uint32_t cpu_on_entry;
	uint32_t cpu_off_entry;
	uint32_t cpu_resume_entry;
	uint32_t cpu_suspend_entry;
	uint32_t fiq_entry;
	uint32_t system_off_entry;
	uint32_t system_reset_entry;
};

extern struct thread_vector_table thread_vector_table;

struct thread_user_vfp_state {
	struct vfp_state vfp;
	bool lazy_saved;
	bool saved;
};

#ifdef ARM32
struct thread_smc_args {
	uint32_t a0;	/* SMC function ID */
	uint32_t a1;	/* Parameter */
	uint32_t a2;	/* Parameter */
	uint32_t a3;	/* Thread ID when returning from RPC */
	uint32_t a4;	/* Not used */
	uint32_t a5;	/* Not used */
	uint32_t a6;	/* Not used */
	uint32_t a7;	/* Hypervisor Client ID */
};

struct thread_smc_1_2_regs {
	union {
		struct {
			uint32_t a0;
			uint32_t a1;
			uint32_t a2;
			uint32_t a3;
			uint32_t a4;
			uint32_t a5;
			uint32_t a6;
			uint32_t a7;
		};
		uint32_t a[8];
		struct thread_smc_args arg11;
	};
};
#endif /*ARM32*/
#ifdef ARM64
struct thread_smc_args {
	uint64_t a0;	/* SMC function ID */
	uint64_t a1;	/* Parameter */
	uint64_t a2;	/* Parameter */
	uint64_t a3;	/* Thread ID when returning from RPC */
	uint64_t a4;	/* Not used */
	uint64_t a5;	/* Not used */
	uint64_t a6;	/* Not used */
	uint64_t a7;	/* Hypervisor Client ID */
};

struct thread_smc_1_2_regs {
	union {
		struct {
			uint64_t a0;
			uint64_t a1;
			uint64_t a2;
			uint64_t a3;
			uint64_t a4;
			uint64_t a5;
			uint64_t a6;
			uint64_t a7;
			uint64_t a8;
			uint64_t a9;
			uint64_t a10;
			uint64_t a11;
			uint64_t a12;
			uint64_t a13;
			uint64_t a14;
			uint64_t a15;
			uint64_t a16;
			uint64_t a17;
		};
		uint64_t a[18];
		struct thread_smc_args arg11;
	};
};
#endif /*ARM64*/

#ifdef ARM32
struct thread_abort_regs {
	uint32_t usr_sp;
	uint32_t usr_lr;
	uint32_t pad;
	uint32_t spsr;
	uint32_t elr;
	uint32_t r0;
	uint32_t r1;
	uint32_t r2;
	uint32_t r3;
	uint32_t r4;
	uint32_t r5;
	uint32_t r6;
	uint32_t r7;
	uint32_t r8;
	uint32_t r9;
	uint32_t r10;
	uint32_t r11;
	uint32_t ip;
};
#endif /*ARM32*/
#ifdef ARM64
struct thread_abort_regs {
	uint64_t x0;	/* r0_usr */
	uint64_t x1;	/* r1_usr */
	uint64_t x2;	/* r2_usr */
	uint64_t x3;	/* r3_usr */
	uint64_t x4;	/* r4_usr */
	uint64_t x5;	/* r5_usr */
	uint64_t x6;	/* r6_usr */
	uint64_t x7;	/* r7_usr */
	uint64_t x8;	/* r8_usr */
	uint64_t x9;	/* r9_usr */
	uint64_t x10;	/* r10_usr */
	uint64_t x11;	/* r11_usr */
	uint64_t x12;	/* r12_usr */
	uint64_t x13;	/* r13/sp_usr */
	uint64_t x14;	/* r14/lr_usr */
	uint64_t x15;
	uint64_t x16;
	uint64_t x17;
	uint64_t x18;
	uint64_t x19;
	uint64_t x20;
	uint64_t x21;
	uint64_t x22;
	uint64_t x23;
	uint64_t x24;
	uint64_t x25;
	uint64_t x26;
	uint64_t x27;
	uint64_t x28;
	uint64_t x29;
	uint64_t x30;
	uint64_t elr;
	uint64_t spsr;
	uint64_t sp_el0;
#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
	uint64_t apiakey_hi;
	uint64_t apiakey_lo;
#endif
};
#endif /*ARM64*/

#ifdef ARM32
struct thread_scall_regs {
	uint32_t spsr;
	uint32_t r0;
	uint32_t r1;
	uint32_t r2;
	uint32_t r3;
	uint32_t r4;
	uint32_t r5;
	uint32_t r6;
	uint32_t r7;
	uint32_t lr;
};
#endif /*ARM32*/
#ifdef ARM64
struct thread_scall_regs {
	uint64_t elr;
	uint64_t spsr;
	uint64_t x0;	/* r0_usr */
	uint64_t x1;	/* r1_usr */
	uint64_t x2;	/* r2_usr */
	uint64_t x3;	/* r3_usr */
	uint64_t x4;	/* r4_usr */
	uint64_t x5;	/* r5_usr */
	uint64_t x6;	/* r6_usr */
	uint64_t x7;	/* r7_usr */
	uint64_t x8;	/* r8_usr */
	uint64_t x9;	/* r9_usr */
	uint64_t x10;	/* r10_usr */
	uint64_t x11;	/* r11_usr */
	uint64_t x12;	/* r12_usr */
	uint64_t x13;	/* r13/sp_usr */
	uint64_t x14;	/* r14/lr_usr */
	uint64_t x30;
	uint64_t sp_el0;
#ifdef CFG_SECURE_PARTITION
	uint64_t x15;
	uint64_t x16;
	uint64_t x17;
	uint64_t x18;
	uint64_t x19;
	uint64_t x20;
	uint64_t x21;
	uint64_t x22;
	uint64_t x23;
	uint64_t x24;
	uint64_t x25;
	uint64_t x26;
	uint64_t x27;
	uint64_t x28;
	uint64_t x29;
#endif
#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
	uint64_t apiakey_hi;
	uint64_t apiakey_lo;
#endif
	uint64_t pad;
} __aligned(16);
#endif /*ARM64*/

#ifdef ARM32
struct thread_ctx_regs {
	uint32_t r0;
	uint32_t r1;
	uint32_t r2;
	uint32_t r3;
	uint32_t r4;
	uint32_t r5;
	uint32_t r6;
	uint32_t r7;
	uint32_t r8;
	uint32_t r9;
	uint32_t r10;
	uint32_t r11;
	uint32_t r12;
	uint32_t usr_sp;
	uint32_t usr_lr;
	uint32_t svc_spsr;
	uint32_t svc_sp;
	uint32_t svc_lr;
	uint32_t pc;
	uint32_t cpsr;
};
#endif /*ARM32*/

#ifdef ARM64
struct thread_ctx_regs {
	uint64_t sp;
	uint64_t pc;
	uint64_t cpsr;
	uint64_t x[31];
	uint64_t tpidr_el0;
#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
	uint64_t apiakey_hi;
	uint64_t apiakey_lo;
#endif
};
#endif /*ARM64*/

struct user_mode_ctx;

#ifdef CFG_WITH_ARM_TRUSTED_FW
/*
 * These five functions have a __weak default implementation which does
 * nothing. Platforms are expected to override them if needed.
 */
unsigned long thread_cpu_off_handler(unsigned long a0, unsigned long a1);
unsigned long thread_cpu_suspend_handler(unsigned long a0, unsigned long a1);
unsigned long thread_cpu_resume_handler(unsigned long a0, unsigned long a1);
unsigned long thread_system_off_handler(unsigned long a0, unsigned long a1);
unsigned long thread_system_reset_handler(unsigned long a0, unsigned long a1);
#endif /*CFG_WITH_ARM_TRUSTED_FW*/
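
/*
 * Illustrative sketch (an assumption about typical use, not mandated by
 * this header): a platform can override one of the __weak defaults above,
 * for example:
 *
 *	unsigned long thread_cpu_suspend_handler(unsigned long a0 __unused,
 *						 unsigned long a1 __unused)
 *	{
 *		... quiesce platform devices before the core is suspended ...
 *		return 0;
 *	}
 */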

/*
 * Defines the bits for the exception mask used by the
 * thread_*_exceptions() functions in <kernel/thread.h>.
 * These definitions are compatible with both ARM32 and ARM64.
 */
#if defined(CFG_CORE_IRQ_IS_NATIVE_INTR)
#define THREAD_EXCP_FOREIGN_INTR	(ARM32_CPSR_F >> ARM32_CPSR_F_SHIFT)
#define THREAD_EXCP_NATIVE_INTR		(ARM32_CPSR_I >> ARM32_CPSR_F_SHIFT)
#else
#define THREAD_EXCP_FOREIGN_INTR	(ARM32_CPSR_I >> ARM32_CPSR_F_SHIFT)
#define THREAD_EXCP_NATIVE_INTR		(ARM32_CPSR_F >> ARM32_CPSR_F_SHIFT)
#endif
#define THREAD_EXCP_ALL			(THREAD_EXCP_FOREIGN_INTR	\
					| THREAD_EXCP_NATIVE_INTR	\
					| (ARM32_CPSR_A >> ARM32_CPSR_F_SHIFT))
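
/*
 * Illustrative sketch, assuming thread_mask_exceptions() and
 * thread_unmask_exceptions() declared in <kernel/thread.h>: masking
 * foreign interrupts around a critical section looks like:
 *
 *	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
 *
 *	... critical section, cannot be preempted by a foreign interrupt ...
 *
 *	thread_unmask_exceptions(exceptions);
 */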

#ifdef CFG_WITH_VFP
/*
 * thread_kernel_enable_vfp() - Temporarily enables usage of VFP
 *
 * Foreign interrupts are masked while VFP is enabled. User space must not be
 * entered before thread_kernel_disable_vfp() has been called to disable VFP
 * and restore the foreign interrupt status.
 *
 * This function may only be called from an active thread context and may
 * not be called again before thread_kernel_disable_vfp() has been called.
 *
 * VFP state is saved as needed.
 *
 * Returns a state variable that should be passed to
 * thread_kernel_disable_vfp().
 */
uint32_t thread_kernel_enable_vfp(void);

/*
 * thread_kernel_disable_vfp() - Disables usage of VFP
 * @state:	state variable returned by thread_kernel_enable_vfp()
 *
 * Disables usage of VFP and restores foreign interrupt status after a call to
 * thread_kernel_enable_vfp().
 *
 * This function may only be called after a call to
 * thread_kernel_enable_vfp().
 */
void thread_kernel_disable_vfp(uint32_t state);
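
/*
 * Illustrative sketch of the enable/disable bracket described above (the
 * VFP work in the middle is a placeholder):
 *
 *	uint32_t vfp_state = thread_kernel_enable_vfp();
 *
 *	... use VFP/SIMD registers; foreign interrupts remain masked and
 *	user space must not be entered ...
 *
 *	thread_kernel_disable_vfp(vfp_state);
 */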

/*
 * thread_kernel_save_vfp() - Saves kernel vfp state if enabled
 */
void thread_kernel_save_vfp(void);

/*
 * thread_kernel_restore_vfp() - Restores kernel vfp state
 */
void thread_kernel_restore_vfp(void);

/*
 * thread_user_enable_vfp() - Enables vfp for user mode usage
 * @uvfp:	pointer to where to save the vfp state if needed
 */
void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp);
#else /*CFG_WITH_VFP*/
static inline void thread_kernel_save_vfp(void)
{
}

static inline void thread_kernel_restore_vfp(void)
{
}
#endif /*CFG_WITH_VFP*/

/*
 * thread_user_save_vfp() - Saves the user vfp state if enabled
 */
#ifdef CFG_WITH_VFP
void thread_user_save_vfp(void);
#else
static inline void thread_user_save_vfp(void)
{
}
#endif

/*
 * thread_user_clear_vfp() - Clears the vfp state
 * @uctx:	pointer to user mode context containing the saved state to clear
 */
#ifdef CFG_WITH_VFP
void thread_user_clear_vfp(struct user_mode_ctx *uctx);
#else
static inline void thread_user_clear_vfp(struct user_mode_ctx *uctx __unused)
{
}
#endif

#ifdef ARM64
/*
 * thread_get_saved_thread_sp() - Returns the saved sp of current thread
 *
 * When switching away from the thread stack the stack pointer is saved
 * in the current thread context. This function returns that saved value.
 *
 * @returns stack pointer
 */
vaddr_t thread_get_saved_thread_sp(void);
#endif /*ARM64*/

/*
 * Provides addresses and size of kernel code that must be mapped while in
 * user mode.
 */
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
void thread_get_user_kcode(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz);
#else
static inline void thread_get_user_kcode(struct mobj **mobj, size_t *offset,
					 vaddr_t *va, size_t *sz)
{
	*mobj = NULL;
	*offset = 0;
	*va = 0;
	*sz = 0;
}
#endif

/*
 * Provides addresses and size of kernel (rw) data that must be mapped
 * while in user mode.
 */
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
void thread_get_user_kdata(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz);
#else
static inline void thread_get_user_kdata(struct mobj **mobj, size_t *offset,
					 vaddr_t *va, size_t *sz)
{
	*mobj = NULL;
	*offset = 0;
	*va = 0;
	*sz = 0;
}
#endif

/*
 * Disables and empties the prealloc RPC cache one reference at a time. If
 * all threads are idle this function returns true and *cookie is set to
 * the cookie of one shm object that was removed from the cache. Once the
 * cache is empty *cookie is set to 0 and the cache is disabled. If any
 * thread is busy this function returns false.
 */
bool thread_disable_prealloc_rpc_cache(uint64_t *cookie);
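
/*
 * Illustrative drain loop for the contract described above
 * (release_shm_by_cookie() is a hypothetical placeholder for however the
 * caller frees the shared memory object tied to a cookie):
 *
 *	uint64_t cookie = 0;
 *
 *	while (thread_disable_prealloc_rpc_cache(&cookie) && cookie)
 *		release_shm_by_cookie(cookie);
 *
 * The loop stops with a zeroed cookie once the cache is empty and
 * disabled, or early if a busy thread made the call return false.
 */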

/*
 * Enables the prealloc RPC cache. If all threads are idle the cache is
 * enabled and this function returns true. If any thread is busy this
 * function returns false.
 */
bool thread_enable_prealloc_rpc_cache(void);

/*
 * Helpers to issue an SMCCC-style HVC or SMC from the core. thread_hvc()
 * and thread_smc() pass four arguments and return the value from the
 * first result register, while thread_smccc() passes and receives a full
 * set of arguments through @arg_res.
 */
unsigned long thread_hvc(unsigned long func_id, unsigned long a1,
			 unsigned long a2, unsigned long a3);
unsigned long thread_smc(unsigned long func_id, unsigned long a1,
			 unsigned long a2, unsigned long a3);
void thread_smccc(struct thread_smc_args *arg_res);
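
/*
 * Illustrative sketch: the standard SMCCC_VERSION query (function ID
 * 0x80000000 in the SMC Calling Convention) could be issued as:
 *
 *	unsigned long ver = thread_smc(0x80000000, 0, 0, 0);
 */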
#endif /*__ASSEMBLER__*/
#endif /*__KERNEL_THREAD_ARCH_H*/