1 /* SPDX-License-Identifier: BSD-2-Clause */
2 /*
3 * Copyright (c) 2016, Linaro Limited
4 * Copyright (c) 2014, STMicroelectronics International N.V.
5 */
6
7 #ifndef __KERNEL_THREAD_PRIVATE_ARCH_H
8 #define __KERNEL_THREAD_PRIVATE_ARCH_H
9
10 #ifndef __ASSEMBLER__
11
12 #include <kernel/thread.h>
13 #include <kernel/vfp.h>
14 #include <sm/sm.h>
15
16 #ifdef CFG_WITH_ARM_TRUSTED_FW
17 #define STACK_TMP_OFFS 0
18 #else
19 #define STACK_TMP_OFFS SM_STACK_TMP_RESERVE_SIZE
20 #endif
21
22 #ifdef ARM32
23 #ifdef CFG_CORE_SANITIZE_KADDRESS
24 #define STACK_TMP_SIZE (3072 + STACK_TMP_OFFS + CFG_STACK_TMP_EXTRA)
25 #else
26 #define STACK_TMP_SIZE (2048 + STACK_TMP_OFFS + CFG_STACK_TMP_EXTRA)
27 #endif
28 #define STACK_THREAD_SIZE (8192 + CFG_STACK_THREAD_EXTRA)
29
30 #if defined(CFG_CORE_SANITIZE_KADDRESS) || defined(__clang__) || \
31 !defined(CFG_CRYPTO_WITH_CE)
32 #define STACK_ABT_SIZE 3072
33 #else
34 #define STACK_ABT_SIZE 2048
35 #endif
36
37 #endif /*ARM32*/
38
39 #ifdef ARM64
40 #if (defined(__clang__) && !defined(__OPTIMIZE_SIZE__)) || \
41 defined(CFG_CORE_SANITIZE_KADDRESS) || \
42 defined(CFG_CORE_DEBUG_CHECK_STACKS) || defined(CFG_NS_VIRTUALIZATION)
43 #define STACK_TMP_SIZE (4096 + STACK_TMP_OFFS + CFG_STACK_TMP_EXTRA)
44 #else
45 #define STACK_TMP_SIZE (2048 + STACK_TMP_OFFS + CFG_STACK_TMP_EXTRA)
46 #endif
47 #if defined(CFG_CORE_SANITIZE_KADDRESS) || defined(CFG_CORE_DEBUG_CHECK_STACKS)
48 #define STACK_THREAD_SIZE (12288 + CFG_STACK_THREAD_EXTRA)
49 #else
50 #define STACK_THREAD_SIZE (8192 + CFG_STACK_THREAD_EXTRA)
51 #endif
52
53 #define STACK_ABT_SIZE 4096
54 #endif /*ARM64*/
55
56 #ifdef CFG_CORE_DEBUG_CHECK_STACKS
57 /*
58 * Extra space added to each stack in order to reliably detect and dump stack
59 * overflows. Should cover the maximum expected overflow size caused by any C
 * function (say, 512 bytes; no function should have that many local variables),
61 * plus the maximum stack space needed by __cyg_profile_func_exit(): about 1 KB,
62 * a large part of which is used to print the call stack. Total: 1.5 KB.
63 */
64 #define STACK_CHECK_EXTRA 1536
65 #else
66 #define STACK_CHECK_EXTRA 0
67 #endif
68
69 #ifdef ARM64
/*
 * Kernel-side record kept while a thread executes in user mode.
 * NOTE(review): the layout appears to be consumed by assembly code
 * (see __thread_enter_user_mode()) — keep field order, sizes and the
 * padding in sync with the assembly; confirm before changing.
 */
struct thread_user_mode_rec {
	uint64_t ctx_regs_ptr;		/* pointer to saved thread_ctx_regs */
	uint64_t exit_status0_ptr;	/* where exit status word 0 is stored */
	uint64_t exit_status1_ptr;	/* where exit status word 1 is stored */
	uint64_t pad;			/* padding — presumably keeps the
					   struct a multiple of 16 bytes for
					   AArch64 stack alignment; confirm */
	uint64_t x[31 - 19];		/* x19..x30 */
};
77 #endif /*ARM64*/
78
79 #ifdef CFG_WITH_VFP
/*
 * Per-thread VFP/SIMD context bookkeeping: one saved context per world
 * plus flags telling which of them currently hold valid saved state.
 */
struct thread_vfp_state {
	bool ns_saved;		/* true if @ns holds saved non-secure state */
	bool sec_saved;		/* true if @sec holds saved secure state */
	bool sec_lazy_saved;	/* NOTE(review): presumably marks secure state
				   as pending a lazy save — confirm against
				   the vfp_* users of this struct */
	struct vfp_state ns;	/* non-secure world VFP context */
	struct vfp_state sec;	/* secure world VFP context */
	struct thread_user_vfp_state *uvfp;	/* user-mode VFP state, if any */
};
88
89 #endif /*CFG_WITH_VFP*/
90 #endif /*__ASSEMBLER__*/
91
92 #ifdef ARM64
93 #ifdef CFG_WITH_VFP
94 #define THREAD_VFP_STATE_SIZE \
95 (16 + (16 * 32 + 16) * 2 + 16)
96 #else
97 #define THREAD_VFP_STATE_SIZE 0
98 #endif
99 #endif /*ARM64*/
100
101 #ifndef __ASSEMBLER__
102
103 /*
104 * During boot note the part of code and data that needs to be mapped while
105 * in user mode. The provided address and size have to be page aligned.
106 * Note that the code and data will be mapped at the lowest possible
107 * addresses available for user space (see core_mmu_get_user_va_range()).
108 */
109 extern long thread_user_kcode_offset;
110
/*
 * Initializes VBAR for the current CPU (called by thread_init_per_cpu()).
 */
114 void thread_init_vbar(vaddr_t addr);
115
116 void thread_excp_vect(void);
117 void thread_excp_vect_wa_spectre_v2(void);
118 void thread_excp_vect_wa_a15_spectre_v2(void);
119 void thread_excp_vect_wa_spectre_bhb(void);
120 void thread_excp_vect_end(void);
121
122 /*
123 * Assembly function as the first function in a thread. Handles a stdcall,
124 * a0-a3 holds the parameters. Hands over to __thread_std_smc_entry() when
125 * everything is set up and does some post processing once
126 * __thread_std_smc_entry() returns.
127 */
128 void thread_std_smc_entry(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
129 uint32_t a4, uint32_t a5);
130 uint32_t __thread_std_smc_entry(uint32_t a0, uint32_t a1, uint32_t a2,
131 uint32_t a3, uint32_t a4, uint32_t a5);
132
133 void thread_sp_alloc_and_run(struct thread_smc_args *args);
134
135 /*
136 * Resumes execution of currently active thread by restoring context and
137 * jumping to the instruction where to continue execution.
138 *
139 * Arguments supplied by non-secure world will be copied into the saved
140 * context of the current thread if THREAD_FLAGS_COPY_ARGS_ON_RETURN is set
141 * in the flags field in the thread context.
142 */
143 void thread_resume(struct thread_ctx_regs *regs);
144
145 uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
146 uint32_t *exit_status0,
147 uint32_t *exit_status1);
148
149 /*
150 * Private functions made available for thread_asm.S
151 */
152
153 /* Returns the temp stack for current CPU */
154 void *thread_get_tmp_sp(void);
155
/*
 * Marks the current thread as suspended and updates the flags
 * of the thread context (see thread_resume() for use of the flags).
 * Returns the thread index of the thread that was suspended.
 */
161 int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc);
162
163 /*
164 * Marks the current thread as free.
165 */
166 void thread_state_free(void);
167
168 /* Returns a pointer to the saved registers in current thread context. */
169 struct thread_ctx_regs *thread_get_ctx_regs(void);
170
171 #ifdef ARM32
172 /* Sets sp for abort mode */
173 void thread_set_abt_sp(vaddr_t sp);
174
175 /* Sets sp for undefined mode */
176 void thread_set_und_sp(vaddr_t sp);
177
178 /* Sets sp for irq mode */
179 void thread_set_irq_sp(vaddr_t sp);
180
181 /* Sets sp for fiq mode */
182 void thread_set_fiq_sp(vaddr_t sp);
183
184 /* Read usr_sp banked CPU register */
185 uint32_t thread_get_usr_sp(void);
186 uint32_t thread_get_usr_lr(void);
187 void thread_set_usr_lr(uint32_t usr_lr);
188 #endif /*ARM32*/
189
190 void thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
191 uint32_t a4, uint32_t a5);
192 void thread_resume_from_rpc(uint32_t thread_id, uint32_t a0, uint32_t a1,
193 uint32_t a2, uint32_t a3);
194
195 /*
196 * The thread_rpc() function suspends current thread and temporarily exits
197 * to non-secure world. This function returns later when non-secure world
198 * returns.
199 *
200 * The purpose of this function is to request services from non-secure
201 * world.
202 */
203 #define THREAD_RPC_NUM_ARGS 4
204 #ifdef ARM64
205 void thread_rpc_spsr(uint32_t rv[THREAD_RPC_NUM_ARGS], uint64_t spsr);
206 void __thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]);
207
208 #ifdef CFG_CORE_FFA
/*
 * Argument block for thread_rpc() in the FF-A configuration.  The union
 * overlays the same THREAD_RPC_NUM_ARGS 32-bit words three ways: the
 * values supplied when making the RPC (@call), the values received when
 * the RPC returns (@ret), and a raw array view (@pad) which is what is
 * actually handed to __thread_rpc().
 * NOTE(review): the wN field names presumably mirror FF-A message
 * register words w1/w4..w6 — confirm against the FF-A ABI code.
 */
struct thread_rpc_arg {
	union {
		struct {
			uint32_t w1;
			uint32_t w4;
			uint32_t w5;
			uint32_t w6;
		} call;		/* outgoing RPC arguments */
		struct {
			uint32_t w4;
			uint32_t w5;
			uint32_t w6;
		} ret;		/* incoming RPC return values */
		uint32_t pad[THREAD_RPC_NUM_ARGS];	/* raw register view */
	};
};
225
thread_rpc(struct thread_rpc_arg * rpc_arg)226 static inline void thread_rpc(struct thread_rpc_arg *rpc_arg)
227 {
228 __thread_rpc(rpc_arg->pad);
229 }
230 #else
/*
 * Suspends the current thread and exits to non-secure world with the
 * THREAD_RPC_NUM_ARGS return words in @rv; returns when non-secure
 * world resumes us.  Thin wrapper around __thread_rpc().
 * (Fix: removed a stray code-browser artifact that had been fused onto
 * this line, corrupting the declaration.)
 */
static inline void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS])
{
	__thread_rpc(rv);
}
235 #endif
236 #endif
237 #ifdef ARM32
238 void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]);
239 #endif
240
241 /*
242 * Called from assembly only, vector_fast_smc_entry(). Handles a fast SMC
243 * by dispatching it to the registered fast SMC handler.
244 */
245 void thread_handle_fast_smc(struct thread_smc_args *args);
246
247 /*
248 * Called from assembly only, vector_std_smc_entry(). Handles a std SMC by
249 * dispatching it to the registered std SMC handler.
250 */
251 uint32_t thread_handle_std_smc(uint32_t a0, uint32_t a1, uint32_t a2,
252 uint32_t a3, uint32_t a4, uint32_t a5,
253 uint32_t a6, uint32_t a7);
254
255 /* Called from assembly only. Handles a SVC from user mode. */
256 void thread_scall_handler(struct thread_scall_regs *regs);
257
258 void thread_spmc_register_secondary_ep(vaddr_t ep);
259 #endif /*__ASSEMBLER__*/
260
261 /*
262 * Used in entry_a64.S entry_a32.S to allocate a temporary
263 * thread_core_local[0] for the boot CPU and the associated abort and
264 * temporary stacks.
265 */
266 #define THREAD_BOOT_INIT_TMP_ALLOC (SMALL_PAGE_SIZE * 6)
267
268 #endif /*__KERNEL_THREAD_PRIVATE_ARCH_H*/
269