// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2022, Linaro Limited
 */

#include <gen-asm-defines.h>
#include <kernel/boot.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/core_mmu_arch.h>
#include <sm/pm.h>
#include <sm/sm.h>
#include <types_ext.h>

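/*
 * Each DEFINE() below exports the offset of a struct member, or the size
 * of a struct, as a symbolic constant that the build turns into an
 * assembler-visible define (via the DEFINE() machinery in
 * gen-asm-defines.h). Hand-written assembly such as the thread/exception
 * entry code and the secure monitor uses these constants instead of
 * hard-coding structure layouts.
 *
 * Illustrative AArch64 consumer (register choice is hypothetical):
 *	ldr	x1, [x0, #THREAD_CTX_KERN_SP]
 * where x0 holds a pointer to a struct thread_ctx.
 */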
DEFINES
{
#ifdef ARM32
	DEFINE(SM_NSEC_CTX_R0, offsetof(struct sm_nsec_ctx, r0));
	DEFINE(SM_NSEC_CTX_R8, offsetof(struct sm_nsec_ctx, r8));
	DEFINE(SM_SEC_CTX_R0, offsetof(struct sm_sec_ctx, r0));
	DEFINE(SM_SEC_CTX_MON_LR, offsetof(struct sm_sec_ctx, mon_lr));
	DEFINE(SM_CTX_SEC_SIZE, sizeof(struct sm_sec_ctx));
	DEFINE(SM_CTX_SIZE, sizeof(struct sm_ctx));
	DEFINE(SM_CTX_NSEC, offsetof(struct sm_ctx, nsec));
	DEFINE(SM_CTX_SEC, offsetof(struct sm_ctx, sec));

	DEFINE(THREAD_SCALL_REG_R0, offsetof(struct thread_scall_regs, r0));
	DEFINE(THREAD_SCALL_REG_R5, offsetof(struct thread_scall_regs, r5));
	DEFINE(THREAD_SCALL_REG_R6, offsetof(struct thread_scall_regs, r6));

	/* struct thread_ctx */
	DEFINE(THREAD_CTX_STACK_VA_END, offsetof(struct thread_ctx,
						 stack_va_end));

	/* struct thread_ctx_regs */
	DEFINE(THREAD_CTX_REGS_USR_SP,
	       offsetof(struct thread_ctx_regs, usr_sp));
	DEFINE(THREAD_CTX_REGS_PC, offsetof(struct thread_ctx_regs, pc));
	DEFINE(THREAD_CTX_REGS_CPSR, offsetof(struct thread_ctx_regs, cpsr));

	/* struct thread_core_local */
	DEFINE(THREAD_CORE_LOCAL_R0, offsetof(struct thread_core_local, r[0]));
	DEFINE(THREAD_CORE_LOCAL_SM_PM_CTX_PHYS,
	       offsetof(struct thread_core_local, sm_pm_ctx_phys));
	DEFINE(THREAD_CORE_LOCAL_SIZE, sizeof(struct thread_core_local));

	DEFINE(SM_PM_CTX_SIZE, sizeof(struct sm_pm_ctx));
#endif /*ARM32*/

#ifdef ARM64
	DEFINE(THREAD_SMC_ARGS_X0, offsetof(struct thread_smc_args, a0));
	DEFINE(THREAD_SMC_ARGS_SIZE, sizeof(struct thread_smc_args));

	DEFINE(THREAD_SCALL_REG_X0, offsetof(struct thread_scall_regs, x0));
	DEFINE(THREAD_SCALL_REG_X2, offsetof(struct thread_scall_regs, x2));
	DEFINE(THREAD_SCALL_REG_X5, offsetof(struct thread_scall_regs, x5));
	DEFINE(THREAD_SCALL_REG_X6, offsetof(struct thread_scall_regs, x6));
	DEFINE(THREAD_SCALL_REG_X30, offsetof(struct thread_scall_regs, x30));
	DEFINE(THREAD_SCALL_REG_ELR, offsetof(struct thread_scall_regs, elr));
	DEFINE(THREAD_SCALL_REG_SPSR, offsetof(struct thread_scall_regs, spsr));
	DEFINE(THREAD_SCALL_REG_SP_EL0, offsetof(struct thread_scall_regs,
						 sp_el0));
#ifdef CFG_TA_PAUTH
	DEFINE(THREAD_SCALL_REG_APIAKEY_HI, offsetof(struct thread_scall_regs,
						     apiakey_hi));
#endif
	DEFINE(THREAD_SCALL_REG_SIZE, sizeof(struct thread_scall_regs));

	/* struct thread_abort_regs */
	DEFINE(THREAD_ABT_REG_X0, offsetof(struct thread_abort_regs, x0));
	DEFINE(THREAD_ABT_REG_X2, offsetof(struct thread_abort_regs, x2));
	DEFINE(THREAD_ABT_REG_X30, offsetof(struct thread_abort_regs, x30));
	DEFINE(THREAD_ABT_REG_SPSR, offsetof(struct thread_abort_regs, spsr));
	DEFINE(THREAD_ABT_REGS_SIZE, sizeof(struct thread_abort_regs));
#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
	DEFINE(THREAD_ABT_REGS_APIAKEY_HI, offsetof(struct thread_abort_regs,
						    apiakey_hi));
#endif

	/* struct thread_ctx */
	DEFINE(THREAD_CTX_KERN_SP, offsetof(struct thread_ctx, kern_sp));
	DEFINE(THREAD_CTX_STACK_VA_END, offsetof(struct thread_ctx,
						 stack_va_end));
#if defined(CFG_CORE_PAUTH)
	DEFINE(THREAD_CTX_KEYS, offsetof(struct thread_ctx, keys));
#endif

	/* struct thread_ctx_regs */
	DEFINE(THREAD_CTX_REGS_SP, offsetof(struct thread_ctx_regs, sp));
	DEFINE(THREAD_CTX_REGS_X0, offsetof(struct thread_ctx_regs, x[0]));
	DEFINE(THREAD_CTX_REGS_X1, offsetof(struct thread_ctx_regs, x[1]));
	DEFINE(THREAD_CTX_REGS_X2, offsetof(struct thread_ctx_regs, x[2]));
	DEFINE(THREAD_CTX_REGS_X4, offsetof(struct thread_ctx_regs, x[4]));
	DEFINE(THREAD_CTX_REGS_X19, offsetof(struct thread_ctx_regs, x[19]));
	DEFINE(THREAD_CTX_REGS_TPIDR_EL0, offsetof(struct thread_ctx_regs,
						   tpidr_el0));
#if defined(CFG_TA_PAUTH) || defined(CFG_CORE_PAUTH)
	DEFINE(THREAD_CTX_REGS_APIAKEY_HI, offsetof(struct thread_ctx_regs,
						    apiakey_hi));
#endif

	/* struct thread_user_mode_rec */
	DEFINE(THREAD_USER_MODE_REC_CTX_REGS_PTR,
	       offsetof(struct thread_user_mode_rec, ctx_regs_ptr));
	DEFINE(THREAD_USER_MODE_REC_EXIT_STATUS0_PTR,
	       offsetof(struct thread_user_mode_rec, exit_status0_ptr));
	DEFINE(THREAD_USER_MODE_REC_X19,
	       offsetof(struct thread_user_mode_rec, x[0]));
	DEFINE(THREAD_USER_MODE_REC_SIZE, sizeof(struct thread_user_mode_rec));

	/* struct thread_core_local */
	DEFINE(THREAD_CORE_LOCAL_X0, offsetof(struct thread_core_local, x[0]));
	DEFINE(THREAD_CORE_LOCAL_X2, offsetof(struct thread_core_local, x[2]));
	DEFINE(THREAD_CORE_LOCAL_KCODE_OFFSET,
	       offsetof(struct thread_core_local, kcode_offset));
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	DEFINE(THREAD_CORE_LOCAL_BHB_LOOP_COUNT,
	       offsetof(struct thread_core_local, bhb_loop_count));
#endif
#if defined(CFG_CORE_PAUTH)
	DEFINE(THREAD_CORE_LOCAL_KEYS,
	       offsetof(struct thread_core_local, keys));
#endif
#endif /*ARM64*/

	/* struct thread_ctx */
	DEFINE(THREAD_CTX_SIZE, sizeof(struct thread_ctx));
#ifdef CFG_CORE_FFA
	DEFINE(THREAD_CTX_TSD_RPC_TARGET_INFO,
	       offsetof(struct thread_ctx, tsd.rpc_target_info));
	DEFINE(THREAD_CTX_FLAGS,
	       offsetof(struct thread_ctx, flags));
#endif

	/* struct thread_core_local */
	DEFINE(THREAD_CORE_LOCAL_TMP_STACK_VA_END,
	       offsetof(struct thread_core_local, tmp_stack_va_end));
	DEFINE(THREAD_CORE_LOCAL_CURR_THREAD,
	       offsetof(struct thread_core_local, curr_thread));
	DEFINE(THREAD_CORE_LOCAL_FLAGS,
	       offsetof(struct thread_core_local, flags));
	DEFINE(THREAD_CORE_LOCAL_ABT_STACK_VA_END,
	       offsetof(struct thread_core_local, abt_stack_va_end));
#if defined(ARM64) && defined(CFG_CORE_FFA)
	DEFINE(THREAD_CORE_LOCAL_DIRECT_RESP_FID,
	       offsetof(struct thread_core_local, direct_resp_fid));
#endif

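	/*
	 * Distance from the end of a per-core tmp stack allocation to its
	 * initial stack pointer: the end-side half of the stack canary plus
	 * the STACK_TMP_OFFS reservation (descriptive note; the tmp stack
	 * declarations remain the authoritative reference for the layout).
	 */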
	DEFINE(STACK_TMP_GUARD, STACK_CANARY_SIZE / 2 + STACK_TMP_OFFS);

	/* struct core_mmu_config */
	DEFINE(CORE_MMU_CONFIG_SIZE, sizeof(struct core_mmu_config));
	DEFINE(CORE_MMU_CONFIG_MAP_OFFSET,
	       offsetof(struct core_mmu_config, map_offset));

	/* struct boot_embdata */
	DEFINE(BOOT_EMBDATA_HASHES_OFFSET,
	       offsetof(struct boot_embdata, hashes_offset));
	DEFINE(BOOT_EMBDATA_HASHES_LEN,
	       offsetof(struct boot_embdata, hashes_len));
	DEFINE(BOOT_EMBDATA_RELOC_OFFSET,
	       offsetof(struct boot_embdata, reloc_offset));
	DEFINE(BOOT_EMBDATA_RELOC_LEN,
	       offsetof(struct boot_embdata, reloc_len));

#ifdef CORE_MMU_BASE_TABLE_OFFSET
	/*
	 * This define is too complex to be used as an argument to the
	 * add_imm and sub_imm macros, so evaluate it here.
	 */
	DEFINE(__CORE_MMU_BASE_TABLE_OFFSET, CORE_MMU_BASE_TABLE_OFFSET);
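	/*
	 * Illustrative consumer (register choice is hypothetical): once the
	 * value is a plain constant, assembly can pass it to the helper
	 * macros, e.g.
	 *	add_imm	x1, __CORE_MMU_BASE_TABLE_OFFSET
	 */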
#endif

}