xref: /rk3399_ARM-atf/include/lib/el3_runtime/aarch64/context.h (revision 59b7c0a03fa8adfc9272f959bd8b4228ddd2607a)
1 /*
2  * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #ifndef CONTEXT_H
8 #define CONTEXT_H
9 
10 #include <lib/el3_runtime/context_el2.h>
11 #include <lib/el3_runtime/cpu_data.h>
12 #include <lib/utils_def.h>
13 
14 /*******************************************************************************
15  * Constants that allow assembler code to access members of the 'gp_regs'
16  * structure at their correct offsets.
17  ******************************************************************************/
18 #define CTX_GPREGS_OFFSET	U(0x0)
/*
 * Each general purpose register below occupies one 8-byte slot in the
 * 'gp_regs' structure; these byte offsets are consumed both by assembler
 * save/restore code and by read_ctx_reg()/write_ctx_reg().
 */
19 #define CTX_GPREG_X0		U(0x0)
20 #define CTX_GPREG_X1		U(0x8)
21 #define CTX_GPREG_X2		U(0x10)
22 #define CTX_GPREG_X3		U(0x18)
23 #define CTX_GPREG_X4		U(0x20)
24 #define CTX_GPREG_X5		U(0x28)
25 #define CTX_GPREG_X6		U(0x30)
26 #define CTX_GPREG_X7		U(0x38)
27 #define CTX_GPREG_X8		U(0x40)
28 #define CTX_GPREG_X9		U(0x48)
29 #define CTX_GPREG_X10		U(0x50)
30 #define CTX_GPREG_X11		U(0x58)
31 #define CTX_GPREG_X12		U(0x60)
32 #define CTX_GPREG_X13		U(0x68)
33 #define CTX_GPREG_X14		U(0x70)
34 #define CTX_GPREG_X15		U(0x78)
35 #define CTX_GPREG_X16		U(0x80)
36 #define CTX_GPREG_X17		U(0x88)
37 #define CTX_GPREG_X18		U(0x90)
38 #define CTX_GPREG_X19		U(0x98)
39 #define CTX_GPREG_X20		U(0xa0)
40 #define CTX_GPREG_X21		U(0xa8)
41 #define CTX_GPREG_X22		U(0xb0)
42 #define CTX_GPREG_X23		U(0xb8)
43 #define CTX_GPREG_X24		U(0xc0)
44 #define CTX_GPREG_X25		U(0xc8)
45 #define CTX_GPREG_X26		U(0xd0)
46 #define CTX_GPREG_X27		U(0xd8)
47 #define CTX_GPREG_X28		U(0xe0)
48 #define CTX_GPREG_X29		U(0xe8)
49 #define CTX_GPREG_LR		U(0xf0)
50 #define CTX_GPREG_SP_EL0	U(0xf8)
/* Total size of 'gp_regs'; a multiple of 16 to match its 16-byte alignment. */
51 #define CTX_GPREGS_END		U(0x100)
52 
53 /*******************************************************************************
54  * Constants that allow assembler code to access members of the 'el3_state'
55  * structure at their correct offsets. Note that some of the registers are only
56  * 32-bits wide but are stored as 64-bit values for convenience
57  ******************************************************************************/
58 #define CTX_EL3STATE_OFFSET	(CTX_GPREGS_OFFSET + CTX_GPREGS_END)
59 #define CTX_SCR_EL3		U(0x0)
60 #define CTX_ESR_EL3		U(0x8)
61 #define CTX_RUNTIME_SP		U(0x10)
62 #define CTX_SPSR_EL3		U(0x18)
63 #define CTX_ELR_EL3		U(0x20)
64 #define CTX_PMCR_EL0		U(0x28)
65 #define CTX_IS_IN_EL3		U(0x30)
66 #define CTX_MDCR_EL3		U(0x38)
67 /* Constants required in supporting nested exception in EL3 */
68 #define CTX_SAVED_ELR_EL3	U(0x40)
69 /*
70  * General purpose flag, to save various EL3 states
71  * FFH mode : Used to identify if handling nested exception
72  * KFH mode : Used as counter value
73  */
74 #define CTX_NESTED_EA_FLAG	U(0x48)
75 #if FFH_SUPPORT
/*
 * Extra slots used to preserve the interrupted EL3 state (ESR/SPSR/LR)
 * across a nested exception taken while already in EL3.
 */
76  #define CTX_SAVED_ESR_EL3	U(0x50)
77  #define CTX_SAVED_SPSR_EL3	U(0x58)
78  #define CTX_SAVED_GPREG_LR	U(0x60)
79  #define CTX_EL3STATE_END	U(0x70) /* Align to the next 16 byte boundary */
80 #else
81  #define CTX_EL3STATE_END	U(0x50) /* Align to the next 16 byte boundary */
82 #endif /* FFH_SUPPORT */
83 
84 /*******************************************************************************
85  * Constants that allow assembler code to access members of the
86  * 'el1_sys_regs' structure at their correct offsets. Note that some of the
87  * registers are only 32-bits wide but are stored as 64-bit values for
88  * convenience
89  ******************************************************************************/
90 #define CTX_EL1_SYSREGS_OFFSET	(CTX_EL3STATE_OFFSET + CTX_EL3STATE_END)
/*
 * Base (unconditional) EL1 system register slots.  The feature-dependent
 * blocks that follow (AArch32, timers, MTE2, RAS, ...) chain their offsets
 * from CTX_AARCH64_END so the structure only grows when a feature is built in.
 */
91 #define CTX_SPSR_EL1		U(0x0)
92 #define CTX_ELR_EL1		U(0x8)
93 #define CTX_SCTLR_EL1		U(0x10)
94 #define CTX_TCR_EL1		U(0x18)
95 #define CTX_CPACR_EL1		U(0x20)
96 #define CTX_CSSELR_EL1		U(0x28)
97 #define CTX_SP_EL1		U(0x30)
98 #define CTX_ESR_EL1		U(0x38)
99 #define CTX_TTBR0_EL1		U(0x40)
100 #define CTX_TTBR1_EL1		U(0x48)
101 #define CTX_MAIR_EL1		U(0x50)
102 #define CTX_AMAIR_EL1		U(0x58)
103 #define CTX_ACTLR_EL1		U(0x60)
104 #define CTX_TPIDR_EL1		U(0x68)
105 #define CTX_TPIDR_EL0		U(0x70)
106 #define CTX_TPIDRRO_EL0		U(0x78)
107 #define CTX_PAR_EL1		U(0x80)
108 #define CTX_FAR_EL1		U(0x88)
109 #define CTX_AFSR0_EL1		U(0x90)
110 #define CTX_AFSR1_EL1		U(0x98)
111 #define CTX_CONTEXTIDR_EL1	U(0xa0)
112 #define CTX_VBAR_EL1		U(0xa8)
113 #define CTX_MDCCINT_EL1		U(0xb0)
114 #define CTX_MDSCR_EL1		U(0xb8)
115 
116 #define CTX_AARCH64_END		U(0xc0) /* Align to the next 16 byte boundary */
117 
118 /*
119  * If the platform is AArch64-only, there is no need to save and restore these
120  * AArch32 registers.
121  */
122 #if CTX_INCLUDE_AARCH32_REGS
123 #define CTX_SPSR_ABT		(CTX_AARCH64_END + U(0x0))
124 #define CTX_SPSR_UND		(CTX_AARCH64_END + U(0x8))
125 #define CTX_SPSR_IRQ		(CTX_AARCH64_END + U(0x10))
126 #define CTX_SPSR_FIQ		(CTX_AARCH64_END + U(0x18))
127 #define CTX_DACR32_EL2		(CTX_AARCH64_END + U(0x20))
128 #define CTX_IFSR32_EL2		(CTX_AARCH64_END + U(0x28))
129 #define CTX_AARCH32_END		(CTX_AARCH64_END + U(0x30)) /* Align to the next 16 byte boundary */
130 #else
131 #define CTX_AARCH32_END		CTX_AARCH64_END
132 #endif /* CTX_INCLUDE_AARCH32_REGS */
133 
134 /*
135  * If the timer registers aren't saved and restored, we don't have to reserve
136  * space for them in the context
137  */
138 #if NS_TIMER_SWITCH
139 #define CTX_CNTP_CTL_EL0	(CTX_AARCH32_END + U(0x0))
140 #define CTX_CNTP_CVAL_EL0	(CTX_AARCH32_END + U(0x8))
141 #define CTX_CNTV_CTL_EL0	(CTX_AARCH32_END + U(0x10))
142 #define CTX_CNTV_CVAL_EL0	(CTX_AARCH32_END + U(0x18))
143 #define CTX_CNTKCTL_EL1		(CTX_AARCH32_END + U(0x20))
144 #define CTX_TIMER_SYSREGS_END	(CTX_AARCH32_END + U(0x30)) /* Align to the next 16 byte boundary */
145 #else
146 #define CTX_TIMER_SYSREGS_END	CTX_AARCH32_END
147 #endif /* NS_TIMER_SWITCH */
148 
149 #if ENABLE_FEAT_MTE2
150 #define CTX_TFSRE0_EL1		(CTX_TIMER_SYSREGS_END + U(0x0))
151 #define CTX_TFSR_EL1		(CTX_TIMER_SYSREGS_END + U(0x8))
152 #define CTX_RGSR_EL1		(CTX_TIMER_SYSREGS_END + U(0x10))
153 #define CTX_GCR_EL1		(CTX_TIMER_SYSREGS_END + U(0x18))
154 #define CTX_MTE_REGS_END	(CTX_TIMER_SYSREGS_END + U(0x20)) /* Align to the next 16 byte boundary */
155 #else
156 #define CTX_MTE_REGS_END	CTX_TIMER_SYSREGS_END
157 #endif /* ENABLE_FEAT_MTE2 */
158 
159 #if ENABLE_FEAT_RAS
160 #define CTX_DISR_EL1		(CTX_MTE_REGS_END + U(0x0))
161 #define CTX_RAS_REGS_END	(CTX_MTE_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
162 #else
163 #define CTX_RAS_REGS_END        CTX_MTE_REGS_END
164 #endif /* ENABLE_FEAT_RAS */
165 
166 #if ENABLE_FEAT_S1PIE
167 #define CTX_PIRE0_EL1		(CTX_RAS_REGS_END + U(0x0))
168 #define CTX_PIR_EL1		(CTX_RAS_REGS_END + U(0x8))
169 #define CTX_S1PIE_REGS_END	(CTX_RAS_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
170 #else
171 #define CTX_S1PIE_REGS_END	CTX_RAS_REGS_END
172 #endif /* ENABLE_FEAT_S1PIE */
173 
174 #if ENABLE_FEAT_S1POE
175 #define CTX_POR_EL1		(CTX_S1PIE_REGS_END + U(0x0))
176 #define CTX_S1POE_REGS_END	(CTX_S1PIE_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
177 #else
178 #define CTX_S1POE_REGS_END	CTX_S1PIE_REGS_END
179 #endif /* ENABLE_FEAT_S1POE */
180 
181 #if ENABLE_FEAT_S2POE
182 #define CTX_S2POR_EL1		(CTX_S1POE_REGS_END + U(0x0))
183 #define CTX_S2POE_REGS_END	(CTX_S1POE_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
184 #else
185 #define CTX_S2POE_REGS_END	CTX_S1POE_REGS_END
186 #endif /* ENABLE_FEAT_S2POE */
187 
188 #if ENABLE_FEAT_TCR2
189 #define CTX_TCR2_EL1		(CTX_S2POE_REGS_END + U(0x0))
190 #define CTX_TCR2_REGS_END	(CTX_S2POE_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
191 #else
192 #define CTX_TCR2_REGS_END       CTX_S2POE_REGS_END
193 #endif /* ENABLE_FEAT_TCR2 */
194 
195 #if ENABLE_TRF_FOR_NS
196 #define CTX_TRFCR_EL1		(CTX_TCR2_REGS_END + U(0x0))
197 #define CTX_TRF_REGS_END	(CTX_TCR2_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
198 #else
199 #define CTX_TRF_REGS_END	CTX_TCR2_REGS_END
200 #endif /* ENABLE_TRF_FOR_NS */
201 
202 #if ENABLE_FEAT_CSV2_2
203 #define CTX_SCXTNUM_EL0		(CTX_TRF_REGS_END + U(0x0))
204 #define CTX_SCXTNUM_EL1		(CTX_TRF_REGS_END + U(0x8))
205 #define CTX_CSV2_2_REGS_END	(CTX_TRF_REGS_END + U(0x10)) /* Align to the next 16 byte boundary */
206 #else
207 #define CTX_CSV2_2_REGS_END	CTX_TRF_REGS_END
208 #endif /* ENABLE_FEAT_CSV2_2 */
209 
210 #if ENABLE_FEAT_GCS
211 #define CTX_GCSCR_EL1		(CTX_CSV2_2_REGS_END + U(0x0))
212 #define CTX_GCSCRE0_EL1		(CTX_CSV2_2_REGS_END + U(0x8))
213 #define CTX_GCSPR_EL1		(CTX_CSV2_2_REGS_END + U(0x10))
214 #define CTX_GCSPR_EL0		(CTX_CSV2_2_REGS_END + U(0x18))
215 #define CTX_GCS_REGS_END	(CTX_CSV2_2_REGS_END + U(0x20)) /* Align to the next 16 byte boundary */
216 #else
217 #define CTX_GCS_REGS_END	CTX_CSV2_2_REGS_END
218 #endif /* ENABLE_FEAT_GCS */
219 
220 /*
221  * End of EL1 system registers.
222  */
223 #define CTX_EL1_SYSREGS_END	CTX_GCS_REGS_END
224 
225 /*******************************************************************************
226  * Constants that allow assembler code to access members of the 'fp_regs'
227  * structure at their correct offsets.
228  ******************************************************************************/
229 # define CTX_FPREGS_OFFSET	(CTX_EL1_SYSREGS_OFFSET + CTX_EL1_SYSREGS_END)
230 #if CTX_INCLUDE_FPREGS
231 #define CTX_FP_Q0		U(0x0)
232 #define CTX_FP_Q1		U(0x10)
233 #define CTX_FP_Q2		U(0x20)
234 #define CTX_FP_Q3		U(0x30)
235 #define CTX_FP_Q4		U(0x40)
236 #define CTX_FP_Q5		U(0x50)
237 #define CTX_FP_Q6		U(0x60)
238 #define CTX_FP_Q7		U(0x70)
239 #define CTX_FP_Q8		U(0x80)
240 #define CTX_FP_Q9		U(0x90)
241 #define CTX_FP_Q10		U(0xa0)
242 #define CTX_FP_Q11		U(0xb0)
243 #define CTX_FP_Q12		U(0xc0)
244 #define CTX_FP_Q13		U(0xd0)
245 #define CTX_FP_Q14		U(0xe0)
246 #define CTX_FP_Q15		U(0xf0)
247 #define CTX_FP_Q16		U(0x100)
248 #define CTX_FP_Q17		U(0x110)
249 #define CTX_FP_Q18		U(0x120)
250 #define CTX_FP_Q19		U(0x130)
251 #define CTX_FP_Q20		U(0x140)
252 #define CTX_FP_Q21		U(0x150)
253 #define CTX_FP_Q22		U(0x160)
254 #define CTX_FP_Q23		U(0x170)
255 #define CTX_FP_Q24		U(0x180)
256 #define CTX_FP_Q25		U(0x190)
257 #define CTX_FP_Q26		U(0x1a0)
258 #define CTX_FP_Q27		U(0x1b0)
259 #define CTX_FP_Q28		U(0x1c0)
260 #define CTX_FP_Q29		U(0x1d0)
261 #define CTX_FP_Q30		U(0x1e0)
262 #define CTX_FP_Q31		U(0x1f0)
263 #define CTX_FP_FPSR		U(0x200)
264 #define CTX_FP_FPCR		U(0x208)
265 #if CTX_INCLUDE_AARCH32_REGS
266 #define CTX_FP_FPEXC32_EL2	U(0x210)
267 #define CTX_FPREGS_END		U(0x220) /* Align to the next 16 byte boundary */
268 #else
269 #define CTX_FPREGS_END		U(0x210) /* Align to the next 16 byte boundary */
270 #endif /* CTX_INCLUDE_AARCH32_REGS */
271 #else
272 #define CTX_FPREGS_END		U(0)
273 #endif /* CTX_INCLUDE_FPREGS */
274 
275 /*******************************************************************************
276  * Registers related to CVE-2018-3639
277  ******************************************************************************/
278 #define CTX_CVE_2018_3639_OFFSET	(CTX_FPREGS_OFFSET + CTX_FPREGS_END)
279 #define CTX_CVE_2018_3639_DISABLE	U(0)
280 #define CTX_CVE_2018_3639_END		U(0x10) /* Align to the next 16 byte boundary */
281 
282 /*******************************************************************************
283  * Registers related to ERRATA_SPECULATIVE_AT
284  *
285  * This is essential as with EL1 and EL2 context registers being decoupled,
286  * both will not be present for a given build configuration.
287  * As ERRATA_SPECULATIVE_AT errata requires SCTLR_EL1 and TCR_EL1 registers
288  * independent of the above logic, we need explicit context entries to be
289  * reserved for these registers.
290  *
291  * NOTE: Based on this we end up with following different configurations depending
292  * on the presence of errata and inclusion of EL1 or EL2 context.
293  *
294  * ============================================================================
295  * | ERRATA_SPECULATIVE_AT | EL1 context| Memory allocation(Sctlr_el1,Tcr_el1)|
296  * ============================================================================
297  * |        0              |      0     |            None                     |
298  * |        0              |      1     |    EL1 C-Context structure          |
299  * |        1              |      0     |    Errata Context Offset Entries    |
300  * |        1              |      1     |    Errata Context Offset Entries    |
301  * ============================================================================
302  *
303  * In the above table, when ERRATA_SPECULATIVE_AT=1, EL1_Context=0, it implies
304  * there is only EL2 context and memory for SCTLR_EL1 and TCR_EL1 registers is
305  * reserved explicitly under ERRATA_SPECULATIVE_AT build flag here.
306  *
307  * In situations when EL1_Context=1 and  ERRATA_SPECULATIVE_AT=1, since SCTLR_EL1
308  * and TCR_EL1 registers will be modified under errata and it happens at the
309  * early in the codeflow prior to el1 context (save and restore operations),
310  * context memory still will be reserved under the errata logic here explicitly.
311  * These registers will not be part of EL1 context save & restore routines.
312  *
313  * Only when ERRATA_SPECULATIVE_AT=0, EL1_Context=1, for this combination,
314  * SCTLR_EL1 and TCR_EL1 will be part of EL1 context structure (context_el1.h)
315  * -----------------------------------------------------------------------------
316  ******************************************************************************/
317 #define CTX_ERRATA_SPEC_AT_OFFSET	(CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_END)
318 #if ERRATA_SPECULATIVE_AT
319 #define CTX_ERRATA_SPEC_AT_SCTLR_EL1	U(0x0)
320 #define CTX_ERRATA_SPEC_AT_TCR_EL1	U(0x8)
321 #define CTX_ERRATA_SPEC_AT_END		U(0x10) /* Align to the next 16 byte boundary */
322 #else
323 #define CTX_ERRATA_SPEC_AT_END		U(0x0)
324 #endif /* ERRATA_SPECULATIVE_AT */
325 
326 /*******************************************************************************
327  * Registers related to ARMv8.3-PAuth.
328  ******************************************************************************/
329 #define CTX_PAUTH_REGS_OFFSET	(CTX_ERRATA_SPEC_AT_OFFSET + CTX_ERRATA_SPEC_AT_END)
330 #if CTX_INCLUDE_PAUTH_REGS
331 #define CTX_PACIAKEY_LO		U(0x0)
332 #define CTX_PACIAKEY_HI		U(0x8)
333 #define CTX_PACIBKEY_LO		U(0x10)
334 #define CTX_PACIBKEY_HI		U(0x18)
335 #define CTX_PACDAKEY_LO		U(0x20)
336 #define CTX_PACDAKEY_HI		U(0x28)
337 #define CTX_PACDBKEY_LO		U(0x30)
338 #define CTX_PACDBKEY_HI		U(0x38)
339 #define CTX_PACGAKEY_LO		U(0x40)
340 #define CTX_PACGAKEY_HI		U(0x48)
341 #define CTX_PAUTH_REGS_END	U(0x50) /* Align to the next 16 byte boundary */
342 #else
343 #define CTX_PAUTH_REGS_END	U(0)
344 #endif /* CTX_INCLUDE_PAUTH_REGS */
345 
346 /*******************************************************************************
347  * Registers initialised in a per-world context.
348  ******************************************************************************/
349 #define CTX_CPTR_EL3			U(0x0)
350 #define CTX_ZCR_EL3			U(0x8)
351 #define CTX_MPAM3_EL3			U(0x10)
352 #define CTX_PERWORLD_EL3STATE_END	U(0x18)
353 
354 #ifndef __ASSEMBLER__
355 
356 #include <stdint.h>
357 
358 #include <lib/cassert.h>
359 
360 /*
361  * Common constants to help define the 'cpu_context' structure and its
362  * members below.
363  */
/* Shift converting a byte offset into a uint64_t array index (8-byte regs). */
364 #define DWORD_SHIFT		U(3)
/*
 * Declares 'struct name' holding 'num_regs' 64-bit register slots, with a
 * 'name##_t' typedef.  The 16-byte alignment matches the "Align to the next
 * 16 byte boundary" convention used by the CTX_*_END constants above.
 */
365 #define DEFINE_REG_STRUCT(name, num_regs)	\
366 	typedef struct name {			\
367 		uint64_t ctx_regs[num_regs];	\
368 	}  __aligned(16) name##_t
369 
370 /* Constants to determine the size of individual context structures */
371 #define CTX_GPREG_ALL		(CTX_GPREGS_END >> DWORD_SHIFT)
372 #define CTX_EL1_SYSREGS_ALL	(CTX_EL1_SYSREGS_END >> DWORD_SHIFT)
373 
374 #if CTX_INCLUDE_FPREGS
375 # define CTX_FPREG_ALL		(CTX_FPREGS_END >> DWORD_SHIFT)
376 #endif
377 #define CTX_EL3STATE_ALL	(CTX_EL3STATE_END >> DWORD_SHIFT)
378 #define CTX_CVE_2018_3639_ALL	(CTX_CVE_2018_3639_END >> DWORD_SHIFT)
379 
380 #if ERRATA_SPECULATIVE_AT
381 #define CTX_ERRATA_SPEC_AT_ALL	(CTX_ERRATA_SPEC_AT_END >> DWORD_SHIFT)
382 #endif
383 #if CTX_INCLUDE_PAUTH_REGS
384 # define CTX_PAUTH_REGS_ALL	(CTX_PAUTH_REGS_END >> DWORD_SHIFT)
385 #endif
386 
387 /*
388  * AArch64 general purpose register context structure. Usually x0-x18,
389  * lr are saved as the compiler is expected to preserve the remaining
390  * callee saved registers if used by the C runtime and the assembler
391  * does not touch the remaining. But in case of world switch during
392  * exception handling, we need to save the callee registers too.
393  */
394 DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL);
395 
396 /*
397  * AArch64 EL1 system register context structure for preserving the
398  * architectural state during world switches.
399  */
400 DEFINE_REG_STRUCT(el1_sysregs, CTX_EL1_SYSREGS_ALL);
401 
402 /*
403  * AArch64 floating point register context structure for preserving
404  * the floating point state during switches from one security state to
405  * another.
406  */
407 #if CTX_INCLUDE_FPREGS
408 DEFINE_REG_STRUCT(fp_regs, CTX_FPREG_ALL);
409 #endif
410 
411 /*
412  * Miscellaneous registers used by EL3 firmware to maintain its state
413  * across exception entries and exits
414  */
415 DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL);
416 
417 /* Function pointer used by CVE-2018-3639 dynamic mitigation */
418 DEFINE_REG_STRUCT(cve_2018_3639, CTX_CVE_2018_3639_ALL);
419 
420 /* Registers associated to Errata_Speculative */
421 #if ERRATA_SPECULATIVE_AT
422 DEFINE_REG_STRUCT(errata_speculative_at, CTX_ERRATA_SPEC_AT_ALL);
423 #endif
424 
425 /* Registers associated to ARMv8.3-PAuth */
426 #if CTX_INCLUDE_PAUTH_REGS
427 DEFINE_REG_STRUCT(pauth, CTX_PAUTH_REGS_ALL);
428 #endif
429 
430 /*
431  * Macros to access members of any of the above structures using their
432  * offsets
433  */
/* 'offset' is a byte offset (one of the CTX_* constants); no bounds check. */
434 #define read_ctx_reg(ctx, offset)	((ctx)->ctx_regs[(offset) >> DWORD_SHIFT])
/* Stores 'val' (cast to uint64_t) at byte offset 'offset' within 'ctx'. */
435 #define write_ctx_reg(ctx, offset, val)	(((ctx)->ctx_regs[(offset) >> DWORD_SHIFT]) \
436 					 = (uint64_t) (val))
437 
438 /*
439  * Top-level context structure which is used by EL3 firmware to preserve
440  * the state of a core at the next lower EL in a given security state and
441  * save enough EL3 meta data to be able to return to that EL and security
442  * state. The context management library will be used to ensure that
443  * SP_EL3 always points to an instance of this structure at exception
444  * entry and exit.
445  */
446 typedef struct cpu_context {
	/*
	 * Member order must match the CTX_*_OFFSET constants above; the
	 * CASSERTs at the bottom of this file enforce that mapping.
	 */
447 	gp_regs_t gpregs_ctx;
448 	el3_state_t el3state_ctx;
449 	el1_sysregs_t el1_sysregs_ctx;
450 
451 #if CTX_INCLUDE_FPREGS
452 	fp_regs_t fpregs_ctx;
453 #endif
454 	cve_2018_3639_t cve_2018_3639_ctx;
455 
456 #if ERRATA_SPECULATIVE_AT
457 	errata_speculative_at_t errata_speculative_at_ctx;
458 #endif
459 
460 #if CTX_INCLUDE_PAUTH_REGS
461 	pauth_t pauth_ctx;
462 #endif
463 
464 #if CTX_INCLUDE_EL2_REGS
	/*
	 * EL2 registers (see context_el2.h).  Kept last; note no CASSERT
	 * below checks this member's offset.
	 */
465 	el2_sysregs_t el2_sysregs_ctx;
466 #endif
467 
468 } cpu_context_t;
469 
470 /*
471  * Per-World Context.
472  * It stores registers whose values can be shared across CPUs.
473  */
474 typedef struct per_world_context {
	/*
	 * Layout must match CTX_CPTR_EL3/CTX_ZCR_EL3/CTX_MPAM3_EL3 above;
	 * note there is no CASSERT enforcing this — keep in sync by hand.
	 */
475 	uint64_t ctx_cptr_el3;
476 	uint64_t ctx_zcr_el3;
477 	uint64_t ctx_mpam3_el3;
478 } per_world_context_t;
479 
480 extern per_world_context_t per_world_context[CPU_DATA_CONTEXT_NUM];
481 
482 /* Macros to access members of the 'cpu_context_t' structure */
483 #define get_el3state_ctx(h)	(&((cpu_context_t *) h)->el3state_ctx)
484 #if CTX_INCLUDE_FPREGS
485 # define get_fpregs_ctx(h)	(&((cpu_context_t *) h)->fpregs_ctx)
486 #endif
487 #define get_el1_sysregs_ctx(h)	(&((cpu_context_t *) h)->el1_sysregs_ctx)
488 #if CTX_INCLUDE_EL2_REGS
489 # define get_el2_sysregs_ctx(h)	(&((cpu_context_t *) h)->el2_sysregs_ctx)
490 #endif
491 #define get_gpregs_ctx(h)	(&((cpu_context_t *) h)->gpregs_ctx)
492 #define get_cve_2018_3639_ctx(h)	(&((cpu_context_t *) h)->cve_2018_3639_ctx)
493 
494 #if ERRATA_SPECULATIVE_AT
495 #define get_errata_speculative_at_ctx(h)	(&((cpu_context_t *) h)->errata_speculative_at_ctx)
496 #endif
497 
498 #if CTX_INCLUDE_PAUTH_REGS
499 # define get_pauth_ctx(h)	(&((cpu_context_t *) h)->pauth_ctx)
500 #endif
501 
502 /*
503  * Compile time assertions related to the 'cpu_context' structure to
504  * ensure that the assembler and the compiler view of the offsets of
505  * the structure members is the same.
506  */
507 CASSERT(CTX_GPREGS_OFFSET == __builtin_offsetof(cpu_context_t, gpregs_ctx),
508 	assert_core_context_gp_offset_mismatch);
509 
510 CASSERT(CTX_EL3STATE_OFFSET == __builtin_offsetof(cpu_context_t, el3state_ctx),
511 	assert_core_context_el3state_offset_mismatch);
512 
513 CASSERT(CTX_EL1_SYSREGS_OFFSET == __builtin_offsetof(cpu_context_t, el1_sysregs_ctx),
514 	assert_core_context_el1_sys_offset_mismatch);
515 
516 #if CTX_INCLUDE_FPREGS
517 CASSERT(CTX_FPREGS_OFFSET == __builtin_offsetof(cpu_context_t, fpregs_ctx),
518 	assert_core_context_fp_offset_mismatch);
519 #endif /* CTX_INCLUDE_FPREGS */
520 
521 CASSERT(CTX_CVE_2018_3639_OFFSET == __builtin_offsetof(cpu_context_t, cve_2018_3639_ctx),
522 	assert_core_context_cve_2018_3639_offset_mismatch);
523 
524 #if ERRATA_SPECULATIVE_AT
525 CASSERT(CTX_ERRATA_SPEC_AT_OFFSET == __builtin_offsetof(cpu_context_t, errata_speculative_at_ctx),
526 	assert_core_context_errata_speculative_at_offset_mismatch);
527 #endif
528 
529 #if CTX_INCLUDE_PAUTH_REGS
530 CASSERT(CTX_PAUTH_REGS_OFFSET == __builtin_offsetof(cpu_context_t, pauth_ctx),
531 	assert_core_context_pauth_offset_mismatch);
532 #endif /* CTX_INCLUDE_PAUTH_REGS */
533 
534 /*
535  * Helper macro to set the general purpose registers that correspond to
536  * parameters in an aapcs_64 call i.e. x0-x7
537  */
/*
 * Each set_aapcs_argsN() writes xN into the saved GP register file of 'ctx'
 * and then delegates to set_aapcs_args(N-1) for the remaining arguments.
 * The do { } while (0) wrapper makes every macro expand to one statement.
 */
538 #define set_aapcs_args0(ctx, x0)				do {	\
539 		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0, x0);	\
540 	} while (0)
541 #define set_aapcs_args1(ctx, x0, x1)				do {	\
542 		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X1, x1);	\
543 		set_aapcs_args0(ctx, x0);				\
544 	} while (0)
545 #define set_aapcs_args2(ctx, x0, x1, x2)			do {	\
546 		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X2, x2);	\
547 		set_aapcs_args1(ctx, x0, x1);				\
548 	} while (0)
549 #define set_aapcs_args3(ctx, x0, x1, x2, x3)			do {	\
550 		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X3, x3);	\
551 		set_aapcs_args2(ctx, x0, x1, x2);			\
552 	} while (0)
553 #define set_aapcs_args4(ctx, x0, x1, x2, x3, x4)		do {	\
554 		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X4, x4);	\
555 		set_aapcs_args3(ctx, x0, x1, x2, x3);			\
556 	} while (0)
557 #define set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5)		do {	\
558 		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X5, x5);	\
559 		set_aapcs_args4(ctx, x0, x1, x2, x3, x4);		\
560 	} while (0)
561 #define set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6)	do {	\
562 		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X6, x6);	\
563 		set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5);		\
564 	} while (0)
565 #define set_aapcs_args7(ctx, x0, x1, x2, x3, x4, x5, x6, x7)	do {	\
566 		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X7, x7);	\
567 		set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6);	\
568 	} while (0)
569 
570 /*******************************************************************************
571  * Function prototypes
572  ******************************************************************************/
573 #if CTX_INCLUDE_FPREGS
574 void fpregs_context_save(fp_regs_t *regs);
575 void fpregs_context_restore(fp_regs_t *regs);
576 #endif
577 
578 #endif /* __ASSEMBLER__ */
579 
580 #endif /* CONTEXT_H */
581