xref: /rk3399_ARM-atf/include/lib/el3_runtime/cpu_data.h (revision 1c26b186e40bdf6c912ebbfb1bd3ed5b8798207c)
/*
 * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef CPU_DATA_H
#define CPU_DATA_H

#include <platform_def.h>	/* CACHE_WRITEBACK_GRANULE required */

#include <bl31/ehf.h>
#include <context.h>
#include <lib/utils_def.h>
#include <lib/cpus/cpu_ops.h>

/* Need enough space in the crash buffer to save 8 registers */
#define CPU_DATA_CRASH_BUF_BYTES	64
#if ENABLE_RUNTIME_INSTRUMENTATION
/* Temporary space to store PMF timestamps from assembly code */
#define CPU_DATA_PMF_TS_COUNT		1
#define CPU_DATA_PMF_TS0_IDX		0
#endif

#ifdef __aarch64__
#define CPU_DATA_CPU_CONTEXT_SIZE	(CPU_CONTEXT_NUM * CPU_WORD_SIZE)
#else /* __aarch64__ */
#define CPU_DATA_CPU_CONTEXT_SIZE	0
#endif /* __aarch64__ */
#define CPU_DATA_WARMBOOT_EP_INFO_SIZE	CPU_WORD_SIZE
#define CPU_DATA_WARMBOOT_EP_INFO_ALIGN	CPU_WORD_SIZE
#define CPU_DATA_CPU_OPS_PTR_SIZE	CPU_WORD_SIZE
#define CPU_DATA_CPU_OPS_PTR_ALIGN	CPU_WORD_SIZE
#define CPU_DATA_PSCI_SVC_CPU_DATA_SIZE 12
#define CPU_DATA_PSCI_SVC_CPU_DATA_ALIGN CPU_WORD_SIZE
#if ENABLE_PAUTH
/* uint64_t apiakey[2] */
#define CPU_DATA_APIAKEY_SIZE		16
/* uint64_t alignment */
#define CPU_DATA_APIAKEY_ALIGN		8
#else /* ENABLE_PAUTH */
#define CPU_DATA_APIAKEY_SIZE		0
#define CPU_DATA_APIAKEY_ALIGN		1
#endif /* ENABLE_PAUTH */
#if CRASH_REPORTING
#define CPU_DATA_CRASH_BUF_SIZE		((CPU_DATA_CRASH_BUF_BYTES >> 3) * CPU_WORD_SIZE)
#define CPU_DATA_CRASH_BUF_ALIGN	CPU_WORD_SIZE
#else /* CRASH_REPORTING */
#define CPU_DATA_CRASH_BUF_SIZE		0
#define CPU_DATA_CRASH_BUF_ALIGN	1
#endif /* CRASH_REPORTING */
#if ENABLE_RUNTIME_INSTRUMENTATION
#define CPU_DATA_CPU_DATA_PMF_TS_SIZE	(CPU_DATA_PMF_TS_COUNT * 8)
/* uint64_t alignment */
#define CPU_DATA_CPU_DATA_PMF_TS_ALIGN	8
#else /* ENABLE_RUNTIME_INSTRUMENTATION */
#define CPU_DATA_CPU_DATA_PMF_TS_SIZE	0
#define CPU_DATA_CPU_DATA_PMF_TS_ALIGN	1
#endif /* ENABLE_RUNTIME_INSTRUMENTATION */
#ifdef PLAT_PCPU_DATA_SIZE
#define CPU_DATA_PLATFORM_CPU_DATA_SIZE	PLAT_PCPU_DATA_SIZE
#define CPU_DATA_PLATFORM_CPU_DATA_ALIGN 1
#else /* PLAT_PCPU_DATA_SIZE */
#define CPU_DATA_PLATFORM_CPU_DATA_SIZE	0
#define CPU_DATA_PLATFORM_CPU_DATA_ALIGN 1
#endif /* PLAT_PCPU_DATA_SIZE */
#if EL3_EXCEPTION_HANDLING
/* buffer space for EHF data is sizeof(pe_exc_data_t) */
#define CPU_DATA_EHF_DATA_SIZE		8
/* hardcoded to 64-bit alignment */
#define CPU_DATA_EHF_DATA_ALIGN		8
#else /* EL3_EXCEPTION_HANDLING */
#define CPU_DATA_EHF_DATA_SIZE		0
#define CPU_DATA_EHF_DATA_ALIGN		1
#endif /* EL3_EXCEPTION_HANDLING */
#if ENABLE_FEAT_IDTE3 && defined(__aarch64__)
#define CPU_DATA_PCPU_IDREGS_SIZE	(16 * CPU_CONTEXT_NUM)
#define CPU_DATA_PCPU_IDREGS_ALIGN	8
#else /* ENABLE_FEAT_IDTE3 && defined(__aarch64__) */
#define CPU_DATA_PCPU_IDREGS_SIZE	0
#define CPU_DATA_PCPU_IDREGS_ALIGN	1
#endif /* ENABLE_FEAT_IDTE3 && defined(__aarch64__) */
/* cpu_data size is the data size rounded up to the platform cache line size */
#define CPU_DATA_SIZE_ALIGN		CACHE_WRITEBACK_GRANULE

#define CPU_DATA_CPU_CONTEXT		0
#define CPU_DATA_WARMBOOT_EP_INFO	ROUND_UP_2EVAL((CPU_DATA_CPU_CONTEXT + CPU_DATA_CPU_CONTEXT_SIZE), CPU_DATA_WARMBOOT_EP_INFO_ALIGN)
#define CPU_DATA_CPU_OPS_PTR		ROUND_UP_2EVAL((CPU_DATA_WARMBOOT_EP_INFO + CPU_DATA_WARMBOOT_EP_INFO_SIZE), CPU_DATA_CPU_OPS_PTR_ALIGN)
#define CPU_DATA_PSCI_SVC_CPU_DATA	ROUND_UP_2EVAL((CPU_DATA_CPU_OPS_PTR + CPU_DATA_CPU_OPS_PTR_SIZE), CPU_DATA_PSCI_SVC_CPU_DATA_ALIGN)
#define CPU_DATA_APIAKEY		ROUND_UP_2EVAL((CPU_DATA_PSCI_SVC_CPU_DATA + CPU_DATA_PSCI_SVC_CPU_DATA_SIZE), CPU_DATA_APIAKEY_ALIGN)
#define CPU_DATA_CRASH_BUF		ROUND_UP_2EVAL((CPU_DATA_APIAKEY + CPU_DATA_APIAKEY_SIZE), CPU_DATA_CRASH_BUF_ALIGN)
#define CPU_DATA_CPU_DATA_PMF_TS	ROUND_UP_2EVAL((CPU_DATA_CRASH_BUF + CPU_DATA_CRASH_BUF_SIZE), CPU_DATA_CPU_DATA_PMF_TS_ALIGN)
#define CPU_DATA_PLATFORM_CPU_DATA	ROUND_UP_2EVAL((CPU_DATA_CPU_DATA_PMF_TS + CPU_DATA_CPU_DATA_PMF_TS_SIZE), CPU_DATA_PLATFORM_CPU_DATA_ALIGN)
#define CPU_DATA_EHF_DATA		ROUND_UP_2EVAL((CPU_DATA_PLATFORM_CPU_DATA + CPU_DATA_PLATFORM_CPU_DATA_SIZE), CPU_DATA_EHF_DATA_ALIGN)
#define CPU_DATA_PCPU_IDREGS		ROUND_UP_2EVAL((CPU_DATA_EHF_DATA + CPU_DATA_EHF_DATA_SIZE), CPU_DATA_PCPU_IDREGS_ALIGN)
#define CPU_DATA_SIZE			ROUND_UP_2EVAL((CPU_DATA_PCPU_IDREGS + CPU_DATA_PCPU_IDREGS_SIZE), CPU_DATA_SIZE_ALIGN)
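
/*
 * Illustrative derivation only (not used by the build): as a rough sketch,
 * assume an AArch64 build with CPU_WORD_SIZE == 8, CPU_CONTEXT_NUM == 2,
 * CRASH_REPORTING == 1, CACHE_WRITEBACK_GRANULE == 64 and every other
 * optional feature disabled. These values are assumptions for the example;
 * real builds may differ. The offsets above then work out as:
 *
 *   CPU_DATA_CPU_CONTEXT        =  0	(cpu_context[2] is 16 bytes)
 *   CPU_DATA_WARMBOOT_EP_INFO   = 16	(ROUND_UP_2EVAL(0 + 16, 8))
 *   CPU_DATA_CPU_OPS_PTR        = 24	(ROUND_UP_2EVAL(16 + 8, 8))
 *   CPU_DATA_PSCI_SVC_CPU_DATA  = 32	(ROUND_UP_2EVAL(24 + 8, 8))
 *   CPU_DATA_APIAKEY            = 44	(zero-sized since ENABLE_PAUTH == 0)
 *   CPU_DATA_CRASH_BUF          = 48	(ROUND_UP_2EVAL(44 + 0, 8), 64 bytes)
 *   CPU_DATA_SIZE               = 128	(48 + 64 = 112; the remaining disabled
 *					 slots are zero-sized with alignment 1
 *					 and stay at 112, which is then rounded
 *					 up to the 64-byte cache line)
 *
 * The CASSERTs further down verify that these assembler-visible offsets match
 * the C layout of cpu_data_t.
 */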

#ifndef __ASSEMBLER__

#include <assert.h>
#include <stdint.h>

#include <arch_helpers.h>
#include <lib/cassert.h>
#include <lib/per_cpu/per_cpu.h>
#include <lib/psci/psci.h>

#include <platform_def.h>

/*******************************************************************************
 * Function & variable prototypes
 ******************************************************************************/
#if ENABLE_FEAT_IDTE3 && defined(__aarch64__)
typedef struct percpu_idreg {
	u_register_t id_aa64dfr0_el1;
	u_register_t id_aa64dfr1_el1;
} percpu_idregs_t;
#endif /* ENABLE_FEAT_IDTE3 && defined(__aarch64__) */

/*******************************************************************************
 * Cache of frequently used per-cpu data:
 *   Pointers to non-secure, realm, and secure security state contexts
 *   Address of the crash stack
 * It is aligned to the cache line boundary to allow efficient concurrent
 * manipulation of these pointers on different CPUs.
 *
 * The data structure and the _cpu_data accessors should not be used directly
 * by components that have per-cpu members. The member access macros should be
 * used for this.
 ******************************************************************************/
typedef struct cpu_data {
#ifdef __aarch64__
	void *cpu_context[CPU_CONTEXT_NUM];
#endif /* __aarch64__ */
	entry_point_info_t *warmboot_ep_info;
	struct cpu_ops *cpu_ops_ptr;
	struct psci_cpu_data psci_svc_cpu_data;
#if ENABLE_PAUTH
	uint64_t apiakey[2];
#endif
#if CRASH_REPORTING
	u_register_t crash_buf[CPU_DATA_CRASH_BUF_BYTES >> 3];
#endif
#if ENABLE_RUNTIME_INSTRUMENTATION
	uint64_t cpu_data_pmf_ts[CPU_DATA_PMF_TS_COUNT];
#endif
#if PLAT_PCPU_DATA_SIZE
	uint8_t platform_cpu_data[PLAT_PCPU_DATA_SIZE];
#endif
#if EL3_EXCEPTION_HANDLING
	pe_exc_data_t ehf_data;
#endif
#if (ENABLE_FEAT_IDTE3 && defined(__aarch64__))
	percpu_idregs_t idregs[CPU_CONTEXT_NUM];
#endif
} __aligned(CACHE_WRITEBACK_GRANULE) cpu_data_t;

PER_CPU_DECLARE(cpu_data_t, percpu_data);

#define CPU_DATA_ASSERT_OFFSET(left, right) \
	CASSERT(CPU_DATA_ ## left == __builtin_offsetof \
		(cpu_data_t, right), \
		assert_cpu_data_ ## right ## _mismatch)
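
/*
 * For illustration, an invocation such as
 *	CPU_DATA_ASSERT_OFFSET(CPU_OPS_PTR, cpu_ops_ptr);
 * expands to
 *	CASSERT(CPU_DATA_CPU_OPS_PTR == __builtin_offsetof(cpu_data_t, cpu_ops_ptr),
 *		assert_cpu_data_cpu_ops_ptr_mismatch);
 * so any drift between the hand-maintained assembler offsets and the real
 * structure layout fails at compile time.
 */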

/* verify assembler offsets match data structures */
CPU_DATA_ASSERT_OFFSET(WARMBOOT_EP_INFO, warmboot_ep_info);
CPU_DATA_ASSERT_OFFSET(CPU_OPS_PTR, cpu_ops_ptr);
CPU_DATA_ASSERT_OFFSET(PSCI_SVC_CPU_DATA, psci_svc_cpu_data);
#if ENABLE_PAUTH
CPU_DATA_ASSERT_OFFSET(APIAKEY, apiakey);
#endif
#if CRASH_REPORTING
CPU_DATA_ASSERT_OFFSET(CRASH_BUF, crash_buf);
#endif
#if ENABLE_RUNTIME_INSTRUMENTATION
CPU_DATA_ASSERT_OFFSET(CPU_DATA_PMF_TS, cpu_data_pmf_ts);
#endif
#if PLAT_PCPU_DATA_SIZE
CPU_DATA_ASSERT_OFFSET(PLATFORM_CPU_DATA, platform_cpu_data);
#endif
#if EL3_EXCEPTION_HANDLING
CPU_DATA_ASSERT_OFFSET(EHF_DATA, ehf_data);
#endif
#if (ENABLE_FEAT_IDTE3 && defined(__aarch64__))
CPU_DATA_ASSERT_OFFSET(PCPU_IDREGS, idregs);
#endif

CASSERT(CPU_DATA_SIZE == sizeof(cpu_data_t),
		assert_cpu_data_size_mismatch);

#ifndef __aarch64__
cpu_data_t *_cpu_data(void);
#endif

/**************************************************************************
 * APIs for initialising and accessing per-cpu data
 *************************************************************************/

void cpu_data_init_cpu_ops(void);

#define get_cpu_data(_m)			PER_CPU_CUR(percpu_data)->_m
#define set_cpu_data(_m, _v)			PER_CPU_CUR(percpu_data)->_m = (_v)
#define get_cpu_data_by_index(_ix, _m)		PER_CPU_BY_INDEX(percpu_data, _ix)->_m
#define set_cpu_data_by_index(_ix, _m, _v)	PER_CPU_BY_INDEX(percpu_data, _ix)->_m = (_v)
/* ((cpu_data_t *)0)->_m is a dummy used only to obtain the size of struct member _m */
#define flush_cpu_data(_m)	   flush_dcache_range((uintptr_t)	  \
						&(PER_CPU_CUR(percpu_data)->_m), \
						sizeof(((cpu_data_t *)0)->_m))
#define inv_cpu_data(_m)	   inv_dcache_range((uintptr_t)		  \
						&(PER_CPU_CUR(percpu_data)->_m), \
						sizeof(((cpu_data_t *)0)->_m))
#define flush_cpu_data_by_index(_ix, _m)	\
				   flush_dcache_range((uintptr_t)	  \
					 &(PER_CPU_BY_INDEX(percpu_data, _ix)->_m),  \
						sizeof(((cpu_data_t *)0)->_m))
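
/*
 * Illustrative usage sketch (not part of this header): a per-cpu member is
 * read and written through the macros above, then flushed when another
 * observer may read it with caches disabled. plat_my_core_pos(),
 * aff_info_state and AFF_STATE_ON are assumed here from the TF-A platform
 * and PSCI headers and are only examples of a member path:
 *
 *	unsigned int pos = plat_my_core_pos();
 *
 *	aff_info_state_t s = get_cpu_data(psci_svc_cpu_data.aff_info_state);
 *	set_cpu_data_by_index(pos, psci_svc_cpu_data.aff_info_state, AFF_STATE_ON);
 *	flush_cpu_data_by_index(pos, psci_svc_cpu_data.aff_info_state);
 */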

#endif /* __ASSEMBLER__ */
#endif /* CPU_DATA_H */