xref: /rk3399_ARM-atf/include/lib/el3_runtime/cpu_data.h (revision d57362bd92c2e5c8a1222fd763e24163c1234938)
/*
 * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef CPU_DATA_H
#define CPU_DATA_H

#include <platform_def.h>	/* CACHE_WRITEBACK_GRANULE required */

#include <bl31/ehf.h>

/* Size of psci_cpu_data structure */
#define PSCI_CPU_DATA_SIZE		12

#ifdef __aarch64__

/* 8-bytes aligned size of psci_cpu_data structure */
#define PSCI_CPU_DATA_SIZE_ALIGNED	((PSCI_CPU_DATA_SIZE + 7) & ~7)

#if ENABLE_RME
/* Size of cpu_context array */
#define CPU_DATA_CONTEXT_NUM		3
/* Offset of cpu_ops_ptr, size 8 bytes */
#define CPU_DATA_CPU_OPS_PTR		0x20
#else /* ENABLE_RME */
#define CPU_DATA_CONTEXT_NUM		2
#define CPU_DATA_CPU_OPS_PTR		0x18
#endif /* ENABLE_RME */

#if ENABLE_PAUTH
/* 8-bytes aligned offset of apiakey[2], size 16 bytes */
#define	CPU_DATA_APIAKEY_OFFSET		(0x8 + PSCI_CPU_DATA_SIZE_ALIGNED \
					     + CPU_DATA_CPU_OPS_PTR)
#define CPU_DATA_CRASH_BUF_OFFSET	(0x10 + CPU_DATA_APIAKEY_OFFSET)
#else /* ENABLE_PAUTH */
#define CPU_DATA_CRASH_BUF_OFFSET	(0x8 + PSCI_CPU_DATA_SIZE_ALIGNED \
					     + CPU_DATA_CPU_OPS_PTR)
#endif /* ENABLE_PAUTH */

/* need enough space in crash buffer to save 8 registers */
#define CPU_DATA_CRASH_BUF_SIZE		64
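/*
 * Worked example of the arithmetic above (an editorial note, not part of the
 * original header), for an AArch64 build with ENABLE_RME=1 and ENABLE_PAUTH=1
 * (8-byte pointers):
 *   cpu_context[3]      occupies 0x00-0x17
 *   warmboot_ep_info    at 0x18
 *   cpu_ops_ptr         at 0x20  (= CPU_DATA_CPU_OPS_PTR)
 *   psci_svc_cpu_data   at 0x28, 12 bytes rounded up to 16
 *   apiakey[2]          at 0x38  (= 0x8 + 0x10 + 0x20)
 *   crash_buf           at 0x48  (= 0x10 + 0x38)
 * These are the values the CASSERTs later in this file compare against the
 * real offsets of cpu_data_t.
 */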

#else	/* !__aarch64__ */

#if CRASH_REPORTING
#error "Crash reporting is not supported in AArch32"
#endif
#define WARMBOOT_EP_INFO		0x0
#define CPU_DATA_CPU_OPS_PTR		0x4
#define CPU_DATA_CRASH_BUF_OFFSET	(CPU_DATA_CPU_OPS_PTR + PSCI_CPU_DATA_SIZE)
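/*
 * Editorial note (not part of the original header): on AArch32 pointers are
 * 4 bytes, so warmboot_ep_info sits at offset 0x0 and cpu_ops_ptr at 0x4 of
 * cpu_data_t, which is what the two defines above encode; the cpu_ops_ptr
 * value is checked by the CASSERT further down.
 */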

#endif	/* __aarch64__ */

#if CRASH_REPORTING
#define CPU_DATA_CRASH_BUF_END		(CPU_DATA_CRASH_BUF_OFFSET + \
						CPU_DATA_CRASH_BUF_SIZE)
#else
#define CPU_DATA_CRASH_BUF_END		CPU_DATA_CRASH_BUF_OFFSET
#endif

/* buffer space for EHF data is sizeof(pe_exc_data_t) */
#define CPU_DATA_EHF_DATA_SIZE		8
#define CPU_DATA_EHF_DATA_BUF_OFFSET	CPU_DATA_CRASH_BUF_END

#if defined(IMAGE_BL31) && EL3_EXCEPTION_HANDLING
#define CPU_DATA_EHF_DATA_BUF_END	(CPU_DATA_EHF_DATA_BUF_OFFSET + \
						CPU_DATA_EHF_DATA_SIZE)
#else
#define CPU_DATA_EHF_DATA_BUF_END	CPU_DATA_EHF_DATA_BUF_OFFSET
#endif	/* EL3_EXCEPTION_HANDLING */

/* cpu_data size is the data size rounded up to the platform cache line size */
#define CPU_DATA_SIZE			(((CPU_DATA_EHF_DATA_BUF_END + \
					CACHE_WRITEBACK_GRANULE - 1) / \
						CACHE_WRITEBACK_GRANULE) * \
							CACHE_WRITEBACK_GRANULE)
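/*
 * Worked example of the rounding above (an editorial note, not part of the
 * original header): with the RME+PAUTH layout sketched earlier and EHF
 * enabled, CPU_DATA_EHF_DATA_BUF_END is 0x48 + 0x40 + 0x8 = 0x90 (144).
 * Assuming CACHE_WRITEBACK_GRANULE is 64, CPU_DATA_SIZE becomes
 * ((144 + 63) / 64) * 64 = 192, i.e. three cache lines per core.
 */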

#if ENABLE_RUNTIME_INSTRUMENTATION
/* Temporary space to store PMF timestamps from assembly code */
#define CPU_DATA_PMF_TS_COUNT		1
#if __aarch64__
#define CPU_DATA_PMF_TS0_OFFSET		CPU_DATA_EHF_DATA_BUF_END
#else
/* 8-byte alignment padding for the 64-bit timestamp on AArch32 */
#define CPU_DATA_PMF_TS0_OFFSET		(CPU_DATA_EHF_DATA_BUF_END + 8)
#endif
#define CPU_DATA_PMF_TS0_IDX		0
#endif

#ifndef __ASSEMBLER__

#include <assert.h>
#include <stdint.h>

#include <arch_helpers.h>
#include <lib/cassert.h>
#include <lib/psci/psci.h>

#include <platform_def.h>

/* Offsets for the cpu_data structure */
#define CPU_DATA_PSCI_LOCK_OFFSET	__builtin_offsetof\
		(cpu_data_t, psci_svc_cpu_data.pcpu_bakery_info)

#if PLAT_PCPU_DATA_SIZE
#define CPU_DATA_PLAT_PCPU_OFFSET	__builtin_offsetof\
		(cpu_data_t, platform_cpu_data)
#endif

typedef enum context_pas {
	CPU_CONTEXT_SECURE = 0,
	CPU_CONTEXT_NS,
#if ENABLE_RME
	CPU_CONTEXT_REALM,
#endif
	CPU_CONTEXT_NUM
} context_pas_t;

/*******************************************************************************
 * Function & variable prototypes
 ******************************************************************************/

/*******************************************************************************
 * Cache of frequently used per-cpu data:
 *   Pointers to non-secure, realm, and secure security state contexts
 *   Address of the crash stack
 * It is aligned to the cache line boundary to allow efficient concurrent
 * manipulation of these pointers on different CPUs.
 *
 * The data structure and the _cpu_data accessors should not be used directly
 * by components that have per-cpu members. The member access macros should be
 * used for this.
 ******************************************************************************/
typedef struct cpu_data {
#ifdef __aarch64__
	void *cpu_context[CPU_DATA_CONTEXT_NUM];
#endif /* __aarch64__ */
	entry_point_info_t *warmboot_ep_info;
	struct cpu_ops *cpu_ops_ptr;
	struct psci_cpu_data psci_svc_cpu_data;
#if ENABLE_PAUTH
	uint64_t apiakey[2];
#endif
#if CRASH_REPORTING
	u_register_t crash_buf[CPU_DATA_CRASH_BUF_SIZE >> 3];
#endif
#if ENABLE_RUNTIME_INSTRUMENTATION
	uint64_t cpu_data_pmf_ts[CPU_DATA_PMF_TS_COUNT];
#endif
#if PLAT_PCPU_DATA_SIZE
	uint8_t platform_cpu_data[PLAT_PCPU_DATA_SIZE];
#endif
#if defined(IMAGE_BL31) && EL3_EXCEPTION_HANDLING
	pe_exc_data_t ehf_data;
#endif
} __aligned(CACHE_WRITEBACK_GRANULE) cpu_data_t;

extern cpu_data_t percpu_data[PLATFORM_CORE_COUNT];

#ifdef __aarch64__
CASSERT(CPU_DATA_CONTEXT_NUM == CPU_CONTEXT_NUM,
		assert_cpu_data_context_num_mismatch);
#endif

#if ENABLE_PAUTH
CASSERT(CPU_DATA_APIAKEY_OFFSET == __builtin_offsetof
	(cpu_data_t, apiakey),
	assert_cpu_data_pauth_stack_offset_mismatch);
#endif

#if CRASH_REPORTING
/* verify assembler offsets match data structures */
CASSERT(CPU_DATA_CRASH_BUF_OFFSET == __builtin_offsetof
	(cpu_data_t, crash_buf),
	assert_cpu_data_crash_stack_offset_mismatch);
#endif

#if defined(IMAGE_BL31) && EL3_EXCEPTION_HANDLING
CASSERT(CPU_DATA_EHF_DATA_BUF_OFFSET == __builtin_offsetof
	(cpu_data_t, ehf_data),
	assert_cpu_data_ehf_stack_offset_mismatch);
#endif

CASSERT(CPU_DATA_SIZE == sizeof(cpu_data_t),
		assert_cpu_data_size_mismatch);

CASSERT(CPU_DATA_CPU_OPS_PTR == __builtin_offsetof
		(cpu_data_t, cpu_ops_ptr),
		assert_cpu_data_cpu_ops_ptr_offset_mismatch);

#if ENABLE_RUNTIME_INSTRUMENTATION
CASSERT(CPU_DATA_PMF_TS0_OFFSET == __builtin_offsetof
		(cpu_data_t, cpu_data_pmf_ts[0]),
		assert_cpu_data_pmf_ts0_offset_mismatch);
#endif

static inline cpu_data_t *_cpu_data_by_index(unsigned int cpu_index)
{
	return &percpu_data[cpu_index];
}

#ifdef __aarch64__
/* Return the cpu_data structure for the current CPU. */
static inline cpu_data_t *_cpu_data(void)
{
	return (cpu_data_t *)read_tpidr_el3();
}
#else
cpu_data_t *_cpu_data(void);
#endif
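/*
 * Editorial note (not part of the original header): on AArch64 the per-cpu
 * pointer lives in TPIDR_EL3, so _cpu_data() is just a system register read.
 * The boot path is expected to have programmed TPIDR_EL3 with the address of
 * this core's percpu_data[] entry (e.g. via the cpu_data assembly helper
 * init_cpu_data_ptr in upstream TF-A) before any accessor below is used, so
 * that _cpu_data() behaves like _cpu_data_by_index(plat_my_core_pos()).
 */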

/*
 * Returns the index into the cpu_context array for the given security state.
 * All accesses to cpu_context should go through this helper to make sure
 * an access is not out-of-bounds. The function assumes security_state is
 * valid.
 */
static inline context_pas_t get_cpu_context_index(size_t security_state)
{
	if (security_state == SECURE) {
		return CPU_CONTEXT_SECURE;
	} else {
#if ENABLE_RME
		if (security_state == NON_SECURE) {
			return CPU_CONTEXT_NS;
		} else {
			assert(security_state == REALM);
			return CPU_CONTEXT_REALM;
		}
#else
		assert(security_state == NON_SECURE);
		return CPU_CONTEXT_NS;
#endif
	}
}
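/*
 * Illustrative use (an editorial sketch, not part of the original header):
 * combining the helper above with the accessor macros defined below to fetch
 * the current core's saved context pointer for a security state, e.g.
 *
 *   void *ns_ctx = get_cpu_data(cpu_context[get_cpu_context_index(NON_SECURE)]);
 *
 * The variable name is hypothetical; cpu_context[] only exists on AArch64.
 */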

/**************************************************************************
 * APIs for initialising and accessing per-cpu data
 *************************************************************************/

void init_cpu_ops(void);

#define get_cpu_data(_m)		   _cpu_data()->_m
#define set_cpu_data(_m, _v)		   _cpu_data()->_m = (_v)
#define get_cpu_data_by_index(_ix, _m)	   _cpu_data_by_index(_ix)->_m
#define set_cpu_data_by_index(_ix, _m, _v) _cpu_data_by_index(_ix)->_m = (_v)
/* ((cpu_data_t *)0)->_m is a dummy used to get the size of struct member _m */
#define flush_cpu_data(_m)	   flush_dcache_range((uintptr_t)	  \
						&(_cpu_data()->_m), \
						sizeof(((cpu_data_t *)0)->_m))
#define inv_cpu_data(_m)	   inv_dcache_range((uintptr_t)	  	  \
						&(_cpu_data()->_m), \
						sizeof(((cpu_data_t *)0)->_m))
#define flush_cpu_data_by_index(_ix, _m)	\
				   flush_dcache_range((uintptr_t)	  \
					 &(_cpu_data_by_index(_ix)->_m),  \
						sizeof(((cpu_data_t *)0)->_m))

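/*
 * A minimal usage sketch (editorial addition, not part of the original
 * header). The helper below is hypothetical and only demonstrates the
 * accessor and cache-maintenance macros above: it records a warm-boot entry
 * point for the current core and flushes it so that code running with the
 * data cache disabled (e.g. the warm-boot path) observes the update.
 */
static inline void example_set_warmboot_ep(entry_point_info_t *ep)
{
	/* Store the pointer in this core's cpu_data slot */
	set_cpu_data(warmboot_ep_info, ep);
	/* Flush the member to main memory so a cold-cache reader sees it */
	flush_cpu_data(warmboot_ep_info);
}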

#endif /* __ASSEMBLER__ */
#endif /* CPU_DATA_H */