xref: /rk3399_ARM-atf/include/lib/el3_runtime/cpu_data.h (revision 40c2cfdde78d47ed28b3fb2897b98326939768a8)
1 /*
2  * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #ifndef CPU_DATA_H
8 #define CPU_DATA_H
9 
10 #include <platform_def.h>	/* CACHE_WRITEBACK_GRANULE required */
11 
12 #include <bl31/ehf.h>
13 
/* Size of psci_cpu_data structure */
#define PSCI_CPU_DATA_SIZE		12

#ifdef __aarch64__

/*
 * NOTE: the constants below are consumed by assembly code and therefore
 * cannot be derived with offsetof(); each literal offset is cross-checked
 * against the real cpu_data_t layout by the CASSERTs at the end of this
 * header.
 */

/* 8-bytes aligned size of psci_cpu_data structure */
#define PSCI_CPU_DATA_SIZE_ALIGNED	((PSCI_CPU_DATA_SIZE + 7) & ~7)

#if ENABLE_RME
/* Size of cpu_context array: secure, non-secure and realm contexts */
#define CPU_DATA_CONTEXT_NUM		3
/* Offset of cpu_ops_ptr, size 8 bytes */
#define CPU_DATA_CPU_OPS_PTR		0x20
#else /* ENABLE_RME */
/* Without RME only the secure and non-secure contexts exist */
#define CPU_DATA_CONTEXT_NUM		2
#define CPU_DATA_CPU_OPS_PTR		0x18
#endif /* ENABLE_RME */

#if ENABLE_PAUTH
/* 8-bytes aligned offset of apiakey[2], size 16 bytes */
#define	CPU_DATA_APIAKEY_OFFSET		(0x8 + PSCI_CPU_DATA_SIZE_ALIGNED \
					     + CPU_DATA_CPU_OPS_PTR)
/* Crash buffer follows the 16-byte apiakey pair */
#define CPU_DATA_CRASH_BUF_OFFSET	(0x10 + CPU_DATA_APIAKEY_OFFSET)
#else /* ENABLE_PAUTH */
#define CPU_DATA_CRASH_BUF_OFFSET	(0x8 + PSCI_CPU_DATA_SIZE_ALIGNED \
					     + CPU_DATA_CPU_OPS_PTR)
#endif /* ENABLE_PAUTH */

/* need enough space in crash buffer to save 8 registers */
#define CPU_DATA_CRASH_BUF_SIZE		64

#else	/* !__aarch64__ */

/* AArch32 layout: warmboot_ep_info pointer first, then cpu_ops_ptr */
#define WARMBOOT_EP_INFO		0x0
#define CPU_DATA_CPU_OPS_PTR		0x4
#define CPU_DATA_CRASH_BUF_OFFSET	(CPU_DATA_CPU_OPS_PTR + PSCI_CPU_DATA_SIZE)

#endif	/* __aarch64__ */

#if CRASH_REPORTING
#define CPU_DATA_CRASH_BUF_END		(CPU_DATA_CRASH_BUF_OFFSET + \
						CPU_DATA_CRASH_BUF_SIZE)
#else
/* No crash buffer is allocated when crash reporting is disabled */
#define CPU_DATA_CRASH_BUF_END		CPU_DATA_CRASH_BUF_OFFSET
#endif

/* buffer space for EHF data is sizeof(pe_exc_data_t) */
#define CPU_DATA_EHF_DATA_SIZE		8
#define CPU_DATA_EHF_DATA_BUF_OFFSET	CPU_DATA_CRASH_BUF_END

#if EL3_EXCEPTION_HANDLING
#define CPU_DATA_EHF_DATA_BUF_END	(CPU_DATA_EHF_DATA_BUF_OFFSET + \
						CPU_DATA_EHF_DATA_SIZE)
#else
#define CPU_DATA_EHF_DATA_BUF_END	CPU_DATA_EHF_DATA_BUF_OFFSET
#endif	/* EL3_EXCEPTION_HANDLING */

/* cpu_data size is the data size rounded up to the platform cache line size */
#define CPU_DATA_SIZE			(((CPU_DATA_EHF_DATA_BUF_END + \
					CACHE_WRITEBACK_GRANULE - 1) / \
						CACHE_WRITEBACK_GRANULE) * \
							CACHE_WRITEBACK_GRANULE)

#if ENABLE_RUNTIME_INSTRUMENTATION
/* Temporary space to store PMF timestamps from assembly code */
#define CPU_DATA_PMF_TS_COUNT		1
#if __aarch64__
#define CPU_DATA_PMF_TS0_OFFSET		CPU_DATA_EHF_DATA_BUF_END
#else
/* alignment: pad so the 64-bit timestamp stays 8-byte aligned on AArch32 */
#define CPU_DATA_PMF_TS0_OFFSET		(CPU_DATA_EHF_DATA_BUF_END + 8)
#endif
#define CPU_DATA_PMF_TS0_IDX		0
#endif
88 
89 #ifndef __ASSEMBLER__
90 
91 #include <assert.h>
92 #include <stdint.h>
93 
94 #include <arch_helpers.h>
95 #include <lib/cassert.h>
96 #include <lib/psci/psci.h>
97 
98 #include <platform_def.h>
99 
/* Offsets for the cpu_data structure */
/* Offset of the per-cpu bakery lock info, used by the PSCI lock code */
#define CPU_DATA_PSCI_LOCK_OFFSET	__builtin_offsetof\
		(cpu_data_t, psci_svc_cpu_data.pcpu_bakery_info)

#if PLAT_PCPU_DATA_SIZE
/* Offset of the platform-private per-cpu area (only if the platform asks for one) */
#define CPU_DATA_PLAT_PCPU_OFFSET	__builtin_offsetof\
		(cpu_data_t, platform_cpu_data)
#endif
108 
/*
 * Index of each world's entry in the cpu_context array. CPU_CONTEXT_NUM
 * counts the contexts and must remain the last enumerator; it is
 * cross-checked against CPU_DATA_CONTEXT_NUM by a CASSERT in this header.
 */
typedef enum context_pas {
	CPU_CONTEXT_SECURE = 0,
	CPU_CONTEXT_NS = 1,
#if ENABLE_RME
	CPU_CONTEXT_REALM = 2,
#endif
	CPU_CONTEXT_NUM
} context_pas_t;
117 
118 /*******************************************************************************
119  * Function & variable prototypes
120  ******************************************************************************/
121 
122 /*******************************************************************************
123  * Cache of frequently used per-cpu data:
124  *   Pointers to non-secure, realm, and secure security state contexts
125  *   Address of the crash stack
126  * It is aligned to the cache line boundary to allow efficient concurrent
127  * manipulation of these pointers on different cpus
128  *
129  * The data structure and the _cpu_data accessors should not be used directly
130  * by components that have per-cpu members. The member access macros should be
131  * used for this.
132  ******************************************************************************/
typedef struct cpu_data {
#ifdef __aarch64__
	/* Per-world context pointers, indexed via get_cpu_context_index() */
	void *cpu_context[CPU_DATA_CONTEXT_NUM];
#endif /* __aarch64__ */
	/* Warm boot entry point info (assembly offset WARMBOOT_EP_INFO on AArch32) */
	entry_point_info_t *warmboot_ep_info;
	/* CPU-specific operations table; assembly offset CPU_DATA_CPU_OPS_PTR */
	struct cpu_ops *cpu_ops_ptr;
	struct psci_cpu_data psci_svc_cpu_data;
#if ENABLE_PAUTH
	/* APIAKey pair (lo, hi); offset checked against CPU_DATA_APIAKEY_OFFSET */
	uint64_t apiakey[2];
#endif
#if CRASH_REPORTING
	/* Scratch space for 8 registers saved by the crash reporter */
	u_register_t crash_buf[CPU_DATA_CRASH_BUF_SIZE >> 3];
#endif
#if ENABLE_RUNTIME_INSTRUMENTATION
	/* Temporary slots for PMF timestamps written from assembly code */
	uint64_t cpu_data_pmf_ts[CPU_DATA_PMF_TS_COUNT];
#endif
#if PLAT_PCPU_DATA_SIZE
	/* Opaque platform-defined per-cpu storage */
	uint8_t platform_cpu_data[PLAT_PCPU_DATA_SIZE];
#endif
#if EL3_EXCEPTION_HANDLING
	/* Exception handling framework per-PE data */
	pe_exc_data_t ehf_data;
#endif
} __aligned(CACHE_WRITEBACK_GRANULE) cpu_data_t;
156 
/* One cpu_data entry per core; use _cpu_data()/_cpu_data_by_index() to access */
extern cpu_data_t percpu_data[PLATFORM_CORE_COUNT];

#ifdef __aarch64__
/* The context array must provide one slot per security state */
CASSERT(CPU_DATA_CONTEXT_NUM == CPU_CONTEXT_NUM,
		assert_cpu_data_context_num_mismatch);
#endif

#if ENABLE_PAUTH
/* Check the assembly-visible apiakey offset against the compiler's layout */
CASSERT(CPU_DATA_APIAKEY_OFFSET == __builtin_offsetof
	(cpu_data_t, apiakey),
	assert_cpu_data_pauth_stack_offset_mismatch);
#endif

#if CRASH_REPORTING
/* verify assembler offsets match data structures */
CASSERT(CPU_DATA_CRASH_BUF_OFFSET == __builtin_offsetof
	(cpu_data_t, crash_buf),
	assert_cpu_data_crash_stack_offset_mismatch);
#endif

#if EL3_EXCEPTION_HANDLING
CASSERT(CPU_DATA_EHF_DATA_BUF_OFFSET == __builtin_offsetof
	(cpu_data_t, ehf_data),
	assert_cpu_data_ehf_stack_offset_mismatch);
#endif

/* The hand-computed CPU_DATA_SIZE must equal the compiler's sizeof */
CASSERT(CPU_DATA_SIZE == sizeof(cpu_data_t),
		assert_cpu_data_size_mismatch);

CASSERT(CPU_DATA_CPU_OPS_PTR == __builtin_offsetof
		(cpu_data_t, cpu_ops_ptr),
		assert_cpu_data_cpu_ops_ptr_offset_mismatch);

#if ENABLE_RUNTIME_INSTRUMENTATION
CASSERT(CPU_DATA_PMF_TS0_OFFSET == __builtin_offsetof
		(cpu_data_t, cpu_data_pmf_ts[0]),
		assert_cpu_data_pmf_ts0_offset_mismatch);
#endif
195 
196 static inline cpu_data_t *_cpu_data_by_index(unsigned int cpu_index)
197 {
198 	return &percpu_data[cpu_index];
199 }
200 
201 #ifdef __aarch64__
202 /* Return the cpu_data structure for the current CPU. */
203 static inline cpu_data_t *_cpu_data(void)
204 {
205 	return (cpu_data_t *)read_tpidr_el3();
206 }
207 #else
208 cpu_data_t *_cpu_data(void);
209 #endif
210 
211 /*
212  * Returns the index of the cpu_context array for the given security state.
213  * All accesses to cpu_context should be through this helper to make sure
214  * an access is not out-of-bounds. The function assumes security_state is
215  * valid.
216  */
217 static inline context_pas_t get_cpu_context_index(size_t security_state)
218 {
219 	if (security_state == SECURE) {
220 		return CPU_CONTEXT_SECURE;
221 	} else {
222 #if ENABLE_RME
223 		if (security_state == NON_SECURE) {
224 			return CPU_CONTEXT_NS;
225 		} else {
226 			assert(security_state == REALM);
227 			return CPU_CONTEXT_REALM;
228 		}
229 #else
230 		assert(security_state == NON_SECURE);
231 		return CPU_CONTEXT_NS;
232 #endif
233 	}
234 }
235 
/**************************************************************************
 * APIs for initialising and accessing per-cpu data
 *************************************************************************/

/* Initialise the cpu_ops_ptr entry of the calling core's cpu_data */
void cpu_data_init_cpu_ops(void);

/* Read/write member _m of the calling core's cpu_data entry */
#define get_cpu_data(_m)		   _cpu_data()->_m
#define set_cpu_data(_m, _v)		   _cpu_data()->_m = (_v)
/* Same accessors, but for the core with index _ix */
#define get_cpu_data_by_index(_ix, _m)	   _cpu_data_by_index(_ix)->_m
#define set_cpu_data_by_index(_ix, _m, _v) _cpu_data_by_index(_ix)->_m = (_v)
/* ((cpu_data_t *)0)->_m is a dummy to get the sizeof the struct member _m */
/* Clean (write back) the cache lines covering member _m of this core's entry */
#define flush_cpu_data(_m)	   flush_dcache_range((uintptr_t)	  \
						&(_cpu_data()->_m), \
						sizeof(((cpu_data_t *)0)->_m))
/* Invalidate (discard) the cache lines covering member _m of this core's entry */
#define inv_cpu_data(_m)	   inv_dcache_range((uintptr_t)	  	  \
						&(_cpu_data()->_m), \
						sizeof(((cpu_data_t *)0)->_m))
/* Clean the cache lines covering member _m of core _ix's entry */
#define flush_cpu_data_by_index(_ix, _m)	\
				   flush_dcache_range((uintptr_t)	  \
					 &(_cpu_data_by_index(_ix)->_m),  \
						sizeof(((cpu_data_t *)0)->_m))
257 
258 
259 #endif /* __ASSEMBLER__ */
260 #endif /* CPU_DATA_H */
261