1 /* 2 * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved. 3 * 4 * SPDX-License-Identifier: BSD-3-Clause 5 */ 6 7 #ifndef CPU_DATA_H 8 #define CPU_DATA_H 9 10 #include <platform_def.h> /* CACHE_WRITEBACK_GRANULE required */ 11 12 #include <bl31/ehf.h> 13 #include <context.h> 14 15 /* Size of psci_cpu_data structure */ 16 #define PSCI_CPU_DATA_SIZE 12 17 18 #ifdef __aarch64__ 19 20 /* 8-bytes aligned size of psci_cpu_data structure */ 21 #define PSCI_CPU_DATA_SIZE_ALIGNED ((PSCI_CPU_DATA_SIZE + 7) & ~7) 22 23 /* Offset of cpu_ops_ptr, size 8 bytes */ 24 #define CPU_DATA_CPU_OPS_PTR (8 + (8 * CPU_CONTEXT_NUM)) 25 26 #if ENABLE_PAUTH 27 /* 8-bytes aligned offset of apiakey[2], size 16 bytes */ 28 #define CPU_DATA_APIAKEY_OFFSET (0x8 + PSCI_CPU_DATA_SIZE_ALIGNED \ 29 + CPU_DATA_CPU_OPS_PTR) 30 #define CPU_DATA_CRASH_BUF_OFFSET (0x10 + CPU_DATA_APIAKEY_OFFSET) 31 #else /* ENABLE_PAUTH */ 32 #define CPU_DATA_CRASH_BUF_OFFSET (0x8 + PSCI_CPU_DATA_SIZE_ALIGNED \ 33 + CPU_DATA_CPU_OPS_PTR) 34 #endif /* ENABLE_PAUTH */ 35 36 /* need enough space in crash buffer to save 8 registers */ 37 #define CPU_DATA_CRASH_BUF_SIZE 64 38 39 #else /* !__aarch64__ */ 40 41 #define WARMBOOT_EP_INFO 0x0 42 #define CPU_DATA_CPU_OPS_PTR 0x4 43 #define CPU_DATA_CRASH_BUF_OFFSET (CPU_DATA_CPU_OPS_PTR + PSCI_CPU_DATA_SIZE) 44 45 #endif /* __aarch64__ */ 46 47 #if CRASH_REPORTING 48 #define CPU_DATA_CRASH_BUF_END (CPU_DATA_CRASH_BUF_OFFSET + \ 49 CPU_DATA_CRASH_BUF_SIZE) 50 #else 51 #define CPU_DATA_CRASH_BUF_END CPU_DATA_CRASH_BUF_OFFSET 52 #endif 53 54 /* buffer space for EHF data is sizeof(pe_exc_data_t) */ 55 #define CPU_DATA_EHF_DATA_SIZE 8 56 #define CPU_DATA_EHF_DATA_BUF_OFFSET CPU_DATA_CRASH_BUF_END 57 58 #if EL3_EXCEPTION_HANDLING 59 #define CPU_DATA_EHF_DATA_BUF_END (CPU_DATA_EHF_DATA_BUF_OFFSET + \ 60 CPU_DATA_EHF_DATA_SIZE) 61 #else 62 #define CPU_DATA_EHF_DATA_BUF_END CPU_DATA_EHF_DATA_BUF_OFFSET 63 #endif /* EL3_EXCEPTION_HANDLING */ 64 65 /* cpu_data size is 
 * the data size rounded up to the platform cache line size */
/*
 * Rounding each entry up to CACHE_WRITEBACK_GRANULE means per-CPU entries
 * never share a cache line, so they can be cleaned/invalidated per CPU.
 */
#define CPU_DATA_SIZE		(((CPU_DATA_EHF_DATA_BUF_END + \
				CACHE_WRITEBACK_GRANULE - 1) / \
				CACHE_WRITEBACK_GRANULE) * \
				CACHE_WRITEBACK_GRANULE)

#if ENABLE_RUNTIME_INSTRUMENTATION
/* Temporary space to store PMF timestamps from assembly code */
#define CPU_DATA_PMF_TS_COUNT	1
#if __aarch64__
#define CPU_DATA_PMF_TS0_OFFSET	CPU_DATA_EHF_DATA_BUF_END
#else
/*
 * alignment: skip ahead so the 64-bit timestamp slot stays 8-byte aligned
 * on AArch32 (the preceding members are 4-byte quantities). NOTE(review):
 * verified only by the CPU_DATA_PMF_TS0_OFFSET CASSERT below — confirm on
 * any AArch32 configuration that enables runtime instrumentation.
 */
#define CPU_DATA_PMF_TS0_OFFSET	(CPU_DATA_EHF_DATA_BUF_END + 8)
#endif
#define CPU_DATA_PMF_TS0_IDX	0
#endif

#ifndef __ASSEMBLER__

#include <assert.h>
#include <stdint.h>

#include <arch_helpers.h>
#include <lib/cassert.h>
#include <lib/psci/psci.h>

#include <platform_def.h>

/*
 * Offsets for the cpu_data structure. These are computed by the compiler
 * from the real structure, unlike the hand-maintained assembler offsets
 * above.
 */
#define CPU_DATA_PSCI_LOCK_OFFSET	__builtin_offsetof\
	(cpu_data_t, psci_svc_cpu_data.pcpu_bakery_info)

#if PLAT_PCPU_DATA_SIZE
#define CPU_DATA_PLAT_PCPU_OFFSET	__builtin_offsetof\
	(cpu_data_t, platform_cpu_data)
#endif

/*******************************************************************************
 * Function & variable prototypes
 ******************************************************************************/

/*******************************************************************************
 * Cache of frequently used per-cpu data:
 *   Pointers to non-secure, realm, and secure security state contexts
 *   Address of the crash stack
 * It is aligned to the cache line boundary to allow efficient concurrent
 * manipulation of these pointers on different cpus
 *
 * The data structure and the _cpu_data accessors should not be used directly
 * by components that have per-cpu members. The member access macros should be
 * used for this.
 ******************************************************************************/
typedef struct cpu_data {
#ifdef __aarch64__
	/* One saved EL3 context pointer per security state. */
	void *cpu_context[CPU_CONTEXT_NUM];
#endif /* __aarch64__ */
	/* Entry point information for warm boot on this CPU. */
	entry_point_info_t *warmboot_ep_info;
	/* CPU-specific operations (errata workarounds, power handlers). */
	struct cpu_ops *cpu_ops_ptr;
	struct psci_cpu_data psci_svc_cpu_data;
#if ENABLE_PAUTH
	/* Per-CPU pointer-authentication instruction key (APIAKey lo/hi). */
	uint64_t apiakey[2];
#endif
#if CRASH_REPORTING
	/* CPU_DATA_CRASH_BUF_SIZE bytes, expressed as machine words. */
	u_register_t crash_buf[CPU_DATA_CRASH_BUF_SIZE >> 3];
#endif
#if ENABLE_RUNTIME_INSTRUMENTATION
	/* Scratch PMF timestamps written from assembly code. */
	uint64_t cpu_data_pmf_ts[CPU_DATA_PMF_TS_COUNT];
#endif
#if PLAT_PCPU_DATA_SIZE
	/* Opaque platform-defined per-CPU storage. */
	uint8_t platform_cpu_data[PLAT_PCPU_DATA_SIZE];
#endif
#if EL3_EXCEPTION_HANDLING
	/* Exception handling framework per-PE state. */
	pe_exc_data_t ehf_data;
#endif
	/* Cache-line alignment keeps each CPU's entry in its own line(s). */
} __aligned(CACHE_WRITEBACK_GRANULE) cpu_data_t;

/* One entry per CPU; defined in the cpu_data C implementation file. */
extern cpu_data_t percpu_data[PLATFORM_CORE_COUNT];

/*
 * Compile-time checks that the hand-maintained assembler offsets at the top
 * of this file match the actual cpu_data_t layout.
 */
#if ENABLE_PAUTH
CASSERT(CPU_DATA_APIAKEY_OFFSET == __builtin_offsetof
	(cpu_data_t, apiakey),
	assert_cpu_data_pauth_stack_offset_mismatch);
#endif

#if CRASH_REPORTING
/* verify assembler offsets match data structures */
CASSERT(CPU_DATA_CRASH_BUF_OFFSET == __builtin_offsetof
	(cpu_data_t, crash_buf),
	assert_cpu_data_crash_stack_offset_mismatch);
#endif

#if EL3_EXCEPTION_HANDLING
CASSERT(CPU_DATA_EHF_DATA_BUF_OFFSET == __builtin_offsetof
	(cpu_data_t, ehf_data),
	assert_cpu_data_ehf_stack_offset_mismatch);
#endif

CASSERT(CPU_DATA_SIZE == sizeof(cpu_data_t),
	assert_cpu_data_size_mismatch);

CASSERT(CPU_DATA_CPU_OPS_PTR == __builtin_offsetof
	(cpu_data_t, cpu_ops_ptr),
	assert_cpu_data_cpu_ops_ptr_offset_mismatch);

#if ENABLE_RUNTIME_INSTRUMENTATION
CASSERT(CPU_DATA_PMF_TS0_OFFSET == __builtin_offsetof
	(cpu_data_t, cpu_data_pmf_ts[0]),
	assert_cpu_data_pmf_ts0_offset_mismatch);
#endif

/* Return the cpu_data entry for the CPU with the given linear index. */
static inline cpu_data_t *_cpu_data_by_index(unsigned int cpu_index)
{
	return &percpu_data[cpu_index];
}

#ifdef __aarch64__
/*
 * Return the cpu_data structure for the current CPU.
 * Assumes TPIDR_EL3 was programmed with this CPU's cpu_data pointer during
 * early boot — TODO(review): confirm against the platform init code.
 */
static inline cpu_data_t *_cpu_data(void)
{
	return (cpu_data_t *)read_tpidr_el3();
}
#else
/* AArch32 has no TPIDR_EL3; the lookup is implemented out of line. */
cpu_data_t *_cpu_data(void);
#endif

/**************************************************************************
 * APIs for initialising and accessing per-cpu data
 *************************************************************************/

/* Initialise this CPU's cpu_ops_ptr member. */
void cpu_data_init_cpu_ops(void);

/*
 * Preferred accessors for per-cpu members: read/write a member of the
 * current CPU's entry, or of the entry selected by linear index _ix.
 */
#define get_cpu_data(_m)		   _cpu_data()->_m
#define set_cpu_data(_m, _v)		   _cpu_data()->_m = (_v)
#define get_cpu_data_by_index(_ix, _m)	   _cpu_data_by_index(_ix)->_m
#define set_cpu_data_by_index(_ix, _m, _v) _cpu_data_by_index(_ix)->_m = (_v)
/* ((cpu_data_t *)0)->_m is a dummy to get the sizeof the struct member _m */
#define flush_cpu_data(_m)	   flush_dcache_range((uintptr_t)	  \
						&(_cpu_data()->_m), \
						sizeof(((cpu_data_t *)0)->_m))
#define inv_cpu_data(_m)	   inv_dcache_range((uintptr_t)	  \
						&(_cpu_data()->_m), \
						sizeof(((cpu_data_t *)0)->_m))
#define flush_cpu_data_by_index(_ix, _m)	\
				   flush_dcache_range((uintptr_t)	  \
					 &(_cpu_data_by_index(_ix)->_m),  \
					 sizeof(((cpu_data_t *)0)->_m))


#endif /* __ASSEMBLER__ */
#endif /* CPU_DATA_H */