/*
 * Copyright (c) 2023-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include "cpu_errata_info.h"
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>
#include <lib/smccc.h>
#include <lib/utils_def.h>
#include <services/errata_abi_svc.h>
#include <smccc_helpers.h>

/*
 * Global pointer that points to the specific
 * structure based on the MIDR part number
 */
struct em_cpu_list *cpu_ptr;

/* Structure array that holds CPU specific errata information */
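/*
 * Each cpu_errata_list entry is {erratum ID, lowest affected rev-var,
 * highest affected rev-var}; the rev-var bounds use the same 0xVR
 * encoding (e.g. 0x12 for r1p2) that cpu_get_rev_var() returns.
 */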
struct em_cpu_list cpu_list[] = {
#if CORTEX_A78_H_INC
{
	.cpu_midr = CORTEX_A78_MIDR,
	.cpu_errata_list = {
		[0] = {2712571, 0x00, 0x12},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* CORTEX_A78_H_INC */

#if CORTEX_A78_AE_H_INC
{
	.cpu_midr = CORTEX_A78_AE_MIDR,
	.cpu_errata_list = {
		[0] = {2712574, 0x00, 0x02},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* CORTEX_A78_AE_H_INC */

#if CORTEX_A78C_H_INC
{
	.cpu_midr = CORTEX_A78C_MIDR,
	.cpu_errata_list = {
		[0] = {2712575, 0x01, 0x02},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* CORTEX_A78C_H_INC */

#if NEOVERSE_V1_H_INC
{
	.cpu_midr = NEOVERSE_V1_MIDR,
	.cpu_errata_list = {
		[0] = {2701953, 0x00, 0x11},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* NEOVERSE_V1_H_INC */

#if CORTEX_A710_H_INC
{
	.cpu_midr = CORTEX_A710_MIDR,
	.cpu_errata_list = {
		[0] = {2701952, 0x00, 0x21},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* CORTEX_A710_H_INC */

#if NEOVERSE_N2_H_INC
{
	.cpu_midr = NEOVERSE_N2_MIDR,
	.cpu_errata_list = {
		[0] = {2728475, 0x00, 0x02},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* NEOVERSE_N2_H_INC */

#if CORTEX_X2_H_INC
{
	.cpu_midr = CORTEX_X2_MIDR,
	.cpu_errata_list = {
		[0] = {2701952, 0x00, 0x21},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* CORTEX_X2_H_INC */

#if NEOVERSE_V2_H_INC
{
	.cpu_midr = NEOVERSE_V2_MIDR,
	.cpu_errata_list = {
		[0] = {2719103, 0x00, 0x01},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* NEOVERSE_V2_H_INC */

#if CORTEX_X3_H_INC
{
	.cpu_midr = CORTEX_X3_MIDR,
	.cpu_errata_list = {
		[0] = {2701951, 0x00, 0x11},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* CORTEX_X3_H_INC */

#if CORTEX_X4_H_INC
{
	.cpu_midr = CORTEX_X4_MIDR,
	.cpu_errata_list = {
		[0] = {2701112, 0x00, 0x00},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* CORTEX_X4_H_INC */

};

#if ERRATA_NON_ARM_INTERCONNECT
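/*
 * Errata that are only relevant on platforms built around a non-Arm
 * interconnect are tracked in the local cpu_list above; when the
 * platform enables ERRATA_NON_ARM_INTERCONNECT, that list is consulted
 * before the CPU library's erratum entries.
 */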

/*
 * Look up the erratum ID in the local cpu_list for the executing CPU
 * and its revision/variant.
 */
static int32_t non_arm_interconnect_errata(uint32_t errata_id, long rev_var)
{
	int32_t ret_val = EM_UNKNOWN_ERRATUM;

	/* Determine the number of CPUs listed in the cpu list */
	uint8_t size_cpulist = ARRAY_SIZE(cpu_list);

	/* Read the MIDR to extract the part number, revision and variant */
	uint32_t midr_val = read_midr();

	for (uint8_t i = 0U; i < size_cpulist; i++) {
		cpu_ptr = &cpu_list[i];
		/*
		 * If the CPU part number in the cpu list matches the MIDR
		 * part number, check whether the erratum ID matches.
		 */
		if (EXTRACT_PARTNUM(midr_val) == EXTRACT_PARTNUM(cpu_ptr->cpu_midr)) {

			struct em_cpu *ptr = NULL;

			for (int j = 0; j < MAX_PLAT_CPU_ERRATA_ENTRIES; j++) {
				ptr = &cpu_ptr->cpu_errata_list[j];
				assert(ptr != NULL);
				if (errata_id == ptr->em_errata_id) {
					if (RXPX_RANGE(rev_var, ptr->em_rxpx_lo, ptr->em_rxpx_hi)) {
						ret_val = EM_AFFECTED;
						break;
					}
					ret_val = EM_NOT_AFFECTED;
					break;
				}
			}
			break;
		}
	}
	return ret_val;
}
#endif /* ERRATA_NON_ARM_INTERCONNECT */

/*
 * Check whether the erratum applies to the executing CPU at its
 * revision/variant and whether a workaround has been applied.
 */
int32_t verify_errata_implemented(uint32_t errata_id)
{
	struct erratum_entry *entry;
	long rev_var;

	rev_var = cpu_get_rev_var();

#if ERRATA_NON_ARM_INTERCONNECT
	int32_t ret_val = non_arm_interconnect_errata(errata_id, rev_var);

	if (ret_val != EM_UNKNOWN_ERRATUM) {
		return ret_val;
	}
#endif

	entry = find_erratum_entry(errata_id);

	if (entry == NULL) {
		return EM_UNKNOWN_ERRATUM;
	}

	if (entry->check_func(rev_var)) {
		if (entry->chosen & WA_ENABLED_MASK) {
			if (entry->chosen & SPLIT_WA_MASK) {
				/*
				 * Split workaround: EL3 only implements part
				 * of the mitigation, so still report affected.
				 */
				return EM_AFFECTED;
			}
			/* Workaround fully handled at a higher EL */
			return EM_HIGHER_EL_MITIGATION;
		}
		/* Erratum applies but no workaround was enabled */
		return EM_AFFECTED;
	}

	return EM_NOT_AFFECTED;
}

/* Predicate indicating that a function id is part of the Errata ABI */
bool is_errata_fid(uint32_t smc_fid)
{
	return ((smc_fid == ARM_EM_VERSION) ||
		(smc_fid == ARM_EM_FEATURES) ||
		(smc_fid == ARM_EM_CPU_ERRATUM_FEATURES));
}

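/*
 * Return true when the caller's saved mode is EL1 (AArch64) or
 * svc/sys mode (AArch32), i.e. the SMC was issued directly by the OS.
 */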
bool validate_spsr_mode(void)
{
	/* In AArch64, if the caller is EL1, return true */
#if __aarch64__
	if (GET_EL(read_spsr_el3()) == MODE_EL1) {
		return true;
	}
	return false;
#else
	/* In AArch32, if in system/svc mode, return true */
	uint8_t read_el_state = GET_M32(read_spsr());

	if ((read_el_state == MODE32_svc) || (read_el_state == MODE32_sys)) {
		return true;
	}
	return false;
#endif /* __aarch64__ */
}

uintptr_t errata_abi_smc_handler(uint32_t smc_fid, u_register_t x1,
				u_register_t x2, u_register_t x3, u_register_t x4,
				void *cookie, void *handle, u_register_t flags)
{
	int32_t ret_id = EM_UNKNOWN_ERRATUM;

	switch (smc_fid) {
	case ARM_EM_VERSION:
		SMC_RET1(handle, MAKE_SMCCC_VERSION(
			EM_VERSION_MAJOR, EM_VERSION_MINOR
		));
		break; /* unreachable */
	case ARM_EM_FEATURES:
		if (is_errata_fid((uint32_t)x1)) {
			SMC_RET1(handle, EM_SUCCESS);
		}

		SMC_RET1(handle, EM_NOT_SUPPORTED);
		break; /* unreachable */
	case ARM_EM_CPU_ERRATUM_FEATURES:

		/*
		 * If the forward flag (x2) is non-zero while the caller is at
		 * EL1 in AArch64, or in svc/sys mode in AArch32, return
		 * Invalid Parameters.
		 */
		if (((uint32_t)x2 != 0) && (validate_spsr_mode())) {
			SMC_RET1(handle, EM_INVALID_PARAMETERS);
		}

		ret_id = verify_errata_implemented((uint32_t)x1);
		SMC_RET1(handle, ret_id);
		break; /* unreachable */
	default:
		WARN("Unimplemented Errata ABI Service Call: 0x%x\n", smc_fid);
		SMC_RET1(handle, EM_UNKNOWN_ERRATUM);
		break; /* unreachable */
	}
}