xref: /rk3399_ARM-atf/services/std_svc/errata_abi/errata_abi_main.c (revision 584052c7f80b406666b9597447eeccef4d6deca4)
/*
 * Copyright (c) 2023-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include "cpu_errata_info.h"
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>
#include <lib/smccc.h>
#include <lib/utils_def.h>
#include <services/errata_abi_svc.h>
#include <smccc_helpers.h>

/*
 * Global pointer to the cpu_list entry that matches the
 * MIDR part number of the executing CPU.
 */
struct em_cpu_list *cpu_ptr;

/* Structure array that holds CPU specific errata information */
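/*
 * Each cpu_errata_list entry is {em_errata_id, em_rxpx_lo, em_rxpx_hi}:
 * the erratum number followed by the inclusive revision/variant range it
 * applies to, in the packed format produced by cpu_get_rev_var()
 * (e.g. 0x00 to 0x12 covers r0p0 through r1p2).
 */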
struct em_cpu_list cpu_list[] = {
#if CORTEX_A78_H_INC
{
	.cpu_partnumber = CORTEX_A78_MIDR,
	.cpu_errata_list = {
		[0] = {2712571, 0x00, 0x12},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* CORTEX_A78_H_INC */

#if CORTEX_A78_AE_H_INC
{
	.cpu_partnumber = CORTEX_A78_AE_MIDR,
	.cpu_errata_list = {
		[0] = {2712574, 0x00, 0x02},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* CORTEX_A78_AE_H_INC */

#if CORTEX_A78C_H_INC
{
	.cpu_partnumber = CORTEX_A78C_MIDR,
	.cpu_errata_list = {
		[0] = {2712575, 0x01, 0x02},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* CORTEX_A78C_H_INC */

#if NEOVERSE_V1_H_INC
{
	.cpu_partnumber = NEOVERSE_V1_MIDR,
	.cpu_errata_list = {
		[0] = {2701953, 0x00, 0x11},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* NEOVERSE_V1_H_INC */

#if CORTEX_A710_H_INC
{
	.cpu_partnumber = CORTEX_A710_MIDR,
	.cpu_errata_list = {
		[0] = {2701952, 0x00, 0x21},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* CORTEX_A710_H_INC */

#if NEOVERSE_N2_H_INC
{
	.cpu_partnumber = NEOVERSE_N2_MIDR,
	.cpu_errata_list = {
		[0] = {2728475, 0x00, 0x02},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* NEOVERSE_N2_H_INC */

#if CORTEX_X2_H_INC
{
	.cpu_partnumber = CORTEX_X2_MIDR,
	.cpu_errata_list = {
		[0] = {2701952, 0x00, 0x21},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* CORTEX_X2_H_INC */

#if NEOVERSE_V2_H_INC
{
	.cpu_partnumber = NEOVERSE_V2_MIDR,
	.cpu_errata_list = {
		[0] = {2719103, 0x00, 0x01},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* NEOVERSE_V2_H_INC */

#if CORTEX_X3_H_INC
{
	.cpu_partnumber = CORTEX_X3_MIDR,
	.cpu_errata_list = {
		[0] = {2701951, 0x00, 0x11},
		[1 ... ERRATA_LIST_END] = UNDEF_ERRATA,
	}
},
#endif /* CORTEX_X3_H_INC */

};

#if ERRATA_NON_ARM_INTERCONNECT

/*
 * Check whether the queried erratum is one of the non-Arm-interconnect
 * errata listed above for this CPU. Returns EM_UNKNOWN_ERRATUM when the
 * erratum is not in the list, so that the caller can fall back to the
 * generic CPU errata framework.
 */
static int32_t non_arm_interconnect_errata(uint32_t errata_id, long rev_var)
{
	int32_t ret_val = EM_UNKNOWN_ERRATUM;

	/* Determine the number of CPUs listed in the cpu list */
	uint8_t size_cpulist = ARRAY_SIZE(cpu_list);

	/* Read the MIDR register to extract CPU, revision and variant info */
	uint32_t midr_val = read_midr();

	for (uint8_t i = 0U; i < size_cpulist; i++) {
		cpu_ptr = &cpu_list[i];
		/*
		 * If the CPU part number in the cpu list matches the MIDR
		 * part number, check whether the erratum ID matches.
		 */
		if (EXTRACT_PARTNUM(midr_val) == EXTRACT_PARTNUM(cpu_ptr->cpu_partnumber)) {

			struct em_cpu *ptr = NULL;

			for (int j = 0; j < MAX_PLAT_CPU_ERRATA_ENTRIES; j++) {
				ptr = &cpu_ptr->cpu_errata_list[j];
				assert(ptr != NULL);
				if (errata_id == ptr->em_errata_id) {
					if (RXPX_RANGE(rev_var, ptr->em_rxpx_lo, ptr->em_rxpx_hi)) {
						ret_val = EM_AFFECTED;
						break;
					}
					ret_val = EM_NOT_AFFECTED;
					break;
				}
			}
			break;
		}
	}
	return ret_val;
}
#endif /* ERRATA_NON_ARM_INTERCONNECT */

/* Function to check if the erratum exists for the specific CPU and rxpx */
int32_t verify_errata_implemented(uint32_t errata_id, uint32_t forward_flag)
{
	int32_t ret_val;
	struct cpu_ops *cpu_ops;
	struct erratum_entry *entry, *end;
	long rev_var;

	ret_val = EM_UNKNOWN_ERRATUM;
	rev_var = cpu_get_rev_var();

#if ERRATA_NON_ARM_INTERCONNECT
	ret_val = non_arm_interconnect_errata(errata_id, rev_var);
	if (ret_val != EM_UNKNOWN_ERRATUM) {
		return ret_val;
	}
#endif

	cpu_ops = get_cpu_ops_ptr();
	assert(cpu_ops != NULL);

	entry = cpu_ops->errata_list_start;
	assert(entry != NULL);

	end = cpu_ops->errata_list_end;
	assert(end != NULL);

	end--; /* point to the last erratum entry of the queried cpu */

	while ((entry <= end) && (ret_val == EM_UNKNOWN_ERRATUM)) {
		if (entry->id == errata_id) {
			if (entry->check_func(rev_var)) {
				if (entry->chosen) {
					return EM_HIGHER_EL_MITIGATION;
				} else {
					return EM_AFFECTED;
				}
			}
			return EM_NOT_AFFECTED;
		}
		entry += 1;
	}
	return ret_val;
}

/* Predicate indicating that a function id is part of EM_ABI */
bool is_errata_fid(uint32_t smc_fid)
{
	return ((smc_fid == ARM_EM_VERSION) ||
		(smc_fid == ARM_EM_FEATURES) ||
		(smc_fid == ARM_EM_CPU_ERRATUM_FEATURES));
}

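/*
 * Return true if the SMC was taken from EL1 in AArch64, or from
 * System/Supervisor mode in AArch32, based on the saved SPSR.
 */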
bool validate_spsr_mode(void)
{
	/* In AArch64, if the caller is EL1, return true */
#if __aarch64__
	if (GET_EL(read_spsr_el3()) == MODE_EL1) {
		return true;
	}
	return false;
#else
	/* In AArch32, if in system/svc mode, return true */
	uint8_t read_el_state = GET_M32(read_spsr());

	if ((read_el_state == MODE32_svc) || (read_el_state == MODE32_sys)) {
		return true;
	}
	return false;
#endif /* __aarch64__ */
}

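/*
 * Top-level SMC handler for the Errata Management ABI. For
 * ARM_EM_CPU_ERRATUM_FEATURES, x1 carries the erratum identifier and x2
 * the forward flag; the result is returned to the caller via SMC_RET1.
 */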
uintptr_t errata_abi_smc_handler(uint32_t smc_fid, u_register_t x1,
				u_register_t x2, u_register_t x3, u_register_t x4,
				void *cookie, void *handle, u_register_t flags)
{
	int32_t ret_id = EM_UNKNOWN_ERRATUM;

	switch (smc_fid) {
	case ARM_EM_VERSION:
		SMC_RET1(handle, MAKE_SMCCC_VERSION(
			EM_VERSION_MAJOR, EM_VERSION_MINOR
		));
		break; /* unreachable */
	case ARM_EM_FEATURES:
		if (is_errata_fid((uint32_t)x1)) {
			SMC_RET1(handle, EM_SUCCESS);
		}

		SMC_RET1(handle, EM_NOT_SUPPORTED);
		break; /* unreachable */
	case ARM_EM_CPU_ERRATUM_FEATURES:
		/*
		 * If the forward flag is non-zero and the caller is EL1 in
		 * AArch64, or System/Supervisor mode in AArch32, return
		 * EM_INVALID_PARAMETERS.
		 */
		if (((uint32_t)x2 != 0) && (validate_spsr_mode())) {
			SMC_RET1(handle, EM_INVALID_PARAMETERS);
		}
		ret_id = verify_errata_implemented((uint32_t)x1, (uint32_t)x2);
		SMC_RET1(handle, ret_id);
		break; /* unreachable */
	default:
		WARN("Unimplemented Errata ABI Service Call: 0x%x\n", smc_fid);
		SMC_RET1(handle, EM_UNKNOWN_ERRATUM);
		break; /* unreachable */
	}
}
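/*
 * Illustrative sketch (not part of this service): a lower-EL client built
 * against a Linux-style SMCCC helper might query an erratum roughly as
 * below. arm_smccc_1_1_smc() and struct arm_smccc_res follow the Linux
 * <linux/arm-smccc.h> convention, and EM_CPU_ERRATUM_FEATURES_FID is a
 * hypothetical placeholder for the Errata Management ABI function ID that
 * corresponds to ARM_EM_CPU_ERRATUM_FEATURES in this file.
 *
 *	struct arm_smccc_res res;
 *
 *	arm_smccc_1_1_smc(EM_CPU_ERRATUM_FEATURES_FID, 2712571, 0, &res);
 *	if ((int32_t)res.a0 == EM_AFFECTED) {
 *		// apply or verify the mitigation for erratum 2712571
 *	}
 */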