xref: /rk3399_ARM-atf/services/arm_arch_svc/arm_arch_svc_setup.c (revision 76d5d32fcf7e8859721e0d63a1ecc6b674a4ae0e)
/*
 * Copyright (c) 2018-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/cpus/errata.h>
#include <lib/smccc.h>
#include <services/arm_arch_svc.h>
#include <smccc_helpers.h>
#include <plat/common/platform.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <lib/el3_runtime/context_mgmt.h>

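/*
 * Per the SMC Calling Convention, the version is reported as a 32-bit value
 * with the major revision in bits [30:16] and the minor revision in
 * bits [15:0]; MAKE_SMCCC_VERSION packs the two fields accordingly.
 */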
static int32_t smccc_version(void)
{
	return (int32_t)MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION);
}

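/*
 * SMCCC_ARCH_FEATURES reports whether a given Arm Architecture Service
 * function is implemented. Following the SMCCC convention, a negative value
 * means "not supported", 0 means supported (and, for the workaround calls,
 * required on this PE), and 1 means supported but not required on this PE.
 * An illustrative (non-normative) discovery sequence from a lower EL:
 *
 *   if (smc(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_1) >= 0)
 *           smc(SMCCC_ARCH_WORKAROUND_1);
 */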
static int32_t smccc_arch_features(u_register_t arg1)
{
	switch (arg1) {
	case SMCCC_VERSION:
	case SMCCC_ARCH_FEATURES:
		return SMC_ARCH_CALL_SUCCESS;
	case SMCCC_ARCH_SOC_ID:
		return plat_is_smccc_feature_available(arg1);
#ifdef __aarch64__
	/* Workaround checks are currently only implemented for AArch64 */
#if WORKAROUND_CVE_2017_5715
	case SMCCC_ARCH_WORKAROUND_1:
		if (check_erratum_applies(CVE(2017, 5715))
			== ERRATA_NOT_APPLIES) {
			return 1;
		}

		return 0; /* ERRATA_APPLIES || ERRATA_MISSING */
#endif

#if WORKAROUND_CVE_2018_3639
	case SMCCC_ARCH_WORKAROUND_2: {
#if DYNAMIC_WORKAROUND_CVE_2018_3639
		unsigned long long ssbs;

		/*
		 * Firmware doesn't have to carry out the dynamic workaround
		 * if the PE implements the architectural Speculation Store
		 * Bypass Safe (SSBS) feature.
		 */
		ssbs = (read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_SSBS_SHIFT) &
			ID_AA64PFR1_EL1_SSBS_MASK;

		/*
		 * If architectural SSBS is available on this PE, no firmware
		 * mitigation via SMCCC_ARCH_WORKAROUND_2 is required.
		 */
		if (ssbs != SSBS_NOT_IMPLEMENTED)
			return 1;

		/*
		 * On a platform where at least one CPU requires
		 * dynamic mitigation but others are either unaffected
		 * or permanently mitigated, report the latter as not
		 * needing dynamic mitigation.
		 */
		if (check_erratum_applies(ERRATUM(ARCH_WORKAROUND_2))
			== ERRATA_NOT_APPLIES)
			return 1;

		/*
		 * If we get here, this CPU requires dynamic mitigation
		 * so report it as such.
		 */
		return 0;
#else
		/* Either the CPUs are unaffected or permanently mitigated */
		return SMC_ARCH_CALL_NOT_REQUIRED;
#endif
	}
#endif

#if (WORKAROUND_CVE_2022_23960 || WORKAROUND_CVE_2017_5715)
	case SMCCC_ARCH_WORKAROUND_3:
		/*
		 * SMCCC_ARCH_WORKAROUND_3 should also take into account
		 * CVE-2017-5715 since this SMC can be used instead of
		 * SMCCC_ARCH_WORKAROUND_1.
		 */
		if ((check_erratum_applies(ERRATUM(ARCH_WORKAROUND_3))
			== ERRATA_NOT_APPLIES) &&
		    (check_erratum_applies(CVE(2017, 5715))
			== ERRATA_NOT_APPLIES)) {
			return 1;
		}

		return 0; /* ERRATA_APPLIES || ERRATA_MISSING */
#endif

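	/*
	 * Only the SMC64 form of SMCCC_ARCH_FEATURE_AVAILABILITY is
	 * advertised here, since the feature bitmask it returns needs a
	 * full 64-bit register.
	 */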
#if ARCH_FEATURE_AVAILABILITY
	case SMCCC_ARCH_FEATURE_AVAILABILITY | (SMC_64 << FUNCID_CC_SHIFT):
		return SMC_ARCH_CALL_SUCCESS;
#endif /* ARCH_FEATURE_AVAILABILITY */

#if WORKAROUND_CVE_2024_7881
	case SMCCC_ARCH_WORKAROUND_4:
		if (check_erratum_applies(CVE(2024, 7881)) != ERRATA_APPLIES) {
			return SMC_ARCH_CALL_NOT_SUPPORTED;
		}
		return 0;
#endif /* WORKAROUND_CVE_2024_7881 */

#endif /* __aarch64__ */

	/* Fallthrough */

	default:
		return SMC_UNK;
	}
}

/*
 * Handles SMCCC_ARCH_SOC_ID SMC calls.
 *
 * - GET_SOC_REVISION: returns SoC revision (AArch32/AArch64)
 * - GET_SOC_VERSION:  returns SoC version  (AArch32/AArch64)
 * - GET_SOC_NAME:     returns SoC name string (AArch64 only)
 *
 * Returns invalid parameter for unsupported calls.
 */
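/*
 * Per the SMCCC, the SoC version identifies the silicon provider (its
 * JEP-106 code) together with an implementation-defined SoC ID, while the
 * SoC revision is an implementation-defined revision number for that SoC.
 * All three values are supplied by the platform port.
 */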
static uintptr_t smccc_arch_id(u_register_t arg1, void *handle, uint32_t is_smc64)
{
	if (arg1 == SMCCC_GET_SOC_REVISION) {
		SMC_RET1(handle, plat_get_soc_revision());
	}
	if (arg1 == SMCCC_GET_SOC_VERSION) {
		SMC_RET1(handle, plat_get_soc_version());
	}
#if __aarch64__
	/* SoC Name is only present for SMC64 invocations */
	if ((arg1 == SMCCC_GET_SOC_NAME) && is_smc64) {
		uint64_t arg[SMCCC_SOC_NAME_LEN / 8];
		int32_t ret;
		char soc_name[SMCCC_SOC_NAME_LEN];

		(void)memset(soc_name, 0U, SMCCC_SOC_NAME_LEN);
		ret = plat_get_soc_name(soc_name);

		if (ret == SMC_ARCH_CALL_SUCCESS) {
			(void)memcpy(arg, soc_name, SMCCC_SOC_NAME_LEN);
			/*
			 * The SoC name is returned as a null-terminated
			 * ASCII string, split across registers X1 to X17
			 * in little endian order.
			 * Each 64-bit register holds 8 consecutive bytes
			 * of the string.
			 */
			SMC_RET18(handle, ret, arg[0], arg[1], arg[2],
					arg[3], arg[4], arg[5], arg[6],
					arg[7], arg[8], arg[9], arg[10],
					arg[11], arg[12], arg[13], arg[14],
					arg[15], arg[16]);
		} else {
			SMC_RET1(handle, ret);
		}
	}
#endif /* __aarch64__ */
	SMC_RET1(handle, SMC_ARCH_CALL_INVAL_PARAM);
}

/*
 * Reads a system register, sanitises its value, and returns a bitmask
 * representing which features in that sysreg have been enabled by firmware.
 * The bitmask is a 1:1 mapping to the register's fields.
 */
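/*
 * For each supported register, the *_FEATS mask selects the bits that encode
 * an architectural feature enable, the *_IGNORED mask drops bits that carry
 * no feature information, and the *_FLIPPED mask inverts the bits whose
 * enable sense is active-low in the hardware register (trap/disable bits),
 * so that a set bit in the returned bitmask uniformly means "enabled by
 * firmware".
 */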
#if ARCH_FEATURE_AVAILABILITY
static uintptr_t smccc_arch_feature_availability(u_register_t reg,
						 void *handle,
						 u_register_t flags)
{
	per_world_context_t *caller_per_world_context;
	el3_state_t *state;
	u_register_t bitmask, check;
	size_t security_state;

	/* check the caller security state */
	if (is_caller_secure(flags)) {
		security_state = SECURE;
	} else if (is_caller_non_secure(flags)) {
		security_state = NON_SECURE;
	} else {
#if ENABLE_RME
		security_state = REALM;
#else /* !ENABLE_RME */
		assert(0); /* shouldn't be possible */
#endif /* ENABLE_RME */
	}

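	/*
	 * SCR_EL3 and MDCR_EL3 are saved in the caller's per-CPU EL3 context,
	 * whereas CPTR_EL3 and MPAM3_EL3 are maintained per world, hence the
	 * two separate lookups below.
	 */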
	caller_per_world_context = &per_world_context[get_cpu_context_index(security_state)];
	state = get_el3state_ctx(cm_get_context(security_state));

	switch (reg) {
	case SCR_EL3_OPCODE:
		bitmask  = read_ctx_reg(state, CTX_SCR_EL3);
		bitmask &= ~SCR_EL3_IGNORED;
		check    = bitmask & ~SCR_EL3_FEATS;
		bitmask &= SCR_EL3_FEATS;
		bitmask ^= SCR_EL3_FLIPPED;
		/* will only report 0 if neither is implemented */
		if (is_feat_rng_trap_supported() || is_feat_rng_present())
			bitmask |= SCR_TRNDR_BIT;
		break;
	case CPTR_EL3_OPCODE:
		bitmask  = caller_per_world_context->ctx_cptr_el3;
		check    = bitmask & ~CPTR_EL3_FEATS;
		bitmask &= CPTR_EL3_FEATS;
		bitmask ^= CPTR_EL3_FLIPPED;
		break;
	case MDCR_EL3_OPCODE:
		bitmask  = read_ctx_reg(state, CTX_MDCR_EL3);
		bitmask &= ~MDCR_EL3_IGNORED;
		check    = bitmask & ~MDCR_EL3_FEATS;
		bitmask &= MDCR_EL3_FEATS;
		bitmask ^= MDCR_EL3_FLIPPED;
		break;
#if ENABLE_FEAT_MPAM
	case MPAM3_EL3_OPCODE:
		bitmask  = caller_per_world_context->ctx_mpam3_el3;
		bitmask &= ~MPAM3_EL3_IGNORED;
		check    = bitmask & ~MPAM3_EL3_FEATS;
		bitmask &= MPAM3_EL3_FEATS;
		bitmask ^= MPAM3_EL3_FLIPPED;
		break;
#endif /* ENABLE_FEAT_MPAM */
	default:
		SMC_RET2(handle, SMC_INVALID_PARAM, ULL(0));
	}

	/*
	 * Failing this check means that the requested register has a bit set
	 * that hasn't been declared as a known feature bit or an ignore bit.
	 * This is likely to happen when support for a new feature is added
	 * but the bitmask macros are not updated.
	 */
	if (ENABLE_ASSERTIONS && check != 0) {
		ERROR("Unexpected bits 0x%lx were set in register 0x%lx!\n", check, reg);
		assert(0);
	}

	SMC_RET2(handle, SMC_ARCH_CALL_SUCCESS, bitmask);
}
#endif /* ARCH_FEATURE_AVAILABILITY */

/*
 * Top-level Arm Architectural Service SMC handler.
 */
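/*
 * x1 carries the call-specific argument: the queried function ID for
 * SMCCC_ARCH_FEATURES, the SoC ID type for SMCCC_ARCH_SOC_ID, and the
 * register opcode for SMCCC_ARCH_FEATURE_AVAILABILITY.
 */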
static uintptr_t arm_arch_svc_smc_handler(uint32_t smc_fid,
	u_register_t x1,
	u_register_t x2,
	u_register_t x3,
	u_register_t x4,
	void *cookie,
	void *handle,
	u_register_t flags)
{
	(void)x2;
	(void)x3;
	(void)x4;
	(void)cookie;

	switch (smc_fid) {
	case SMCCC_VERSION:
		SMC_RET1(handle, smccc_version());
	case SMCCC_ARCH_FEATURES:
		SMC_RET1(handle, smccc_arch_features(x1));
	case SMCCC_ARCH_SOC_ID:
	case SMCCC_ARCH_SOC_ID | (SMC_64 << FUNCID_CC_SHIFT):
		return smccc_arch_id(x1, handle, (smc_fid
				& (SMC_64 << FUNCID_CC_SHIFT)));
#if __aarch64__
#if WORKAROUND_CVE_2017_5715
	case SMCCC_ARCH_WORKAROUND_1:
		/*
		 * The workaround has already been applied on affected PEs
		 * during entry to EL3. On unaffected PEs, this function
		 * has no effect.
		 */
		SMC_RET0(handle);
#endif
#if WORKAROUND_CVE_2018_3639
	case SMCCC_ARCH_WORKAROUND_2:
		/*
		 * The workaround has already been applied on affected PEs
		 * requiring dynamic mitigation during entry to EL3.
		 * On unaffected or statically mitigated PEs, this function
		 * has no effect.
		 */
		SMC_RET0(handle);
#endif
#if (WORKAROUND_CVE_2022_23960 || WORKAROUND_CVE_2017_5715)
	case SMCCC_ARCH_WORKAROUND_3:
		/*
		 * The workaround has already been applied on affected PEs
		 * during entry to EL3. On unaffected PEs, this function
		 * has no effect.
		 */
		SMC_RET0(handle);
#endif
#if WORKAROUND_CVE_2024_7881
	case SMCCC_ARCH_WORKAROUND_4:
		/*
		 * The workaround has already been applied on affected PEs
		 * during cold boot. This function has no effect whether the
		 * PE is affected or not.
		 */
		SMC_RET0(handle);
#endif /* WORKAROUND_CVE_2024_7881 */
#endif /* __aarch64__ */
#if ARCH_FEATURE_AVAILABILITY
	/* The return value is 64-bit, so only reply to SMC64 requests. */
	case SMCCC_ARCH_FEATURE_AVAILABILITY | (SMC_64 << FUNCID_CC_SHIFT):
		return smccc_arch_feature_availability(x1, handle, flags);
#endif /* ARCH_FEATURE_AVAILABILITY */
	default:
		WARN("Unimplemented Arm Architecture Service Call: 0x%x\n",
			smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}

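/*
 * Fast SMCs whose owning entity number falls in [OEN_ARM_START, OEN_ARM_END]
 * (the Arm Architecture Call range defined by the SMCCC) are routed to
 * arm_arch_svc_smc_handler; no setup hook is needed, hence the NULL init
 * function.
 */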
/* Register the Arm Architecture Service calls as a runtime service */
DECLARE_RT_SVC(
		arm_arch_svc,
		OEN_ARM_START,
		OEN_ARM_END,
		(uint8_t)SMC_TYPE_FAST,
		NULL,
		arm_arch_svc_smc_handler
);