/*
 * Copyright (c) 2018-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/cpus/errata.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/smccc.h>
#include <plat/common/platform.h>
#include <services/arm_arch_svc.h>
#include <smccc_helpers.h>

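/*
 * Return the SMCCC version implemented by EL3 firmware: bit[31] is zero,
 * bits[30:16] carry the major version and bits[15:0] the minor version,
 * as packed by MAKE_SMCCC_VERSION().
 */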
static int32_t smccc_version(void)
{
	return (int32_t)MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION);
}

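/*
 * Handles SMCCC_ARCH_FEATURES: arg1 holds the function ID being queried.
 * Returns SMC_ARCH_CALL_SUCCESS (or a call-specific status such as
 * SMC_ARCH_CALL_NOT_REQUIRED) when the function is implemented, and
 * SMC_UNK otherwise.
 */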
static int32_t smccc_arch_features(u_register_t arg1)
{
	switch (arg1) {
	case SMCCC_VERSION:
	case SMCCC_ARCH_FEATURES:
		return SMC_ARCH_CALL_SUCCESS;
	case SMCCC_ARCH_SOC_ID:
		return plat_is_smccc_feature_available(arg1);
#ifdef __aarch64__
	/* Workaround checks are currently only implemented for aarch64 */
#if WORKAROUND_CVE_2017_5715
	case SMCCC_ARCH_WORKAROUND_1:
		if (check_erratum_applies(CVE(2017, 5715))
			== ERRATA_NOT_APPLIES) {
			return 1;
		}

		return 0; /* ERRATA_APPLIES || ERRATA_MISSING */
#endif

#if WORKAROUND_CVE_2018_3639
	case SMCCC_ARCH_WORKAROUND_2: {
#if DYNAMIC_WORKAROUND_CVE_2018_3639
		unsigned long long ssbs;

		/*
		 * Firmware doesn't have to carry out the dynamic workaround
		 * if the PE implements the architectural Speculative Store
		 * Bypass Safe (SSBS) feature.
		 */
		ssbs = (read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_SSBS_SHIFT) &
			ID_AA64PFR1_EL1_SSBS_MASK;

		/*
		 * If architectural SSBS is available on this PE, no firmware
		 * mitigation via SMCCC_ARCH_WORKAROUND_2 is required.
		 */
		if (ssbs != SSBS_NOT_IMPLEMENTED)
			return 1;

		/*
		 * On a platform where at least one CPU requires
		 * dynamic mitigation but others are either unaffected
		 * or permanently mitigated, report the latter as not
		 * needing dynamic mitigation.
		 */
		if (check_erratum_applies(ERRATUM(ARCH_WORKAROUND_2))
			== ERRATA_NOT_APPLIES)
			return 1;

		/*
		 * If we get here, this CPU requires dynamic mitigation
		 * so report it as such.
		 */
		return 0;
#else
		/* Either the CPUs are unaffected or permanently mitigated */
		return SMC_ARCH_CALL_NOT_REQUIRED;
#endif
	}
#endif

#if (WORKAROUND_CVE_2022_23960 || WORKAROUND_CVE_2017_5715)
	case SMCCC_ARCH_WORKAROUND_3:
		/*
		 * SMCCC_ARCH_WORKAROUND_3 should also take CVE-2017-5715
		 * into account, since this SMC can be used instead of
		 * SMCCC_ARCH_WORKAROUND_1.
		 */
		if ((check_erratum_applies(ERRATUM(ARCH_WORKAROUND_3))
			== ERRATA_NOT_APPLIES) &&
		    (check_erratum_applies(CVE(2017, 5715))
			== ERRATA_NOT_APPLIES)) {
			return 1;
		}

		return 0; /* ERRATA_APPLIES || ERRATA_MISSING */
#endif

#if ARCH_FEATURE_AVAILABILITY
	case SMCCC_ARCH_FEATURE_AVAILABILITY | (SMC_64 << FUNCID_CC_SHIFT):
		return SMC_ARCH_CALL_SUCCESS;
#endif /* ARCH_FEATURE_AVAILABILITY */

#if WORKAROUND_CVE_2024_7881
	case SMCCC_ARCH_WORKAROUND_4:
		if (check_erratum_applies(CVE(2024, 7881)) != ERRATA_APPLIES) {
			return SMC_ARCH_CALL_NOT_SUPPORTED;
		}
		return 0;
#endif /* WORKAROUND_CVE_2024_7881 */

#endif /* __aarch64__ */

	/* Fallthrough */

	default:
		return SMC_UNK;
	}
}
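
/*
 * Illustrative caller-side discovery sequence (a sketch, not part of the
 * implementation in this file): per the SMCCC, a lower EL should confirm
 * SMCCC_VERSION is at least 1.1 (0x10001), probe SMCCC_ARCH_FEATURES with
 * the workaround's function ID, and invoke the workaround only when the
 * probe returns 0:
 *
 *	if ((smc(SMCCC_VERSION) >= 0x10001) &&
 *	    (smc(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_1) == 0))
 *		smc(SMCCC_ARCH_WORKAROUND_1);
 *
 * where smc() stands in for a hypothetical conduit helper.
 */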

/*
 * Handles SMCCC_ARCH_SOC_ID SMC calls.
 *
 * - GET_SOC_REVISION: returns the SoC revision (AArch32/AArch64)
 * - GET_SOC_VERSION:  returns the SoC version  (AArch32/AArch64)
 * - GET_SOC_NAME:     returns the SoC name string (AArch64 only)
 *
 * Returns SMC_ARCH_CALL_INVAL_PARAM for unsupported calls.
 */
static uintptr_t smccc_arch_id(u_register_t arg1, void *handle, uint32_t is_smc64)
{
	if (arg1 == SMCCC_GET_SOC_REVISION) {
		SMC_RET1(handle, plat_get_soc_revision());
	}
	if (arg1 == SMCCC_GET_SOC_VERSION) {
		SMC_RET1(handle, plat_get_soc_version());
	}
#if __aarch64__
	/* The SoC name is only provided for SMC64 invocations */
	if ((arg1 == SMCCC_GET_SOC_NAME) && (is_smc64 != 0U)) {
		uint64_t arg[SMCCC_SOC_NAME_LEN / 8];
		int32_t ret;
		char soc_name[SMCCC_SOC_NAME_LEN];

		(void)memset(soc_name, 0U, SMCCC_SOC_NAME_LEN);
		ret = plat_get_soc_name(soc_name);

		if (ret == SMC_ARCH_CALL_SUCCESS) {
			(void)memcpy(arg, soc_name, SMCCC_SOC_NAME_LEN);
			/*
			 * The SoC name is returned as a null-terminated
			 * ASCII string, split across registers X1 to X17
			 * in little-endian order. Each 64-bit register
			 * holds 8 consecutive bytes of the string.
			 */
			SMC_RET18(handle, ret, arg[0], arg[1], arg[2],
					arg[3], arg[4], arg[5], arg[6],
					arg[7], arg[8], arg[9], arg[10],
					arg[11], arg[12], arg[13], arg[14],
					arg[15], arg[16]);
		} else {
			SMC_RET1(handle, ret);
		}
	}
#endif /* __aarch64__ */
	SMC_RET1(handle, SMC_ARCH_CALL_INVAL_PARAM);
}
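
/*
 * Note: for SMCCC_GET_SOC_NAME above, a caller can reassemble the name by
 * storing X1..X17 to memory in register order; the buffer is zero-filled
 * before plat_get_soc_name() runs, so any trailing bytes the platform does
 * not write read back as NUL.
 */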

/*
 * Reads a system register, sanitises its value, and returns a bitmask
 * representing the features in that sysreg that have been enabled by
 * firmware. The bitmask is a 1:1 mapping of the register's fields.
 */
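/*
 * Illustrative call shape (an assumption based on the handler below, not a
 * normative definition): the caller places the target register's encoded
 * opcode, e.g. SCR_EL3_OPCODE, in x1 of an SMC64
 * SMCCC_ARCH_FEATURE_AVAILABILITY call; on success, x0 holds
 * SMC_ARCH_CALL_SUCCESS and x1 the feature bitmask.
 */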
#if ARCH_FEATURE_AVAILABILITY
static uintptr_t smccc_arch_feature_availability(u_register_t reg,
						 void *handle,
						 u_register_t flags)
{
	cpu_context_t *caller_context;
	per_world_context_t *caller_per_world_context;
	el3_state_t *state;
	u_register_t bitmask, check;

	/* Check the caller's security state */
	if (is_caller_secure(flags)) {
		caller_context = cm_get_context(SECURE);
		caller_per_world_context = &per_world_context[CPU_CONTEXT_SECURE];
	} else if (is_caller_non_secure(flags)) {
		caller_context = cm_get_context(NON_SECURE);
		caller_per_world_context = &per_world_context[CPU_CONTEXT_NS];
	} else {
#if ENABLE_RME
		caller_context = cm_get_context(REALM);
		caller_per_world_context = &per_world_context[CPU_CONTEXT_REALM];
#else /* !ENABLE_RME */
		assert(0); /* Shouldn't be possible */
#endif /* ENABLE_RME */
	}

	state = get_el3state_ctx(caller_context);

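	/*
	 * Every supported register goes through the same pipeline: strip
	 * bits this query deliberately ignores (*_IGNORED), record any bits
	 * that are neither known features nor ignored (check), keep only
	 * the declared feature bits (*_FEATS), then XOR with *_FLIPPED so
	 * that fields whose "enabled" state is 0 in the register also read
	 * as 1 in the returned bitmask.
	 */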
	switch (reg) {
	case SCR_EL3_OPCODE:
		bitmask  = read_ctx_reg(state, CTX_SCR_EL3);
		bitmask &= ~SCR_EL3_IGNORED;
		check    = bitmask & ~SCR_EL3_FEATS;
		bitmask &= SCR_EL3_FEATS;
		bitmask ^= SCR_EL3_FLIPPED;
		/* Will only report 0 if neither is implemented */
		if (is_feat_rng_trap_supported() || is_feat_rng_present())
			bitmask |= SCR_TRNDR_BIT;
		break;
	case CPTR_EL3_OPCODE:
		bitmask  = caller_per_world_context->ctx_cptr_el3;
		check    = bitmask & ~CPTR_EL3_FEATS;
		bitmask &= CPTR_EL3_FEATS;
		bitmask ^= CPTR_EL3_FLIPPED;
		break;
	case MDCR_EL3_OPCODE:
		bitmask  = read_ctx_reg(state, CTX_MDCR_EL3);
		bitmask &= ~MDCR_EL3_IGNORED;
		check    = bitmask & ~MDCR_EL3_FEATS;
		bitmask &= MDCR_EL3_FEATS;
		bitmask ^= MDCR_EL3_FLIPPED;
		break;
#if ENABLE_FEAT_MPAM
	case MPAM3_EL3_OPCODE:
		bitmask  = caller_per_world_context->ctx_mpam3_el3;
		bitmask &= ~MPAM3_EL3_IGNORED;
		check    = bitmask & ~MPAM3_EL3_FEATS;
		bitmask &= MPAM3_EL3_FEATS;
		bitmask ^= MPAM3_EL3_FLIPPED;
		break;
#endif /* ENABLE_FEAT_MPAM */
	default:
		SMC_RET2(handle, SMC_INVALID_PARAM, ULL(0));
	}

	/*
	 * Failing this means that the requested register has a bit set that
	 * hasn't been declared as a known feature bit or an ignore bit. This
	 * is likely to happen when support for a new feature is added but the
	 * bitmask macros are not updated.
	 */
	if (ENABLE_ASSERTIONS && check != 0) {
		ERROR("Unexpected bits 0x%lx were set in register 0x%lx!\n",
		      check, reg);
		assert(0);
	}

	SMC_RET2(handle, SMC_ARCH_CALL_SUCCESS, bitmask);
}
#endif /* ARCH_FEATURE_AVAILABILITY */

/*
 * Top-level Arm Architectural Service SMC handler.
 */
static uintptr_t arm_arch_svc_smc_handler(uint32_t smc_fid,
	u_register_t x1,
	u_register_t x2,
	u_register_t x3,
	u_register_t x4,
	void *cookie,
	void *handle,
	u_register_t flags)
{
	(void)x2;
	(void)x3;
	(void)x4;
	(void)cookie;

	switch (smc_fid) {
	case SMCCC_VERSION:
		SMC_RET1(handle, smccc_version());
	case SMCCC_ARCH_FEATURES:
		SMC_RET1(handle, smccc_arch_features(x1));
	case SMCCC_ARCH_SOC_ID:
	case SMCCC_ARCH_SOC_ID | (SMC_64 << FUNCID_CC_SHIFT):
		return smccc_arch_id(x1, handle, (smc_fid
				& (SMC_64 << FUNCID_CC_SHIFT)));
#if __aarch64__
#if WORKAROUND_CVE_2017_5715
	case SMCCC_ARCH_WORKAROUND_1:
		/*
		 * The workaround has already been applied on affected PEs
		 * during entry to EL3. On unaffected PEs, this function
		 * has no effect.
		 */
		SMC_RET0(handle);
#endif
#if WORKAROUND_CVE_2018_3639
	case SMCCC_ARCH_WORKAROUND_2:
		/*
		 * The workaround has already been applied on affected PEs
		 * requiring dynamic mitigation during entry to EL3.
		 * On unaffected or statically mitigated PEs, this function
		 * has no effect.
		 */
		SMC_RET0(handle);
#endif
#if (WORKAROUND_CVE_2022_23960 || WORKAROUND_CVE_2017_5715)
	case SMCCC_ARCH_WORKAROUND_3:
		/*
		 * The workaround has already been applied on affected PEs
		 * during entry to EL3. On unaffected PEs, this function
		 * has no effect.
		 */
		SMC_RET0(handle);
#endif
#if WORKAROUND_CVE_2024_7881
	case SMCCC_ARCH_WORKAROUND_4:
		/*
		 * The workaround has already been applied on affected PEs
		 * during cold boot. This function has no effect whether or
		 * not the PE is affected.
		 */
		SMC_RET0(handle);
#endif /* WORKAROUND_CVE_2024_7881 */
#endif /* __aarch64__ */
#if ARCH_FEATURE_AVAILABILITY
	/* The return value is 64-bit, so only reply to SMC64 requests */
	case SMCCC_ARCH_FEATURE_AVAILABILITY | (SMC_64 << FUNCID_CC_SHIFT):
		return smccc_arch_feature_availability(x1, handle, flags);
#endif /* ARCH_FEATURE_AVAILABILITY */
	default:
		WARN("Unimplemented Arm Architecture Service Call: 0x%x\n",
			smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}

/* Register Arm Architecture Service Calls as a runtime service */
DECLARE_RT_SVC(
		arm_arch_svc,
		OEN_ARM_START,
		OEN_ARM_END,
		(uint8_t)SMC_TYPE_FAST,
		NULL,
		arm_arch_svc_smc_handler
);