xref: /rk3399_ARM-atf/services/arm_arch_svc/arm_arch_svc_setup.c (revision 430f246e58d146949d399d72294f56403672bee0)
/*
 * Copyright (c) 2018-2026, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/cpus/errata.h>
#include <lib/smccc.h>
#include <services/arm_arch_svc.h>
#include <smccc_helpers.h>
#include <plat/common/platform.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <lib/el3_runtime/context_mgmt.h>

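/*
 * Per the SMC Calling Convention, the version word encodes the major
 * revision in bits [30:16] and the minor revision in bits [15:0], with
 * bit 31 zero; e.g. MAKE_SMCCC_VERSION(1, 2) evaluates to 0x10002.
 */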
static int32_t smccc_version(void)
{
	return (int32_t)MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION);
}

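/*
 * Reports whether the SMCCC_ARCH_WORKAROUND_1 (CVE-2017-5715, Spectre v2)
 * call is needed on this PE: SMC_WA_DO if the erratum applies, SMC_WA_DO_NOT
 * if the PE is known to be unaffected, and SMC_ARCH_CALL_NOT_SUPPORTED
 * otherwise.
 */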
static inline __unused int32_t smccc_check_for_wa_1(void)
{
#if WORKAROUND_CVE_2017_5715
	switch (check_erratum_applies(CVE(2017, 5715))) {
	case ERRATA_APPLIES:
		return SMC_WA_DO;
	case ERRATA_NOT_APPLIES:
		return SMC_WA_DO_NOT;
	}
#endif
	/* ERRATA_MISSING, CVE not compiled in, or not vulnerable */
	return SMC_ARCH_CALL_NOT_SUPPORTED;
}

static int32_t smccc_arch_features(u_register_t arg1)
{
	switch (arg1) {
	case SMCCC_VERSION:
	case SMCCC_ARCH_FEATURES:
		return SMC_ARCH_CALL_SUCCESS;
	case SMCCC_ARCH_SOC_ID:
		return plat_is_smccc_feature_available(arg1);
#ifdef __aarch64__
	/* Workaround checks are currently only implemented for aarch64 */
	case SMCCC_ARCH_WORKAROUND_1:
		return smccc_check_for_wa_1();

	/* see the note for WA_3 below */
	case SMCCC_ARCH_WORKAROUND_2: {
#if WORKAROUND_CVE_2018_3639
#if DYNAMIC_WORKAROUND_CVE_2018_3639
		/*
		 * Firmware doesn't have to carry out the dynamic workaround
		 * if the PE implements the architectural Speculative Store
		 * Bypass Safe (SSBS) feature.
		 */
		if (is_feat_ssbs_present()) {
			return SMC_WA_DO_NOT;
		}

		switch (check_erratum_applies(ERRATUM(ARCH_WORKAROUND_2))) {
		case ERRATA_APPLIES:
			return SMC_WA_DO;
		case ERRATA_NOT_APPLIES:
			return SMC_WA_DO_NOT;
		}
#else
		/* Either the CPUs are unaffected or permanently mitigated */
		return SMC_ARCH_CALL_NOT_REQUIRED;
#endif
#endif
		/* ERRATA_MISSING, CVE not compiled in, or not vulnerable */
		return SMC_ARCH_CALL_NOT_SUPPORTED;
	}

	/*
	 * NOTE: this uses the ARCH_WORKAROUND_3 pseudo-erratum instead of the
	 * one registered for CVE_2022_23960 on purpose. This is because not
	 * all cores affected by the CVE need the SMC workaround. For newer
	 * cores, it is assumed that lower EL software is capable of working
	 * around the problem itself and so no firmware involvement is needed.
	 * Select cores that do not have such software can register for the
	 * WA_3 SMC explicitly.
	 */
	case SMCCC_ARCH_WORKAROUND_3:
#if WORKAROUND_CVE_2022_23960
		switch (check_erratum_applies(ERRATUM(ARCH_WORKAROUND_3))) {
		case ERRATA_APPLIES:
			return SMC_WA_DO;
		case ERRATA_NOT_APPLIES:
			return SMC_WA_DO_NOT;
		}
#endif /* WORKAROUND_CVE_2022_23960 */
		/* WA_3 can be used instead of WA_1 */
		return smccc_check_for_wa_1();

	case SMCCC_ARCH_WORKAROUND_4:
#if WORKAROUND_CVE_2024_7881
		/* WA_4 does not have an SMC_WA_DO_NOT */
		if (check_erratum_applies(CVE(2024, 7881)) == ERRATA_APPLIES) {
			return SMC_WA_DO;
		}
#endif /* WORKAROUND_CVE_2024_7881 */
		/* ERRATA_MISSING, CVE not compiled in, or not vulnerable */
		return SMC_ARCH_CALL_NOT_SUPPORTED;

#if ARCH_FEATURE_AVAILABILITY
	case SMCCC_ARCH_FEATURE_AVAILABILITY | (SMC_64 << FUNCID_CC_SHIFT):
		return SMC_ARCH_CALL_SUCCESS;
#endif /* ARCH_FEATURE_AVAILABILITY */

#endif /* __aarch64__ */

	default:
		return SMC_UNK;
	}
}
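
/*
 * An illustrative (hypothetical) discovery sequence for a lower-EL caller,
 * following the SMCCC rules: check that SMCCC_VERSION is at least 1.1, ask
 * SMCCC_ARCH_FEATURES about the workaround ID, and only then invoke it.
 * smc_call(fid, arg) is an assumed helper returning x0, not part of TF-A:
 *
 *	if ((int32_t)smc_call(SMCCC_VERSION, 0) >= 0x10001 &&
 *	    (int32_t)smc_call(SMCCC_ARCH_FEATURES,
 *			      SMCCC_ARCH_WORKAROUND_1) == SMC_WA_DO) {
 *		smc_call(SMCCC_ARCH_WORKAROUND_1, 0);
 *	}
 */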

/*
 * Handles SMCCC_ARCH_SOC_ID SMC calls.
 *
 * - GET_SOC_REVISION: returns SoC revision (AArch32/AArch64)
 * - GET_SOC_VERSION:  returns SoC version  (AArch32/AArch64)
 * - GET_SOC_NAME:     returns SoC name string (AArch64 only)
 *
 * Returns invalid parameter for unsupported calls.
 */
static uintptr_t smccc_arch_id(u_register_t arg1, void *handle, uint32_t is_smc64)
{
	if (arg1 == SMCCC_GET_SOC_REVISION) {
		SMC_RET1(handle, plat_get_soc_revision());
	}
	if (arg1 == SMCCC_GET_SOC_VERSION) {
		SMC_RET1(handle, plat_get_soc_version());
	}
#if __aarch64__
	/* The SoC name is only present for SMC64 invocations */
	if ((arg1 == SMCCC_GET_SOC_NAME) && is_smc64) {
		uint64_t arg[SMCCC_SOC_NAME_LEN / 8];
		int32_t ret;
		char soc_name[SMCCC_SOC_NAME_LEN];

		(void)memset(soc_name, 0U, SMCCC_SOC_NAME_LEN);
		ret = plat_get_soc_name(soc_name);

		if (ret == SMC_ARCH_CALL_SUCCESS) {
			(void)memcpy(arg, soc_name, SMCCC_SOC_NAME_LEN);
			/*
			 * The SoC name is returned as a null-terminated
			 * ASCII string, split across registers X1 to X17
			 * in little-endian order. Each 64-bit register
			 * holds 8 consecutive bytes of the string.
			 */
			SMC_RET18(handle, ret, arg[0], arg[1], arg[2],
					arg[3], arg[4], arg[5], arg[6],
					arg[7], arg[8], arg[9], arg[10],
					arg[11], arg[12], arg[13], arg[14],
					arg[15], arg[16]);
		} else {
			SMC_RET1(handle, ret);
		}
	}
#endif /* __aarch64__ */
	SMC_RET1(handle, SMC_ARCH_CALL_INVAL_PARAM);
}
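
/*
 * A minimal sketch of the platform hook consumed above; the name string is
 * a made-up placeholder, real platforms report their own identifier:
 *
 *	int32_t plat_get_soc_name(char *soc_name)
 *	{
 *		(void)strlcpy(soc_name, "jep106:0000:example-soc",
 *			      SMCCC_SOC_NAME_LEN);
 *		return SMC_ARCH_CALL_SUCCESS;
 *	}
 */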

/*
 * Reads a system register, sanitises its value, and returns a bitmask
 * representing which features in that sysreg have been enabled by firmware.
 * The bitmask is a 1:1 mapping to the register's fields.
 */
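/*
 * Convention implied by the mask macros used below: the *_FEATS masks pick
 * out the bits that encode feature enables, the *_IGNORED masks drop bits
 * with no feature meaning, and XOR-ing with *_FLIPPED normalises active-low
 * bits so that a set bit in the result always means "enabled".
 */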
#if ARCH_FEATURE_AVAILABILITY
static uintptr_t smccc_arch_feature_availability(u_register_t reg,
						 void *handle,
						 u_register_t flags)
{
	per_world_context_t *caller_per_world_context;
	el3_state_t *state;
	u_register_t bitmask, check;
	size_t security_state;

	/* Check the caller's security state */
	if (is_caller_secure(flags)) {
		security_state = SECURE;
	} else if (is_caller_non_secure(flags)) {
		security_state = NON_SECURE;
	} else {
#if ENABLE_RMM
		security_state = REALM;
#else /* !ENABLE_RMM */
		assert(0); /* shouldn't be possible */
#endif /* ENABLE_RMM */
	}

	caller_per_world_context = &per_world_context[get_cpu_context_index(security_state)];
	state = get_el3state_ctx(cm_get_context(security_state));

	switch (reg) {
	case SCR_EL3_OPCODE:
		bitmask  = read_ctx_reg(state, CTX_SCR_EL3);
		bitmask &= ~SCR_EL3_IGNORED;
		check    = bitmask & ~SCR_EL3_FEATS;
		bitmask &= SCR_EL3_FEATS;
		bitmask ^= SCR_EL3_FLIPPED;
		/* will only report 0 if neither is implemented */
		if (is_feat_rng_trap_supported() || is_feat_rng_present()) {
			bitmask |= SCR_TRNDR_BIT;
			check   &= ~SCR_TRNDR_BIT;
		}
		break;
	case CPTR_EL3_OPCODE:
		bitmask  = caller_per_world_context->ctx_cptr_el3;
		check    = bitmask & ~CPTR_EL3_FEATS;
		bitmask &= CPTR_EL3_FEATS;
		bitmask ^= CPTR_EL3_FLIPPED;
		break;
	case MDCR_EL3_OPCODE:
		bitmask  = read_ctx_reg(state, CTX_MDCR_EL3);
		bitmask &= ~MDCR_EL3_IGNORED;
		check    = bitmask & ~MDCR_EL3_FEATS;
		bitmask &= MDCR_EL3_FEATS;
		bitmask ^= MDCR_EL3_FLIPPED;
		break;
#if ENABLE_FEAT_MPAM
	case MPAM3_EL3_OPCODE:
		bitmask  = caller_per_world_context->ctx_mpam3_el3;
		bitmask &= ~MPAM3_EL3_IGNORED;
		check    = bitmask & ~MPAM3_EL3_FEATS;
		bitmask &= MPAM3_EL3_FEATS;
		bitmask ^= MPAM3_EL3_FLIPPED;
		break;
#endif /* ENABLE_FEAT_MPAM */
	default:
		SMC_RET2(handle, SMC_INVALID_PARAM, ULL(0));
	}

	/*
	 * Failing this means that the requested register has a bit set that
	 * hasn't been declared as a known feature bit or an ignore bit. This
	 * is likely to happen when support for a new feature is added but the
	 * bitmask macros are not updated.
	 */
	if (ENABLE_ASSERTIONS && check != 0) {
		ERROR("Unexpected bits 0x%lx were set in register 0x%lx!\n", check, reg);
		assert(0);
	}

	SMC_RET2(handle, SMC_ARCH_CALL_SUCCESS, bitmask);
}
#endif /* ARCH_FEATURE_AVAILABILITY */
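
/*
 * An illustrative (hypothetical) SMC64 invocation from a lower EL, shown in
 * terms of the SMC64 calling-convention registers, e.g. to discover which
 * SCR_EL3-controlled features firmware left enabled for the caller's world:
 *
 *	X0 = SMCCC_ARCH_FEATURE_AVAILABILITY | (SMC_64 << FUNCID_CC_SHIFT)
 *	X1 = SCR_EL3_OPCODE
 *	SMC #0
 *	On success: X0 == SMC_ARCH_CALL_SUCCESS and X1 holds the bitmask,
 *	one bit per SCR_EL3 field, with 1 meaning "enabled".
 */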

/*
 * Top-level Arm Architectural Service SMC handler.
 */
static uintptr_t arm_arch_svc_smc_handler(uint32_t smc_fid,
	u_register_t x1,
	u_register_t x2,
	u_register_t x3,
	u_register_t x4,
	void *cookie,
	void *handle,
	u_register_t flags)
{
	(void)x2;
	(void)x3;
	(void)x4;
	(void)cookie;

	switch (smc_fid) {
	case SMCCC_VERSION:
		SMC_RET1(handle, smccc_version());
	case SMCCC_ARCH_FEATURES:
		SMC_RET1(handle, smccc_arch_features(x1));
	case SMCCC_ARCH_SOC_ID:
	case SMCCC_ARCH_SOC_ID | (SMC_64 << FUNCID_CC_SHIFT):
		return smccc_arch_id(x1, handle, (smc_fid
				& (SMC_64 << FUNCID_CC_SHIFT)));
#if __aarch64__
#if WORKAROUND_CVE_2017_5715
	case SMCCC_ARCH_WORKAROUND_1:
		/*
		 * The workaround has already been applied on affected PEs
		 * during entry to EL3. On unaffected PEs, this function
		 * has no effect.
		 */
		SMC_RET0(handle);
#endif
#if WORKAROUND_CVE_2018_3639
	case SMCCC_ARCH_WORKAROUND_2:
		/*
		 * The workaround has already been applied on affected PEs
		 * requiring dynamic mitigation during entry to EL3.
		 * On unaffected or statically mitigated PEs, this function
		 * has no effect.
		 */
		SMC_RET0(handle);
#endif
#if (WORKAROUND_CVE_2022_23960 || WORKAROUND_CVE_2017_5715)
	case SMCCC_ARCH_WORKAROUND_3:
		/*
		 * The workaround has already been applied on affected PEs
		 * during entry to EL3. On unaffected PEs, this function
		 * has no effect.
		 */
		SMC_RET0(handle);
#endif
#if WORKAROUND_CVE_2024_7881
	case SMCCC_ARCH_WORKAROUND_4:
		/*
		 * The workaround has already been applied on affected PEs
		 * during cold boot. This function has no effect whether the
		 * PE is affected or not.
		 */
		SMC_RET0(handle);
#endif /* WORKAROUND_CVE_2024_7881 */
#endif /* __aarch64__ */
#if ARCH_FEATURE_AVAILABILITY
	/* The return value is 64-bit, so only reply to SMC64 requests */
	case SMCCC_ARCH_FEATURE_AVAILABILITY | (SMC_64 << FUNCID_CC_SHIFT):
		return smccc_arch_feature_availability(x1, handle, flags);
#endif /* ARCH_FEATURE_AVAILABILITY */
	default:
		WARN("Unimplemented Arm Architecture Service Call: 0x%x\n",
			smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}

/* Register the Arm Architectural Service calls as a runtime service */
DECLARE_RT_SVC(
		arm_arch_svc,
		OEN_ARM_START,
		OEN_ARM_END,
		SMC_TYPE_FAST,
		NULL,
		arm_arch_svc_smc_handler
);