xref: /rk3399_ARM-atf/services/arm_arch_svc/arm_arch_svc_setup.c (revision b62673c645752a78f649282cfa293e8da09e3bef)
/*
 * Copyright (c) 2018-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/cpus/errata.h>
#include <lib/cpus/wa_cve_2017_5715.h>
#include <lib/cpus/wa_cve_2018_3639.h>
#include <lib/cpus/wa_cve_2022_23960.h>
#include <lib/smccc.h>
#include <services/arm_arch_svc.h>
#include <smccc_helpers.h>
#include <plat/common/platform.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <lib/el3_runtime/context_mgmt.h>

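/*
 * Return the SMCCC version implemented by this firmware. Per the SMC Calling
 * Convention, the major version is encoded in bits [30:16] and the minor
 * version in bits [15:0] of the returned value.
 */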
static int32_t smccc_version(void)
{
	return MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION);
}

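/*
 * SMCCC_ARCH_FEATURES handler: report whether the queried Arm Architecture
 * Service function is implemented and, for the SMCCC_ARCH_WORKAROUND_*
 * queries, whether the mitigation is needed on the calling PE.
 */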
static int32_t smccc_arch_features(u_register_t arg1)
{
	switch (arg1) {
	case SMCCC_VERSION:
	case SMCCC_ARCH_FEATURES:
		return SMC_ARCH_CALL_SUCCESS;
	case SMCCC_ARCH_SOC_ID:
		return plat_is_smccc_feature_available(arg1);
#ifdef __aarch64__
	/* Workaround checks are currently only implemented for aarch64 */
#if WORKAROUND_CVE_2017_5715
	case SMCCC_ARCH_WORKAROUND_1:
		if (check_wa_cve_2017_5715() == ERRATA_NOT_APPLIES)
			return 1;
		return 0; /* ERRATA_APPLIES || ERRATA_MISSING */
#endif

#if WORKAROUND_CVE_2018_3639
	case SMCCC_ARCH_WORKAROUND_2: {
#if DYNAMIC_WORKAROUND_CVE_2018_3639
		unsigned long long ssbs;

		/*
		 * Firmware doesn't have to carry out dynamic workaround if the
		 * PE implements architectural Speculation Store Bypass Safe
		 * (SSBS) feature.
		 */
		ssbs = (read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_SSBS_SHIFT) &
			ID_AA64PFR1_EL1_SSBS_MASK;

		/*
		 * If architectural SSBS is available on this PE, no firmware
		 * mitigation via SMCCC_ARCH_WORKAROUND_2 is required.
		 */
		if (ssbs != SSBS_NOT_IMPLEMENTED)
			return 1;

		/*
		 * On a platform where at least one CPU requires
		 * dynamic mitigation but others are either unaffected
		 * or permanently mitigated, report the latter as not
		 * needing dynamic mitigation.
		 */
		if (wa_cve_2018_3639_get_disable_ptr() == NULL)
			return 1;
		/*
		 * If we get here, this CPU requires dynamic mitigation
		 * so report it as such.
		 */
		return 0;
#else
		/* Either the CPUs are unaffected or permanently mitigated */
		return SMC_ARCH_CALL_NOT_REQUIRED;
#endif
	}
#endif

#if (WORKAROUND_CVE_2022_23960 || WORKAROUND_CVE_2017_5715)
	case SMCCC_ARCH_WORKAROUND_3:
		/*
		 * SMCCC_ARCH_WORKAROUND_3 should also take into account
		 * CVE-2017-5715 since this SMC can be used instead of
		 * SMCCC_ARCH_WORKAROUND_1.
		 */
		if ((check_smccc_arch_wa3_applies() == ERRATA_NOT_APPLIES) &&
		    (check_wa_cve_2017_5715() == ERRATA_NOT_APPLIES)) {
			return 1;
		}
		return 0; /* ERRATA_APPLIES || ERRATA_MISSING */
#endif

#if ARCH_FEATURE_AVAILABILITY
	case SMCCC_ARCH_FEATURE_AVAILABILITY:
		return SMC_ARCH_CALL_SUCCESS;
#endif /* ARCH_FEATURE_AVAILABILITY */

#if WORKAROUND_CVE_2024_7881
	case SMCCC_ARCH_WORKAROUND_4:
		if (check_wa_cve_2024_7881() != ERRATA_APPLIES) {
			return SMC_ARCH_CALL_NOT_SUPPORTED;
		}
		return 0;
#endif /* WORKAROUND_CVE_2024_7881 */

#endif /* __aarch64__ */

	/* Fallthrough */

	default:
		return SMC_UNK;
	}
}

/*
 * Return the SoC revision or the SoC version on success; otherwise return
 * SMC_ARCH_CALL_INVAL_PARAM for an invalid parameter.
 */
static int32_t smccc_arch_id(u_register_t arg1)
{
	if (arg1 == SMCCC_GET_SOC_REVISION) {
		return plat_get_soc_revision();
	}
	if (arg1 == SMCCC_GET_SOC_VERSION) {
		return plat_get_soc_version();
	}
	return SMC_ARCH_CALL_INVAL_PARAM;
}

/*
 * Reads a system register, sanitises its value, and returns a bitmask
 * representing which features in that sysreg have been enabled by firmware.
 * The bitmask is a 1:1 mapping to the register's fields.
 */
#if ARCH_FEATURE_AVAILABILITY
static uintptr_t smccc_arch_feature_availability(u_register_t reg,
						 void *handle,
						 u_register_t flags)
{
	cpu_context_t *caller_context;
	per_world_context_t *caller_per_world_context;
	el3_state_t *state;
	u_register_t bitmask, check;

	/* Check the caller's security state. */
	if (is_caller_secure(flags)) {
		caller_context = cm_get_context(SECURE);
		caller_per_world_context = &per_world_context[CPU_CONTEXT_SECURE];
	} else if (is_caller_non_secure(flags)) {
		caller_context = cm_get_context(NON_SECURE);
		caller_per_world_context = &per_world_context[CPU_CONTEXT_NS];
	} else {
#if ENABLE_RME
		caller_context = cm_get_context(REALM);
		caller_per_world_context = &per_world_context[CPU_CONTEXT_REALM];
#else /* !ENABLE_RME */
		assert(0); /* shouldn't be possible */
#endif /* ENABLE_RME */
	}

	state = get_el3state_ctx(caller_context);

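	/*
	 * For each supported register, the *_FEATS mask selects the bits that
	 * describe features, *_IGNORED drops bits that carry no feature
	 * information, and XOR-ing with *_FLIPPED corrects the polarity of
	 * active-low bits, so a set bit in the result always means the
	 * corresponding feature is enabled.
	 */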
	switch (reg) {
	case SCR_EL3_OPCODE:
		bitmask  = read_ctx_reg(state, CTX_SCR_EL3);
		bitmask &= ~SCR_EL3_IGNORED;
		check    = bitmask & ~SCR_EL3_FEATS;
		bitmask &= SCR_EL3_FEATS;
		bitmask ^= SCR_EL3_FLIPPED;
		/* will only report 0 if neither is implemented */
		if (is_feat_rng_trap_supported() || is_feat_rng_present())
			bitmask |= SCR_TRNDR_BIT;
		break;
	case CPTR_EL3_OPCODE:
		bitmask  = caller_per_world_context->ctx_cptr_el3;
		check    = bitmask & ~CPTR_EL3_FEATS;
		bitmask &= CPTR_EL3_FEATS;
		bitmask ^= CPTR_EL3_FLIPPED;
		break;
	case MDCR_EL3_OPCODE:
		bitmask  = read_ctx_reg(state, CTX_MDCR_EL3);
		bitmask &= ~MDCR_EL3_IGNORED;
		check    = bitmask & ~MDCR_EL3_FEATS;
		bitmask &= MDCR_EL3_FEATS;
		bitmask ^= MDCR_EL3_FLIPPED;
		break;
#if ENABLE_FEAT_MPAM
	case MPAM3_EL3_OPCODE:
		bitmask  = caller_per_world_context->ctx_mpam3_el3;
		bitmask &= ~MPAM3_EL3_IGNORED;
		check    = bitmask & ~MPAM3_EL3_FEATS;
		bitmask &= MPAM3_EL3_FEATS;
		bitmask ^= MPAM3_EL3_FLIPPED;
		break;
#endif /* ENABLE_FEAT_MPAM */
	default:
		SMC_RET2(handle, SMC_INVALID_PARAM, ULL(0));
	}

	/*
	 * Failing this check means that the requested register has a bit set
	 * that hasn't been declared as a known feature bit or an ignore bit.
	 * This is likely to happen when support for a new feature is added
	 * but the bitmask macros are not updated.
	 */
	if (ENABLE_ASSERTIONS && check != 0) {
		ERROR("Unexpected bits 0x%lx were set in register %lx!\n", check, reg);
		assert(0);
	}

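	/* Return SMC_ARCH_CALL_SUCCESS in x0 and the sanitised bitmask in x1. */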
	SMC_RET2(handle, SMC_ARCH_CALL_SUCCESS, bitmask);
}
#endif /* ARCH_FEATURE_AVAILABILITY */

/*
 * Top-level Arm Architectural Service SMC handler.
 */
static uintptr_t arm_arch_svc_smc_handler(uint32_t smc_fid,
	u_register_t x1,
	u_register_t x2,
	u_register_t x3,
	u_register_t x4,
	void *cookie,
	void *handle,
	u_register_t flags)
{
	switch (smc_fid) {
	case SMCCC_VERSION:
		SMC_RET1(handle, smccc_version());
	case SMCCC_ARCH_FEATURES:
		SMC_RET1(handle, smccc_arch_features(x1));
	case SMCCC_ARCH_SOC_ID:
		SMC_RET1(handle, smccc_arch_id(x1));
#ifdef __aarch64__
#if WORKAROUND_CVE_2017_5715
	case SMCCC_ARCH_WORKAROUND_1:
		/*
		 * The workaround has already been applied on affected PEs
		 * during entry to EL3. On unaffected PEs, this function
		 * has no effect.
		 */
		SMC_RET0(handle);
#endif
#if WORKAROUND_CVE_2018_3639
	case SMCCC_ARCH_WORKAROUND_2:
		/*
		 * The workaround has already been applied on affected PEs
		 * requiring dynamic mitigation during entry to EL3.
		 * On unaffected or statically mitigated PEs, this function
		 * has no effect.
		 */
		SMC_RET0(handle);
#endif
#if (WORKAROUND_CVE_2022_23960 || WORKAROUND_CVE_2017_5715)
	case SMCCC_ARCH_WORKAROUND_3:
		/*
		 * The workaround has already been applied on affected PEs
		 * during entry to EL3. On unaffected PEs, this function
		 * has no effect.
		 */
		SMC_RET0(handle);
#endif
#if WORKAROUND_CVE_2024_7881
	case SMCCC_ARCH_WORKAROUND_4:
		/*
		 * The workaround has already been applied on affected PEs
		 * during cold boot. This function has no effect whether the
		 * PE is affected or not.
		 */
		SMC_RET0(handle);
#endif /* WORKAROUND_CVE_2024_7881 */
#endif /* __aarch64__ */
#if ARCH_FEATURE_AVAILABILITY
	/* The return value is 64-bit, so only reply to SMC64 requests. */
	case SMCCC_ARCH_FEATURE_AVAILABILITY | (SMC_64 << FUNCID_CC_SHIFT):
		return smccc_arch_feature_availability(x1, handle, flags);
#endif /* ARCH_FEATURE_AVAILABILITY */
	default:
		WARN("Unimplemented Arm Architecture Service Call: 0x%x\n",
			smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}

/* Register the Arm Architectural Service calls as a runtime service */
DECLARE_RT_SVC(
		arm_arch_svc,
		OEN_ARM_START,
		OEN_ARM_END,
		SMC_TYPE_FAST,
		NULL,
		arm_arch_svc_smc_handler
);