xref: /rk3399_ARM-atf/services/std_svc/rmmd/rmmd_main.c (revision 138ddcbf4d330d13a11576d973513014055f98c1)
/*
 * Copyright (c) 2021-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/pubsub.h>
#include <lib/extensions/pmuv3.h>
#include <lib/extensions/sys_reg_trace.h>
#include <lib/gpt_rme/gpt_rme.h>

#include <lib/spinlock.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/rmmd_svc.h>
#include <smccc_helpers.h>
#include <lib/extensions/sme.h>
#include <lib/extensions/sve.h>
#include "rmmd_initial_context.h"
#include "rmmd_private.h"

/*******************************************************************************
 * RMM boot failure flag
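 * Set to true on the CPU that fails to initialise the RMM (primary CPU at
 * cold boot or a secondary CPU at PSCI CPU_ON) and checked before any
 * subsequent entry into the RMM, so that later RMI and RMM-EL3 SMCs are
 * answered with SMC_UNK instead of entering a broken Realm world.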
 ******************************************************************************/
static bool rmm_boot_failed;

/*******************************************************************************
 * RMM context information.
 ******************************************************************************/
rmmd_rmm_context_t rmm_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * RMM entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *rmm_ep_info;

/*******************************************************************************
 * Static function declaration.
 ******************************************************************************/
static int32_t rmm_init(void);

/*******************************************************************************
 * This function takes an RMM context pointer and performs a synchronous entry
 * into it.
 ******************************************************************************/
uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *rmm_ctx)
{
	uint64_t rc;

	assert(rmm_ctx != NULL);

	cm_set_context(&(rmm_ctx->cpu_ctx), REALM);

	/* Restore the realm context assigned above */
	cm_el1_sysregs_context_restore(REALM);
	cm_el2_sysregs_context_restore(REALM);
	cm_set_next_eret_context(REALM);

	/* Enter RMM */
	rc = rmmd_rmm_enter(&rmm_ctx->c_rt_ctx);

	/*
	 * Save realm context. EL1 and EL2 Non-secure
	 * contexts will be restored before exiting to
	 * Non-secure world, therefore there is no need
	 * to clear EL1 and EL2 context registers.
	 */
	cm_el1_sysregs_context_save(REALM);
	cm_el2_sysregs_context_save(REALM);

	return rc;
}

/*******************************************************************************
 * This function returns to the place where rmmd_rmm_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void rmmd_rmm_sync_exit(uint64_t rc)
{
	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];

	/* Get context of the RMM in use by this CPU. */
	assert(cm_get_context(REALM) == &(ctx->cpu_ctx));

	/*
	 * The RMMD must have initiated the original request through a
	 * synchronous entry into RMM. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	rmmd_rmm_exit(ctx->c_rt_ctx, rc);

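	/* rmmd_rmm_exit() does not return; reaching panic() indicates a bug. */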
	panic();
}

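/*******************************************************************************
 * Seed the saved Realm EL2 context with its initial SPSR_EL2 and SCTLR_EL2.
 * The CTX_* defines are byte offsets into the context structure, so they are
 * shifted right by 3 to index the array of 64-bit registers.
 ******************************************************************************/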
static void rmm_el2_context_init(el2_sysregs_t *regs)
{
	regs->ctx_regs[CTX_SPSR_EL2 >> 3] = REALM_SPSR_EL2;
	regs->ctx_regs[CTX_SCTLR_EL2 >> 3] = SCTLR_EL2_RES1;
}

/*******************************************************************************
 * Enable architecture extensions on first entry to Realm world.
 ******************************************************************************/
static void manage_extensions_realm(cpu_context_t *ctx)
{
	if (is_feat_sve_supported()) {
		/*
		 * Enable SVE and FPU in realm context when it is enabled
		 * for NS. Realm manager must ensure that the SVE and FPU
		 * register contexts are properly managed.
		 */
		sve_enable(ctx);
	}

	/* NS can access this but Realm shouldn't */
	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_disable(ctx);
	}

	pmuv3_enable(ctx);

	/*
	 * If SME/SME2 is supported and enabled for the NS world, enable SME
	 * for the Realm world as well. The RMM will save/restore the required
	 * registers that are shared with SVE/FPU so that Realms can use the
	 * FPU or SVE.
	 */
	if (is_feat_sme_supported()) {
		/* sme_enable() also enables SME2 if supported by hardware */
		sme_enable(ctx);
	}
}

/*******************************************************************************
 * Jump to the RMM for the first time.
 ******************************************************************************/
static int32_t rmm_init(void)
{
	long rc;
	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];

	INFO("RMM init start.\n");

	/* Enable architecture extensions */
	manage_extensions_realm(&ctx->cpu_ctx);

	/* Initialize RMM EL2 context. */
	rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx);

	rc = rmmd_rmm_sync_entry(ctx);
	if (rc != E_RMM_BOOT_SUCCESS) {
		ERROR("RMM init failed: %ld\n", rc);
		/* Mark the boot as failed for all the CPUs */
		rmm_boot_failed = true;
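		/*
		 * Do not panic: the system continues to boot, but all RMI and
		 * RMM-EL3 SMCs will from now on be answered with SMC_UNK.
		 */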
		return 0;
	}

	INFO("RMM init end.\n");

	return 1;
}

/*******************************************************************************
 * Load the RMM manifest, then set up the RMM.
 ******************************************************************************/
int rmmd_setup(void)
{
	size_t shared_buf_size __unused;
	uintptr_t shared_buf_base;
	uint32_t ep_attr;
	unsigned int linear_id = plat_my_core_pos();
	rmmd_rmm_context_t *rmm_ctx = &rmm_context[linear_id];
	struct rmm_manifest *manifest;
	int rc;

	/* Make sure RME is supported. */
	assert(get_armv9_2_feat_rme_support() != 0U);

	rmm_ep_info = bl31_plat_get_next_image_ep_info(REALM);
	if (rmm_ep_info == NULL) {
		WARN("No RMM image provided by BL2 boot loader. Booting "
		     "device without RMM initialization. SMCs destined for "
		     "RMM will return SMC_UNK\n");
		return -ENOENT;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(rmm_ep_info->pc == RMM_BASE);

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = EP_REALM;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0U) {
		ep_attr |= EP_EE_BIG;
	}

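	/* RMM is entered at R-EL2, using SP_EL2, with all exceptions masked. */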
	SET_PARAM_HEAD(rmm_ep_info, PARAM_EP, VERSION_1, ep_attr);
	rmm_ep_info->spsr = SPSR_64(MODE_EL2,
					MODE_SP_ELX,
					DISABLE_ALL_EXCEPTIONS);

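	/*
	 * The EL3 <-> RMM shared buffer is provided by the platform through
	 * plat_rmmd_get_el3_rmm_shared_mem(). As a rough illustration only
	 * (PLAT_RMM_SHARED_BASE/SIZE below are hypothetical names, not part
	 * of this file), a platform port might implement it as:
	 *
	 *   size_t plat_rmmd_get_el3_rmm_shared_mem(uintptr_t *shared_buf)
	 *   {
	 *           *shared_buf = PLAT_RMM_SHARED_BASE;
	 *           return PLAT_RMM_SHARED_SIZE;     // expected to be 4KB
	 *   }
	 */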
	shared_buf_size =
			plat_rmmd_get_el3_rmm_shared_mem(&shared_buf_base);

	assert((shared_buf_size == SZ_4K) &&
					((void *)shared_buf_base != NULL));

	/* Load the boot manifest at the beginning of the shared area */
	manifest = (struct rmm_manifest *)shared_buf_base;
	rc = plat_rmmd_load_manifest(manifest);
	if (rc != 0) {
		ERROR("Error loading RMM Boot Manifest (%i)\n", rc);
		return rc;
	}
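	/*
	 * Clean the manifest to memory so that it is visible to the RMM even
	 * if the RMM initially reads the shared area with its data cache
	 * disabled during early boot.
	 */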
	flush_dcache_range((uintptr_t)shared_buf_base, shared_buf_size);

	/*
	 * Prepare coldboot arguments for RMM:
	 * arg0: This CPUID (primary processor).
	 * arg1: Version for this Boot Interface.
	 * arg2: PLATFORM_CORE_COUNT.
	 * arg3: Base address for the EL3 <-> RMM shared area. The boot
	 *       manifest will be stored at the beginning of this area.
	 */
	rmm_ep_info->args.arg0 = linear_id;
	rmm_ep_info->args.arg1 = RMM_EL3_INTERFACE_VERSION;
	rmm_ep_info->args.arg2 = PLATFORM_CORE_COUNT;
	rmm_ep_info->args.arg3 = shared_buf_base;

	/* Initialise RMM context with this entry point information */
	cm_setup_context(&rmm_ctx->cpu_ctx, rmm_ep_info);

	INFO("RMM setup done.\n");

	/* Register init function for deferred init. */
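	/*
	 * The registered rmm_init() later performs the first synchronous
	 * entry into the RMM on this (primary) CPU.
	 */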
	bl31_register_rmm_init(&rmm_init);

	return 0;
}

/*******************************************************************************
 * Forward SMC to the other security state
 ******************************************************************************/
static uint64_t rmmd_smc_forward(uint32_t src_sec_state,
				 uint32_t dst_sec_state, uint64_t x0,
				 uint64_t x1, uint64_t x2, uint64_t x3,
				 uint64_t x4, void *handle)
{
	cpu_context_t *ctx = cm_get_context(dst_sec_state);

	/* Save incoming security state */
	cm_el1_sysregs_context_save(src_sec_state);
	cm_el2_sysregs_context_save(src_sec_state);

	/* Restore outgoing security state */
	cm_el1_sysregs_context_restore(dst_sec_state);
	cm_el2_sysregs_context_restore(dst_sec_state);
	cm_set_next_eret_context(dst_sec_state);

	/*
	 * As per SMCCC v1.2, x4 to x7 must be preserved unless they are used
	 * as return arguments, so the onward and return paths are handled
	 * differently: up to 8 arguments are passed on the onward path and
	 * 4 on the return path. Register x4 will be preserved by the RMM if
	 * it is not used as a return argument.
	 */
	if (src_sec_state == NON_SECURE) {
		SMC_RET8(ctx, x0, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));
	}

	SMC_RET5(ctx, x0, x1, x2, x3, x4);
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for RMI. Each call is
 * either forwarded to the other security state or handled by the RMM dispatcher
 ******************************************************************************/
uint64_t rmmd_rmi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
			  uint64_t x3, uint64_t x4, void *cookie,
			  void *handle, uint64_t flags)
{
	uint32_t src_sec_state;

	/* If RMM failed to boot, treat any RMI SMC as unknown */
	if (rmm_boot_failed) {
		WARN("RMMD: Failed to boot up RMM. Ignoring RMI call\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/* Determine which security state this SMC originated from */
	src_sec_state = caller_sec_state(flags);

	/* RMI must not be invoked by the Secure world */
	if (src_sec_state == SMC_FROM_SECURE) {
		WARN("RMMD: RMI invoked by secure world.\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/*
	 * Forward an RMI call from the Normal world to the Realm world as it
	 * is.
	 */
	if (src_sec_state == SMC_FROM_NON_SECURE) {
		/*
		 * If SVE hint bit is set in the flags then update the SMC
		 * function id and pass it on to the lower EL.
		 */
		if (is_sve_hint_set(flags)) {
			smc_fid |= (FUNCID_SVE_HINT_MASK <<
				    FUNCID_SVE_HINT_SHIFT);
		}
		VERBOSE("RMMD: RMI call from non-secure world.\n");
		return rmmd_smc_forward(NON_SECURE, REALM, smc_fid,
					x1, x2, x3, x4, handle);
	}

	if (src_sec_state != SMC_FROM_REALM) {
		SMC_RET1(handle, SMC_UNK);
	}

	switch (smc_fid) {
	case RMM_RMI_REQ_COMPLETE: {
		uint64_t x5 = SMC_GET_GP(handle, CTX_GPREG_X5);

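		/*
		 * RMM_RMI_REQ_COMPLETE returns the result of the original RMI
		 * call to the Normal world: x1-x5 coming from the Realm are
		 * delivered as x0-x4 of the Non-secure caller.
		 */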
		return rmmd_smc_forward(REALM, NON_SECURE, x1,
					x2, x3, x4, x5, handle);
	}
	default:
		WARN("RMMD: Unsupported RMM call 0x%08x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}

/*******************************************************************************
 * This CPU has been turned on. Enter RMM to initialise R-EL2. Entry into RMM
 * is done after initialising minimal architectural state that guarantees safe
 * execution.
 ******************************************************************************/
static void *rmmd_cpu_on_finish_handler(const void *arg)
{
	long rc;
	uint32_t linear_id = plat_my_core_pos();
	rmmd_rmm_context_t *ctx = &rmm_context[linear_id];

	if (rmm_boot_failed) {
		/* RMM boot failed on a previous CPU. Abort. */
		ERROR("RMM failed to initialize. Ignoring for CPU%u\n",
								linear_id);
		return NULL;
	}

	/*
	 * Prepare warmboot arguments for RMM:
	 * arg0: This CPUID.
	 * arg1 to arg3: Not used.
	 */
	rmm_ep_info->args.arg0 = linear_id;
	rmm_ep_info->args.arg1 = 0ULL;
	rmm_ep_info->args.arg2 = 0ULL;
	rmm_ep_info->args.arg3 = 0ULL;

	/* Initialise RMM context with this entry point information */
	cm_setup_context(&ctx->cpu_ctx, rmm_ep_info);

	/* Enable architecture extensions */
	manage_extensions_realm(&ctx->cpu_ctx);

	/* Initialize RMM EL2 context. */
	rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx);

	rc = rmmd_rmm_sync_entry(ctx);

	if (rc != E_RMM_BOOT_SUCCESS) {
		ERROR("RMM init failed on CPU%u: %ld\n", linear_id, rc);
		/* Mark the boot as failed for any other booting CPU */
		rmm_boot_failed = true;
	}

	return NULL;
}

/* Subscribe to the PSCI CPU_ON finish event to initialize RMM on secondaries */
SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, rmmd_cpu_on_finish_handler);

/* Convert GPT lib error to RMMD GTS error */
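/*
 * 0 maps to E_RMM_OK, -EINVAL to E_RMM_BAD_ADDR and -EPERM to E_RMM_BAD_PAS;
 * no other error code is expected from the GPT library here.
 */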
static int gpt_to_gts_error(int error, uint32_t smc_fid, uint64_t address)
{
	int ret;

	if (error == 0) {
		return E_RMM_OK;
	}

	if (error == -EINVAL) {
		ret = E_RMM_BAD_ADDR;
	} else {
		/* This is the only other error code we expect */
		assert(error == -EPERM);
		ret = E_RMM_BAD_PAS;
	}

	ERROR("RMMD: PAS Transition failed. GPT ret = %d, PA: 0x%"PRIx64 ", FID = 0x%x\n",
				error, address, smc_fid);
	return ret;
}

/*******************************************************************************
 * This function handles RMM-EL3 interface SMCs
 ******************************************************************************/
uint64_t rmmd_rmm_el3_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
				uint64_t x3, uint64_t x4, void *cookie,
				void *handle, uint64_t flags)
{
	uint32_t src_sec_state;
	int ret;

	/* If RMM failed to boot, treat any RMM-EL3 interface SMC as unknown */
	if (rmm_boot_failed) {
		WARN("RMMD: Failed to boot up RMM. Ignoring RMM-EL3 call\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/* Determine which security state this SMC originated from */
	src_sec_state = caller_sec_state(flags);

	if (src_sec_state != SMC_FROM_REALM) {
		WARN("RMMD: RMM-EL3 call originated from secure or normal world\n");
		SMC_RET1(handle, SMC_UNK);
	}

	switch (smc_fid) {
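	/*
	 * RMM_GTSI_DELEGATE/UNDELEGATE transition a single 4KB granule, whose
	 * physical address is passed in x1, between the Non-secure and Realm
	 * PAS using the GPT library.
	 */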
	case RMM_GTSI_DELEGATE:
		ret = gpt_delegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
		SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
	case RMM_GTSI_UNDELEGATE:
		ret = gpt_undelegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
		SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
	case RMM_ATTEST_GET_PLAT_TOKEN:
		ret = rmmd_attest_get_platform_token(x1, &x2, x3);
		SMC_RET2(handle, ret, x2);
	case RMM_ATTEST_GET_REALM_KEY:
		ret = rmmd_attest_get_signing_key(x1, &x2, x3);
		SMC_RET2(handle, ret, x2);

	case RMM_BOOT_COMPLETE:
		VERBOSE("RMMD: running rmmd_rmm_sync_exit\n");
		rmmd_rmm_sync_exit(x1);

	default:
		WARN("RMMD: Unsupported RMM-EL3 call 0x%08x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}