xref: /rk3399_ARM-atf/services/std_svc/rmmd/rmmd_main.c (revision 3b06438dd1e038a7453d3b812ca6ef2da54f6ba8)
1 /*
2  * Copyright (c) 2021-2025, Arm Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <errno.h>
9 #include <inttypes.h>
10 #include <stdint.h>
11 #include <string.h>
12 
13 #include <arch_helpers.h>
14 #include <arch_features.h>
15 #include <bl31/bl31.h>
16 #include <common/debug.h>
17 #include <common/runtime_svc.h>
18 #include <context.h>
19 #include <lib/el3_runtime/context_mgmt.h>
20 #include <lib/el3_runtime/cpu_data.h>
21 #include <lib/el3_runtime/pubsub.h>
22 #include <lib/extensions/mpam.h>
23 #include <lib/extensions/pmuv3.h>
24 #include <lib/extensions/sys_reg_trace.h>
25 #include <lib/gpt_rme/gpt_rme.h>
26 
27 #include <lib/spinlock.h>
28 #include <lib/utils.h>
29 #include <lib/xlat_tables/xlat_tables_v2.h>
30 #include <plat/common/common_def.h>
31 #include <plat/common/platform.h>
32 #include <platform_def.h>
33 #include <services/rmmd_svc.h>
34 #include <smccc_helpers.h>
35 #include <lib/extensions/sme.h>
36 #include <lib/extensions/sve.h>
37 #include <lib/extensions/spe.h>
38 #include <lib/extensions/trbe.h>
39 #include "rmmd_initial_context.h"
40 #include "rmmd_private.h"
41 
42 /*******************************************************************************
43  * RMM boot failure flag
44  ******************************************************************************/
45 static bool rmm_boot_failed;
46 
47 /*******************************************************************************
48  * RMM context information.
49  ******************************************************************************/
50 rmmd_rmm_context_t rmm_context[PLATFORM_CORE_COUNT];
51 
52 /*******************************************************************************
53  * RMM entry point information. Discovered on the primary core and reused
54  * on secondary cores.
55  ******************************************************************************/
56 static entry_point_info_t *rmm_ep_info;
57 
58 /*******************************************************************************
59  * Static function declaration.
60  ******************************************************************************/
61 static int32_t rmm_init(void);
62 
/*******************************************************************************
 * This function takes an RMM context pointer and performs a synchronous entry
 * into it.
 ******************************************************************************/
uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *rmm_ctx)
{
	uint64_t rc;

	assert(rmm_ctx != NULL);

	/* Make this CPU's RMM context the current one for the Realm state. */
	cm_set_context(&(rmm_ctx->cpu_ctx), REALM);

	/* Restore the realm context assigned above */
	cm_el2_sysregs_context_restore(REALM);
	cm_set_next_eret_context(REALM);

	/*
	 * Enter RMM. The current C runtime context is saved in c_rt_ctx so
	 * that rmmd_rmm_sync_exit() can jump back here; rc is the value RMM
	 * eventually passes back through rmmd_rmm_exit().
	 */
	rc = rmmd_rmm_enter(&rmm_ctx->c_rt_ctx);

	/*
	 * Save realm context. EL2 Non-secure context will be restored
	 * before exiting Non-secure world, therefore there is no need
	 * to clear EL2 context registers.
	 */
	cm_el2_sysregs_context_save(REALM);

	return rc;
}
91 
/*******************************************************************************
 * This function returns to the place where rmmd_rmm_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void rmmd_rmm_sync_exit(uint64_t rc)
{
	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];

	/* Get context of the RMM in use by this CPU. */
	assert(cm_get_context(REALM) == &(ctx->cpu_ctx));

	/*
	 * The RMMD must have initiated the original request through a
	 * synchronous entry into RMM. Jump back to the original C runtime
	 * context with the value of rc in x0;
	 */
	rmmd_rmm_exit(ctx->c_rt_ctx, rc);

	/* rmmd_rmm_exit() never returns; reaching this point is fatal. */
	panic();
}
112 
/*
 * Program the minimal EL2 state required for a safe first exception return
 * into RMM: a known SPSR_EL2 value and SCTLR_EL2 with only its RES1 bits set.
 */
static void rmm_el2_context_init(el2_sysregs_t *regs)
{
	write_el2_ctx_common(regs, spsr_el2, REALM_SPSR_EL2);
	write_el2_ctx_common(regs, sctlr_el2, SCTLR_EL2_RES1);
}
118 
/*******************************************************************************
 * Enable architecture extensions on first entry to Realm world.
 ******************************************************************************/

static void manage_extensions_realm(cpu_context_t *ctx)
{
	/*
	 * Enable access to TPIDR2_EL0 if SME/SME2 is enabled for Non Secure world.
	 */
	if (is_feat_sme_supported()) {
		sme_enable(ctx);
	}

	/*
	 * SPE and TRBE cannot be fully disabled from EL3 registers alone, only
	 * sysreg access can. In case the EL1 controls leave them active on
	 * context switch, we want the owning security state to be NS so Realm
	 * can't be DOSed.
	 */
	if (is_feat_spe_supported()) {
		spe_disable(ctx);
	}

	/* Same rationale as SPE above: keep TRBE owned by the NS world. */
	if (is_feat_trbe_supported()) {
		trbe_disable(ctx);
	}
}
146 
/*
 * Configure the per-world (as opposed to per-CPU) EL3 context for the Realm
 * security state. Called once, from the primary CPU, during rmm_init().
 */
static void manage_extensions_realm_per_world(void)
{
	cm_el3_arch_init_per_world(&per_world_context[CPU_CONTEXT_REALM]);

	/*
	 * Enable SVE and FPU in realm context when it is enabled for NS.
	 * Realm manager must ensure that the SVE and FPU register
	 * contexts are properly managed.
	 */
	if (is_feat_sve_supported()) {
		sve_enable_per_world(&per_world_context[CPU_CONTEXT_REALM]);
	}

	/* NS can access this but Realm shouldn't */
	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_disable_per_world(&per_world_context[CPU_CONTEXT_REALM]);
	}

	/*
	 * If SME/SME2 is supported and enabled for NS world, then disable trapping
	 * of SME instructions for Realm world. RMM will save/restore required
	 * registers that are shared with SVE/FPU so that Realm can use FPU or SVE.
	 */
	if (is_feat_sme_supported()) {
		sme_enable_per_world(&per_world_context[CPU_CONTEXT_REALM]);
	}

	/*
	 * If FEAT_MPAM is supported and enabled, then disable trapping access
	 * to the MPAM registers for Realm world. Instead, RMM will configure
	 * the access to be trapped by itself so it can inject undefined aborts
	 * back to the Realm.
	 */
	if (is_feat_mpam_supported()) {
		mpam_enable_per_world(&per_world_context[CPU_CONTEXT_REALM]);
	}
}
184 
/*******************************************************************************
 * Jump to the RMM for the first time.
 ******************************************************************************/
static int32_t rmm_init(void)
{
	long rc;
	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];

	INFO("RMM init start.\n");

	/* Enable architecture extensions */
	manage_extensions_realm(&ctx->cpu_ctx);

	/* One-time, per-world Realm EL3 configuration. */
	manage_extensions_realm_per_world();

	/* Initialize RMM EL2 context. */
	rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx);

	/* First synchronous entry; RMM performs its cold boot here. */
	rc = rmmd_rmm_sync_entry(ctx);
	if (rc != E_RMM_BOOT_SUCCESS) {
		ERROR("RMM init failed: %ld\n", rc);
		/* Mark the boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return 0;
	}

	INFO("RMM init end.\n");

	/* Non-zero indicates successful initialisation. */
	return 1;
}
215 
/*******************************************************************************
 * Load and read RMM manifest, setup RMM.
 *
 * Runs on the primary CPU during BL31 setup. Validates RME support and the
 * RMM image entry point, prepares the boot manifest in the EL3<->RMM shared
 * buffer, fills the cold boot arguments and registers rmm_init() for
 * deferred execution. Returns 0 on success or a negative errno-style value;
 * on any failure rmm_boot_failed is set so later SMCs are rejected.
 ******************************************************************************/
int rmmd_setup(void)
{
	/* __unused: only referenced by assert(), which may be compiled out. */
	size_t shared_buf_size __unused;
	uintptr_t shared_buf_base;
	uint32_t ep_attr;
	unsigned int linear_id = plat_my_core_pos();
	rmmd_rmm_context_t *rmm_ctx = &rmm_context[linear_id];
	struct rmm_manifest *manifest;
	int rc;

	/* Make sure RME is supported. */
	if (is_feat_rme_present() == 0U) {
		/* Mark the RMM boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return -ENOTSUP;
	}

	rmm_ep_info = bl31_plat_get_next_image_ep_info(REALM);
	if ((rmm_ep_info == NULL) || (rmm_ep_info->pc == 0)) {
		WARN("No RMM image provided by BL2 boot loader, Booting "
		     "device without RMM initialization. SMCs destined for "
		     "RMM will return SMC_UNK\n");

		/* Mark the boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return -ENOENT;
	}

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = EP_REALM;
	/* Propagate EL3 endianness (SCTLR_EL3.EE) to the RMM entry point. */
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0U) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(rmm_ep_info, PARAM_EP, VERSION_1, ep_attr);
	/* RMM is entered at R-EL2 with all exceptions masked. */
	rmm_ep_info->spsr = SPSR_64(MODE_EL2,
					MODE_SP_ELX,
					DISABLE_ALL_EXCEPTIONS);

	shared_buf_size =
			plat_rmmd_get_el3_rmm_shared_mem(&shared_buf_base);

	/* The shared area is expected to be exactly one 4K page. */
	assert((shared_buf_size == SZ_4K) &&
					((void *)shared_buf_base != NULL));

	/* Zero out and load the boot manifest at the beginning of the share area */
	manifest = (struct rmm_manifest *)shared_buf_base;
	(void)memset((void *)manifest, 0, sizeof(struct rmm_manifest));

	rc = plat_rmmd_load_manifest(manifest);
	if (rc != 0) {
		ERROR("Error loading RMM Boot Manifest (%i)\n", rc);
		/* Mark the boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return rc;
	}
	/* Make the manifest visible to RMM before it enables its MMU. */
	flush_dcache_range((uintptr_t)shared_buf_base, shared_buf_size);

	/*
	 * Prepare coldboot arguments for RMM:
	 * arg0: This CPUID (primary processor).
	 * arg1: Version for this Boot Interface.
	 * arg2: PLATFORM_CORE_COUNT.
	 * arg3: Base address for the EL3 <-> RMM shared area. The boot
	 *       manifest will be stored at the beginning of this area.
	 */
	rmm_ep_info->args.arg0 = linear_id;
	rmm_ep_info->args.arg1 = RMM_EL3_INTERFACE_VERSION;
	rmm_ep_info->args.arg2 = PLATFORM_CORE_COUNT;
	rmm_ep_info->args.arg3 = shared_buf_base;

	/* Initialise RMM context with this entry point information */
	cm_setup_context(&rmm_ctx->cpu_ctx, rmm_ep_info);

	INFO("RMM setup done.\n");

	/* Register init function for deferred init.  */
	bl31_register_rmm_init(&rmm_init);

	return 0;
}
300 
/*******************************************************************************
 * Forward SMC to the other security state
 *
 * Saves the caller's EL2 context, restores the destination world's context
 * and exits EL3 into it via an SMC_RET* macro (this function does not fall
 * through past those macros).
 ******************************************************************************/
static uint64_t	rmmd_smc_forward(uint32_t src_sec_state,
				 uint32_t dst_sec_state, uint64_t x0,
				 uint64_t x1, uint64_t x2, uint64_t x3,
				 uint64_t x4, void *handle)
{
	/* Context that will receive the return values and be entered next. */
	cpu_context_t *ctx = cm_get_context(dst_sec_state);

	/* Save incoming security state */
	cm_el2_sysregs_context_save(src_sec_state);

	/* Restore outgoing security state */
	cm_el2_sysregs_context_restore(dst_sec_state);
	cm_set_next_eret_context(dst_sec_state);

	/*
	 * As per SMCCCv1.2, we need to preserve x4 to x7 unless
	 * being used as return args. Hence we differentiate the
	 * onward and backward path. Support upto 8 args in the
	 * onward path and 4 args in return path.
	 * Register x4 will be preserved by RMM in case it is not
	 * used in return path.
	 */
	if (src_sec_state == NON_SECURE) {
		/* Onward path (NS -> Realm): also pass the caller's x5-x7. */
		SMC_RET8(ctx, x0, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));
	}

	/* Return path (Realm -> NS): x0-x4 only. */
	SMC_RET5(ctx, x0, x1, x2, x3, x4);
}
335 
/*******************************************************************************
 * This function handles all SMCs in the range reserved for RMI. Each call is
 * either forwarded to the other security state or handled by the RMM dispatcher
 ******************************************************************************/
uint64_t rmmd_rmi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
			  uint64_t x3, uint64_t x4, void *cookie,
			  void *handle, uint64_t flags)
{
	uint32_t src_sec_state;

	/* If RMM failed to boot, treat any RMI SMC as unknown */
	if (rmm_boot_failed) {
		WARN("RMMD: Failed to boot up RMM. Ignoring RMI call\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/* Determine which security state this SMC originated from */
	src_sec_state = caller_sec_state(flags);

	/* RMI must not be invoked by the Secure world */
	if (src_sec_state == SMC_FROM_SECURE) {
		WARN("RMMD: RMI invoked by secure world.\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/*
	 * Forward an RMI call from the Normal world to the Realm world as it
	 * is.
	 */
	if (src_sec_state == SMC_FROM_NON_SECURE) {
		/*
		 * If SVE hint bit is set in the flags then update the SMC
		 * function id and pass it on to the lower EL.
		 */
		if (is_sve_hint_set(flags)) {
			smc_fid |= (FUNCID_SVE_HINT_MASK <<
				    FUNCID_SVE_HINT_SHIFT);
		}
		VERBOSE("RMMD: RMI call from non-secure world.\n");
		return rmmd_smc_forward(NON_SECURE, REALM, smc_fid,
					x1, x2, x3, x4, handle);
	}

	/* At this point only Realm-originated calls are expected. */
	if (src_sec_state != SMC_FROM_REALM) {
		SMC_RET1(handle, SMC_UNK);
	}

	switch (smc_fid) {
	case RMM_RMI_REQ_COMPLETE: {
		/* x1 carries the RMI result; x2-x5 are extra return args. */
		uint64_t x5 = SMC_GET_GP(handle, CTX_GPREG_X5);

		return rmmd_smc_forward(REALM, NON_SECURE, x1,
					x2, x3, x4, x5, handle);
	}
	default:
		WARN("RMMD: Unsupported RMM call 0x%08x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}
395 
396 /*******************************************************************************
397  * This cpu has been turned on. Enter RMM to initialise R-EL2.  Entry into RMM
398  * is done after initialising minimal architectural state that guarantees safe
399  * execution.
400  ******************************************************************************/
401 static void *rmmd_cpu_on_finish_handler(const void *arg)
402 {
403 	long rc;
404 	uint32_t linear_id = plat_my_core_pos();
405 	rmmd_rmm_context_t *ctx = &rmm_context[linear_id];
406 
407 	if (rmm_boot_failed) {
408 		/* RMM Boot failed on a previous CPU. Abort. */
409 		ERROR("RMM Failed to initialize. Ignoring for CPU%d\n",
410 								linear_id);
411 		return NULL;
412 	}
413 
414 	/*
415 	 * Prepare warmboot arguments for RMM:
416 	 * arg0: This CPUID.
417 	 * arg1 to arg3: Not used.
418 	 */
419 	rmm_ep_info->args.arg0 = linear_id;
420 	rmm_ep_info->args.arg1 = 0ULL;
421 	rmm_ep_info->args.arg2 = 0ULL;
422 	rmm_ep_info->args.arg3 = 0ULL;
423 
424 	/* Initialise RMM context with this entry point information */
425 	cm_setup_context(&ctx->cpu_ctx, rmm_ep_info);
426 
427 	/* Enable architecture extensions */
428 	manage_extensions_realm(&ctx->cpu_ctx);
429 
430 	/* Initialize RMM EL2 context. */
431 	rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx);
432 
433 	rc = rmmd_rmm_sync_entry(ctx);
434 
435 	if (rc != E_RMM_BOOT_SUCCESS) {
436 		ERROR("RMM init failed on CPU%d: %ld\n", linear_id, rc);
437 		/* Mark the boot as failed for any other booting CPU */
438 		rmm_boot_failed = true;
439 	}
440 
441 	return NULL;
442 }
443 
444 /* Subscribe to PSCI CPU on to initialize RMM on secondary */
445 SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, rmmd_cpu_on_finish_handler);
446 
447 /* Convert GPT lib error to RMMD GTS error */
448 static int gpt_to_gts_error(int error, uint32_t smc_fid, uint64_t address)
449 {
450 	int ret;
451 
452 	if (error == 0) {
453 		return E_RMM_OK;
454 	}
455 
456 	if (error == -EINVAL) {
457 		ret = E_RMM_BAD_ADDR;
458 	} else {
459 		/* This is the only other error code we expect */
460 		assert(error == -EPERM);
461 		ret = E_RMM_BAD_PAS;
462 	}
463 
464 	ERROR("RMMD: PAS Transition failed. GPT ret = %d, PA: 0x%"PRIx64 ", FID = 0x%x\n",
465 				error, address, smc_fid);
466 	return ret;
467 }
468 
469 static int rmm_el3_ifc_get_feat_register(uint64_t feat_reg_idx,
470 					 uint64_t *feat_reg)
471 {
472 	if (feat_reg_idx != RMM_EL3_FEAT_REG_0_IDX) {
473 		ERROR("RMMD: Failed to get feature register %ld\n", feat_reg_idx);
474 		return E_RMM_INVAL;
475 	}
476 
477 	*feat_reg = 0UL;
478 #if RMMD_ENABLE_EL3_TOKEN_SIGN
479 	*feat_reg |= RMM_EL3_FEAT_REG_0_EL3_TOKEN_SIGN_MASK;
480 #endif
481 	return E_RMM_OK;
482 }
483 
484 /*
485  * Update encryption key associated with @mecid.
486  */
487 static int rmmd_mecid_key_update(uint64_t mecid)
488 {
489 	uint64_t mecid_width, mecid_width_mask;
490 	int ret;
491 
492 	/*
493 	 * Check whether FEAT_MEC is supported by the hardware. If not, return
494 	 * unknown SMC.
495 	 */
496 	if (is_feat_mec_supported() == false) {
497 		return E_RMM_UNK;
498 	}
499 
500 	/*
501 	 * Check whether the mecid parameter is at most MECIDR_EL2.MECIDWidthm1 + 1
502 	 * in length.
503 	 */
504 	mecid_width = ((read_mecidr_el2() >> MECIDR_EL2_MECIDWidthm1_SHIFT) &
505 		MECIDR_EL2_MECIDWidthm1_MASK) + 1;
506 	mecid_width_mask = ((1 << mecid_width) - 1);
507 	if ((mecid & ~mecid_width_mask) != 0U) {
508 		return E_RMM_INVAL;
509 	}
510 
511 	ret = plat_rmmd_mecid_key_update(mecid);
512 
513 	if (ret != 0) {
514 		return E_RMM_UNK;
515 	}
516 	return E_RMM_OK;
517 }
518 
/*******************************************************************************
 * This function handles RMM-EL3 interface SMCs
 *
 * Only calls originating from the Realm world are accepted; anything else
 * (including calls made before RMM booted successfully) gets SMC_UNK.
 ******************************************************************************/
uint64_t rmmd_rmm_el3_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
				uint64_t x3, uint64_t x4, void *cookie,
				void *handle, uint64_t flags)
{
	uint64_t remaining_len = 0UL;
	uint32_t src_sec_state;
	int ret;

	/* If RMM failed to boot, treat any RMM-EL3 interface SMC as unknown */
	if (rmm_boot_failed) {
		WARN("RMMD: Failed to boot up RMM. Ignoring RMM-EL3 call\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/* Determine which security state this SMC originated from */
	src_sec_state = caller_sec_state(flags);

	if (src_sec_state != SMC_FROM_REALM) {
		WARN("RMMD: RMM-EL3 call originated from secure or normal world\n");
		SMC_RET1(handle, SMC_UNK);
	}

	switch (smc_fid) {
	case RMM_GTSI_DELEGATE:
		/* x1 is the PA of the 4KB granule to move into the Realm PAS. */
		ret = gpt_delegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
		SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
	case RMM_GTSI_UNDELEGATE:
		/* x1 is the PA of the 4KB granule to return to the NS PAS. */
		ret = gpt_undelegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
		SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
	case RMM_ATTEST_GET_REALM_KEY:
		ret = rmmd_attest_get_signing_key(x1, &x2, x3);
		SMC_RET2(handle, ret, x2);
	case RMM_ATTEST_GET_PLAT_TOKEN:
		ret = rmmd_attest_get_platform_token(x1, &x2, x3, &remaining_len);
		SMC_RET3(handle, ret, x2, remaining_len);
	case RMM_EL3_FEATURES:
		ret = rmm_el3_ifc_get_feat_register(x1, &x2);
		SMC_RET2(handle, ret, x2);
#if RMMD_ENABLE_EL3_TOKEN_SIGN
	case RMM_EL3_TOKEN_SIGN:
		return rmmd_el3_token_sign(handle, x1, x2, x3, x4);
#endif
	case RMM_BOOT_COMPLETE:
		VERBOSE("RMMD: running rmmd_rmm_sync_exit\n");
		/* Does not return: jumps back to the rmmd_rmm_sync_entry() caller. */
		rmmd_rmm_sync_exit(x1);

	case RMM_MECID_KEY_UPDATE:
		ret = rmmd_mecid_key_update(x1);
		SMC_RET1(handle, ret);
	default:
		WARN("RMMD: Unsupported RMM-EL3 call 0x%08x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}
576