xref: /rk3399_ARM-atf/services/std_svc/rmmd/rmmd_main.c (revision f42f2e734287aca9a5b27c6f50c7e80fd88da110)
1 /*
2  * Copyright (c) 2021-2025, Arm Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <errno.h>
9 #include <inttypes.h>
10 #include <stdint.h>
11 #include <string.h>
12 
13 #include <arch_helpers.h>
14 #include <arch_features.h>
15 #include <bl31/bl31.h>
16 #include <common/debug.h>
17 #include <common/runtime_svc.h>
18 #include <context.h>
19 #include <lib/el3_runtime/context_mgmt.h>
20 #include <lib/el3_runtime/cpu_data.h>
21 #include <lib/el3_runtime/pubsub.h>
22 #include <lib/extensions/mpam.h>
23 #include <lib/extensions/pmuv3.h>
24 #include <lib/extensions/sys_reg_trace.h>
25 #include <lib/gpt_rme/gpt_rme.h>
26 
27 #include <lib/spinlock.h>
28 #include <lib/utils.h>
29 #include <lib/xlat_tables/xlat_tables_v2.h>
30 #include <plat/common/common_def.h>
31 #include <plat/common/platform.h>
32 #include <platform_def.h>
33 #include <services/rmmd_svc.h>
34 #include <smccc_helpers.h>
35 #include <lib/extensions/sme.h>
36 #include <lib/extensions/sve.h>
37 #include <lib/extensions/spe.h>
38 #include <lib/extensions/trbe.h>
39 #include "rmmd_private.h"
40 
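/*
 * Field layout of the x1 argument decoded by rmmd_mecid_key_update():
 * bits [47:32] carry the target MECID and bit [0] carries the refresh reason.
 */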
41 #define MECID_SHIFT			U(32)
42 #define MECID_MASK			0xFFFFU
43 
44 #define MEC_REFRESH_REASON_SHIFT	U(0)
45 #define MEC_REFRESH_REASON_MASK		BIT(0)
46 
47 /*******************************************************************************
48  * RMM boot failure flag
49  ******************************************************************************/
50 static bool rmm_boot_failed;
51 
52 /*******************************************************************************
53  * RMM context information.
54  ******************************************************************************/
55 rmmd_rmm_context_t rmm_context[PLATFORM_CORE_COUNT];
56 
57 /*******************************************************************************
58  * RMM entry point information. Discovered on the primary core and reused
59  * on secondary cores.
60  ******************************************************************************/
61 static entry_point_info_t *rmm_ep_info;
62 
63 /*******************************************************************************
64  * Static function declaration.
65  ******************************************************************************/
66 static int32_t rmm_init(void);
67 
68 /*******************************************************************************
69  * This function takes an RMM context pointer and performs a synchronous entry
70  * into it.
71  ******************************************************************************/
72 uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *rmm_ctx)
73 {
74 	uint64_t rc;
75 
76 	assert(rmm_ctx != NULL);
77 
78 	cm_set_context(&(rmm_ctx->cpu_ctx), REALM);
79 
80 	/* Restore the realm context assigned above */
81 	cm_el2_sysregs_context_restore(REALM);
82 	cm_set_next_eret_context(REALM);
83 
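	/*
	 * rmmd_rmm_enter() records the EL3 C runtime context in c_rt_ctx and
	 * hands control to RMM. Execution resumes here once the RMM-EL3
	 * handler calls rmmd_rmm_sync_exit(), whose argument comes back
	 * here as rc.
	 */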
84 	/* Enter RMM */
85 	rc = rmmd_rmm_enter(&rmm_ctx->c_rt_ctx);
86 
87 	/*
88 	 * Save realm context. EL2 Non-secure context will be restored
89 	 * before exiting Non-secure world, therefore there is no need
90 	 * to clear EL2 context registers.
91 	 */
92 	cm_el2_sysregs_context_save(REALM);
93 
94 	return rc;
95 }
96 
97 /*******************************************************************************
98  * This function returns to the place where rmmd_rmm_sync_entry() was
99  * called originally.
100  ******************************************************************************/
101 __dead2 void rmmd_rmm_sync_exit(uint64_t rc)
102 {
103 	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];
104 
105 	/* Get context of the RMM in use by this CPU. */
106 	assert(cm_get_context(REALM) == &(ctx->cpu_ctx));
107 
108 	/*
109 	 * The RMMD must have initiated the original request through a
110 	 * synchronous entry into RMM. Jump back to the original C runtime
111 	 * context with the value of rc in x0;
112 	 */
113 	rmmd_rmm_exit(ctx->c_rt_ctx, rc);
114 
115 	panic();
116 }
117 
118 /*******************************************************************************
119  * Jump to the RMM for the first time.
120  ******************************************************************************/
121 static int32_t rmm_init(void)
122 {
123 	long rc;
124 	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];
125 
126 	INFO("RMM init start.\n");
127 
128 	rc = rmmd_rmm_sync_entry(ctx);
129 	if (rc != E_RMM_BOOT_SUCCESS) {
130 		ERROR("RMM init failed: %ld\n", rc);
131 		/* Mark the boot as failed for all the CPUs */
132 		rmm_boot_failed = true;
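		/*
		 * rmmd_rmi_handler() and rmmd_rmm_el3_handler() check this
		 * flag and reject any further calls with SMC_UNK.
		 */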
133 		return 0;
134 	}
135 
136 	INFO("RMM init end.\n");
137 
138 	return 1;
139 }
140 
141 /*******************************************************************************
142  * Load and read RMM manifest, set up RMM.
143  ******************************************************************************/
144 int rmmd_setup(void)
145 {
146 	size_t shared_buf_size __unused;
147 	uintptr_t shared_buf_base;
148 	uint32_t ep_attr;
149 	unsigned int linear_id = plat_my_core_pos();
150 	rmmd_rmm_context_t *rmm_ctx = &rmm_context[linear_id];
151 	struct rmm_manifest *manifest;
152 	int rc;
153 
154 	/* Make sure RME is supported. */
155 	if (is_feat_rme_present() == 0U) {
156 		/* Mark the RMM boot as failed for all the CPUs */
157 		rmm_boot_failed = true;
158 		return -ENOTSUP;
159 	}
160 
161 	rmm_ep_info = bl31_plat_get_next_image_ep_info(REALM);
162 	if ((rmm_ep_info == NULL) || (rmm_ep_info->pc == 0)) {
163 		WARN("No RMM image provided by BL2 boot loader, Booting "
164 		     "device without RMM initialization. SMCs destined for "
165 		     "RMM will return SMC_UNK\n");
166 
167 		/* Mark the boot as failed for all the CPUs */
168 		rmm_boot_failed = true;
169 		return -ENOENT;
170 	}
171 
172 	/* Initialise an entrypoint to set up the CPU context */
173 	ep_attr = EP_REALM;
174 	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0U) {
175 		ep_attr |= EP_EE_BIG;
176 	}
177 
178 	SET_PARAM_HEAD(rmm_ep_info, PARAM_EP, VERSION_1, ep_attr);
179 	rmm_ep_info->spsr = SPSR_64(MODE_EL2,
180 					MODE_SP_ELX,
181 					DISABLE_ALL_EXCEPTIONS);
182 
183 	shared_buf_size =
184 			plat_rmmd_get_el3_rmm_shared_mem(&shared_buf_base);
185 
186 	assert((shared_buf_size == SZ_4K) &&
187 					((void *)shared_buf_base != NULL));
188 
189 	/* Zero out and load the boot manifest at the beginning of the shared area */
190 	manifest = (struct rmm_manifest *)shared_buf_base;
191 	(void)memset((void *)manifest, 0, sizeof(struct rmm_manifest));
192 
193 	rc = plat_rmmd_load_manifest(manifest);
194 	if (rc != 0) {
195 		ERROR("Error loading RMM Boot Manifest (%i)\n", rc);
196 		/* Mark the boot as failed for all the CPUs */
197 		rmm_boot_failed = true;
198 		return rc;
199 	}
200 	flush_dcache_range((uintptr_t)shared_buf_base, shared_buf_size);
201 
202 	/*
203 	 * Prepare coldboot arguments for RMM:
204 	 * arg0: This CPUID (primary processor).
205 	 * arg1: Version for this Boot Interface.
206 	 * arg2: PLATFORM_CORE_COUNT.
207 	 * arg3: Base address for the EL3 <-> RMM shared area. The boot
208 	 *       manifest will be stored at the beginning of this area.
209 	 * arg4: opaque activation token, as returned by previous calls
210 	 */
211 	rmm_ep_info->args.arg0 = linear_id;
212 	rmm_ep_info->args.arg1 = RMM_EL3_INTERFACE_VERSION;
213 	rmm_ep_info->args.arg2 = PLATFORM_CORE_COUNT;
214 	rmm_ep_info->args.arg3 = shared_buf_base;
215 	rmm_ep_info->args.arg4 = rmm_ctx->activation_token;
216 
217 	/* Initialise RMM context with this entry point information */
218 	cm_setup_context(&rmm_ctx->cpu_ctx, rmm_ep_info);
219 
220 	INFO("RMM setup done.\n");
221 
222 	/* Register init function for deferred init.  */
223 	bl31_register_rmm_init(&rmm_init);
224 
225 	return 0;
226 }
227 
228 /*******************************************************************************
229  * Forward SMC to the other security state
230  ******************************************************************************/
231 static uint64_t	rmmd_smc_forward(uint32_t src_sec_state,
232 				 uint32_t dst_sec_state, uint64_t x0,
233 				 uint64_t x1, uint64_t x2, uint64_t x3,
234 				 uint64_t x4, void *handle)
235 {
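	/*
	 * Saved context of the destination security state; the SMC_RET*
	 * macros below write the forwarded arguments into it.
	 */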
236 	cpu_context_t *ctx = cm_get_context(dst_sec_state);
237 
238 	/* Save incoming security state */
239 	cm_el2_sysregs_context_save(src_sec_state);
240 
241 	/* Restore outgoing security state */
242 	cm_el2_sysregs_context_restore(dst_sec_state);
243 	cm_set_next_eret_context(dst_sec_state);
244 
245 	/*
246 	 * As per SMCCCv1.2, we need to preserve x4 to x7 unless
247 	 * being used as return args. Hence we differentiate the
248 	 * onward and backward paths. Support up to 8 args in the
249 	 * onward path and 4 args in the return path.
250 	 * Register x4 will be preserved by RMM in case it is not
251 	 * used in return path.
252 	 */
253 	if (src_sec_state == NON_SECURE) {
254 		SMC_RET8(ctx, x0, x1, x2, x3, x4,
255 			 SMC_GET_GP(handle, CTX_GPREG_X5),
256 			 SMC_GET_GP(handle, CTX_GPREG_X6),
257 			 SMC_GET_GP(handle, CTX_GPREG_X7));
258 	}
259 
260 	SMC_RET5(ctx, x0, x1, x2, x3, x4);
261 }
262 
263 /*******************************************************************************
264  * This function handles all SMCs in the range reserved for RMI. Each call is
265  * either forwarded to the other security state or handled by the RMM dispatcher
266  ******************************************************************************/
267 uint64_t rmmd_rmi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
268 			  uint64_t x3, uint64_t x4, void *cookie,
269 			  void *handle, uint64_t flags)
270 {
271 	uint32_t src_sec_state;
272 
273 	/* If RMM failed to boot, treat any RMI SMC as unknown */
274 	if (rmm_boot_failed) {
275 		WARN("RMMD: Failed to boot up RMM. Ignoring RMI call\n");
276 		SMC_RET1(handle, SMC_UNK);
277 	}
278 
279 	/* Determine which security state this SMC originated from */
280 	src_sec_state = caller_sec_state(flags);
281 
282 	/* RMI must not be invoked by the Secure world */
283 	if (src_sec_state == SMC_FROM_SECURE) {
284 		WARN("RMMD: RMI invoked by secure world.\n");
285 		SMC_RET1(handle, SMC_UNK);
286 	}
287 
288 	/*
289 	 * Forward an RMI call from the Normal world to the Realm world as it
290 	 * is.
291 	 */
292 	if (src_sec_state == SMC_FROM_NON_SECURE) {
293 		/*
294 		 * If SVE hint bit is set in the flags then update the SMC
295 		 * function id and pass it on to the lower EL.
296 		 */
297 		if (is_sve_hint_set(flags)) {
298 			smc_fid |= (FUNCID_SVE_HINT_MASK <<
299 				    FUNCID_SVE_HINT_SHIFT);
300 		}
301 		VERBOSE("RMMD: RMI call from non-secure world.\n");
302 		return rmmd_smc_forward(NON_SECURE, REALM, smc_fid,
303 					x1, x2, x3, x4, handle);
304 	}
305 
306 	if (src_sec_state != SMC_FROM_REALM) {
307 		SMC_RET1(handle, SMC_UNK);
308 	}
309 
310 	switch (smc_fid) {
311 	case RMM_RMI_REQ_COMPLETE: {
312 		uint64_t x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
313 
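		/*
		 * Shift the Realm's x1..x5 down into x0..x4 of the Normal
		 * world context that issued the original RMI call.
		 */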
314 		return rmmd_smc_forward(REALM, NON_SECURE, x1,
315 					x2, x3, x4, x5, handle);
316 	}
317 	default:
318 		WARN("RMMD: Unsupported RMM call 0x%08x\n", smc_fid);
319 		SMC_RET1(handle, SMC_UNK);
320 	}
321 }
322 
323 /*******************************************************************************
324  * This CPU has been turned on. Enter RMM to initialise R-EL2. Entry into RMM
325  * is done after initialising minimal architectural state that guarantees safe
326  * execution.
327  ******************************************************************************/
328 static void *rmmd_cpu_on_finish_handler(const void *arg)
329 {
330 	long rc;
331 	uint32_t linear_id = plat_my_core_pos();
332 	rmmd_rmm_context_t *ctx = &rmm_context[linear_id];
333 	/* Create a local copy of ep info to avoid race conditions */
334 	entry_point_info_t local_rmm_ep_info = *rmm_ep_info;
335 
336 	if (rmm_boot_failed) {
337 		/* RMM Boot failed on a previous CPU. Abort. */
338 		ERROR("RMM Failed to initialize. Ignoring for CPU%d\n",
339 								linear_id);
340 		return NULL;
341 	}
342 
343 	/*
344 	 * Prepare warmboot arguments for RMM:
345 	 * arg0: This CPUID.
346 	 * arg1: opaque activation token, as returned by previous calls
347 	 * arg2 to arg3: Not used.
348 	 */
349 	local_rmm_ep_info.args.arg0 = linear_id;
350 	local_rmm_ep_info.args.arg1 = ctx->activation_token;
351 	local_rmm_ep_info.args.arg2 = 0ULL;
352 	local_rmm_ep_info.args.arg3 = 0ULL;
353 
354 	/* Initialise RMM context with this entry point information */
355 	cm_setup_context(&ctx->cpu_ctx, &local_rmm_ep_info);
356 
357 	rc = rmmd_rmm_sync_entry(ctx);
358 
359 	if (rc != E_RMM_BOOT_SUCCESS) {
360 		ERROR("RMM init failed on CPU%d: %ld\n", linear_id, rc);
361 		/*
362 		 * TODO: Investigate handling of rmm_boot_failed under
363 		 * concurrent access, or explore alternative approaches
364 		 * to fix up the logic.
365 		 */
366 		rmm_boot_failed = true;
367 	}
368 
369 	return NULL;
370 }
371 
372 /* Subscribe to PSCI CPU on to initialize RMM on secondary */
373 SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, rmmd_cpu_on_finish_handler);
374 
375 /* Convert GPT lib error to RMMD GTS error */
376 static int gpt_to_gts_error(int error, uint32_t smc_fid, uint64_t address)
377 {
378 	int ret;
379 
380 	if (error == 0) {
381 		return E_RMM_OK;
382 	}
383 
384 	if (error == -EINVAL) {
385 		ret = E_RMM_BAD_ADDR;
386 	} else {
387 		/* This is the only other error code we expect */
388 		assert(error == -EPERM);
389 		ret = E_RMM_BAD_PAS;
390 	}
391 
392 	ERROR("RMMD: PAS Transition failed. GPT ret = %d, PA: 0x%"PRIx64 ", FID = 0x%x\n",
393 				error, address, smc_fid);
394 	return ret;
395 }
396 
397 static int rmm_el3_ifc_get_feat_register(uint64_t feat_reg_idx,
398 					 uint64_t *feat_reg)
399 {
400 	if (feat_reg_idx != RMM_EL3_FEAT_REG_0_IDX) {
401 		ERROR("RMMD: Failed to get feature register %ld\n", feat_reg_idx);
402 		return E_RMM_INVAL;
403 	}
404 
405 	*feat_reg = 0UL;
406 #if RMMD_ENABLE_EL3_TOKEN_SIGN
407 	*feat_reg |= RMM_EL3_FEAT_REG_0_EL3_TOKEN_SIGN_MASK;
408 #endif
409 	return E_RMM_OK;
410 }
411 
412 /*
413  * Update encryption key associated with mecid included in x1.
414  */
415 static int rmmd_mecid_key_update(uint64_t x1)
416 {
417 	uint64_t mecid_width, mecid_width_mask;
418 	uint16_t mecid;
419 	unsigned int reason;
420 	int ret;
421 
422 	/*
423 	 * Check whether FEAT_MEC is supported by the hardware. If not, return
424 	 * unknown SMC.
425 	 */
426 	if (is_feat_mec_supported() == false) {
427 		return E_RMM_UNK;
428 	}
429 
430 	/*
431 	 * Check that the mecid parameter fits within MECIDR_EL2.MECIDWidthm1 + 1
432 	 * bits.
433 	 */
434 	mecid_width = ((read_mecidr_el2() >> MECIDR_EL2_MECIDWidthm1_SHIFT) &
435 		MECIDR_EL2_MECIDWidthm1_MASK) + 1UL;
436 	mecid_width_mask = ((1UL << mecid_width) - 1UL);
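	/* E.g. MECIDWidthm1 == 15 gives mecid_width = 16 and mecid_width_mask = 0xFFFF. */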
437 
438 	mecid = (x1 >> MECID_SHIFT) & MECID_MASK;
439 	if ((mecid & ~mecid_width_mask) != 0U) {
440 		return E_RMM_INVAL;
441 	}
442 
443 	reason = (x1 >> MEC_REFRESH_REASON_SHIFT) & MEC_REFRESH_REASON_MASK;
444 	ret = plat_rmmd_mecid_key_update(mecid, reason);
445 
446 	if (ret != 0) {
447 		return E_RMM_UNK;
448 	}
449 	return E_RMM_OK;
450 }
451 
452 /*******************************************************************************
453  * This function handles RMM-EL3 interface SMCs
454  ******************************************************************************/
455 uint64_t rmmd_rmm_el3_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
456 				uint64_t x3, uint64_t x4, void *cookie,
457 				void *handle, uint64_t flags)
458 {
459 	uint64_t remaining_len = 0UL;
460 	uint32_t src_sec_state;
461 	int ret;
462 
463 	/* If RMM failed to boot, treat any RMM-EL3 interface SMC as unknown */
464 	if (rmm_boot_failed) {
465 		WARN("RMMD: Failed to boot up RMM. Ignoring RMM-EL3 call\n");
466 		SMC_RET1(handle, SMC_UNK);
467 	}
468 
469 	/* Determine which security state this SMC originated from */
470 	src_sec_state = caller_sec_state(flags);
471 
472 	if (src_sec_state != SMC_FROM_REALM) {
473 		WARN("RMMD: RMM-EL3 call originated from secure or normal world\n");
474 		SMC_RET1(handle, SMC_UNK);
475 	}
476 
477 	switch (smc_fid) {
478 	case RMM_GTSI_DELEGATE:
479 		ret = gpt_delegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
480 		SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
481 	case RMM_GTSI_UNDELEGATE:
482 		ret = gpt_undelegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
483 		SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
484 	case RMM_ATTEST_GET_REALM_KEY:
485 		ret = rmmd_attest_get_signing_key(x1, &x2, x3);
486 		SMC_RET2(handle, ret, x2);
487 	case RMM_ATTEST_GET_PLAT_TOKEN:
488 		ret = rmmd_attest_get_platform_token(x1, &x2, x3, &remaining_len);
489 		SMC_RET3(handle, ret, x2, remaining_len);
490 	case RMM_EL3_FEATURES:
491 		ret = rmm_el3_ifc_get_feat_register(x1, &x2);
492 		SMC_RET2(handle, ret, x2);
493 #if RMMD_ENABLE_EL3_TOKEN_SIGN
494 	case RMM_EL3_TOKEN_SIGN:
495 		return rmmd_el3_token_sign(handle, x1, x2, x3, x4);
496 #endif
497 
498 #if RMMD_ENABLE_IDE_KEY_PROG
499 	case RMM_IDE_KEY_PROG:
500 	{
501 		rp_ide_key_info_t ide_key_info;
502 
503 		ide_key_info.keyqw0 = x4;
504 		ide_key_info.keyqw1 = SMC_GET_GP(handle, CTX_GPREG_X5);
505 		ide_key_info.keyqw2 = SMC_GET_GP(handle, CTX_GPREG_X6);
506 		ide_key_info.keyqw3 = SMC_GET_GP(handle, CTX_GPREG_X7);
507 		ide_key_info.ifvqw0 = SMC_GET_GP(handle, CTX_GPREG_X8);
508 		ide_key_info.ifvqw1 = SMC_GET_GP(handle, CTX_GPREG_X9);
509 		uint64_t x10 = SMC_GET_GP(handle, CTX_GPREG_X10);
510 		uint64_t x11 = SMC_GET_GP(handle, CTX_GPREG_X11);
511 
512 		ret = rmmd_el3_ide_key_program(x1, x2, x3, &ide_key_info, x10, x11);
513 		SMC_RET1(handle, ret);
514 	}
515 	case RMM_IDE_KEY_SET_GO:
516 		ret = rmmd_el3_ide_key_set_go(x1, x2, x3, x4, SMC_GET_GP(handle, CTX_GPREG_X5));
517 		SMC_RET1(handle, ret);
518 	case RMM_IDE_KEY_SET_STOP:
519 		ret = rmmd_el3_ide_key_set_stop(x1, x2, x3, x4, SMC_GET_GP(handle, CTX_GPREG_X5));
520 		SMC_RET1(handle, ret);
521 	case RMM_IDE_KM_PULL_RESPONSE: {
522 		uint64_t req_resp = 0, req_id = 0, cookie_var = 0;
523 
524 		ret = rmmd_el3_ide_km_pull_response(x1, x2, &req_resp, &req_id, &cookie_var);
525 		SMC_RET4(handle, ret, req_resp, req_id, cookie_var);
526 	}
527 #endif /* RMMD_ENABLE_IDE_KEY_PROG */
528 	case RMM_RESERVE_MEMORY:
529 		ret = rmmd_reserve_memory(x1, &x2);
530 		SMC_RET2(handle, ret, x2);
531 
532 	case RMM_BOOT_COMPLETE:
533 	{
534 		rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];
535 
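		/*
		 * x1 carries the RMM boot status (E_RMM_BOOT_SUCCESS on
		 * success) and x2 an opaque activation token. The status is
		 * handed back to rmm_init() or rmmd_cpu_on_finish_handler()
		 * via rmmd_rmm_sync_exit().
		 */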
536 		ctx->activation_token = x2;
537 		VERBOSE("RMMD: running rmmd_rmm_sync_exit\n");
538 		rmmd_rmm_sync_exit(x1);
539 	}
540 	case RMM_MEC_REFRESH:
541 		ret = rmmd_mecid_key_update(x1);
542 		SMC_RET1(handle, ret);
543 	default:
544 		WARN("RMMD: Unsupported RMM-EL3 call 0x%08x\n", smc_fid);
545 		SMC_RET1(handle, SMC_UNK);
546 	}
547 }
548 
549 /**
550  * Helper to activate the primary CPU with the updated RMM, mainly used during
551  * LFA of RMM.
552  */
553 int rmmd_primary_activate(void)
554 {
555 	int rc;
556 
557 	rc = rmmd_setup();
558 	if (rc != 0) {
559 		ERROR("rmmd_setup failed during LFA: %d\n", rc);
560 		return rc;
561 	}
562 
563 	rc = rmm_init();
564 	if (rc == 0) {
565 		ERROR("rmm_init failed during LFA: %d\n", rc);
566 		return rc;
567 	}
568 
569 	INFO("RMM warm reset done on primary during LFA. \n");
570 
571 	return 0;
572 }
573 
574 /**
575  * Helper to activate a secondary CPU with the updated RMM, mainly used during
576  * LFA of RMM.
577  */
578 int rmmd_secondary_activate(void)
579 {
580 	rmmd_cpu_on_finish_handler(NULL);
581 
582 	return 0;
583 }
584