xref: /rk3399_ARM-atf/services/std_svc/rmmd/rmmd_main.c (revision 430f246e58d146949d399d72294f56403672bee0)
1 /*
2  * Copyright (c) 2021-2026, Arm Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <errno.h>
9 #include <inttypes.h>
10 #include <stdint.h>
11 #include <string.h>
12 
13 #include <arch_helpers.h>
14 #include <arch_features.h>
15 #include <bl31/bl31.h>
16 #include <common/debug.h>
17 #include <common/runtime_svc.h>
18 #include <context.h>
19 #include <lib/el3_runtime/context_mgmt.h>
20 #include <lib/el3_runtime/cpu_data.h>
21 #include <lib/el3_runtime/pubsub.h>
22 #include <lib/extensions/mpam.h>
23 #include <lib/extensions/pmuv3.h>
24 #include <lib/extensions/sys_reg_trace.h>
25 #include <lib/gpt_rme/gpt_rme.h>
26 #include <lib/per_cpu/per_cpu.h>
27 
28 #include <lib/spinlock.h>
29 #include <lib/utils.h>
30 #include <lib/xlat_tables/xlat_tables_v2.h>
31 #include <plat/common/common_def.h>
32 #include <plat/common/platform.h>
33 #include <platform_def.h>
34 #include <services/rmmd_svc.h>
35 #include <smccc_helpers.h>
36 #include <lib/extensions/sme.h>
37 #include <lib/extensions/sve.h>
38 #include <lib/extensions/spe.h>
39 #include <lib/extensions/trbe.h>
40 #include "rmmd_private.h"
41 
42 #define MECID_SHIFT			U(32)
43 #define MECID_MASK			0xFFFFU
44 
45 #define MEC_REFRESH_REASON_SHIFT	U(0)
46 #define MEC_REFRESH_REASON_MASK		BIT(0)
47 
/*******************************************************************************
 * RMM boot failure flag.
 * Set (never cleared) when RMM fails to boot on any CPU; the SMC handlers
 * below then answer every RMI / RMM-EL3 call with SMC_UNK.
 ******************************************************************************/
static bool rmm_boot_failed;

/*******************************************************************************
 * RMM context information.
 * One rmmd_rmm_context_t per CPU, accessed through PER_CPU_CUR().
 ******************************************************************************/
PER_CPU_DEFINE(rmmd_rmm_context_t, rmm_context);

/*******************************************************************************
 * RMM entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *rmm_ep_info;

/*******************************************************************************
 * Static function declaration.
 ******************************************************************************/
static int32_t rmm_init(void);
/*******************************************************************************
 * This function takes an RMM context pointer and performs a synchronous entry
 * into it.
 *
 * Returns the value that RMM hands back through rmmd_rmm_sync_exit() (placed
 * in x0 on the return path).
 ******************************************************************************/
uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *rmm_ctx)
{
	uint64_t rc;

	assert(rmm_ctx != NULL);

	/* Make this CPU's Realm CPU context current for the REALM world. */
	cm_set_context(&(rmm_ctx->cpu_ctx), REALM);

	/* Restore the realm context assigned above */
	cm_el2_sysregs_context_restore(REALM);
#if RMM_V1_COMPAT
	/* RMM v1 compat keeps a separate Realm copy of the EL2 GIC state. */
	cm_el2_sysregs_context_restore_gic(REALM);
#endif
	cm_set_next_eret_context(REALM);

	/* Enter RMM */
	rc = rmmd_rmm_enter(&rmm_ctx->c_rt_ctx);

	/*
	 * Save realm context. EL2 Non-secure context will be restored
	 * before exiting Non-secure world, therefore there is no need
	 * to clear EL2 context registers.
	 */
	cm_el2_sysregs_context_save(REALM);
#if RMM_V1_COMPAT
	cm_el2_sysregs_context_save_gic(REALM);
#endif

	return rc;
}
103 
/*******************************************************************************
 * This function returns to the place where rmmd_rmm_sync_entry() was
 * called originally.
 *
 * @rc: value to deliver as the return of rmmd_rmm_sync_entry() (via x0).
 * Never returns; reaching the end indicates a corrupted C runtime context.
 ******************************************************************************/
__dead2 void rmmd_rmm_sync_exit(uint64_t rc)
{
	rmmd_rmm_context_t *ctx = PER_CPU_CUR(rmm_context);

	/* Get context of the RMM in use by this CPU. */
	assert(cm_get_context(REALM) == &(ctx->cpu_ctx));

	/*
	 * The RMMD must have initiated the original request through a
	 * synchronous entry into RMM. Jump back to the original C runtime
	 * context with the value of rc in x0;
	 */
	rmmd_rmm_exit(ctx->c_rt_ctx, rc);

	/* rmmd_rmm_exit() must not return. */
	panic();
}
124 
/*******************************************************************************
 * Jump to the RMM for the first time.
 *
 * Returns 1 on success and 0 on failure -- note the inverted convention,
 * presumably required by bl31_register_rmm_init() (registered in
 * rmmd_setup()). On failure rmm_boot_failed is also set so later RMI and
 * RMM-EL3 SMCs are rejected.
 ******************************************************************************/
static int32_t rmm_init(void)
{
	long rc;
	rmmd_rmm_context_t *ctx = PER_CPU_CUR(rmm_context);

	INFO("RMM init start.\n");

#if !RMM_V1_COMPAT
	/*
	 * The SPMD could init before. RMMD doesn't restore GIC context so it
	 * can be shared with NS, so just this once restore GIC context. Use the
	 * NS copy as the Realm copy will be unused.
	 */
	cm_el2_sysregs_context_restore_gic(NON_SECURE);
#endif
	/* Synchronous entry; RMM reports its boot status via sync exit. */
	rc = rmmd_rmm_sync_entry(ctx);
	if (rc != E_RMM_BOOT_SUCCESS) {
		ERROR("RMM init failed: %ld\n", rc);
		/* Mark the boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return 0;
	}

	INFO("RMM init end.\n");

	return 1;
}
155 
/*******************************************************************************
 * Load and read RMM manifest, setup RMM.
 *
 * Runs on the primary core during BL31 setup. Discovers the RMM image entry
 * point, loads the boot manifest into the EL3<->RMM shared area, prepares the
 * cold-boot arguments and registers rmm_init() for deferred execution.
 *
 * Returns 0 on success or a negative errno-style code on failure; every
 * failure path also sets rmm_boot_failed.
 ******************************************************************************/
int rmmd_setup(void)
{
	size_t shared_buf_size __unused;
	uintptr_t shared_buf_base;
	uint32_t ep_attr;
	unsigned int linear_id = plat_my_core_pos();

	rmmd_rmm_context_t *rmm_ctx = PER_CPU_CUR(rmm_context);
	struct rmm_manifest *manifest;
	int rc;

	/* Make sure RME is supported. */
	if (is_feat_rme_present() == 0U) {
		/* Mark the RMM boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return -ENOTSUP;
	}

	rmm_ep_info = bl31_plat_get_next_image_ep_info(REALM);
	if ((rmm_ep_info == NULL) || (rmm_ep_info->pc == 0)) {
		WARN("No RMM image provided by BL2 boot loader, Booting "
		     "device without RMM initialization. SMCs destined for "
		     "RMM will return SMC_UNK\n");

		/* Mark the boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return -ENOENT;
	}

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = EP_REALM;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0U) {
		/* Propagate EL3 big-endian configuration to the entry point. */
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(rmm_ep_info, PARAM_EP, VERSION_1, ep_attr);
	/* RMM runs at R-EL2 with all exceptions masked on entry. */
	rmm_ep_info->spsr = SPSR_64(MODE_EL2,
					MODE_SP_ELX,
					DISABLE_ALL_EXCEPTIONS);

	shared_buf_size =
			plat_rmmd_get_el3_rmm_shared_mem(&shared_buf_base);

	assert((shared_buf_size == SZ_4K) &&
					((void *)shared_buf_base != NULL));

	/* Zero out and load the boot manifest at the beginning of the share area */
	manifest = (struct rmm_manifest *)shared_buf_base;
	*manifest = (struct rmm_manifest) { 0 };

	rc = plat_rmmd_load_manifest(manifest);
	if (rc != 0) {
		ERROR("Error loading RMM Boot Manifest (%i)\n", rc);
		/* Mark the boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return rc;
	}
	/* Ensure RMM sees the manifest even before it enables its MMU. */
	flush_dcache_range((uintptr_t)shared_buf_base, shared_buf_size);

	/*
	 * Prepare coldboot arguments for RMM:
	 * arg0: This CPUID (primary processor).
	 * arg1: Version for this Boot Interface.
	 * arg2: PLATFORM_CORE_COUNT.
	 * arg3: Base address for the EL3 <-> RMM shared area. The boot
	 *       manifest will be stored at the beginning of this area.
	 * arg4: opaque activation token, as returned by previous calls
	 */
	rmm_ep_info->args.arg0 = linear_id;
	rmm_ep_info->args.arg1 = RMM_EL3_INTERFACE_VERSION;
	rmm_ep_info->args.arg2 = PLATFORM_CORE_COUNT;
	rmm_ep_info->args.arg3 = shared_buf_base;
	rmm_ep_info->args.arg4 = rmm_ctx->activation_token;

	/* Initialise RMM context with this entry point information */
	cm_setup_context(&rmm_ctx->cpu_ctx, rmm_ep_info);

	INFO("RMM setup done.\n");

	/* Register init function for deferred init.  */
	bl31_register_rmm_init(&rmm_init);

	return 0;
}
243 
/*******************************************************************************
 * Forward SMC to the other security state.
 *
 * Saves the EL2 system register context of src_sec_state, restores that of
 * dst_sec_state and queues x0..x7 as the SMC arguments/returns seen by the
 * destination world. Does not return to the caller's world directly; control
 * transfers on the next ERET.
 ******************************************************************************/
static uint64_t	rmmd_smc_forward(uint32_t src_sec_state,
				 uint32_t dst_sec_state, uint64_t x0,
				 uint64_t x1, uint64_t x2, uint64_t x3,
				 uint64_t x4, uint64_t x5, uint64_t x6,
				 uint64_t x7, void *handle)
{
	cpu_context_t *ctx = cm_get_context(dst_sec_state);

	/* Save incoming security state */
	cm_el2_sysregs_context_save(src_sec_state);
#if RMM_V1_COMPAT
	/* v1 compat mode keeps per-world EL2 GIC copies; save ours. */
	cm_el2_sysregs_context_save_gic(src_sec_state);
#endif

	/* Restore outgoing security state */
	cm_el2_sysregs_context_restore(dst_sec_state);
#if RMM_V1_COMPAT
	cm_el2_sysregs_context_restore_gic(dst_sec_state);
#endif

	cm_set_next_eret_context(dst_sec_state);

	/*
	 * As per SMCCCv1.2, we need to preserve x4 to x7 unless
	 * being used as return args.
	 */
#if RMM_V1_COMPAT
	/*
	 * We differentiate the onward and backward path. Support
	 * upto 8 args in the onward path and 4 args in return path.
	 * Register x4 will be preserved by RMM in case it is not
	 * used in return path.
	 */
	if (src_sec_state == NON_SECURE) {
		SMC_RET8(ctx, x0, x1, x2, x3, x4, x5, x6, x7);
	}

	SMC_RET5(ctx, x0, x1, x2, x3, x4);
#else
	/*
	 * Expand return registers to x0-x7 for RMM 2.x
	 * Register x4-x7 will be preserved by RMM in case they are not
	 * used in return path.
	 */

	SMC_RET8(ctx, x0, x1, x2, x3, x4, x5, x6, x7);
#endif
}
295 
/*******************************************************************************
 * This function handles all SMCs in the range reserved for RMI. Each call is
 * either forwarded to the other security state or handled by the RMM
 * dispatcher.
 *
 * NS-originated RMI calls are forwarded to the Realm world unchanged;
 * Realm-originated calls are only accepted for RMM_RMI_REQ_COMPLETE, which
 * returns the result of the previous RMI call to the Normal world.
 ******************************************************************************/
uint64_t rmmd_rmi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
			  uint64_t x3, uint64_t x4, void *cookie,
			  void *handle, uint64_t flags)
{
	uint32_t src_sec_state;

	/* If RMM failed to boot, treat any RMI SMC as unknown */
	if (rmm_boot_failed) {
		WARN("RMMD: Failed to boot up RMM. Ignoring RMI call\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/* Determine which security state this SMC originated from */
	src_sec_state = caller_sec_state(flags);

	/* RMI must not be invoked by the Secure world */
	if (src_sec_state == SMC_FROM_SECURE) {
		WARN("RMMD: RMI invoked by secure world.\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/*
	 * Forward an RMI call from the Normal world to the Realm world as it
	 * is.
	 */
	if (src_sec_state == SMC_FROM_NON_SECURE) {
		/*
		 * If SVE hint bit is set in the flags then update the SMC
		 * function id and pass it on to the lower EL.
		 */
		if (is_sve_hint_set(flags)) {
			smc_fid |= (FUNCID_SVE_HINT_MASK <<
				    FUNCID_SVE_HINT_SHIFT);
		}
		VERBOSE("RMMD: RMI call from non-secure world.\n");
		/* x5-x7 are fetched from the saved NS GP register context. */
		return rmmd_smc_forward(NON_SECURE, REALM, smc_fid,
					x1, x2, x3, x4,
					SMC_GET_GP(handle, CTX_GPREG_X5),
					SMC_GET_GP(handle, CTX_GPREG_X6),
					SMC_GET_GP(handle, CTX_GPREG_X7),
					handle);
	}

	if (src_sec_state != SMC_FROM_REALM) {
		SMC_RET1(handle, SMC_UNK);
	}

	switch (smc_fid) {
	case RMM_RMI_REQ_COMPLETE: {
		uint64_t x5 = SMC_GET_GP(handle, CTX_GPREG_X5);

		/*
		 * Shift the return values down by one register: x1 of the
		 * completion call becomes x0 seen by the Normal world.
		 */
		return rmmd_smc_forward(REALM, NON_SECURE, x1,
					x2, x3, x4, x5,
					SMC_GET_GP(handle, CTX_GPREG_X6),
					SMC_GET_GP(handle, CTX_GPREG_X7),
					SMC_GET_GP(handle, CTX_GPREG_X8),
					handle);
	}
	default:
		WARN("RMMD: Unsupported RMM call 0x%08x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}
363 
364 /*******************************************************************************
365  * This cpu has been turned on. Enter RMM to initialise R-EL2.  Entry into RMM
366  * is done after initialising minimal architectural state that guarantees safe
367  * execution.
368  ******************************************************************************/
rmmd_cpu_on_finish_handler(const void * arg)369 static void *rmmd_cpu_on_finish_handler(const void *arg)
370 {
371 	long rc;
372 	uint32_t linear_id = plat_my_core_pos();
373 	rmmd_rmm_context_t *ctx = PER_CPU_CUR(rmm_context);
374 	/* Create a local copy of ep info to avoid race conditions */
375 	entry_point_info_t local_rmm_ep_info;
376 
377 	if (!is_feat_rme_supported()) {
378 		return NULL;
379 	}
380 
381 	if (rmm_boot_failed) {
382 		/* RMM Boot failed on a previous CPU. Abort. */
383 		ERROR("RMM Failed to initialize. Ignoring for CPU%d\n",
384 								linear_id);
385 		return NULL;
386 	}
387 
388 	/*
389 	 * Prepare warmboot arguments for RMM:
390 	 * arg0: This CPUID.
391 	 * arg1: opaque activation token, as returned by previous calls
392 	 * arg2 to arg3: Not used.
393 	 */
394 	local_rmm_ep_info = *rmm_ep_info;
395 	local_rmm_ep_info.args.arg0 = linear_id;
396 	local_rmm_ep_info.args.arg1 = ctx->activation_token;
397 	local_rmm_ep_info.args.arg2 = 0ULL;
398 	local_rmm_ep_info.args.arg3 = 0ULL;
399 
400 	/* Initialise RMM context with this entry point information */
401 	cm_setup_context(&ctx->cpu_ctx, &local_rmm_ep_info);
402 
403 	rc = rmmd_rmm_sync_entry(ctx);
404 
405 	if (rc != E_RMM_BOOT_SUCCESS) {
406 		ERROR("RMM init failed on CPU%d: %ld\n", linear_id, rc);
407 		/*
408 		 * TODO: Investigate handling of rmm_boot_failed under
409 		 * concurrent access, or explore alternative approaches
410 		 * to fixup the logic.
411 		 */
412 		rmm_boot_failed = true;
413 	}
414 
415 	return NULL;
416 }
417 
418 /* Subscribe to PSCI CPU on to initialize RMM on secondary */
419 SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, rmmd_cpu_on_finish_handler);
420 
421 /* Convert GPT lib error to RMMD GTS error */
gpt_to_gts_error(int error,uint32_t smc_fid,uint64_t address)422 static int gpt_to_gts_error(int error, uint32_t smc_fid, uint64_t address)
423 {
424 	int ret;
425 
426 	if (error == 0) {
427 		return E_RMM_OK;
428 	}
429 
430 	if (error == -EINVAL) {
431 		ret = E_RMM_BAD_ADDR;
432 	} else {
433 		/* This is the only other error code we expect */
434 		assert(error == -EPERM);
435 		ret = E_RMM_BAD_PAS;
436 	}
437 
438 	ERROR("RMMD: PAS Transition failed. GPT ret = %d, PA: 0x%"PRIx64 ", FID = 0x%x\n",
439 				error, address, smc_fid);
440 	return ret;
441 }
442 
rmm_el3_ifc_get_feat_register(uint64_t feat_reg_idx,uint64_t * feat_reg)443 static int rmm_el3_ifc_get_feat_register(uint64_t feat_reg_idx,
444 					 uint64_t *feat_reg)
445 {
446 	if (feat_reg_idx != RMM_EL3_FEAT_REG_0_IDX) {
447 		ERROR("RMMD: Failed to get feature register %ld\n", feat_reg_idx);
448 		return E_RMM_INVAL;
449 	}
450 
451 	*feat_reg = 0UL;
452 #if RMMD_ENABLE_EL3_TOKEN_SIGN
453 	*feat_reg |= RMM_EL3_FEAT_REG_0_EL3_TOKEN_SIGN_MASK;
454 #endif
455 	return E_RMM_OK;
456 }
457 
458 /*
459  * Update encryption key associated with mecid included in x1.
460  */
rmmd_mecid_key_update(uint64_t x1)461 static int rmmd_mecid_key_update(uint64_t x1)
462 {
463 	uint64_t mecid_width, mecid_width_mask;
464 	uint16_t mecid;
465 	unsigned int reason;
466 	int ret;
467 
468 	/*
469 	 * Check whether FEAT_MEC is supported by the hardware. If not, return
470 	 * unknown SMC.
471 	 */
472 	if (is_feat_mec_supported() == false) {
473 		return E_RMM_UNK;
474 	}
475 
476 	/*
477 	 * Check whether the mecid parameter is at most MECIDR_EL2.MECIDWidthm1 + 1
478 	 * in length.
479 	 */
480 	mecid_width = ((read_mecidr_el2() >> MECIDR_EL2_MECIDWidthm1_SHIFT) &
481 		MECIDR_EL2_MECIDWidthm1_MASK) + 1UL;
482 	mecid_width_mask = ((1UL << mecid_width) - 1UL);
483 
484 	mecid = (x1 >> MECID_SHIFT) & MECID_MASK;
485 	if ((mecid & ~mecid_width_mask) != 0U) {
486 		return E_RMM_INVAL;
487 	}
488 
489 	reason = (x1 >> MEC_REFRESH_REASON_SHIFT) & MEC_REFRESH_REASON_MASK;
490 	ret = plat_rmmd_mecid_key_update(mecid, reason);
491 
492 	if (ret != 0) {
493 		return E_RMM_UNK;
494 	}
495 	return E_RMM_OK;
496 }
497 
/*******************************************************************************
 * This function handles RMM-EL3 interface SMCs.
 *
 * Only calls originating from the Realm world are accepted; everything else
 * (and any call after a failed RMM boot) returns SMC_UNK. Each case replies
 * through an SMC_RET* macro except RMM_BOOT_COMPLETE, which exits back to the
 * original rmmd_rmm_sync_entry() caller and does not return here.
 ******************************************************************************/
uint64_t rmmd_rmm_el3_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
				uint64_t x3, uint64_t x4, void *cookie,
				void *handle, uint64_t flags)
{
	uint64_t remaining_len = 0UL;
	uint32_t src_sec_state;
	int ret;

	/* If RMM failed to boot, treat any RMM-EL3 interface SMC as unknown */
	if (rmm_boot_failed) {
		WARN("RMMD: Failed to boot up RMM. Ignoring RMM-EL3 call\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/* Determine which security state this SMC originated from */
	src_sec_state = caller_sec_state(flags);

	if (src_sec_state != SMC_FROM_REALM) {
		WARN("RMMD: RMM-EL3 call originated from secure or normal world\n");
		SMC_RET1(handle, SMC_UNK);
	}
	/* Page count for single-granule GTSI delegate/undelegate calls. */
	uint64_t cnt = 1;
	switch (smc_fid) {
	case RMM_GTSI_DELEGATE:
		/* Transition granule at PA x1 from NS to Realm PAS. */
		ret = gpt_transition_pas(x1, &cnt, GPT_GPI_REALM,
					 SMC_FROM_REALM);
		SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
	case RMM_GTSI_UNDELEGATE:
		/* Transition granule at PA x1 back to NS PAS. */
		ret = gpt_transition_pas(x1, &cnt, GPT_GPI_NS, SMC_FROM_REALM);
		SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
	case RMM_ATTEST_GET_REALM_KEY:
		/* x2 is in/out: buffer-related value updated by the helper. */
		ret = rmmd_attest_get_signing_key(x1, &x2, x3);
		SMC_RET2(handle, ret, x2);
	case RMM_ATTEST_GET_PLAT_TOKEN:
		ret = rmmd_attest_get_platform_token(x1, &x2, x3, &remaining_len);
		SMC_RET3(handle, ret, x2, remaining_len);
	case RMM_EL3_FEATURES:
		ret = rmm_el3_ifc_get_feat_register(x1, &x2);
		SMC_RET2(handle, ret, x2);
#if RMMD_ENABLE_EL3_TOKEN_SIGN
	case RMM_EL3_TOKEN_SIGN:
		return rmmd_el3_token_sign(handle, x1, x2, x3, x4);
#endif

#if RMMD_ENABLE_IDE_KEY_PROG
	case RMM_IDE_KEY_PROG:
	{
		rp_ide_key_info_t ide_key_info;

		/* Key/IV material arrives in x4-x9; x10/x11 are extra args. */
		ide_key_info.keyqw0 = x4;
		ide_key_info.keyqw1 = SMC_GET_GP(handle, CTX_GPREG_X5);
		ide_key_info.keyqw2 = SMC_GET_GP(handle, CTX_GPREG_X6);
		ide_key_info.keyqw3 = SMC_GET_GP(handle, CTX_GPREG_X7);
		ide_key_info.ifvqw0 = SMC_GET_GP(handle, CTX_GPREG_X8);
		ide_key_info.ifvqw1 = SMC_GET_GP(handle, CTX_GPREG_X9);
		uint64_t x10 = SMC_GET_GP(handle, CTX_GPREG_X10);
		uint64_t x11 = SMC_GET_GP(handle, CTX_GPREG_X11);

		ret = rmmd_el3_ide_key_program(x1, x2, x3, &ide_key_info, x10, x11);
		SMC_RET1(handle, ret);
	}
	case RMM_IDE_KEY_SET_GO:
		ret = rmmd_el3_ide_key_set_go(x1, x2, x3, x4, SMC_GET_GP(handle, CTX_GPREG_X5));
		SMC_RET1(handle, ret);
	case RMM_IDE_KEY_SET_STOP:
		ret = rmmd_el3_ide_key_set_stop(x1, x2, x3, x4, SMC_GET_GP(handle, CTX_GPREG_X5));
		SMC_RET1(handle, ret);
	case RMM_IDE_KM_PULL_RESPONSE: {
		uint64_t req_resp = 0, req_id = 0, cookie_var = 0;

		ret = rmmd_el3_ide_km_pull_response(x1, x2, &req_resp, &req_id, &cookie_var);
		SMC_RET4(handle, ret, req_resp, req_id, cookie_var);
	}
#endif /* RMMD_ENABLE_IDE_KEY_PROG */
	case RMM_RESERVE_MEMORY:
		ret = rmmd_reserve_memory(x1, &x2);
		SMC_RET2(handle, ret, x2);

	case RMM_BOOT_COMPLETE:
	{
		rmmd_rmm_context_t *ctx = PER_CPU_CUR(rmm_context);

		/* Stash RMM's activation token for warm-boot re-entry. */
		ctx->activation_token = x2;
		VERBOSE("RMMD: running rmmd_rmm_sync_exit\n");
		/* Does not return: jumps back into rmmd_rmm_sync_entry(). */
		rmmd_rmm_sync_exit(x1);
	}
	case RMM_MEC_REFRESH:
		ret = rmmd_mecid_key_update(x1);
		SMC_RET1(handle, ret);
	default:
		WARN("RMMD: Unsupported RMM-EL3 call 0x%08x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}
595 
/**
 * Helper to activate Primary CPU with the updated RMM, mainly used during
 * LFA of RMM.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int rmmd_primary_activate(void)
{
	int rc;

	rc = rmmd_setup();
	if (rc != 0) {
		ERROR("rmmd_setup failed during LFA: %d\n", rc);
		return rc;
	}

	/* rmm_init() uses an inverted convention: 1 on success, 0 on failure. */
	rc = rmm_init();
	if (rc == 0) {
		ERROR("rmm_init failed during LFA: %d\n", rc);
		/*
		 * rc is 0 here, which callers treat as success; return a real
		 * error code instead of propagating it.
		 */
		return -ENOENT;
	}

	INFO("RMM warm reset done on primary during LFA. \n");

	return 0;
}
620 
/**
 * Helper to activate a Secondary CPU with the updated RMM, mainly used during
 * LFA of RMM.
 *
 * Always returns 0; any RMM boot failure is recorded in rmm_boot_failed by
 * the handler below.
 */
int rmmd_secondary_activate(void)
{
	/* Reuse the PSCI warm-boot path to re-enter RMM on this CPU. */
	rmmd_cpu_on_finish_handler(NULL);

	return 0;
}
631