/*
 * Copyright (c) 2021-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/el3_runtime/pubsub.h>
#include <lib/extensions/pmuv3.h>
#include <lib/extensions/sys_reg_trace.h>
#include <lib/gpt_rme/gpt_rme.h>

#include <lib/spinlock.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/rmmd_svc.h>
#include <smccc_helpers.h>
#include <lib/extensions/sme.h>
#include <lib/extensions/sve.h>
#include <lib/extensions/spe.h>
#include <lib/extensions/trbe.h>
#include "rmmd_initial_context.h"
#include "rmmd_private.h"

/*******************************************************************************
 * RMM boot failure flag
 ******************************************************************************/
static bool rmm_boot_failed;

/*******************************************************************************
 * RMM context information.
 ******************************************************************************/
rmmd_rmm_context_t rmm_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * RMM entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *rmm_ep_info;

/*******************************************************************************
 * Static function declaration.
 ******************************************************************************/
static int32_t rmm_init(void);

/*******************************************************************************
 * This function takes an RMM context pointer and performs a synchronous entry
 * into it.
 ******************************************************************************/
uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *rmm_ctx)
{
	uint64_t rc;

	assert(rmm_ctx != NULL);

	cm_set_context(&(rmm_ctx->cpu_ctx), REALM);

	/* Restore the realm context assigned above */
	cm_el2_sysregs_context_restore(REALM);
	cm_set_next_eret_context(REALM);

	/* Enter RMM */
	rc = rmmd_rmm_enter(&rmm_ctx->c_rt_ctx);

	/*
	 * Save the realm context. The EL2 Non-secure context will be restored
	 * before exiting the Non-secure world, therefore there is no need
	 * to clear the EL2 context registers.
	 */
	cm_el2_sysregs_context_save(REALM);

	return rc;
}

/*******************************************************************************
 * This function returns to the place where rmmd_rmm_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void rmmd_rmm_sync_exit(uint64_t rc)
{
	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];

	/* Get the context of the RMM in use by this CPU. */
	assert(cm_get_context(REALM) == &(ctx->cpu_ctx));

	/*
	 * The RMMD must have initiated the original request through a
	 * synchronous entry into the RMM. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	rmmd_rmm_exit(ctx->c_rt_ctx, rc);

	panic();
}
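
/*
 * Editorial sketch (not additional dispatcher code): how the two helpers
 * above pair up during RMM boot. All names below are from this file; the
 * flow is a simplified view of rmm_init() and rmmd_rmm_el3_handler():
 *
 *   rc = rmmd_rmm_sync_entry(ctx);
 *           - EL3 saves its C runtime context and ERETs into R-EL2
 *             through rmmd_rmm_enter().
 *           - The RMM boots and issues the RMM_BOOT_COMPLETE SMC with its
 *             status in x1.
 *   rmmd_rmm_sync_exit(x1);
 *           - The EL3 handler unwinds to the saved C runtime context, so
 *             rmmd_rmm_sync_entry() returns x1 as rc.
 */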

static void rmm_el2_context_init(el2_sysregs_t *regs)
{
	write_el2_ctx_common(regs, spsr_el2, REALM_SPSR_EL2);
	write_el2_ctx_common(regs, sctlr_el2, SCTLR_EL2_RES1);
}

/*******************************************************************************
 * Enable architecture extensions on first entry to the Realm world.
 ******************************************************************************/
static void manage_extensions_realm(cpu_context_t *ctx)
{
	pmuv3_enable(ctx);

	/*
	 * Enable access to TPIDR2_EL0 if SME/SME2 is enabled for the
	 * Non-secure world.
	 */
	if (is_feat_sme_supported()) {
		sme_enable(ctx);
	}

	/*
	 * SPE and TRBE cannot be fully disabled from EL3 registers alone;
	 * only system register access to them can be trapped. In case the
	 * EL1 controls leave them active across a context switch, make
	 * Non-secure the owning security state so that the Realm world
	 * cannot be denied service.
	 */
	if (is_feat_spe_supported()) {
		spe_disable(ctx);
	}

	if (is_feat_trbe_supported()) {
		trbe_disable(ctx);
	}
}

static void manage_extensions_realm_per_world(void)
{
	cm_el3_arch_init_per_world(&per_world_context[CPU_CONTEXT_REALM]);

	if (is_feat_sve_supported()) {
		/*
		 * Enable SVE and FPU in the Realm context when they are
		 * enabled for NS. The Realm manager must ensure that the
		 * SVE and FPU register contexts are properly managed.
		 */
		sve_enable_per_world(&per_world_context[CPU_CONTEXT_REALM]);
	}

	/* NS can access this but Realm shouldn't */
	if (is_feat_sys_reg_trace_supported()) {
		sys_reg_trace_disable_per_world(&per_world_context[CPU_CONTEXT_REALM]);
	}

	/*
	 * If SME/SME2 is supported and enabled for the NS world, then disable
	 * trapping of SME instructions for the Realm world. The RMM will
	 * save/restore the registers that are shared with SVE/FPU so that the
	 * Realm can use FPU or SVE.
	 */
	if (is_feat_sme_supported()) {
		sme_enable_per_world(&per_world_context[CPU_CONTEXT_REALM]);
	}
}
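
/*
 * Editorial note on the split above: manage_extensions_realm() programs the
 * per-CPU controls held in the Realm cpu_context, whereas
 * manage_extensions_realm_per_world() programs the EL3 per-world controls
 * shared by all CPUs via per_world_context[CPU_CONTEXT_REALM]. Warm-booted
 * CPUs therefore only repeat the per-CPU half (see
 * rmmd_cpu_on_finish_handler() below).
 */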

/*******************************************************************************
 * Jump to the RMM for the first time.
 ******************************************************************************/
static int32_t rmm_init(void)
{
	long rc;
	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];

	INFO("RMM init start.\n");

	/* Enable architecture extensions */
	manage_extensions_realm(&ctx->cpu_ctx);

	manage_extensions_realm_per_world();

	/* Initialize RMM EL2 context. */
	rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx);

	rc = rmmd_rmm_sync_entry(ctx);
	if (rc != E_RMM_BOOT_SUCCESS) {
		ERROR("RMM init failed: %ld\n", rc);
		/* Mark the boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return 0;
	}

	INFO("RMM init end.\n");

	return 1;
}

/*******************************************************************************
 * Load and read the RMM manifest, then set up the RMM.
 ******************************************************************************/
int rmmd_setup(void)
{
	size_t shared_buf_size __unused;
	uintptr_t shared_buf_base;
	uint32_t ep_attr;
	unsigned int linear_id = plat_my_core_pos();
	rmmd_rmm_context_t *rmm_ctx = &rmm_context[linear_id];
	struct rmm_manifest *manifest;
	int rc;

	/* Make sure RME is supported. */
	if (is_feat_rme_present() == 0U) {
		/* Mark the RMM boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return -ENOTSUP;
	}

	rmm_ep_info = bl31_plat_get_next_image_ep_info(REALM);
	if ((rmm_ep_info == NULL) || (rmm_ep_info->pc == 0)) {
		WARN("No RMM image provided by the BL2 boot loader. Booting "
		     "the device without RMM initialization. SMCs destined "
		     "for the RMM will return SMC_UNK\n");

		/* Mark the boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return -ENOENT;
	}

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = EP_REALM;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0U) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(rmm_ep_info, PARAM_EP, VERSION_1, ep_attr);
	rmm_ep_info->spsr = SPSR_64(MODE_EL2,
					MODE_SP_ELX,
					DISABLE_ALL_EXCEPTIONS);

	shared_buf_size =
			plat_rmmd_get_el3_rmm_shared_mem(&shared_buf_base);

	assert((shared_buf_size == SZ_4K) &&
					((void *)shared_buf_base != NULL));

	/* Zero out and load the boot manifest at the beginning of the shared area */
	manifest = (struct rmm_manifest *)shared_buf_base;
	(void)memset((void *)manifest, 0, sizeof(struct rmm_manifest));

	rc = plat_rmmd_load_manifest(manifest);
	if (rc != 0) {
		ERROR("Error loading RMM Boot Manifest (%i)\n", rc);
		/* Mark the boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return rc;
	}
	flush_dcache_range((uintptr_t)shared_buf_base, shared_buf_size);

	/*
	 * Prepare cold boot arguments for the RMM:
	 * arg0: This CPUID (primary processor).
	 * arg1: Version for this Boot Interface.
	 * arg2: PLATFORM_CORE_COUNT.
	 * arg3: Base address for the EL3 <-> RMM shared area. The boot
	 *       manifest will be stored at the beginning of this area.
	 */
	rmm_ep_info->args.arg0 = linear_id;
	rmm_ep_info->args.arg1 = RMM_EL3_INTERFACE_VERSION;
	rmm_ep_info->args.arg2 = PLATFORM_CORE_COUNT;
	rmm_ep_info->args.arg3 = shared_buf_base;

	/* Initialise RMM context with this entry point information */
	cm_setup_context(&rmm_ctx->cpu_ctx, rmm_ep_info);

	INFO("RMM setup done.\n");

	/* Register init function for deferred init. */
	bl31_register_rmm_init(&rmm_init);

	return 0;
}
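
/*
 * Illustrative sketch of the cold boot handshake from the RMM's side,
 * matching the arguments populated in rmmd_setup() above. The RMM itself is
 * a separate image, so this is a hedged outline rather than its actual
 * implementation:
 *
 *   x0 = linear id of the primary CPU
 *   x1 = RMM_EL3_INTERFACE_VERSION
 *   x2 = PLATFORM_CORE_COUNT
 *   x3 = base of the EL3 <-> RMM shared area (boot manifest at offset 0)
 *
 * Once R-EL2 initialisation succeeds, the RMM is expected to report back
 * with an SMC to RMM_BOOT_COMPLETE carrying E_RMM_BOOT_SUCCESS in x1, which
 * is handled in rmmd_rmm_el3_handler() and unwinds rmmd_rmm_sync_entry().
 */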

/*******************************************************************************
 * Forward SMC to the other security state
 ******************************************************************************/
static uint64_t rmmd_smc_forward(uint32_t src_sec_state,
				 uint32_t dst_sec_state, uint64_t x0,
				 uint64_t x1, uint64_t x2, uint64_t x3,
				 uint64_t x4, void *handle)
{
	cpu_context_t *ctx = cm_get_context(dst_sec_state);

	/* Save incoming security state */
	cm_el2_sysregs_context_save(src_sec_state);

	/* Restore outgoing security state */
	cm_el2_sysregs_context_restore(dst_sec_state);
	cm_set_next_eret_context(dst_sec_state);

	/*
	 * As per SMCCC v1.2, x4 to x7 must be preserved unless they are used
	 * as return arguments, hence the onward and return paths are handled
	 * differently. Up to 8 arguments are supported on the onward path and
	 * 4 on the return path. Register x4 will be preserved by the RMM if
	 * it is not used as a return argument.
	 */
	if (src_sec_state == NON_SECURE) {
		SMC_RET8(ctx, x0, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));
	}

	SMC_RET5(ctx, x0, x1, x2, x3, x4);
}
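
/*
 * Illustrative register map for the forwarding above (a restatement of the
 * SMCCC note in rmmd_smc_forward(), not an extra contract):
 *
 *   NS -> Realm (onward): x0..x4 come from the handler arguments and
 *   x5..x7 are copied from the saved NS context, i.e. up to 8 arguments
 *   are passed through.
 *
 *   Realm -> NS (return): x0..x3 carry the return values and x4 is passed
 *   back as well; the RMM preserves x4 when it is not used as a return
 *   argument.
 */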

/*******************************************************************************
 * This function handles all SMCs in the range reserved for RMI. Each call is
 * either forwarded to the other security state or handled by the RMM
 * dispatcher.
 ******************************************************************************/
uint64_t rmmd_rmi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
			  uint64_t x3, uint64_t x4, void *cookie,
			  void *handle, uint64_t flags)
{
	uint32_t src_sec_state;

	/* If RMM failed to boot, treat any RMI SMC as unknown */
	if (rmm_boot_failed) {
		WARN("RMMD: Failed to boot up RMM. Ignoring RMI call\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/* Determine which security state this SMC originated from */
	src_sec_state = caller_sec_state(flags);

	/* RMI must not be invoked by the Secure world */
	if (src_sec_state == SMC_FROM_SECURE) {
		WARN("RMMD: RMI invoked by secure world.\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/*
	 * Forward an RMI call from the Normal world to the Realm world as it
	 * is.
	 */
	if (src_sec_state == SMC_FROM_NON_SECURE) {
		/*
		 * If the SVE hint bit is set in the flags, update the SMC
		 * function ID and pass it on to the lower EL.
		 */
		if (is_sve_hint_set(flags)) {
			smc_fid |= (FUNCID_SVE_HINT_MASK <<
				    FUNCID_SVE_HINT_SHIFT);
		}
		VERBOSE("RMMD: RMI call from non-secure world.\n");
		return rmmd_smc_forward(NON_SECURE, REALM, smc_fid,
					x1, x2, x3, x4, handle);
	}

	if (src_sec_state != SMC_FROM_REALM) {
		SMC_RET1(handle, SMC_UNK);
	}

	switch (smc_fid) {
	case RMM_RMI_REQ_COMPLETE: {
		uint64_t x5 = SMC_GET_GP(handle, CTX_GPREG_X5);

		return rmmd_smc_forward(REALM, NON_SECURE, x1,
					x2, x3, x4, x5, handle);
	}
	default:
		WARN("RMMD: Unsupported RMM call 0x%08x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}
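
/*
 * Illustrative sketch, seen from the Normal world (for example an NS
 * hypervisor): an RMI call is a plain SMCCC SMC whose function ID falls in
 * the range routed to rmmd_rmi_handler(). The helper and the RMI_FID
 * constant below are hypothetical placeholders, not names defined in this
 * file:
 *
 *   struct smc_result res;
 *
 *   res = invoke_smc(RMI_FID, arg1, arg2, arg3, arg4);
 *   // EL3 forwards the call to R-EL2 via
 *   // rmmd_smc_forward(NON_SECURE, REALM, ...);
 *   // res.x0 then holds the RMI status returned by the RMM.
 */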

/*******************************************************************************
 * This CPU has been turned on. Enter the RMM to initialise R-EL2. Entry into
 * the RMM is done after initialising minimal architectural state that
 * guarantees safe execution.
 ******************************************************************************/
static void *rmmd_cpu_on_finish_handler(const void *arg)
{
	long rc;
	uint32_t linear_id = plat_my_core_pos();
	rmmd_rmm_context_t *ctx = &rmm_context[linear_id];

	if (rmm_boot_failed) {
		/* RMM boot failed on a previous CPU. Abort. */
		ERROR("RMM failed to initialize. Ignoring for CPU%u\n",
								linear_id);
		return NULL;
	}

	/*
	 * Prepare warm boot arguments for the RMM:
	 * arg0: This CPUID.
	 * arg1 to arg3: Not used.
	 */
	rmm_ep_info->args.arg0 = linear_id;
	rmm_ep_info->args.arg1 = 0ULL;
	rmm_ep_info->args.arg2 = 0ULL;
	rmm_ep_info->args.arg3 = 0ULL;

	/* Initialise RMM context with this entry point information */
	cm_setup_context(&ctx->cpu_ctx, rmm_ep_info);

	/* Enable architecture extensions */
	manage_extensions_realm(&ctx->cpu_ctx);

	/* Initialize RMM EL2 context. */
	rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx);

	rc = rmmd_rmm_sync_entry(ctx);

	if (rc != E_RMM_BOOT_SUCCESS) {
		ERROR("RMM init failed on CPU%u: %ld\n", linear_id, rc);
		/* Mark the boot as failed for any other booting CPU */
		rmm_boot_failed = true;
	}

	return NULL;
}

/* Subscribe to the PSCI CPU_ON event to initialize the RMM on secondary CPUs */
SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, rmmd_cpu_on_finish_handler);

/* Convert a GPT library error to an RMMD GTS error */
static int gpt_to_gts_error(int error, uint32_t smc_fid, uint64_t address)
{
	int ret;

	if (error == 0) {
		return E_RMM_OK;
	}

	if (error == -EINVAL) {
		ret = E_RMM_BAD_ADDR;
	} else {
		/* This is the only other error code we expect */
		assert(error == -EPERM);
		ret = E_RMM_BAD_PAS;
	}

	ERROR("RMMD: PAS Transition failed. GPT ret = %d, PA: 0x%"PRIx64 ", FID = 0x%x\n",
				error, address, smc_fid);
	return ret;
}

static int rmm_el3_ifc_get_feat_register(uint64_t feat_reg_idx,
					 uint64_t *feat_reg)
{
	if (feat_reg_idx != RMM_EL3_FEAT_REG_0_IDX) {
		ERROR("RMMD: Failed to get feature register %ld\n", feat_reg_idx);
		return E_RMM_INVAL;
	}

	*feat_reg = 0UL;
#if RMMD_ENABLE_EL3_TOKEN_SIGN
	*feat_reg |= RMM_EL3_FEAT_REG_0_EL3_TOKEN_SIGN_MASK;
#endif
	return E_RMM_OK;
}
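
/*
 * Illustrative sketch of how the RMM is expected to probe the feature
 * register served above. The smc64() conduit and result fields are
 * hypothetical placeholders; the constants are the ones used in this file:
 *
 *   ret = smc64(RMM_EL3_FEATURES, RMM_EL3_FEAT_REG_0_IDX, ...);
 *   if (ret.x0 == E_RMM_OK &&
 *       (ret.x1 & RMM_EL3_FEAT_REG_0_EL3_TOKEN_SIGN_MASK) != 0UL) {
 *           // EL3-assisted realm attestation token signing is available.
 *   }
 */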

/*******************************************************************************
 * This function handles RMM-EL3 interface SMCs.
 ******************************************************************************/
uint64_t rmmd_rmm_el3_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
				uint64_t x3, uint64_t x4, void *cookie,
				void *handle, uint64_t flags)
{
	uint64_t remaining_len = 0UL;
	uint32_t src_sec_state;
	int ret;

	/* If RMM failed to boot, treat any RMM-EL3 interface SMC as unknown */
	if (rmm_boot_failed) {
		WARN("RMMD: Failed to boot up RMM. Ignoring RMM-EL3 call\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/* Determine which security state this SMC originated from */
	src_sec_state = caller_sec_state(flags);

	if (src_sec_state != SMC_FROM_REALM) {
		WARN("RMMD: RMM-EL3 call originated from secure or normal world\n");
		SMC_RET1(handle, SMC_UNK);
	}

	switch (smc_fid) {
	case RMM_GTSI_DELEGATE:
		ret = gpt_delegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
		SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
	case RMM_GTSI_UNDELEGATE:
		ret = gpt_undelegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
		SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
	case RMM_ATTEST_GET_PLAT_TOKEN:
		ret = rmmd_attest_get_platform_token(x1, &x2, x3, &remaining_len);
		SMC_RET3(handle, ret, x2, remaining_len);
	case RMM_ATTEST_GET_REALM_KEY:
		ret = rmmd_attest_get_signing_key(x1, &x2, x3);
		SMC_RET2(handle, ret, x2);
	case RMM_EL3_FEATURES:
		ret = rmm_el3_ifc_get_feat_register(x1, &x2);
		SMC_RET2(handle, ret, x2);
#if RMMD_ENABLE_EL3_TOKEN_SIGN
	case RMM_EL3_TOKEN_SIGN:
		return rmmd_el3_token_sign(handle, x1, x2, x3, x4);
#endif
	case RMM_BOOT_COMPLETE:
		VERBOSE("RMMD: running rmmd_rmm_sync_exit\n");
		rmmd_rmm_sync_exit(x1);

	default:
		WARN("RMMD: Unsupported RMM-EL3 call 0x%08x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}
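
/*
 * Illustrative sketch of the granule delegation flow driven by the RMM
 * against the handler above. The smc64() conduit is a hypothetical
 * placeholder; the function IDs and error codes are the ones handled in
 * rmmd_rmm_el3_handler():
 *
 *   // Move a 4KB granule from the Non-secure PAS to the Realm PAS.
 *   ret = smc64(RMM_GTSI_DELEGATE, pa, 0, 0, 0);
 *   if (ret != E_RMM_OK) {
 *           // E_RMM_BAD_ADDR: pa is not a valid, GPT-covered address.
 *           // E_RMM_BAD_PAS:  the granule's current PAS does not permit
 *           //                 the requested transition.
 *   }
 *
 *   // ... the granule can now back realm data or metadata ...
 *
 *   // Hand the granule back to the Non-secure PAS when done.
 *   ret = smc64(RMM_GTSI_UNDELEGATE, pa, 0, 0, 0);
 */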
529