/*
 * Copyright (c) 2021-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/el3_runtime/pubsub.h>
#include <lib/extensions/mpam.h>
#include <lib/extensions/pmuv3.h>
#include <lib/extensions/sys_reg_trace.h>
#include <lib/gpt_rme/gpt_rme.h>

#include <lib/spinlock.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/rmmd_svc.h>
#include <smccc_helpers.h>
#include <lib/extensions/sme.h>
#include <lib/extensions/sve.h>
#include <lib/extensions/spe.h>
#include <lib/extensions/trbe.h>
#include "rmmd_initial_context.h"
#include "rmmd_private.h"

/*******************************************************************************
 * RMM boot failure flag
 ******************************************************************************/
static bool rmm_boot_failed;

/*******************************************************************************
 * RMM context information.
 ******************************************************************************/
rmmd_rmm_context_t rmm_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * RMM entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *rmm_ep_info;

/*******************************************************************************
 * Static function declaration.
 ******************************************************************************/
static int32_t rmm_init(void);

/*******************************************************************************
 * This function takes an RMM context pointer and performs a synchronous entry
 * into it.
 ******************************************************************************/
uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *rmm_ctx)
{
        uint64_t rc;

        assert(rmm_ctx != NULL);

        cm_set_context(&(rmm_ctx->cpu_ctx), REALM);

        /* Restore the realm context assigned above */
        cm_el2_sysregs_context_restore(REALM);
        cm_set_next_eret_context(REALM);

        /* Enter RMM */
        rc = rmmd_rmm_enter(&rmm_ctx->c_rt_ctx);

        /*
         * Save realm context. EL2 Non-secure context will be restored
         * before exiting Non-secure world, therefore there is no need
         * to clear EL2 context registers.
         */
        cm_el2_sysregs_context_save(REALM);

        return rc;
}

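/*
 * Note on the synchronous entry above: rmmd_rmm_enter() and rmmd_rmm_exit()
 * are assumed to be the usual dispatcher assembly helpers that save and
 * restore the EL3 C runtime (callee-saved registers and stack pointer) in
 * c_rt_ctx. A rough sketch of the round trip:
 *
 *   rc = rmmd_rmm_enter(&ctx->c_rt_ctx);  // save C runtime, ERET into RMM
 *       ... RMM executes at R-EL2 ...
 *   rmmd_rmm_exit(ctx->c_rt_ctx, rc);     // restore C runtime; the earlier
 *                                         // rmmd_rmm_enter() call now appears
 *                                         // to return with the value 'rc'
 */
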
/*******************************************************************************
 * This function returns to the place where rmmd_rmm_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void rmmd_rmm_sync_exit(uint64_t rc)
{
        rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];

        /* Get context of the RMM in use by this CPU. */
        assert(cm_get_context(REALM) == &(ctx->cpu_ctx));

        /*
         * The RMMD must have initiated the original request through a
         * synchronous entry into RMM. Jump back to the original C runtime
         * context with the value of rc in x0.
         */
        rmmd_rmm_exit(ctx->c_rt_ctx, rc);

        panic();
}

static void rmm_el2_context_init(el2_sysregs_t *regs)
{
        write_el2_ctx_common(regs, spsr_el2, REALM_SPSR_EL2);
        write_el2_ctx_common(regs, sctlr_el2, SCTLR_EL2_RES1);
}

/*******************************************************************************
 * Enable architecture extensions on first entry to Realm world.
 ******************************************************************************/
static void manage_extensions_realm(cpu_context_t *ctx)
{
        /*
         * Enable access to TPIDR2_EL0 if SME/SME2 is enabled for the
         * Non-secure world.
         */
        if (is_feat_sme_supported()) {
                sme_enable(ctx);
        }

        /*
         * SPE and TRBE cannot be fully disabled from EL3 registers alone;
         * only access to their system registers can be. In case the EL1
         * controls leave them active across a context switch, we want the
         * owning security state to be NS so the Realm world cannot be DoSed.
         */
        if (is_feat_spe_supported()) {
                spe_disable(ctx);
        }

        if (is_feat_trbe_supported()) {
                trbe_disable(ctx);
        }
}

/*******************************************************************************
 * Jump to the RMM for the first time.
 ******************************************************************************/
static int32_t rmm_init(void)
{
        long rc;
        rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];

        INFO("RMM init start.\n");

        /* Enable architecture extensions */
        manage_extensions_realm(&ctx->cpu_ctx);

        /* Initialize RMM EL2 context. */
        rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx);

        rc = rmmd_rmm_sync_entry(ctx);
        if (rc != E_RMM_BOOT_SUCCESS) {
                ERROR("RMM init failed: %ld\n", rc);
                /* Mark the boot as failed for all the CPUs */
                rmm_boot_failed = true;
                return 0;
        }

        INFO("RMM init end.\n");

        return 1;
}

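/*
 * Boot handshake sketch (derived from the handlers in this file): the value
 * compared against E_RMM_BOOT_SUCCESS above is produced by the RMM itself.
 * Roughly:
 *
 *   rmm_init()
 *     -> rmmd_rmm_sync_entry()            // ERET into R-EL2
 *          ... RMM boots ...
 *     <- RMM issues the RMM_BOOT_COMPLETE SMC with its status in x1
 *   rmmd_rmm_el3_handler(RMM_BOOT_COMPLETE, x1, ...)
 *     -> rmmd_rmm_sync_exit(x1)           // x1 becomes rc of sync_entry
 */
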
/*******************************************************************************
 * Load and read RMM manifest, setup RMM.
 ******************************************************************************/
int rmmd_setup(void)
{
        size_t shared_buf_size __unused;
        uintptr_t shared_buf_base;
        uint32_t ep_attr;
        unsigned int linear_id = plat_my_core_pos();
        rmmd_rmm_context_t *rmm_ctx = &rmm_context[linear_id];
        struct rmm_manifest *manifest;
        int rc;

        /* Make sure RME is supported. */
        if (is_feat_rme_present() == 0U) {
                /* Mark the RMM boot as failed for all the CPUs */
                rmm_boot_failed = true;
                return -ENOTSUP;
        }

        rmm_ep_info = bl31_plat_get_next_image_ep_info(REALM);
        if ((rmm_ep_info == NULL) || (rmm_ep_info->pc == 0)) {
                WARN("No RMM image provided by BL2 boot loader. Booting "
                     "device without RMM initialization. SMCs destined for "
                     "RMM will return SMC_UNK\n");

                /* Mark the boot as failed for all the CPUs */
                rmm_boot_failed = true;
                return -ENOENT;
        }

        /* Initialise an entrypoint to set up the CPU context */
        ep_attr = EP_REALM;
        if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0U) {
                ep_attr |= EP_EE_BIG;
        }

        SET_PARAM_HEAD(rmm_ep_info, PARAM_EP, VERSION_1, ep_attr);
        rmm_ep_info->spsr = SPSR_64(MODE_EL2,
                                    MODE_SP_ELX,
                                    DISABLE_ALL_EXCEPTIONS);

        shared_buf_size =
                plat_rmmd_get_el3_rmm_shared_mem(&shared_buf_base);

        assert((shared_buf_size == SZ_4K) &&
               ((void *)shared_buf_base != NULL));

        /* Zero out and load the boot manifest at the beginning of the shared area */
        manifest = (struct rmm_manifest *)shared_buf_base;
        (void)memset((void *)manifest, 0, sizeof(struct rmm_manifest));

        rc = plat_rmmd_load_manifest(manifest);
        if (rc != 0) {
                ERROR("Error loading RMM Boot Manifest (%i)\n", rc);
                /* Mark the boot as failed for all the CPUs */
                rmm_boot_failed = true;
                return rc;
        }
        flush_dcache_range((uintptr_t)shared_buf_base, shared_buf_size);

        /*
         * Prepare coldboot arguments for RMM:
         * arg0: This CPUID (primary processor).
         * arg1: Version for this Boot Interface.
         * arg2: PLATFORM_CORE_COUNT.
         * arg3: Base address for the EL3 <-> RMM shared area. The boot
         *       manifest will be stored at the beginning of this area.
         */
        rmm_ep_info->args.arg0 = linear_id;
        rmm_ep_info->args.arg1 = RMM_EL3_INTERFACE_VERSION;
        rmm_ep_info->args.arg2 = PLATFORM_CORE_COUNT;
        rmm_ep_info->args.arg3 = shared_buf_base;

        /* Initialise RMM context with this entry point information */
        cm_setup_context(&rmm_ctx->cpu_ctx, rmm_ep_info);

        INFO("RMM setup done.\n");

        /* Register init function for deferred init. */
        bl31_register_rmm_init(&rmm_init);

        return 0;
}

/*******************************************************************************
 * Forward SMC to the other security state
 ******************************************************************************/
static uint64_t rmmd_smc_forward(uint32_t src_sec_state,
                                 uint32_t dst_sec_state, uint64_t x0,
                                 uint64_t x1, uint64_t x2, uint64_t x3,
                                 uint64_t x4, void *handle)
{
        cpu_context_t *ctx = cm_get_context(dst_sec_state);

        /* Save incoming security state */
        cm_el2_sysregs_context_save(src_sec_state);

        /* Restore outgoing security state */
        cm_el2_sysregs_context_restore(dst_sec_state);
        cm_set_next_eret_context(dst_sec_state);

        /*
         * As per SMCCC v1.2 we need to preserve x4 to x7 unless they are
         * being used as return arguments. Hence we differentiate the onward
         * and return paths: up to 8 arguments are supported on the onward
         * path and 4 on the return path. Register x4 will be preserved by
         * RMM in case it is not used on the return path.
         */
        if (src_sec_state == NON_SECURE) {
                SMC_RET8(ctx, x0, x1, x2, x3, x4,
                         SMC_GET_GP(handle, CTX_GPREG_X5),
                         SMC_GET_GP(handle, CTX_GPREG_X6),
                         SMC_GET_GP(handle, CTX_GPREG_X7));
        }

        SMC_RET5(ctx, x0, x1, x2, x3, x4);
}

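/*
 * Register-shuffling sketch for rmmd_smc_forward() (informational only):
 *
 * NS -> Realm (onward path, up to 8 arguments):
 *   Realm x0..x4 = the x0..x4 values passed in by the caller of
 *                  rmmd_smc_forward()
 *   Realm x5..x7 = x5..x7 read back from the saved NS GP register context
 *
 * Realm -> NS (return path, up to 4 return values plus a status):
 *   NS x0..x4 = the x0..x4 values passed in; see the RMM_RMI_REQ_COMPLETE
 *   case below, which maps the Realm's x1..x5 onto the NS caller's x0..x4.
 */
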
/*******************************************************************************
 * This function handles all SMCs in the range reserved for RMI. Each call is
 * either forwarded to the other security state or handled by the RMM
 * dispatcher.
 ******************************************************************************/
uint64_t rmmd_rmi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
                          uint64_t x3, uint64_t x4, void *cookie,
                          void *handle, uint64_t flags)
{
        uint32_t src_sec_state;

        /* If RMM failed to boot, treat any RMI SMC as unknown */
        if (rmm_boot_failed) {
                WARN("RMMD: Failed to boot up RMM. Ignoring RMI call\n");
                SMC_RET1(handle, SMC_UNK);
        }

        /* Determine which security state this SMC originated from */
        src_sec_state = caller_sec_state(flags);

        /* RMI must not be invoked by the Secure world */
        if (src_sec_state == SMC_FROM_SECURE) {
                WARN("RMMD: RMI invoked by secure world.\n");
                SMC_RET1(handle, SMC_UNK);
        }

        /*
         * Forward an RMI call from the Normal world to the Realm world as it
         * is.
         */
        if (src_sec_state == SMC_FROM_NON_SECURE) {
                /*
                 * If the SVE hint bit is set in the flags then update the SMC
                 * function id and pass it on to the lower EL.
                 */
                if (is_sve_hint_set(flags)) {
                        smc_fid |= (FUNCID_SVE_HINT_MASK <<
                                    FUNCID_SVE_HINT_SHIFT);
                }
                VERBOSE("RMMD: RMI call from non-secure world.\n");
                return rmmd_smc_forward(NON_SECURE, REALM, smc_fid,
                                        x1, x2, x3, x4, handle);
        }

        if (src_sec_state != SMC_FROM_REALM) {
                SMC_RET1(handle, SMC_UNK);
        }

        switch (smc_fid) {
        case RMM_RMI_REQ_COMPLETE: {
                uint64_t x5 = SMC_GET_GP(handle, CTX_GPREG_X5);

                /* Return the Realm's x1..x5 as x0..x4 to the NS caller. */
                return rmmd_smc_forward(REALM, NON_SECURE, x1,
                                        x2, x3, x4, x5, handle);
        }
        default:
                WARN("RMMD: Unsupported RMM call 0x%08x\n", smc_fid);
                SMC_RET1(handle, SMC_UNK);
        }
}

/*******************************************************************************
 * This cpu has been turned on. Enter RMM to initialise R-EL2. Entry into RMM
 * is done after initialising minimal architectural state that guarantees safe
 * execution.
 ******************************************************************************/
static void *rmmd_cpu_on_finish_handler(const void *arg)
{
        long rc;
        uint32_t linear_id = plat_my_core_pos();
        rmmd_rmm_context_t *ctx = &rmm_context[linear_id];

        if (rmm_boot_failed) {
                /* RMM boot failed on a previous CPU. Abort. */
                ERROR("RMM Failed to initialize. Ignoring for CPU%u\n",
                      linear_id);
                return NULL;
        }

        /*
         * Prepare warmboot arguments for RMM:
         * arg0: This CPUID.
         * arg1 to arg3: Not used.
         */
        rmm_ep_info->args.arg0 = linear_id;
        rmm_ep_info->args.arg1 = 0ULL;
        rmm_ep_info->args.arg2 = 0ULL;
        rmm_ep_info->args.arg3 = 0ULL;

        /* Initialise RMM context with this entry point information */
        cm_setup_context(&ctx->cpu_ctx, rmm_ep_info);

        /* Enable architecture extensions */
        manage_extensions_realm(&ctx->cpu_ctx);

        /* Initialize RMM EL2 context. */
        rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx);

        rc = rmmd_rmm_sync_entry(ctx);

        if (rc != E_RMM_BOOT_SUCCESS) {
                ERROR("RMM init failed on CPU%u: %ld\n", linear_id, rc);
                /* Mark the boot as failed for any other booting CPU */
                rmm_boot_failed = true;
        }

        return NULL;
}

/* Subscribe to PSCI CPU on to initialize RMM on secondary */
SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, rmmd_cpu_on_finish_handler);

/* Convert GPT lib error to RMMD GTS error */
static int gpt_to_gts_error(int error, uint32_t smc_fid, uint64_t address)
{
        int ret;

        if (error == 0) {
                return E_RMM_OK;
        }

        if (error == -EINVAL) {
                ret = E_RMM_BAD_ADDR;
        } else {
                /* This is the only other error code we expect */
                assert(error == -EPERM);
                ret = E_RMM_BAD_PAS;
        }

        ERROR("RMMD: PAS Transition failed. GPT ret = %d, PA: 0x%"PRIx64 ", FID = 0x%x\n",
              error, address, smc_fid);
        return ret;
}

static int rmm_el3_ifc_get_feat_register(uint64_t feat_reg_idx,
                                         uint64_t *feat_reg)
{
        if (feat_reg_idx != RMM_EL3_FEAT_REG_0_IDX) {
                ERROR("RMMD: Failed to get feature register %" PRIu64 "\n",
                      feat_reg_idx);
                return E_RMM_INVAL;
        }

        *feat_reg = 0UL;
#if RMMD_ENABLE_EL3_TOKEN_SIGN
        *feat_reg |= RMM_EL3_FEAT_REG_0_EL3_TOKEN_SIGN_MASK;
#endif
        return E_RMM_OK;
}

/*
 * Update the encryption key associated with @mecid.
 */
static int rmmd_mecid_key_update(uint64_t mecid)
{
        uint64_t mecid_width, mecid_width_mask;
        int ret;

        /*
         * Check whether FEAT_MEC is supported by the hardware. If not, return
         * unknown SMC.
         */
        if (is_feat_mec_supported() == false) {
                return E_RMM_UNK;
        }

        /*
         * Check whether the mecid parameter fits within
         * MECIDR_EL2.MECIDWidthm1 + 1 bits.
         */
        mecid_width = ((read_mecidr_el2() >> MECIDR_EL2_MECIDWidthm1_SHIFT) &
                       MECIDR_EL2_MECIDWidthm1_MASK) + 1U;
        mecid_width_mask = (1ULL << mecid_width) - 1ULL;
        if ((mecid & ~mecid_width_mask) != 0U) {
                return E_RMM_INVAL;
        }

        ret = plat_rmmd_mecid_key_update(mecid);

        if (ret != 0) {
                return E_RMM_UNK;
        }
        return E_RMM_OK;
}

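/*
 * Worked example for the width check above (illustrative values only):
 * if MECIDR_EL2.MECIDWidthm1 reads as 15, then mecid_width = 16 and
 * mecid_width_mask = (1ULL << 16) - 1 = 0xFFFF. Any mecid with a bit set
 * above bit 15 (e.g. 0x1_0000) fails the check and E_RMM_INVAL is returned
 * to the RMM.
 */
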
/*******************************************************************************
 * This function handles RMM-EL3 interface SMCs
 ******************************************************************************/
uint64_t rmmd_rmm_el3_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
                              uint64_t x3, uint64_t x4, void *cookie,
                              void *handle, uint64_t flags)
{
        uint64_t remaining_len = 0UL;
        uint32_t src_sec_state;
        int ret;

        /* If RMM failed to boot, treat any RMM-EL3 interface SMC as unknown */
        if (rmm_boot_failed) {
                WARN("RMMD: Failed to boot up RMM. Ignoring RMM-EL3 call\n");
                SMC_RET1(handle, SMC_UNK);
        }

        /* Determine which security state this SMC originated from */
        src_sec_state = caller_sec_state(flags);

        if (src_sec_state != SMC_FROM_REALM) {
                WARN("RMMD: RMM-EL3 call originated from secure or normal world\n");
                SMC_RET1(handle, SMC_UNK);
        }

        switch (smc_fid) {
        case RMM_GTSI_DELEGATE:
                ret = gpt_delegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
                SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
        case RMM_GTSI_UNDELEGATE:
                ret = gpt_undelegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
                SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
        case RMM_ATTEST_GET_REALM_KEY:
                ret = rmmd_attest_get_signing_key(x1, &x2, x3);
                SMC_RET2(handle, ret, x2);
        case RMM_ATTEST_GET_PLAT_TOKEN:
                ret = rmmd_attest_get_platform_token(x1, &x2, x3, &remaining_len);
                SMC_RET3(handle, ret, x2, remaining_len);
        case RMM_EL3_FEATURES:
                ret = rmm_el3_ifc_get_feat_register(x1, &x2);
                SMC_RET2(handle, ret, x2);
#if RMMD_ENABLE_EL3_TOKEN_SIGN
        case RMM_EL3_TOKEN_SIGN:
                return rmmd_el3_token_sign(handle, x1, x2, x3, x4);
#endif

#if RMMD_ENABLE_IDE_KEY_PROG
        case RMM_IDE_KEY_PROG:
        {
                rp_ide_key_info_t ide_key_info;

                ide_key_info.keyqw0 = x4;
                ide_key_info.keyqw1 = SMC_GET_GP(handle, CTX_GPREG_X5);
                ide_key_info.keyqw2 = SMC_GET_GP(handle, CTX_GPREG_X6);
                ide_key_info.keyqw3 = SMC_GET_GP(handle, CTX_GPREG_X7);
                ide_key_info.ifvqw0 = SMC_GET_GP(handle, CTX_GPREG_X8);
                ide_key_info.ifvqw1 = SMC_GET_GP(handle, CTX_GPREG_X9);
                uint64_t x10 = SMC_GET_GP(handle, CTX_GPREG_X10);
                uint64_t x11 = SMC_GET_GP(handle, CTX_GPREG_X11);

                ret = rmmd_el3_ide_key_program(x1, x2, x3, &ide_key_info, x10, x11);
                SMC_RET1(handle, ret);
        }
        case RMM_IDE_KEY_SET_GO:
                ret = rmmd_el3_ide_key_set_go(x1, x2, x3, x4,
                                              SMC_GET_GP(handle, CTX_GPREG_X5));
                SMC_RET1(handle, ret);
        case RMM_IDE_KEY_SET_STOP:
                ret = rmmd_el3_ide_key_set_stop(x1, x2, x3, x4,
                                                SMC_GET_GP(handle, CTX_GPREG_X5));
                SMC_RET1(handle, ret);
        case RMM_IDE_KM_PULL_RESPONSE: {
                uint64_t req_resp = 0, req_id = 0, cookie_var = 0;

                ret = rmmd_el3_ide_km_pull_response(x1, x2, &req_resp, &req_id, &cookie_var);
                SMC_RET4(handle, ret, req_resp, req_id, cookie_var);
        }
#endif /* RMMD_ENABLE_IDE_KEY_PROG */
        case RMM_BOOT_COMPLETE:
                VERBOSE("RMMD: running rmmd_rmm_sync_exit\n");
                rmmd_rmm_sync_exit(x1);

        case RMM_MECID_KEY_UPDATE:
                ret = rmmd_mecid_key_update(x1);
                SMC_RET1(handle, ret);
        default:
                WARN("RMMD: Unsupported RMM-EL3 call 0x%08x\n", smc_fid);
                SMC_RET1(handle, SMC_UNK);
        }
}