/*
 * Copyright (c) 2021-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/el3_runtime/pubsub.h>
#include <lib/extensions/mpam.h>
#include <lib/extensions/pmuv3.h>
#include <lib/extensions/sys_reg_trace.h>
#include <lib/gpt_rme/gpt_rme.h>
#include <lib/per_cpu/per_cpu.h>

#include <lib/spinlock.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/rmmd_svc.h>
#include <smccc_helpers.h>
#include <lib/extensions/sme.h>
#include <lib/extensions/sve.h>
#include <lib/extensions/spe.h>
#include <lib/extensions/trbe.h>
#include "rmmd_private.h"

#define MECID_SHIFT			U(32)
#define MECID_MASK			0xFFFFU

#define MEC_REFRESH_REASON_SHIFT	U(0)
#define MEC_REFRESH_REASON_MASK		BIT(0)
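
/*
 * Layout of the x1 argument to RMM_MEC_REFRESH, as implied by the shift/mask
 * definitions above and their use in rmmd_mecid_key_update():
 *   bits [47:32] - MECID to refresh (MECID_SHIFT/MECID_MASK)
 *   bit  [0]     - refresh reason (MEC_REFRESH_REASON_SHIFT/_MASK)
 * All other bits are ignored by this dispatcher.
 */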

/*******************************************************************************
 * RMM boot failure flag
 ******************************************************************************/
static bool rmm_boot_failed;

/*******************************************************************************
 * RMM context information.
 ******************************************************************************/
PER_CPU_DEFINE(rmmd_rmm_context_t, rmm_context);

/*******************************************************************************
 * RMM entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *rmm_ep_info;

/*******************************************************************************
 * Static function declaration.
 ******************************************************************************/
static int32_t rmm_init(void);

/*******************************************************************************
 * This function takes an RMM context pointer and performs a synchronous entry
 * into it.
 ******************************************************************************/
uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *rmm_ctx)
{
	uint64_t rc;

	assert(rmm_ctx != NULL);

	cm_set_context(&(rmm_ctx->cpu_ctx), REALM);

	/* Restore the realm context assigned above */
	cm_el2_sysregs_context_restore(REALM);
	cm_set_next_eret_context(REALM);

	/* Enter RMM */
	rc = rmmd_rmm_enter(&rmm_ctx->c_rt_ctx);

	/*
	 * Save realm context. EL2 Non-secure context will be restored
	 * before exiting Non-secure world, therefore there is no need
	 * to clear EL2 context registers.
	 */
	cm_el2_sysregs_context_save(REALM);

	return rc;
}
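
/*
 * Note: the enter/exit helpers are assumed to follow the usual TF-A dispatcher
 * pattern: rmmd_rmm_enter() stashes the EL3 C runtime context in c_rt_ctx
 * before entering the Realm world, and rmmd_rmm_sync_exit() below hands that
 * cookie back to rmmd_rmm_exit() so that the original rmmd_rmm_sync_entry()
 * call appears to return with the value passed in 'rc'.
 */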

/*******************************************************************************
 * This function returns to the place where rmmd_rmm_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void rmmd_rmm_sync_exit(uint64_t rc)
{
	rmmd_rmm_context_t *ctx = PER_CPU_CUR(rmm_context);

	/* Get context of the RMM in use by this CPU. */
	assert(cm_get_context(REALM) == &(ctx->cpu_ctx));

	/*
	 * The RMMD must have initiated the original request through a
	 * synchronous entry into RMM. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	rmmd_rmm_exit(ctx->c_rt_ctx, rc);

	panic();
}

/*******************************************************************************
 * Jump to the RMM for the first time.
 ******************************************************************************/
static int32_t rmm_init(void)
{
	long rc;
	rmmd_rmm_context_t *ctx = PER_CPU_CUR(rmm_context);

	INFO("RMM init start.\n");

	rc = rmmd_rmm_sync_entry(ctx);
	if (rc != E_RMM_BOOT_SUCCESS) {
		ERROR("RMM init failed: %ld\n", rc);
		/* Mark the boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return 0;
	}

	INFO("RMM init end.\n");

	return 1;
}

/*******************************************************************************
 * Load and read RMM manifest, setup RMM.
 ******************************************************************************/
int rmmd_setup(void)
{
	size_t shared_buf_size __unused;
	uintptr_t shared_buf_base;
	uint32_t ep_attr;
	unsigned int linear_id = plat_my_core_pos();

	rmmd_rmm_context_t *rmm_ctx = PER_CPU_CUR(rmm_context);
	struct rmm_manifest *manifest;
	int rc;

	/* Make sure RME is supported. */
	if (is_feat_rme_present() == 0U) {
		/* Mark the RMM boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return -ENOTSUP;
	}

	rmm_ep_info = bl31_plat_get_next_image_ep_info(REALM);
	if ((rmm_ep_info == NULL) || (rmm_ep_info->pc == 0)) {
		WARN("No RMM image provided by BL2 boot loader, booting "
		     "device without RMM initialization. SMCs destined for "
		     "RMM will return SMC_UNK\n");

		/* Mark the boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return -ENOENT;
	}

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = EP_REALM;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0U) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(rmm_ep_info, PARAM_EP, VERSION_1, ep_attr);
	rmm_ep_info->spsr = SPSR_64(MODE_EL2,
				    MODE_SP_ELX,
				    DISABLE_ALL_EXCEPTIONS);

	shared_buf_size = plat_rmmd_get_el3_rmm_shared_mem(&shared_buf_base);

	assert((shared_buf_size == SZ_4K) &&
	       ((void *)shared_buf_base != NULL));

	/* Zero out and load the boot manifest at the beginning of the shared area */
	manifest = (struct rmm_manifest *)shared_buf_base;
	(void)memset((void *)manifest, 0, sizeof(struct rmm_manifest));

	rc = plat_rmmd_load_manifest(manifest);
	if (rc != 0) {
		ERROR("Error loading RMM Boot Manifest (%i)\n", rc);
		/* Mark the boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return rc;
	}
	flush_dcache_range((uintptr_t)shared_buf_base, shared_buf_size);

	/*
	 * Prepare coldboot arguments for RMM:
	 * arg0: This CPUID (primary processor).
	 * arg1: Version for this Boot Interface.
	 * arg2: PLATFORM_CORE_COUNT.
	 * arg3: Base address for the EL3 <-> RMM shared area. The boot
	 *       manifest will be stored at the beginning of this area.
	 * arg4: Opaque activation token, as returned by previous calls.
	 */
	rmm_ep_info->args.arg0 = linear_id;
	rmm_ep_info->args.arg1 = RMM_EL3_INTERFACE_VERSION;
	rmm_ep_info->args.arg2 = PLATFORM_CORE_COUNT;
	rmm_ep_info->args.arg3 = shared_buf_base;
	rmm_ep_info->args.arg4 = rmm_ctx->activation_token;

	/* Initialise RMM context with this entry point information */
	cm_setup_context(&rmm_ctx->cpu_ctx, rmm_ep_info);

	INFO("RMM setup done.\n");

	/* Register init function for deferred init. */
	bl31_register_rmm_init(&rmm_init);

	return 0;
}
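
/*
 * Boot flow note (derived from the code in this file): bl31_register_rmm_init()
 * registers rmm_init() for deferred execution later in BL31 boot. rmm_init()
 * then enters RMM through rmmd_rmm_sync_entry(); RMM reports the result of its
 * initialisation with the RMM_BOOT_COMPLETE SMC, which rmmd_rmm_el3_handler()
 * turns into the return value of the original synchronous entry via
 * rmmd_rmm_sync_exit().
 */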

/*******************************************************************************
 * Forward SMC to the other security state.
 ******************************************************************************/
static uint64_t rmmd_smc_forward(uint32_t src_sec_state,
				 uint32_t dst_sec_state, uint64_t x0,
				 uint64_t x1, uint64_t x2, uint64_t x3,
				 uint64_t x4, void *handle)
{
	cpu_context_t *ctx = cm_get_context(dst_sec_state);

	/* Save incoming security state */
	cm_el2_sysregs_context_save(src_sec_state);

	/* Restore outgoing security state */
	cm_el2_sysregs_context_restore(dst_sec_state);
	cm_set_next_eret_context(dst_sec_state);

	/*
	 * As per SMCCC v1.2, we need to preserve x4 to x7 unless they are
	 * being used as return args. Hence we differentiate the onward and
	 * backward paths. Support up to 8 args in the onward path and 4 args
	 * in the return path.
	 * Register x4 will be preserved by RMM in case it is not used in the
	 * return path.
	 */
	if (src_sec_state == NON_SECURE) {
		SMC_RET8(ctx, x0, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));
	}

	SMC_RET5(ctx, x0, x1, x2, x3, x4);
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for RMI. Each call is
 * either forwarded to the other security state or handled by the RMM
 * dispatcher.
 ******************************************************************************/
uint64_t rmmd_rmi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
			  uint64_t x3, uint64_t x4, void *cookie,
			  void *handle, uint64_t flags)
{
	uint32_t src_sec_state;

	/* If RMM failed to boot, treat any RMI SMC as unknown */
	if (rmm_boot_failed) {
		WARN("RMMD: Failed to boot up RMM. Ignoring RMI call\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/* Determine which security state this SMC originated from */
	src_sec_state = caller_sec_state(flags);

	/* RMI must not be invoked by the Secure world */
	if (src_sec_state == SMC_FROM_SECURE) {
		WARN("RMMD: RMI invoked by secure world.\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/*
	 * Forward an RMI call from the Normal world to the Realm world as it
	 * is.
	 */
	if (src_sec_state == SMC_FROM_NON_SECURE) {
		/*
		 * If the SVE hint bit is set in the flags then update the SMC
		 * function id and pass it on to the lower EL.
		 */
		if (is_sve_hint_set(flags)) {
			smc_fid |= (FUNCID_SVE_HINT_MASK <<
				    FUNCID_SVE_HINT_SHIFT);
		}
		VERBOSE("RMMD: RMI call from non-secure world.\n");
		return rmmd_smc_forward(NON_SECURE, REALM, smc_fid,
					x1, x2, x3, x4, handle);
	}

	if (src_sec_state != SMC_FROM_REALM) {
		SMC_RET1(handle, SMC_UNK);
	}

	switch (smc_fid) {
	case RMM_RMI_REQ_COMPLETE: {
		uint64_t x5 = SMC_GET_GP(handle, CTX_GPREG_X5);

		return rmmd_smc_forward(REALM, NON_SECURE, x1,
					x2, x3, x4, x5, handle);
	}
	default:
		WARN("RMMD: Unsupported RMM call 0x%08x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}

/*******************************************************************************
 * This cpu has been turned on. Enter RMM to initialise R-EL2. Entry into RMM
 * is done after initialising minimal architectural state that guarantees safe
 * execution.
 ******************************************************************************/
static void *rmmd_cpu_on_finish_handler(const void *arg)
{
	long rc;
	uint32_t linear_id = plat_my_core_pos();
	rmmd_rmm_context_t *ctx = PER_CPU_CUR(rmm_context);
	/* Create a local copy of ep info to avoid race conditions */
	entry_point_info_t local_rmm_ep_info = *rmm_ep_info;

	if (rmm_boot_failed) {
		/* RMM boot failed on a previous CPU. Abort. */
		ERROR("RMM Failed to initialize. Ignoring for CPU%u\n",
		      linear_id);
		return NULL;
	}

	/*
	 * Prepare warmboot arguments for RMM:
	 * arg0: This CPUID.
	 * arg1: Opaque activation token, as returned by previous calls.
	 * arg2 to arg3: Not used.
	 */
	local_rmm_ep_info.args.arg0 = linear_id;
	local_rmm_ep_info.args.arg1 = ctx->activation_token;
	local_rmm_ep_info.args.arg2 = 0ULL;
	local_rmm_ep_info.args.arg3 = 0ULL;

	/* Initialise RMM context with this entry point information */
	cm_setup_context(&ctx->cpu_ctx, &local_rmm_ep_info);

	rc = rmmd_rmm_sync_entry(ctx);

	if (rc != E_RMM_BOOT_SUCCESS) {
		ERROR("RMM init failed on CPU%u: %ld\n", linear_id, rc);
		/*
		 * TODO: Investigate handling of rmm_boot_failed under
		 * concurrent access, or explore alternative approaches
		 * to fix up the logic.
		 */
		rmm_boot_failed = true;
	}

	return NULL;
}

/* Subscribe to PSCI CPU on to initialize RMM on secondary */
SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, rmmd_cpu_on_finish_handler);

/* Convert GPT lib error to RMMD GTS error */
static int gpt_to_gts_error(int error, uint32_t smc_fid, uint64_t address)
{
	int ret;

	if (error == 0) {
		return E_RMM_OK;
	}

	if (error == -EINVAL) {
		ret = E_RMM_BAD_ADDR;
	} else {
		/* This is the only other error code we expect */
		assert(error == -EPERM);
		ret = E_RMM_BAD_PAS;
	}

	ERROR("RMMD: PAS Transition failed. GPT ret = %d, PA: 0x%"PRIx64 ", FID = 0x%x\n",
	      error, address, smc_fid);
	return ret;
}

static int rmm_el3_ifc_get_feat_register(uint64_t feat_reg_idx,
					 uint64_t *feat_reg)
{
	if (feat_reg_idx != RMM_EL3_FEAT_REG_0_IDX) {
		ERROR("RMMD: Failed to get feature register %lu\n", feat_reg_idx);
		return E_RMM_INVAL;
	}

	*feat_reg = 0UL;
#if RMMD_ENABLE_EL3_TOKEN_SIGN
	*feat_reg |= RMM_EL3_FEAT_REG_0_EL3_TOKEN_SIGN_MASK;
#endif
	return E_RMM_OK;
}

/*
 * Update the encryption key associated with the MECID included in x1.
 */
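/*
 * Example of the width check below: if MECIDR_EL2.MECIDWidthm1 reads as 15,
 * mecid_width is 16 and mecid_width_mask is 0xFFFF, so any MECID encodable in
 * 16 bits is accepted; a MECID with bits set above the implemented width is
 * rejected with E_RMM_INVAL.
 */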
static int rmmd_mecid_key_update(uint64_t x1)
{
	uint64_t mecid_width, mecid_width_mask;
	uint16_t mecid;
	unsigned int reason;
	int ret;

	/*
	 * Check whether FEAT_MEC is supported by the hardware. If not, return
	 * unknown SMC.
	 */
	if (is_feat_mec_supported() == false) {
		return E_RMM_UNK;
	}

	/*
	 * Check that the mecid parameter is no wider than
	 * MECIDR_EL2.MECIDWidthm1 + 1 bits.
	 */
	mecid_width = ((read_mecidr_el2() >> MECIDR_EL2_MECIDWidthm1_SHIFT) &
		       MECIDR_EL2_MECIDWidthm1_MASK) + 1UL;
	mecid_width_mask = ((1UL << mecid_width) - 1UL);

	mecid = (x1 >> MECID_SHIFT) & MECID_MASK;
	if ((mecid & ~mecid_width_mask) != 0U) {
		return E_RMM_INVAL;
	}

	reason = (x1 >> MEC_REFRESH_REASON_SHIFT) & MEC_REFRESH_REASON_MASK;
	ret = plat_rmmd_mecid_key_update(mecid, reason);

	if (ret != 0) {
		return E_RMM_UNK;
	}
	return E_RMM_OK;
}

/*******************************************************************************
 * This function handles RMM-EL3 interface SMCs.
 ******************************************************************************/
uint64_t rmmd_rmm_el3_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
			      uint64_t x3, uint64_t x4, void *cookie,
			      void *handle, uint64_t flags)
{
	uint64_t remaining_len = 0UL;
	uint32_t src_sec_state;
	int ret;

	/* If RMM failed to boot, treat any RMM-EL3 interface SMC as unknown */
	if (rmm_boot_failed) {
		WARN("RMMD: Failed to boot up RMM. Ignoring RMM-EL3 call\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/* Determine which security state this SMC originated from */
	src_sec_state = caller_sec_state(flags);

	if (src_sec_state != SMC_FROM_REALM) {
		WARN("RMMD: RMM-EL3 call originated from secure or normal world\n");
		SMC_RET1(handle, SMC_UNK);
	}

	switch (smc_fid) {
	case RMM_GTSI_DELEGATE:
		ret = gpt_delegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
		SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
	case RMM_GTSI_UNDELEGATE:
		ret = gpt_undelegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
		SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
	case RMM_ATTEST_GET_REALM_KEY:
		ret = rmmd_attest_get_signing_key(x1, &x2, x3);
		SMC_RET2(handle, ret, x2);
	case RMM_ATTEST_GET_PLAT_TOKEN:
		ret = rmmd_attest_get_platform_token(x1, &x2, x3, &remaining_len);
		SMC_RET3(handle, ret, x2, remaining_len);
	case RMM_EL3_FEATURES:
		ret = rmm_el3_ifc_get_feat_register(x1, &x2);
		SMC_RET2(handle, ret, x2);
#if RMMD_ENABLE_EL3_TOKEN_SIGN
	case RMM_EL3_TOKEN_SIGN:
		return rmmd_el3_token_sign(handle, x1, x2, x3, x4);
#endif

#if RMMD_ENABLE_IDE_KEY_PROG
	case RMM_IDE_KEY_PROG:
	{
		rp_ide_key_info_t ide_key_info;

		ide_key_info.keyqw0 = x4;
		ide_key_info.keyqw1 = SMC_GET_GP(handle, CTX_GPREG_X5);
		ide_key_info.keyqw2 = SMC_GET_GP(handle, CTX_GPREG_X6);
		ide_key_info.keyqw3 = SMC_GET_GP(handle, CTX_GPREG_X7);
		ide_key_info.ifvqw0 = SMC_GET_GP(handle, CTX_GPREG_X8);
		ide_key_info.ifvqw1 = SMC_GET_GP(handle, CTX_GPREG_X9);
		uint64_t x10 = SMC_GET_GP(handle, CTX_GPREG_X10);
		uint64_t x11 = SMC_GET_GP(handle, CTX_GPREG_X11);

		ret = rmmd_el3_ide_key_program(x1, x2, x3, &ide_key_info, x10, x11);
		SMC_RET1(handle, ret);
	}
	case RMM_IDE_KEY_SET_GO:
		ret = rmmd_el3_ide_key_set_go(x1, x2, x3, x4, SMC_GET_GP(handle, CTX_GPREG_X5));
		SMC_RET1(handle, ret);
	case RMM_IDE_KEY_SET_STOP:
		ret = rmmd_el3_ide_key_set_stop(x1, x2, x3, x4, SMC_GET_GP(handle, CTX_GPREG_X5));
		SMC_RET1(handle, ret);
	case RMM_IDE_KM_PULL_RESPONSE: {
		uint64_t req_resp = 0, req_id = 0, cookie_var = 0;

		ret = rmmd_el3_ide_km_pull_response(x1, x2, &req_resp, &req_id, &cookie_var);
		SMC_RET4(handle, ret, req_resp, req_id, cookie_var);
	}
#endif /* RMMD_ENABLE_IDE_KEY_PROG */
	case RMM_RESERVE_MEMORY:
		ret = rmmd_reserve_memory(x1, &x2);
		SMC_RET2(handle, ret, x2);
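
	/*
	 * RMM_BOOT_COMPLETE is issued by RMM at the end of its (cold or warm)
	 * boot: x1 carries the boot status returned to rmmd_rmm_sync_entry()
	 * and x2 carries the activation token recorded for later warm boots
	 * (see the warmboot arguments in rmmd_cpu_on_finish_handler()).
	 */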
	case RMM_BOOT_COMPLETE:
	{
		rmmd_rmm_context_t *ctx = PER_CPU_CUR(rmm_context);

		ctx->activation_token = x2;
		VERBOSE("RMMD: running rmmd_rmm_sync_exit\n");
		rmmd_rmm_sync_exit(x1);
	}
	case RMM_MEC_REFRESH:
		ret = rmmd_mecid_key_update(x1);
		SMC_RET1(handle, ret);
	default:
		WARN("RMMD: Unsupported RMM-EL3 call 0x%08x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}

/**
 * Helper to activate the primary CPU with the updated RMM, mainly used during
 * LFA of RMM.
 */
int rmmd_primary_activate(void)
{
	int rc;

	rc = rmmd_setup();
	if (rc != 0) {
		ERROR("rmmd_setup failed during LFA: %d\n", rc);
		return rc;
	}

	rc = rmm_init();
	if (rc == 0) {
		/* rmm_init() returns 0 on failure and 1 on success */
		ERROR("rmm_init failed during LFA\n");
		return -1;
	}

	INFO("RMM warm reset done on primary during LFA.\n");

	return 0;
}

/**
 * Helper to activate a secondary CPU with the updated RMM, mainly used during
 * LFA of RMM.
 */
int rmmd_secondary_activate(void)
{
	rmmd_cpu_on_finish_handler(NULL);

	return 0;
}