/*
 * Copyright (c) 2021-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/el3_runtime/pubsub.h>
#include <lib/extensions/mpam.h>
#include <lib/extensions/pmuv3.h>
#include <lib/extensions/sys_reg_trace.h>
#include <lib/gpt_rme/gpt_rme.h>

#include <lib/spinlock.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/rmmd_svc.h>
#include <smccc_helpers.h>
#include <lib/extensions/sme.h>
#include <lib/extensions/sve.h>
#include <lib/extensions/spe.h>
#include <lib/extensions/trbe.h>
#include "rmmd_private.h"

#define MECID_SHIFT			U(32)
#define MECID_MASK			0xFFFFU

#define MEC_REFRESH_REASON_SHIFT	U(0)
#define MEC_REFRESH_REASON_MASK		BIT(0)

/*******************************************************************************
 * RMM boot failure flag
 ******************************************************************************/
static bool rmm_boot_failed;

/*******************************************************************************
 * RMM context information.
 ******************************************************************************/
rmmd_rmm_context_t rmm_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * RMM entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *rmm_ep_info;

/*******************************************************************************
 * Static function declaration.
 ******************************************************************************/
static int32_t rmm_init(void);

/*******************************************************************************
 * This function takes an RMM context pointer and performs a synchronous entry
 * into it.
 ******************************************************************************/
uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *rmm_ctx)
{
	uint64_t rc;

	assert(rmm_ctx != NULL);

	cm_set_context(&(rmm_ctx->cpu_ctx), REALM);

	/* Restore the realm context assigned above */
	cm_el2_sysregs_context_restore(REALM);
	cm_set_next_eret_context(REALM);

	/* Enter RMM */
	rc = rmmd_rmm_enter(&rmm_ctx->c_rt_ctx);

	/*
	 * Save realm context. EL2 Non-secure context will be restored
	 * before exiting Non-secure world, therefore there is no need
	 * to clear EL2 context registers.
	 */
	cm_el2_sysregs_context_save(REALM);

	return rc;
}

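/*
 * Note on the enter/exit pairing: rmmd_rmm_enter()/rmmd_rmm_exit() (assembly
 * helpers, not shown in this file) save and restore the EL3 C runtime context
 * via c_rt_ctx, so a synchronous entry appears to return normally with the
 * value the RMM handed back. A simplified view of the cold boot hand-off
 * (illustrative, not exhaustive):
 *
 *   rmm_init()
 *     -> rmmd_rmm_sync_entry()          EL3 -> R-EL2 cold boot entry
 *          ... RMM initialises itself ...
 *     <- RMM_BOOT_COMPLETE SMC          handled in rmmd_rmm_el3_handler()
 *          -> rmmd_rmm_sync_exit(x1)    back to the saved C runtime context
 */
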
/*******************************************************************************
 * This function returns to the place where rmmd_rmm_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void rmmd_rmm_sync_exit(uint64_t rc)
{
	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];

	/* Get context of the RMM in use by this CPU. */
	assert(cm_get_context(REALM) == &(ctx->cpu_ctx));

	/*
	 * The RMMD must have initiated the original request through a
	 * synchronous entry into RMM. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	rmmd_rmm_exit(ctx->c_rt_ctx, rc);

	panic();
}

/*******************************************************************************
 * Jump to the RMM for the first time.
 ******************************************************************************/
static int32_t rmm_init(void)
{
	long rc;
	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];

	INFO("RMM init start.\n");

	rc = rmmd_rmm_sync_entry(ctx);
	if (rc != E_RMM_BOOT_SUCCESS) {
		ERROR("RMM init failed: %ld\n", rc);
		/* Mark the boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return 0;
	}

	INFO("RMM init end.\n");

	return 1;
}

/*******************************************************************************
 * Load and read RMM manifest, set up RMM.
 ******************************************************************************/
int rmmd_setup(void)
{
	size_t shared_buf_size __unused;
	uintptr_t shared_buf_base;
	uint32_t ep_attr;
	unsigned int linear_id = plat_my_core_pos();
	rmmd_rmm_context_t *rmm_ctx = &rmm_context[linear_id];
	struct rmm_manifest *manifest;
	int rc;

	/* Make sure RME is supported. */
	if (is_feat_rme_present() == 0U) {
		/* Mark the RMM boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return -ENOTSUP;
	}

	rmm_ep_info = bl31_plat_get_next_image_ep_info(REALM);
	if ((rmm_ep_info == NULL) || (rmm_ep_info->pc == 0)) {
		WARN("No RMM image provided by BL2 boot loader. Booting "
		     "device without RMM initialization. SMCs destined for "
		     "RMM will return SMC_UNK\n");

		/* Mark the boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return -ENOENT;
	}

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = EP_REALM;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0U) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(rmm_ep_info, PARAM_EP, VERSION_1, ep_attr);
	rmm_ep_info->spsr = SPSR_64(MODE_EL2,
				    MODE_SP_ELX,
				    DISABLE_ALL_EXCEPTIONS);

	shared_buf_size =
		plat_rmmd_get_el3_rmm_shared_mem(&shared_buf_base);

	assert((shared_buf_size == SZ_4K) &&
	       ((void *)shared_buf_base != NULL));

	/* Zero out and load the boot manifest at the beginning of the shared area */
	manifest = (struct rmm_manifest *)shared_buf_base;
	(void)memset((void *)manifest, 0, sizeof(struct rmm_manifest));

	rc = plat_rmmd_load_manifest(manifest);
	if (rc != 0) {
		ERROR("Error loading RMM Boot Manifest (%i)\n", rc);
		/* Mark the boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return rc;
	}
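	/*
	 * Clean the manifest out of the data cache so that it is visible in
	 * memory before the shared area is handed over to the RMM (which may
	 * access it before its own MMU and caches are enabled).
	 */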
	flush_dcache_range((uintptr_t)shared_buf_base, shared_buf_size);

	/*
	 * Prepare coldboot arguments for RMM:
	 * arg0: This CPUID (primary processor).
	 * arg1: Version for this Boot Interface.
	 * arg2: PLATFORM_CORE_COUNT.
	 * arg3: Base address for the EL3 <-> RMM shared area. The boot
	 *       manifest will be stored at the beginning of this area.
	 * arg4: Opaque activation token, as returned by previous calls.
	 */
	rmm_ep_info->args.arg0 = linear_id;
	rmm_ep_info->args.arg1 = RMM_EL3_INTERFACE_VERSION;
	rmm_ep_info->args.arg2 = PLATFORM_CORE_COUNT;
	rmm_ep_info->args.arg3 = shared_buf_base;
	rmm_ep_info->args.arg4 = rmm_ctx->activation_token;

	/* Initialise RMM context with this entry point information */
	cm_setup_context(&rmm_ctx->cpu_ctx, rmm_ep_info);

	INFO("RMM setup done.\n");

	/* Register init function for deferred init. */
	bl31_register_rmm_init(&rmm_init);

	return 0;
}

/*******************************************************************************
 * Forward SMC to the other security state
 ******************************************************************************/
static uint64_t rmmd_smc_forward(uint32_t src_sec_state,
				 uint32_t dst_sec_state, uint64_t x0,
				 uint64_t x1, uint64_t x2, uint64_t x3,
				 uint64_t x4, void *handle)
{
	cpu_context_t *ctx = cm_get_context(dst_sec_state);

	/* Save incoming security state */
	cm_el2_sysregs_context_save(src_sec_state);

	/* Restore outgoing security state */
	cm_el2_sysregs_context_restore(dst_sec_state);
	cm_set_next_eret_context(dst_sec_state);

	/*
	 * As per SMCCCv1.2, we need to preserve x4 to x7 unless
	 * being used as return args. Hence we differentiate the
	 * onward and backward path. Support up to 8 args in the
	 * onward path and 4 args in the return path.
	 * Register x4 will be preserved by RMM in case it is not
	 * used in the return path.
	 */
	if (src_sec_state == NON_SECURE) {
		SMC_RET8(ctx, x0, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));
	}

	SMC_RET5(ctx, x0, x1, x2, x3, x4);
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for RMI. Each call is
 * either forwarded to the other security state or handled by the RMM
 * dispatcher.
 ******************************************************************************/
uint64_t rmmd_rmi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
			  uint64_t x3, uint64_t x4, void *cookie,
			  void *handle, uint64_t flags)
{
	uint32_t src_sec_state;

	/* If RMM failed to boot, treat any RMI SMC as unknown */
	if (rmm_boot_failed) {
		WARN("RMMD: Failed to boot up RMM. Ignoring RMI call\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/* Determine which security state this SMC originated from */
	src_sec_state = caller_sec_state(flags);

	/* RMI must not be invoked by the Secure world */
	if (src_sec_state == SMC_FROM_SECURE) {
		WARN("RMMD: RMI invoked by secure world.\n");
		SMC_RET1(handle, SMC_UNK);
	}
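	/*
	 * Typical RMI round trip, simplified:
	 *   1. Normal world issues an RMI SMC, forwarded below to the RMM.
	 *   2. The RMM services the request and issues RMM_RMI_REQ_COMPLETE.
	 *   3. EL3 forwards the result back to the Normal world caller.
	 */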
	/*
	 * Forward an RMI call from the Normal world to the Realm world as it
	 * is.
	 */
	if (src_sec_state == SMC_FROM_NON_SECURE) {
		/*
		 * If the SVE hint bit is set in the flags then update the SMC
		 * function id and pass it on to the lower EL.
		 */
		if (is_sve_hint_set(flags)) {
			smc_fid |= (FUNCID_SVE_HINT_MASK <<
				    FUNCID_SVE_HINT_SHIFT);
		}
		VERBOSE("RMMD: RMI call from non-secure world.\n");
		return rmmd_smc_forward(NON_SECURE, REALM, smc_fid,
					x1, x2, x3, x4, handle);
	}

	if (src_sec_state != SMC_FROM_REALM) {
		SMC_RET1(handle, SMC_UNK);
	}

	switch (smc_fid) {
	case RMM_RMI_REQ_COMPLETE: {
		uint64_t x5 = SMC_GET_GP(handle, CTX_GPREG_X5);

		return rmmd_smc_forward(REALM, NON_SECURE, x1,
					x2, x3, x4, x5, handle);
	}
	default:
		WARN("RMMD: Unsupported RMM call 0x%08x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}

/*******************************************************************************
 * This CPU has been turned on. Enter RMM to initialise R-EL2. Entry into RMM
 * is done after initialising minimal architectural state that guarantees safe
 * execution.
 ******************************************************************************/
static void *rmmd_cpu_on_finish_handler(const void *arg)
{
	long rc;
	uint32_t linear_id = plat_my_core_pos();
	rmmd_rmm_context_t *ctx = &rmm_context[linear_id];

	if (rmm_boot_failed) {
		/* RMM boot failed on a previous CPU. Abort. */
		ERROR("RMM Failed to initialize. Ignoring for CPU%d\n",
		      linear_id);
		return NULL;
	}

	/*
	 * Prepare warmboot arguments for RMM:
	 * arg0: This CPUID.
	 * arg1: Opaque activation token, as returned by previous calls.
	 * arg2 to arg3: Not used.
	 */
	rmm_ep_info->args.arg0 = linear_id;
	rmm_ep_info->args.arg1 = ctx->activation_token;
	rmm_ep_info->args.arg2 = 0ULL;
	rmm_ep_info->args.arg3 = 0ULL;

	/* Initialise RMM context with this entry point information */
	cm_setup_context(&ctx->cpu_ctx, rmm_ep_info);

	rc = rmmd_rmm_sync_entry(ctx);

	if (rc != E_RMM_BOOT_SUCCESS) {
		ERROR("RMM init failed on CPU%d: %ld\n", linear_id, rc);
		/*
		 * TODO: Investigate handling of rmm_boot_failed under
		 * concurrent access, or explore alternative approaches
		 * to fix up the logic.
		 */
		rmm_boot_failed = true;
	}

	return NULL;
}

/* Subscribe to PSCI CPU on to initialize RMM on secondary */
SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, rmmd_cpu_on_finish_handler);

/* Convert GPT lib error to RMMD GTS error */
static int gpt_to_gts_error(int error, uint32_t smc_fid, uint64_t address)
{
	int ret;

	if (error == 0) {
		return E_RMM_OK;
	}

	if (error == -EINVAL) {
		ret = E_RMM_BAD_ADDR;
	} else {
		/* This is the only other error code we expect */
		assert(error == -EPERM);
		ret = E_RMM_BAD_PAS;
	}

	ERROR("RMMD: PAS Transition failed. GPT ret = %d, PA: 0x%"PRIx64 ", FID = 0x%x\n",
	      error, address, smc_fid);
	return ret;
}

static int rmm_el3_ifc_get_feat_register(uint64_t feat_reg_idx,
					 uint64_t *feat_reg)
{
	if (feat_reg_idx != RMM_EL3_FEAT_REG_0_IDX) {
		ERROR("RMMD: Failed to get feature register %ld\n", feat_reg_idx);
		return E_RMM_INVAL;
	}

	*feat_reg = 0UL;
#if RMMD_ENABLE_EL3_TOKEN_SIGN
	*feat_reg |= RMM_EL3_FEAT_REG_0_EL3_TOKEN_SIGN_MASK;
#endif
	return E_RMM_OK;
}

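/*
 * Layout of the RMM_MEC_REFRESH x1 argument, as implied by the MECID_SHIFT/
 * MECID_MASK and MEC_REFRESH_REASON_SHIFT/MASK definitions at the top of
 * this file:
 *
 *   bits [47:32] : MECID whose key is to be refreshed
 *   bit  [0]     : refresh reason, passed through to the platform hook
 *
 * An illustrative (not normative) caller-side encoding would be:
 *
 *   x1 = ((uint64_t)mecid << MECID_SHIFT) |
 *        ((uint64_t)reason << MEC_REFRESH_REASON_SHIFT);
 */
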
/*
 * Update the encryption key associated with the MECID encoded in x1.
 */
static int rmmd_mecid_key_update(uint64_t x1)
{
	uint64_t mecid_width, mecid_width_mask;
	uint16_t mecid;
	unsigned int reason;
	int ret;

	/*
	 * Check whether FEAT_MEC is supported by the hardware. If not, return
	 * unknown SMC.
	 */
	if (is_feat_mec_supported() == false) {
		return E_RMM_UNK;
	}

	/*
	 * Check that the MECID parameter fits within
	 * MECIDR_EL2.MECIDWidthm1 + 1 bits.
	 */
	mecid_width = ((read_mecidr_el2() >> MECIDR_EL2_MECIDWidthm1_SHIFT) &
		       MECIDR_EL2_MECIDWidthm1_MASK) + 1UL;
	mecid_width_mask = ((1UL << mecid_width) - 1UL);

	mecid = (x1 >> MECID_SHIFT) & MECID_MASK;
	if ((mecid & ~mecid_width_mask) != 0U) {
		return E_RMM_INVAL;
	}

	reason = (x1 >> MEC_REFRESH_REASON_SHIFT) & MEC_REFRESH_REASON_MASK;
	ret = plat_rmmd_mecid_key_update(mecid, reason);

	if (ret != 0) {
		return E_RMM_UNK;
	}
	return E_RMM_OK;
}

/*******************************************************************************
 * This function handles RMM-EL3 interface SMCs
 ******************************************************************************/
uint64_t rmmd_rmm_el3_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
			      uint64_t x3, uint64_t x4, void *cookie,
			      void *handle, uint64_t flags)
{
	uint64_t remaining_len = 0UL;
	uint32_t src_sec_state;
	int ret;

	/* If RMM failed to boot, treat any RMM-EL3 interface SMC as unknown */
	if (rmm_boot_failed) {
		WARN("RMMD: Failed to boot up RMM. Ignoring RMM-EL3 call\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/* Determine which security state this SMC originated from */
	src_sec_state = caller_sec_state(flags);

	if (src_sec_state != SMC_FROM_REALM) {
		WARN("RMMD: RMM-EL3 call originated from secure or normal world\n");
		SMC_RET1(handle, SMC_UNK);
	}

	switch (smc_fid) {
	case RMM_GTSI_DELEGATE:
		ret = gpt_delegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
		SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
	case RMM_GTSI_UNDELEGATE:
		ret = gpt_undelegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
		SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
	case RMM_ATTEST_GET_REALM_KEY:
		ret = rmmd_attest_get_signing_key(x1, &x2, x3);
		SMC_RET2(handle, ret, x2);
	case RMM_ATTEST_GET_PLAT_TOKEN:
		ret = rmmd_attest_get_platform_token(x1, &x2, x3, &remaining_len);
		SMC_RET3(handle, ret, x2, remaining_len);
	case RMM_EL3_FEATURES:
		ret = rmm_el3_ifc_get_feat_register(x1, &x2);
		SMC_RET2(handle, ret, x2);
#if RMMD_ENABLE_EL3_TOKEN_SIGN
	case RMM_EL3_TOKEN_SIGN:
		return rmmd_el3_token_sign(handle, x1, x2, x3, x4);
#endif

#if RMMD_ENABLE_IDE_KEY_PROG
	case RMM_IDE_KEY_PROG:
	{
		rp_ide_key_info_t ide_key_info;

		ide_key_info.keyqw0 = x4;
		ide_key_info.keyqw1 = SMC_GET_GP(handle, CTX_GPREG_X5);
		ide_key_info.keyqw2 = SMC_GET_GP(handle, CTX_GPREG_X6);
		ide_key_info.keyqw3 = SMC_GET_GP(handle, CTX_GPREG_X7);
		ide_key_info.ifvqw0 = SMC_GET_GP(handle, CTX_GPREG_X8);
		ide_key_info.ifvqw1 = SMC_GET_GP(handle, CTX_GPREG_X9);
		uint64_t x10 = SMC_GET_GP(handle, CTX_GPREG_X10);
		uint64_t x11 = SMC_GET_GP(handle, CTX_GPREG_X11);

		ret = rmmd_el3_ide_key_program(x1, x2, x3, &ide_key_info, x10, x11);
		SMC_RET1(handle, ret);
	}
	case RMM_IDE_KEY_SET_GO:
		ret = rmmd_el3_ide_key_set_go(x1, x2, x3, x4, SMC_GET_GP(handle, CTX_GPREG_X5));
		SMC_RET1(handle, ret);
	case RMM_IDE_KEY_SET_STOP:
		ret = rmmd_el3_ide_key_set_stop(x1, x2, x3, x4, SMC_GET_GP(handle, CTX_GPREG_X5));
		SMC_RET1(handle, ret);
	case RMM_IDE_KM_PULL_RESPONSE: {
		uint64_t req_resp = 0, req_id = 0, cookie_var = 0;

		ret = rmmd_el3_ide_km_pull_response(x1, x2, &req_resp, &req_id, &cookie_var);
		SMC_RET4(handle, ret, req_resp, req_id, cookie_var);
	}
#endif /* RMMD_ENABLE_IDE_KEY_PROG */
	case RMM_RESERVE_MEMORY:
		ret = rmmd_reserve_memory(x1, &x2);
		SMC_RET2(handle, ret, x2);

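	/*
	 * RMM_BOOT_COMPLETE terminates the synchronous boot entry started in
	 * rmm_init() or rmmd_cpu_on_finish_handler(): x1 carries the boot
	 * status and x2 an opaque activation token. The token is saved in
	 * the per-CPU RMM context and handed back to the RMM on later
	 * cold/warm boot entries (args.arg4 and args.arg1 respectively).
	 */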
	case RMM_BOOT_COMPLETE:
	{
		rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];

		ctx->activation_token = x2;
		VERBOSE("RMMD: running rmmd_rmm_sync_exit\n");
		rmmd_rmm_sync_exit(x1);
	}
	case RMM_MEC_REFRESH:
		ret = rmmd_mecid_key_update(x1);
		SMC_RET1(handle, ret);
	default:
		WARN("RMMD: Unsupported RMM-EL3 call 0x%08x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}

/**
 * Helper to activate the primary CPU with the updated RMM, mainly used during
 * LFA of RMM.
 */
int rmmd_primary_activate(void)
{
	int rc;

	rc = rmmd_setup();
	if (rc != 0) {
		ERROR("rmmd_setup failed during LFA: %d\n", rc);
		return rc;
	}

	/* rmm_init() returns 1 on success and 0 on failure. */
	rc = rmm_init();
	if (rc != 1) {
		ERROR("rmm_init failed during LFA\n");
		return -1;
	}

	INFO("RMM warm reset done on primary during LFA.\n");

	return 0;
}

/**
 * Helper to activate a secondary CPU with the updated RMM, mainly used during
 * LFA of RMM.
 */
int rmmd_secondary_activate(void)
{
	rmmd_cpu_on_finish_handler(NULL);

	return 0;
}