/*
 * Copyright (c) 2021-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/el3_runtime/pubsub.h>
#include <lib/extensions/mpam.h>
#include <lib/extensions/pmuv3.h>
#include <lib/extensions/sys_reg_trace.h>
#include <lib/gpt_rme/gpt_rme.h>

#include <lib/spinlock.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/rmmd_svc.h>
#include <smccc_helpers.h>
#include <lib/extensions/sme.h>
#include <lib/extensions/sve.h>
#include <lib/extensions/spe.h>
#include <lib/extensions/trbe.h>
#include "rmmd_private.h"

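/*
 * Layout of the RMM_MEC_REFRESH argument in x1, as consumed by
 * rmmd_mecid_key_update() below: bits [47:32] carry the MECID and
 * bit [0] carries the refresh reason.
 */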
#define MECID_SHIFT			U(32)
#define MECID_MASK			0xFFFFU

#define MEC_REFRESH_REASON_SHIFT	U(0)
#define MEC_REFRESH_REASON_MASK		BIT(0)

/*******************************************************************************
 * RMM boot failure flag
 ******************************************************************************/
static bool rmm_boot_failed;

/*******************************************************************************
 * RMM context information.
 ******************************************************************************/
rmmd_rmm_context_t rmm_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * RMM entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *rmm_ep_info;

/*******************************************************************************
 * Static function declaration.
 ******************************************************************************/
static int32_t rmm_init(void);

/*******************************************************************************
 * This function takes an RMM context pointer and performs a synchronous entry
 * into it.
 ******************************************************************************/
uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *rmm_ctx)
{
	uint64_t rc;

	assert(rmm_ctx != NULL);

	cm_set_context(&(rmm_ctx->cpu_ctx), REALM);

	/* Restore the realm context assigned above */
	cm_el2_sysregs_context_restore(REALM);
	cm_set_next_eret_context(REALM);

	/* Enter RMM */
	rc = rmmd_rmm_enter(&rmm_ctx->c_rt_ctx);

	/*
	 * Save realm context. EL2 Non-secure context will be restored
	 * before exiting Non-secure world, therefore there is no need
	 * to clear EL2 context registers.
	 */
	cm_el2_sysregs_context_save(REALM);

	return rc;
}

/*******************************************************************************
 * This function returns to the place where rmmd_rmm_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void rmmd_rmm_sync_exit(uint64_t rc)
{
	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];

	/* Get context of the RMM in use by this CPU. */
	assert(cm_get_context(REALM) == &(ctx->cpu_ctx));

	/*
	 * The RMMD must have initiated the original request through a
	 * synchronous entry into RMM. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	rmmd_rmm_exit(ctx->c_rt_ctx, rc);

	panic();
}

/*******************************************************************************
 * Jump to the RMM for the first time.
 ******************************************************************************/
static int32_t rmm_init(void)
{
	long rc;
	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];

	INFO("RMM init start.\n");

	rc = rmmd_rmm_sync_entry(ctx);
	if (rc != E_RMM_BOOT_SUCCESS) {
		ERROR("RMM init failed: %ld\n", rc);
		/* Mark the boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return 0;
	}

	INFO("RMM init end.\n");

	return 1;
}

/*******************************************************************************
 * Load and read RMM manifest, setup RMM.
 ******************************************************************************/
int rmmd_setup(void)
{
	size_t shared_buf_size __unused;
	uintptr_t shared_buf_base;
	uint32_t ep_attr;
	unsigned int linear_id = plat_my_core_pos();
	rmmd_rmm_context_t *rmm_ctx = &rmm_context[linear_id];
	struct rmm_manifest *manifest;
	int rc;

	/* Make sure RME is supported. */
	if (is_feat_rme_present() == 0U) {
		/* Mark the RMM boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return -ENOTSUP;
	}

	rmm_ep_info = bl31_plat_get_next_image_ep_info(REALM);
	if ((rmm_ep_info == NULL) || (rmm_ep_info->pc == 0)) {
		WARN("No RMM image provided by BL2 boot loader. Booting "
		     "device without RMM initialization. SMCs destined for "
		     "RMM will return SMC_UNK\n");

		/* Mark the boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return -ENOENT;
	}

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = EP_REALM;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0U) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(rmm_ep_info, PARAM_EP, VERSION_1, ep_attr);
	rmm_ep_info->spsr = SPSR_64(MODE_EL2,
				    MODE_SP_ELX,
				    DISABLE_ALL_EXCEPTIONS);

	shared_buf_size =
		plat_rmmd_get_el3_rmm_shared_mem(&shared_buf_base);

	assert((shared_buf_size == SZ_4K) &&
	       ((void *)shared_buf_base != NULL));

	/* Zero out and load the boot manifest at the beginning of the shared area */
	manifest = (struct rmm_manifest *)shared_buf_base;
	(void)memset((void *)manifest, 0, sizeof(struct rmm_manifest));

	rc = plat_rmmd_load_manifest(manifest);
	if (rc != 0) {
		ERROR("Error loading RMM Boot Manifest (%i)\n", rc);
		/* Mark the boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return rc;
	}
	flush_dcache_range((uintptr_t)shared_buf_base, shared_buf_size);

	/*
	 * Prepare coldboot arguments for RMM:
	 * arg0: This CPUID (primary processor).
	 * arg1: Version for this Boot Interface.
	 * arg2: PLATFORM_CORE_COUNT.
	 * arg3: Base address for the EL3 <-> RMM shared area. The boot
	 *       manifest will be stored at the beginning of this area.
	 * arg4: Opaque activation token, as returned by previous calls.
	 */
	rmm_ep_info->args.arg0 = linear_id;
	rmm_ep_info->args.arg1 = RMM_EL3_INTERFACE_VERSION;
	rmm_ep_info->args.arg2 = PLATFORM_CORE_COUNT;
	rmm_ep_info->args.arg3 = shared_buf_base;
	rmm_ep_info->args.arg4 = rmm_ctx->activation_token;

	/* Initialise RMM context with this entry point information */
	cm_setup_context(&rmm_ctx->cpu_ctx, rmm_ep_info);

	INFO("RMM setup done.\n");

	/* Register init function for deferred init. */
	bl31_register_rmm_init(&rmm_init);

	return 0;
}

/*******************************************************************************
 * Forward SMC to the other security state
 ******************************************************************************/
static uint64_t rmmd_smc_forward(uint32_t src_sec_state,
				 uint32_t dst_sec_state, uint64_t x0,
				 uint64_t x1, uint64_t x2, uint64_t x3,
				 uint64_t x4, void *handle)
{
	cpu_context_t *ctx = cm_get_context(dst_sec_state);

	/* Save incoming security state */
	cm_el2_sysregs_context_save(src_sec_state);

	/* Restore outgoing security state */
	cm_el2_sysregs_context_restore(dst_sec_state);
	cm_set_next_eret_context(dst_sec_state);

	/*
	 * As per SMCCCv1.2, we need to preserve x4 to x7 unless they are
	 * being used as return args. Hence we differentiate the onward and
	 * backward paths: support up to 8 args on the onward path and
	 * 4 args on the return path.
	 * Register x4 will be preserved by RMM in case it is not used in
	 * the return path.
	 */
	if (src_sec_state == NON_SECURE) {
		SMC_RET8(ctx, x0, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));
	}

	SMC_RET5(ctx, x0, x1, x2, x3, x4);
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for RMI. Each call is
 * either forwarded to the other security state or handled by the RMM
 * dispatcher.
 ******************************************************************************/
uint64_t rmmd_rmi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
			  uint64_t x3, uint64_t x4, void *cookie,
			  void *handle, uint64_t flags)
{
	uint32_t src_sec_state;

	/* If RMM failed to boot, treat any RMI SMC as unknown */
	if (rmm_boot_failed) {
		WARN("RMMD: Failed to boot up RMM. Ignoring RMI call\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/* Determine which security state this SMC originated from */
	src_sec_state = caller_sec_state(flags);

	/* RMI must not be invoked by the Secure world */
	if (src_sec_state == SMC_FROM_SECURE) {
		WARN("RMMD: RMI invoked by secure world.\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/*
	 * Forward an RMI call from the Normal world to the Realm world as it
	 * is.
	 */
	if (src_sec_state == SMC_FROM_NON_SECURE) {
		/*
		 * If the SVE hint bit is set in the flags then update the SMC
		 * function id and pass it on to the lower EL.
		 */
		if (is_sve_hint_set(flags)) {
			smc_fid |= (FUNCID_SVE_HINT_MASK <<
				    FUNCID_SVE_HINT_SHIFT);
		}
		VERBOSE("RMMD: RMI call from non-secure world.\n");
		return rmmd_smc_forward(NON_SECURE, REALM, smc_fid,
					x1, x2, x3, x4, handle);
	}

	if (src_sec_state != SMC_FROM_REALM) {
		SMC_RET1(handle, SMC_UNK);
	}

	switch (smc_fid) {
	case RMM_RMI_REQ_COMPLETE: {
		uint64_t x5 = SMC_GET_GP(handle, CTX_GPREG_X5);

		return rmmd_smc_forward(REALM, NON_SECURE, x1,
					x2, x3, x4, x5, handle);
	}
	default:
		WARN("RMMD: Unsupported RMM call 0x%08x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}

/*******************************************************************************
 * This CPU has been turned on. Enter RMM to initialise R-EL2. Entry into RMM
 * is done after initialising minimal architectural state that guarantees safe
 * execution.
 ******************************************************************************/
static void *rmmd_cpu_on_finish_handler(const void *arg)
{
	long rc;
	uint32_t linear_id = plat_my_core_pos();
	rmmd_rmm_context_t *ctx = &rmm_context[linear_id];

	if (rmm_boot_failed) {
		/* RMM boot failed on a previous CPU. Abort. */
		ERROR("RMM failed to initialize. Ignoring for CPU%d\n",
		      linear_id);
		return NULL;
	}

	/*
	 * Prepare warmboot arguments for RMM:
	 * arg0: This CPUID.
	 * arg1: Opaque activation token, as returned by previous calls.
	 * arg2 to arg3: Not used.
	 */
	rmm_ep_info->args.arg0 = linear_id;
	rmm_ep_info->args.arg1 = ctx->activation_token;
	rmm_ep_info->args.arg2 = 0ULL;
	rmm_ep_info->args.arg3 = 0ULL;

	/* Initialise RMM context with this entry point information */
	cm_setup_context(&ctx->cpu_ctx, rmm_ep_info);

	rc = rmmd_rmm_sync_entry(ctx);

	if (rc != E_RMM_BOOT_SUCCESS) {
		ERROR("RMM init failed on CPU%d: %ld\n", linear_id, rc);
		/* Mark the boot as failed for any other booting CPU */
		rmm_boot_failed = true;
	}

	return NULL;
}

/* Subscribe to the PSCI CPU_ON event to initialize RMM on secondary cores */
SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, rmmd_cpu_on_finish_handler);

/* Convert GPT lib error to RMMD GTS error */
static int gpt_to_gts_error(int error, uint32_t smc_fid, uint64_t address)
{
	int ret;

	if (error == 0) {
		return E_RMM_OK;
	}

	if (error == -EINVAL) {
		ret = E_RMM_BAD_ADDR;
	} else {
		/* This is the only other error code we expect */
		assert(error == -EPERM);
		ret = E_RMM_BAD_PAS;
	}

	ERROR("RMMD: PAS Transition failed. GPT ret = %d, PA: 0x%" PRIx64 ", FID = 0x%x\n",
	      error, address, smc_fid);
	return ret;
}

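/*
 * Return the requested RMM-EL3 interface feature register in *feat_reg.
 * Only feature register index 0 is supported here; the EL3 token signing
 * capability bit is advertised when RMMD_ENABLE_EL3_TOKEN_SIGN is enabled.
 */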
static int rmm_el3_ifc_get_feat_register(uint64_t feat_reg_idx,
					 uint64_t *feat_reg)
{
	if (feat_reg_idx != RMM_EL3_FEAT_REG_0_IDX) {
		ERROR("RMMD: Failed to get feature register %" PRIu64 "\n",
		      feat_reg_idx);
		return E_RMM_INVAL;
	}

	*feat_reg = 0UL;
#if RMMD_ENABLE_EL3_TOKEN_SIGN
	*feat_reg |= RMM_EL3_FEAT_REG_0_EL3_TOKEN_SIGN_MASK;
#endif
	return E_RMM_OK;
}

/*
 * Update the encryption key associated with the MECID included in x1.
 */
static int rmmd_mecid_key_update(uint64_t x1)
{
	uint64_t mecid_width, mecid_width_mask;
	uint16_t mecid;
	unsigned int reason;
	int ret;

	/*
	 * Check whether FEAT_MEC is supported by the hardware. If not, return
	 * unknown SMC.
	 */
	if (is_feat_mec_supported() == false) {
		return E_RMM_UNK;
	}

	/*
	 * Check whether the mecid parameter is at most
	 * MECIDR_EL2.MECIDWidthm1 + 1 bits in length.
	 */
	mecid_width = ((read_mecidr_el2() >> MECIDR_EL2_MECIDWidthm1_SHIFT) &
		       MECIDR_EL2_MECIDWidthm1_MASK) + 1UL;
	mecid_width_mask = ((1UL << mecid_width) - 1UL);

	mecid = (x1 >> MECID_SHIFT) & MECID_MASK;
	if ((mecid & ~mecid_width_mask) != 0U) {
		return E_RMM_INVAL;
	}

	reason = (x1 >> MEC_REFRESH_REASON_SHIFT) & MEC_REFRESH_REASON_MASK;
	ret = plat_rmmd_mecid_key_update(mecid, reason);

	if (ret != 0) {
		return E_RMM_UNK;
	}
	return E_RMM_OK;
}

/*******************************************************************************
 * This function handles RMM-EL3 interface SMCs
 ******************************************************************************/
uint64_t rmmd_rmm_el3_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
			      uint64_t x3, uint64_t x4, void *cookie,
			      void *handle, uint64_t flags)
{
	uint64_t remaining_len = 0UL;
	uint32_t src_sec_state;
	int ret;

	/* If RMM failed to boot, treat any RMM-EL3 interface SMC as unknown */
	if (rmm_boot_failed) {
		WARN("RMMD: Failed to boot up RMM. Ignoring RMM-EL3 call\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/* Determine which security state this SMC originated from */
	src_sec_state = caller_sec_state(flags);

	if (src_sec_state != SMC_FROM_REALM) {
		WARN("RMMD: RMM-EL3 call originated from secure or normal world\n");
		SMC_RET1(handle, SMC_UNK);
	}

	switch (smc_fid) {
	case RMM_GTSI_DELEGATE:
		ret = gpt_delegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
		SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
	case RMM_GTSI_UNDELEGATE:
		ret = gpt_undelegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
		SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
	case RMM_ATTEST_GET_REALM_KEY:
		ret = rmmd_attest_get_signing_key(x1, &x2, x3);
		SMC_RET2(handle, ret, x2);
	case RMM_ATTEST_GET_PLAT_TOKEN:
		ret = rmmd_attest_get_platform_token(x1, &x2, x3, &remaining_len);
		SMC_RET3(handle, ret, x2, remaining_len);
	case RMM_EL3_FEATURES:
		ret = rmm_el3_ifc_get_feat_register(x1, &x2);
		SMC_RET2(handle, ret, x2);
#if RMMD_ENABLE_EL3_TOKEN_SIGN
	case RMM_EL3_TOKEN_SIGN:
		return rmmd_el3_token_sign(handle, x1, x2, x3, x4);
#endif

#if RMMD_ENABLE_IDE_KEY_PROG
	case RMM_IDE_KEY_PROG:
	{
		rp_ide_key_info_t ide_key_info;

		ide_key_info.keyqw0 = x4;
		ide_key_info.keyqw1 = SMC_GET_GP(handle, CTX_GPREG_X5);
		ide_key_info.keyqw2 = SMC_GET_GP(handle, CTX_GPREG_X6);
		ide_key_info.keyqw3 = SMC_GET_GP(handle, CTX_GPREG_X7);
		ide_key_info.ifvqw0 = SMC_GET_GP(handle, CTX_GPREG_X8);
		ide_key_info.ifvqw1 = SMC_GET_GP(handle, CTX_GPREG_X9);
		uint64_t x10 = SMC_GET_GP(handle, CTX_GPREG_X10);
		uint64_t x11 = SMC_GET_GP(handle, CTX_GPREG_X11);

		ret = rmmd_el3_ide_key_program(x1, x2, x3, &ide_key_info, x10, x11);
		SMC_RET1(handle, ret);
	}
	case RMM_IDE_KEY_SET_GO:
		ret = rmmd_el3_ide_key_set_go(x1, x2, x3, x4,
					      SMC_GET_GP(handle, CTX_GPREG_X5));
		SMC_RET1(handle, ret);
	case RMM_IDE_KEY_SET_STOP:
		ret = rmmd_el3_ide_key_set_stop(x1, x2, x3, x4,
						SMC_GET_GP(handle, CTX_GPREG_X5));
		SMC_RET1(handle, ret);
	case RMM_IDE_KM_PULL_RESPONSE: {
		uint64_t req_resp = 0, req_id = 0, cookie_var = 0;

		ret = rmmd_el3_ide_km_pull_response(x1, x2, &req_resp,
						    &req_id, &cookie_var);
		SMC_RET4(handle, ret, req_resp, req_id, cookie_var);
	}
#endif /* RMMD_ENABLE_IDE_KEY_PROG */
	case RMM_RESERVE_MEMORY:
		ret = rmmd_reserve_memory(x1, &x2);
		SMC_RET2(handle, ret, x2);

	case RMM_BOOT_COMPLETE:
	{
		rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];

		ctx->activation_token = x2;
		VERBOSE("RMMD: running rmmd_rmm_sync_exit\n");
		rmmd_rmm_sync_exit(x1);
	}
	case RMM_MEC_REFRESH:
		ret = rmmd_mecid_key_update(x1);
		SMC_RET1(handle, ret);
	default:
		WARN("RMMD: Unsupported RMM-EL3 call 0x%08x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}