1 /* 2 * Copyright (c) 2021-2024, Arm Limited and Contributors. All rights reserved. 3 * 4 * SPDX-License-Identifier: BSD-3-Clause 5 */ 6 7 #include <assert.h> 8 #include <errno.h> 9 #include <inttypes.h> 10 #include <stdint.h> 11 #include <string.h> 12 13 #include <arch_helpers.h> 14 #include <arch_features.h> 15 #include <bl31/bl31.h> 16 #include <common/debug.h> 17 #include <common/runtime_svc.h> 18 #include <context.h> 19 #include <lib/el3_runtime/context_mgmt.h> 20 #include <lib/el3_runtime/cpu_data.h> 21 #include <lib/el3_runtime/pubsub.h> 22 #include <lib/extensions/pmuv3.h> 23 #include <lib/extensions/sys_reg_trace.h> 24 #include <lib/gpt_rme/gpt_rme.h> 25 26 #include <lib/spinlock.h> 27 #include <lib/utils.h> 28 #include <lib/xlat_tables/xlat_tables_v2.h> 29 #include <plat/common/common_def.h> 30 #include <plat/common/platform.h> 31 #include <platform_def.h> 32 #include <services/rmmd_svc.h> 33 #include <smccc_helpers.h> 34 #include <lib/extensions/sme.h> 35 #include <lib/extensions/sve.h> 36 #include <lib/extensions/spe.h> 37 #include <lib/extensions/trbe.h> 38 #include "rmmd_initial_context.h" 39 #include "rmmd_private.h" 40 41 /******************************************************************************* 42 * RMM boot failure flag 43 ******************************************************************************/ 44 static bool rmm_boot_failed; 45 46 /******************************************************************************* 47 * RMM context information. 48 ******************************************************************************/ 49 rmmd_rmm_context_t rmm_context[PLATFORM_CORE_COUNT]; 50 51 /******************************************************************************* 52 * RMM entry point information. Discovered on the primary core and reused 53 * on secondary cores. 54 ******************************************************************************/ 55 static entry_point_info_t *rmm_ep_info; 56 57 /******************************************************************************* 58 * Static function declaration. 59 ******************************************************************************/ 60 static int32_t rmm_init(void); 61 62 /******************************************************************************* 63 * This function takes an RMM context pointer and performs a synchronous entry 64 * into it. 65 ******************************************************************************/ 66 uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *rmm_ctx) 67 { 68 uint64_t rc; 69 70 assert(rmm_ctx != NULL); 71 72 cm_set_context(&(rmm_ctx->cpu_ctx), REALM); 73 74 /* Restore the realm context assigned above */ 75 cm_el2_sysregs_context_restore(REALM); 76 cm_set_next_eret_context(REALM); 77 78 /* Enter RMM */ 79 rc = rmmd_rmm_enter(&rmm_ctx->c_rt_ctx); 80 81 /* 82 * Save realm context. EL2 Non-secure context will be restored 83 * before exiting Non-secure world, therefore there is no need 84 * to clear EL2 context registers. 85 */ 86 cm_el2_sysregs_context_save(REALM); 87 88 return rc; 89 } 90 91 /******************************************************************************* 92 * This function returns to the place where rmmd_rmm_sync_entry() was 93 * called originally. 94 ******************************************************************************/ 95 __dead2 void rmmd_rmm_sync_exit(uint64_t rc) 96 { 97 rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()]; 98 99 /* Get context of the RMM in use by this CPU. */ 100 assert(cm_get_context(REALM) == &(ctx->cpu_ctx)); 101 102 /* 103 * The RMMD must have initiated the original request through a 104 * synchronous entry into RMM. Jump back to the original C runtime 105 * context with the value of rc in x0; 106 */ 107 rmmd_rmm_exit(ctx->c_rt_ctx, rc); 108 109 panic(); 110 } 111 112 static void rmm_el2_context_init(el2_sysregs_t *regs) 113 { 114 write_el2_ctx_common(regs, spsr_el2, REALM_SPSR_EL2); 115 write_el2_ctx_common(regs, sctlr_el2, SCTLR_EL2_RES1); 116 } 117 118 /******************************************************************************* 119 * Enable architecture extensions on first entry to Realm world. 120 ******************************************************************************/ 121 122 static void manage_extensions_realm(cpu_context_t *ctx) 123 { 124 /* 125 * Enable access to TPIDR2_EL0 if SME/SME2 is enabled for Non Secure world. 126 */ 127 if (is_feat_sme_supported()) { 128 sme_enable(ctx); 129 } 130 131 /* 132 * SPE and TRBE cannot be fully disabled from EL3 registers alone, only 133 * sysreg access can. In case the EL1 controls leave them active on 134 * context switch, we want the owning security state to be NS so Realm 135 * can't be DOSed. 136 */ 137 if (is_feat_spe_supported()) { 138 spe_disable(ctx); 139 } 140 141 if (is_feat_trbe_supported()) { 142 trbe_disable(ctx); 143 } 144 } 145 146 static void manage_extensions_realm_per_world(void) 147 { 148 cm_el3_arch_init_per_world(&per_world_context[CPU_CONTEXT_REALM]); 149 150 if (is_feat_sve_supported()) { 151 /* 152 * Enable SVE and FPU in realm context when it is enabled for NS. 153 * Realm manager must ensure that the SVE and FPU register 154 * contexts are properly managed. 155 */ 156 sve_enable_per_world(&per_world_context[CPU_CONTEXT_REALM]); 157 } 158 159 /* NS can access this but Realm shouldn't */ 160 if (is_feat_sys_reg_trace_supported()) { 161 sys_reg_trace_disable_per_world(&per_world_context[CPU_CONTEXT_REALM]); 162 } 163 164 /* 165 * If SME/SME2 is supported and enabled for NS world, then disable trapping 166 * of SME instructions for Realm world. RMM will save/restore required 167 * registers that are shared with SVE/FPU so that Realm can use FPU or SVE. 168 */ 169 if (is_feat_sme_supported()) { 170 sme_enable_per_world(&per_world_context[CPU_CONTEXT_REALM]); 171 } 172 } 173 174 /******************************************************************************* 175 * Jump to the RMM for the first time. 176 ******************************************************************************/ 177 static int32_t rmm_init(void) 178 { 179 long rc; 180 rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()]; 181 182 INFO("RMM init start.\n"); 183 184 /* Enable architecture extensions */ 185 manage_extensions_realm(&ctx->cpu_ctx); 186 187 manage_extensions_realm_per_world(); 188 189 /* Initialize RMM EL2 context. */ 190 rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx); 191 192 rc = rmmd_rmm_sync_entry(ctx); 193 if (rc != E_RMM_BOOT_SUCCESS) { 194 ERROR("RMM init failed: %ld\n", rc); 195 /* Mark the boot as failed for all the CPUs */ 196 rmm_boot_failed = true; 197 return 0; 198 } 199 200 INFO("RMM init end.\n"); 201 202 return 1; 203 } 204 205 /******************************************************************************* 206 * Load and read RMM manifest, setup RMM. 207 ******************************************************************************/ 208 int rmmd_setup(void) 209 { 210 size_t shared_buf_size __unused; 211 uintptr_t shared_buf_base; 212 uint32_t ep_attr; 213 unsigned int linear_id = plat_my_core_pos(); 214 rmmd_rmm_context_t *rmm_ctx = &rmm_context[linear_id]; 215 struct rmm_manifest *manifest; 216 int rc; 217 218 /* Make sure RME is supported. */ 219 if (is_feat_rme_present() == 0U) { 220 /* Mark the RMM boot as failed for all the CPUs */ 221 rmm_boot_failed = true; 222 return -ENOTSUP; 223 } 224 225 rmm_ep_info = bl31_plat_get_next_image_ep_info(REALM); 226 if ((rmm_ep_info == NULL) || (rmm_ep_info->pc == 0)) { 227 WARN("No RMM image provided by BL2 boot loader, Booting " 228 "device without RMM initialization. SMCs destined for " 229 "RMM will return SMC_UNK\n"); 230 231 /* Mark the boot as failed for all the CPUs */ 232 rmm_boot_failed = true; 233 return -ENOENT; 234 } 235 236 /* Initialise an entrypoint to set up the CPU context */ 237 ep_attr = EP_REALM; 238 if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0U) { 239 ep_attr |= EP_EE_BIG; 240 } 241 242 SET_PARAM_HEAD(rmm_ep_info, PARAM_EP, VERSION_1, ep_attr); 243 rmm_ep_info->spsr = SPSR_64(MODE_EL2, 244 MODE_SP_ELX, 245 DISABLE_ALL_EXCEPTIONS); 246 247 shared_buf_size = 248 plat_rmmd_get_el3_rmm_shared_mem(&shared_buf_base); 249 250 assert((shared_buf_size == SZ_4K) && 251 ((void *)shared_buf_base != NULL)); 252 253 /* Zero out and load the boot manifest at the beginning of the share area */ 254 manifest = (struct rmm_manifest *)shared_buf_base; 255 (void)memset((void *)manifest, 0, sizeof(struct rmm_manifest)); 256 257 rc = plat_rmmd_load_manifest(manifest); 258 if (rc != 0) { 259 ERROR("Error loading RMM Boot Manifest (%i)\n", rc); 260 /* Mark the boot as failed for all the CPUs */ 261 rmm_boot_failed = true; 262 return rc; 263 } 264 flush_dcache_range((uintptr_t)shared_buf_base, shared_buf_size); 265 266 /* 267 * Prepare coldboot arguments for RMM: 268 * arg0: This CPUID (primary processor). 269 * arg1: Version for this Boot Interface. 270 * arg2: PLATFORM_CORE_COUNT. 271 * arg3: Base address for the EL3 <-> RMM shared area. The boot 272 * manifest will be stored at the beginning of this area. 273 */ 274 rmm_ep_info->args.arg0 = linear_id; 275 rmm_ep_info->args.arg1 = RMM_EL3_INTERFACE_VERSION; 276 rmm_ep_info->args.arg2 = PLATFORM_CORE_COUNT; 277 rmm_ep_info->args.arg3 = shared_buf_base; 278 279 /* Initialise RMM context with this entry point information */ 280 cm_setup_context(&rmm_ctx->cpu_ctx, rmm_ep_info); 281 282 INFO("RMM setup done.\n"); 283 284 /* Register init function for deferred init. */ 285 bl31_register_rmm_init(&rmm_init); 286 287 return 0; 288 } 289 290 /******************************************************************************* 291 * Forward SMC to the other security state 292 ******************************************************************************/ 293 static uint64_t rmmd_smc_forward(uint32_t src_sec_state, 294 uint32_t dst_sec_state, uint64_t x0, 295 uint64_t x1, uint64_t x2, uint64_t x3, 296 uint64_t x4, void *handle) 297 { 298 cpu_context_t *ctx = cm_get_context(dst_sec_state); 299 300 /* Save incoming security state */ 301 cm_el2_sysregs_context_save(src_sec_state); 302 303 /* Restore outgoing security state */ 304 cm_el2_sysregs_context_restore(dst_sec_state); 305 cm_set_next_eret_context(dst_sec_state); 306 307 /* 308 * As per SMCCCv1.2, we need to preserve x4 to x7 unless 309 * being used as return args. Hence we differentiate the 310 * onward and backward path. Support upto 8 args in the 311 * onward path and 4 args in return path. 312 * Register x4 will be preserved by RMM in case it is not 313 * used in return path. 314 */ 315 if (src_sec_state == NON_SECURE) { 316 SMC_RET8(ctx, x0, x1, x2, x3, x4, 317 SMC_GET_GP(handle, CTX_GPREG_X5), 318 SMC_GET_GP(handle, CTX_GPREG_X6), 319 SMC_GET_GP(handle, CTX_GPREG_X7)); 320 } 321 322 SMC_RET5(ctx, x0, x1, x2, x3, x4); 323 } 324 325 /******************************************************************************* 326 * This function handles all SMCs in the range reserved for RMI. Each call is 327 * either forwarded to the other security state or handled by the RMM dispatcher 328 ******************************************************************************/ 329 uint64_t rmmd_rmi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, 330 uint64_t x3, uint64_t x4, void *cookie, 331 void *handle, uint64_t flags) 332 { 333 uint32_t src_sec_state; 334 335 /* If RMM failed to boot, treat any RMI SMC as unknown */ 336 if (rmm_boot_failed) { 337 WARN("RMMD: Failed to boot up RMM. Ignoring RMI call\n"); 338 SMC_RET1(handle, SMC_UNK); 339 } 340 341 /* Determine which security state this SMC originated from */ 342 src_sec_state = caller_sec_state(flags); 343 344 /* RMI must not be invoked by the Secure world */ 345 if (src_sec_state == SMC_FROM_SECURE) { 346 WARN("RMMD: RMI invoked by secure world.\n"); 347 SMC_RET1(handle, SMC_UNK); 348 } 349 350 /* 351 * Forward an RMI call from the Normal world to the Realm world as it 352 * is. 353 */ 354 if (src_sec_state == SMC_FROM_NON_SECURE) { 355 /* 356 * If SVE hint bit is set in the flags then update the SMC 357 * function id and pass it on to the lower EL. 358 */ 359 if (is_sve_hint_set(flags)) { 360 smc_fid |= (FUNCID_SVE_HINT_MASK << 361 FUNCID_SVE_HINT_SHIFT); 362 } 363 VERBOSE("RMMD: RMI call from non-secure world.\n"); 364 return rmmd_smc_forward(NON_SECURE, REALM, smc_fid, 365 x1, x2, x3, x4, handle); 366 } 367 368 if (src_sec_state != SMC_FROM_REALM) { 369 SMC_RET1(handle, SMC_UNK); 370 } 371 372 switch (smc_fid) { 373 case RMM_RMI_REQ_COMPLETE: { 374 uint64_t x5 = SMC_GET_GP(handle, CTX_GPREG_X5); 375 376 return rmmd_smc_forward(REALM, NON_SECURE, x1, 377 x2, x3, x4, x5, handle); 378 } 379 default: 380 WARN("RMMD: Unsupported RMM call 0x%08x\n", smc_fid); 381 SMC_RET1(handle, SMC_UNK); 382 } 383 } 384 385 /******************************************************************************* 386 * This cpu has been turned on. Enter RMM to initialise R-EL2. Entry into RMM 387 * is done after initialising minimal architectural state that guarantees safe 388 * execution. 389 ******************************************************************************/ 390 static void *rmmd_cpu_on_finish_handler(const void *arg) 391 { 392 long rc; 393 uint32_t linear_id = plat_my_core_pos(); 394 rmmd_rmm_context_t *ctx = &rmm_context[linear_id]; 395 396 if (rmm_boot_failed) { 397 /* RMM Boot failed on a previous CPU. Abort. */ 398 ERROR("RMM Failed to initialize. Ignoring for CPU%d\n", 399 linear_id); 400 return NULL; 401 } 402 403 /* 404 * Prepare warmboot arguments for RMM: 405 * arg0: This CPUID. 406 * arg1 to arg3: Not used. 407 */ 408 rmm_ep_info->args.arg0 = linear_id; 409 rmm_ep_info->args.arg1 = 0ULL; 410 rmm_ep_info->args.arg2 = 0ULL; 411 rmm_ep_info->args.arg3 = 0ULL; 412 413 /* Initialise RMM context with this entry point information */ 414 cm_setup_context(&ctx->cpu_ctx, rmm_ep_info); 415 416 /* Enable architecture extensions */ 417 manage_extensions_realm(&ctx->cpu_ctx); 418 419 /* Initialize RMM EL2 context. */ 420 rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx); 421 422 rc = rmmd_rmm_sync_entry(ctx); 423 424 if (rc != E_RMM_BOOT_SUCCESS) { 425 ERROR("RMM init failed on CPU%d: %ld\n", linear_id, rc); 426 /* Mark the boot as failed for any other booting CPU */ 427 rmm_boot_failed = true; 428 } 429 430 return NULL; 431 } 432 433 /* Subscribe to PSCI CPU on to initialize RMM on secondary */ 434 SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, rmmd_cpu_on_finish_handler); 435 436 /* Convert GPT lib error to RMMD GTS error */ 437 static int gpt_to_gts_error(int error, uint32_t smc_fid, uint64_t address) 438 { 439 int ret; 440 441 if (error == 0) { 442 return E_RMM_OK; 443 } 444 445 if (error == -EINVAL) { 446 ret = E_RMM_BAD_ADDR; 447 } else { 448 /* This is the only other error code we expect */ 449 assert(error == -EPERM); 450 ret = E_RMM_BAD_PAS; 451 } 452 453 ERROR("RMMD: PAS Transition failed. GPT ret = %d, PA: 0x%"PRIx64 ", FID = 0x%x\n", 454 error, address, smc_fid); 455 return ret; 456 } 457 458 static int rmm_el3_ifc_get_feat_register(uint64_t feat_reg_idx, 459 uint64_t *feat_reg) 460 { 461 if (feat_reg_idx != RMM_EL3_FEAT_REG_0_IDX) { 462 ERROR("RMMD: Failed to get feature register %ld\n", feat_reg_idx); 463 return E_RMM_INVAL; 464 } 465 466 *feat_reg = 0UL; 467 #if RMMD_ENABLE_EL3_TOKEN_SIGN 468 *feat_reg |= RMM_EL3_FEAT_REG_0_EL3_TOKEN_SIGN_MASK; 469 #endif 470 return E_RMM_OK; 471 } 472 473 /******************************************************************************* 474 * This function handles RMM-EL3 interface SMCs 475 ******************************************************************************/ 476 uint64_t rmmd_rmm_el3_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, 477 uint64_t x3, uint64_t x4, void *cookie, 478 void *handle, uint64_t flags) 479 { 480 uint64_t remaining_len = 0UL; 481 uint32_t src_sec_state; 482 int ret; 483 484 /* If RMM failed to boot, treat any RMM-EL3 interface SMC as unknown */ 485 if (rmm_boot_failed) { 486 WARN("RMMD: Failed to boot up RMM. Ignoring RMM-EL3 call\n"); 487 SMC_RET1(handle, SMC_UNK); 488 } 489 490 /* Determine which security state this SMC originated from */ 491 src_sec_state = caller_sec_state(flags); 492 493 if (src_sec_state != SMC_FROM_REALM) { 494 WARN("RMMD: RMM-EL3 call originated from secure or normal world\n"); 495 SMC_RET1(handle, SMC_UNK); 496 } 497 498 switch (smc_fid) { 499 case RMM_GTSI_DELEGATE: 500 ret = gpt_delegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM); 501 SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1)); 502 case RMM_GTSI_UNDELEGATE: 503 ret = gpt_undelegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM); 504 SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1)); 505 case RMM_ATTEST_GET_PLAT_TOKEN: 506 ret = rmmd_attest_get_platform_token(x1, &x2, x3, &remaining_len); 507 SMC_RET3(handle, ret, x2, remaining_len); 508 case RMM_ATTEST_GET_REALM_KEY: 509 ret = rmmd_attest_get_signing_key(x1, &x2, x3); 510 SMC_RET2(handle, ret, x2); 511 case RMM_EL3_FEATURES: 512 ret = rmm_el3_ifc_get_feat_register(x1, &x2); 513 SMC_RET2(handle, ret, x2); 514 #if RMMD_ENABLE_EL3_TOKEN_SIGN 515 case RMM_EL3_TOKEN_SIGN: 516 return rmmd_el3_token_sign(handle, x1, x2, x3, x4); 517 #endif 518 case RMM_BOOT_COMPLETE: 519 VERBOSE("RMMD: running rmmd_rmm_sync_exit\n"); 520 rmmd_rmm_sync_exit(x1); 521 522 default: 523 WARN("RMMD: Unsupported RMM-EL3 call 0x%08x\n", smc_fid); 524 SMC_RET1(handle, SMC_UNK); 525 } 526 } 527