/*
 * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <string.h>

#include <arch_helpers.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/spci_svc.h>
#include <services/spmd_svc.h>
#include <smccc_helpers.h>
#include "spmd_private.h"

/*******************************************************************************
 * SPM Core context information.
 ******************************************************************************/
spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * SPM Core attribute information read from its manifest.
 ******************************************************************************/
static spmc_manifest_sect_attribute_t spmc_attrs;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;

/*******************************************************************************
 * Static function declarations.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *rd_base, size_t rd_size);
static uint64_t spmd_spci_error_return(void *handle, int error_code);
static uint64_t spmd_smc_forward(uint32_t smc_fid, bool secure_origin,
				 uint64_t x1, uint64_t x2, uint64_t x3,
				 uint64_t x4, void *handle);

/*******************************************************************************
 * This function takes an SP context pointer and performs a synchronous entry
 * into it.
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
	uint64_t rc;

	assert(spmc_ctx != NULL);

	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);

	/* Restore the context assigned above */
	cm_el1_sysregs_context_restore(SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(SECURE);
#endif
	cm_set_next_eret_context(SECURE);

	/* Enter SPMC */
	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

	/* Save secure state */
	cm_el1_sysregs_context_save(SECURE);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(SECURE);
#endif

	return rc;
}
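
/*
 * Note: spmd_spm_core_enter() and spmd_spm_core_exit() are a matched pair of
 * helpers implemented outside this file (assumed to be in assembly, as is
 * usual for such routines). The former saves the EL3 C runtime state
 * (callee-saved registers and stack pointer) in c_rt_ctx before ERET'ing into
 * the SPMC; the latter restores that state, which makes the
 * spmd_spm_core_enter() call above appear to return with the value passed to
 * spmd_spm_core_sync_exit() below.
 */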

/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * originally called.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = &spm_core_context[plat_my_core_pos()];

	/* Get context of the SP in use by this CPU */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into SPMC. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	panic();
}

/*******************************************************************************
 * Jump to the SPM core for the first time.
 ******************************************************************************/
static int32_t spmd_init(void)
{
	uint64_t rc = 0;
	spmd_spm_core_context_t *ctx = &spm_core_context[plat_my_core_pos()];

	INFO("SPM Core init start.\n");
	ctx->state = SPMC_STATE_RESET;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc) {
		ERROR("SPMC initialisation failed 0x%llx\n", rc);
		panic();
	}

	ctx->state = SPMC_STATE_IDLE;
	INFO("SPM Core init end.\n");

	return 1;
}

/*******************************************************************************
 * Load SPMC manifest, init SPMC.
 ******************************************************************************/
static int spmd_spmc_init(void *rd_base, size_t rd_size)
{
	int rc;
	uint32_t ep_attr;
	unsigned int linear_id = plat_my_core_pos();
	spmd_spm_core_context_t *spm_ctx = &spm_core_context[linear_id];

	/* Load the SPM core manifest */
	rc = plat_spm_core_manifest_load(&spmc_attrs, rd_base, rd_size);
	if (rc != 0) {
		WARN("No or invalid SPM core manifest image provided by BL2 "
		     "boot loader.\n");
		return 1;
	}

	/*
	 * Ensure that the SPM core version is compatible with the SPM
	 * dispatcher version.
	 */
	if ((spmc_attrs.major_version != SPCI_VERSION_MAJOR) ||
	    (spmc_attrs.minor_version > SPCI_VERSION_MINOR)) {
		WARN("Unsupported SPCI version (%x.%x) specified in SPM core "
		     "manifest image provided by BL2 boot loader.\n",
		     spmc_attrs.major_version, spmc_attrs.minor_version);
		return 1;
	}

	INFO("SPCI version (%x.%x).\n", spmc_attrs.major_version,
	     spmc_attrs.minor_version);

	INFO("SPM core run time EL%x.\n",
	     SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);
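
	/*
	 * The manifest's exec_state field selects the SPMC execution state:
	 * MODE_RW_64 for AArch64 or MODE_RW_32 for AArch32. Any other value
	 * is rejected below.
	 */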
	/* Validate the SPM core execution state */
	if ((spmc_attrs.exec_state != MODE_RW_64) &&
	    (spmc_attrs.exec_state != MODE_RW_32)) {
		WARN("Unsupported SPM core execution state %x specified in "
		     "manifest image provided by BL2 boot loader.\n",
		     spmc_attrs.exec_state);
		return 1;
	}

	INFO("SPM core execution state %x.\n", spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
	/* Ensure manifest has not requested AArch32 state in S-EL2 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		WARN("AArch32 state at S-EL2 is not supported.\n");
		return 1;
	}

	/*
	 * Check if S-EL2 is supported on this system if S-EL2
	 * is required for SPM.
	 */
	uint64_t sel2 = read_id_aa64pfr0_el1();

	sel2 >>= ID_AA64PFR0_SEL2_SHIFT;
	sel2 &= ID_AA64PFR0_SEL2_MASK;

	if (!sel2) {
		WARN("SPM core run time S-EL2 is not supported.\n");
		return 1;
	}
#endif /* SPMD_SPM_AT_SEL2 */

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if (read_sctlr_el3() & SCTLR_EE_BIT) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);
	assert(spmc_ep_info->pc == BL32_BASE);

	/*
	 * Populate SPSR for SPM core based upon validated parameters from the
	 * manifest.
	 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
						 SPSR_E_LITTLE,
						 DAIF_FIQ_BIT |
						 DAIF_IRQ_BIT |
						 DAIF_ABT_BIT);
	} else {
#if SPMD_SPM_AT_SEL2
		static const uint32_t runtime_el = MODE_EL2;
#else
		static const uint32_t runtime_el = MODE_EL1;
#endif
		spmc_ep_info->spsr = SPSR_64(runtime_el,
					     MODE_SP_ELX,
					     DISABLE_ALL_EXCEPTIONS);
	}

	/* Initialise SPM core context with this entry point information */
	cm_setup_context(&spm_ctx->cpu_ctx, spmc_ep_info);

	/* Reuse PSCI affinity states to mark this SPMC context as off */
	spm_ctx->state = AFF_STATE_OFF;

	INFO("SPM core setup done.\n");

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&spmd_init);

	return 0;
}

/*******************************************************************************
 * Initialize context of SPM core.
 ******************************************************************************/
int spmd_setup(void)
{
	int rc;
	void *rd_base;
	size_t rd_size;
	uintptr_t rd_base_align;
	uintptr_t rd_size_align;

	spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (!spmc_ep_info) {
		WARN("No SPM core image provided by BL2 boot loader. Booting "
		     "device without SP initialization. SMCs destined for the "
		     "SPM core will return SMC_UNK\n");
		return 1;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(spmc_ep_info->pc != 0U);

	/*
	 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
	 * be used as a manifest for the SPM core at the next lower EL/mode.
	 */
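	/*
	 * By the convention used here, arg0 carries the manifest's base
	 * address and arg2 its size (see how they are consumed just below);
	 * both must therefore be non-zero.
	 */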
	if (spmc_ep_info->args.arg0 == 0U || spmc_ep_info->args.arg2 == 0U) {
		ERROR("Invalid or absent SPM core manifest\n");
		panic();
	}

	/* Obtain whereabouts of SPM core manifest */
	rd_base = (void *) spmc_ep_info->args.arg0;
	rd_size = spmc_ep_info->args.arg2;

	rd_base_align = page_align((uintptr_t) rd_base, DOWN);
	rd_size_align = page_align((uintptr_t) rd_size, UP);

	/* Map the manifest in the SPMD translation regime first */
	VERBOSE("SPM core manifest base : 0x%lx\n", rd_base_align);
	VERBOSE("SPM core manifest size : 0x%lx\n", rd_size_align);
	rc = mmap_add_dynamic_region((unsigned long long) rd_base_align,
				     (uintptr_t) rd_base_align,
				     rd_size_align,
				     MT_RO_DATA);
	if (rc != 0) {
		ERROR("Error while mapping SPM core manifest (%d).\n", rc);
		panic();
	}

	/* Load manifest, init SPMC */
	rc = spmd_spmc_init(rd_base, rd_size);
	if (rc != 0) {
		int mmap_rc;

		WARN("Booting device without SPM initialization. "
		     "SPCI SMCs destined for the SPM core will return "
		     "ENOTSUPPORTED\n");

		mmap_rc = mmap_remove_dynamic_region(rd_base_align,
						     rd_size_align);
		if (mmap_rc != 0) {
			ERROR("Error while unmapping SPM core manifest (%d).\n",
			      mmap_rc);
			panic();
		}

		return rc;
	}

	return 0;
}

/*******************************************************************************
 * Forward SMC to the other security state
 ******************************************************************************/
static uint64_t spmd_smc_forward(uint32_t smc_fid, bool secure_origin,
				 uint64_t x1, uint64_t x2, uint64_t x3,
				 uint64_t x4, void *handle)
{
	uint32_t secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
	uint32_t secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;

	/* Save incoming security state */
	cm_el1_sysregs_context_save(secure_state_in);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(secure_state_in);
#endif

	/* Restore outgoing security state */
	cm_el1_sysregs_context_restore(secure_state_out);
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(secure_state_out);
#endif
	cm_set_next_eret_context(secure_state_out);

	SMC_RET8(cm_get_context(secure_state_out), smc_fid, x1, x2, x3, x4,
		 SMC_GET_GP(handle, CTX_GPREG_X5),
		 SMC_GET_GP(handle, CTX_GPREG_X6),
		 SMC_GET_GP(handle, CTX_GPREG_X7));
}

/*******************************************************************************
 * Return SPCI_ERROR with specified error code
 ******************************************************************************/
static uint64_t spmd_spci_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, SPCI_ERROR,
		 SPCI_TARGET_INFO_MBZ, error_code,
		 SPCI_PARAM_MBZ, SPCI_PARAM_MBZ, SPCI_PARAM_MBZ,
		 SPCI_PARAM_MBZ, SPCI_PARAM_MBZ);
}
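
/*
 * Note on the cold boot handshake handled below: during spmd_init(), the SPMD
 * enters the SPMC synchronously. The SPMC signals successful initialisation
 * with its first SPCI_MSG_WAIT, and failure with SPCI_ERROR; either one makes
 * the handler call spmd_spm_core_sync_exit() to resume the boot flow.
 */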

/*******************************************************************************
 * This function handles all SMCs in the range reserved for SPCI. Each call is
 * either forwarded to the other security state or handled by the SPM
 * dispatcher.
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
			  uint64_t x3, uint64_t x4, void *cookie, void *handle,
			  uint64_t flags)
{
	spmd_spm_core_context_t *ctx = &spm_core_context[plat_my_core_pos()];
	bool secure_origin;
	int32_t ret;

	/* Determine which security state this SMC originated from */
	secure_origin = is_caller_secure(flags);

	INFO("SPM: 0x%x, 0x%llx, 0x%llx, 0x%llx, 0x%llx, "
	     "0x%llx, 0x%llx, 0x%llx\n",
	     smc_fid, x1, x2, x3, x4, SMC_GET_GP(handle, CTX_GPREG_X5),
	     SMC_GET_GP(handle, CTX_GPREG_X6),
	     SMC_GET_GP(handle, CTX_GPREG_X7));

	switch (smc_fid) {
	case SPCI_ERROR:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU. If so, then indicate that the SPM core initialised
		 * unsuccessfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_RESET)) {
			spmd_spm_core_sync_exit(x2);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case SPCI_VERSION:
		/*
		 * TODO: As an optimization, the version information read from
		 * the SPM core manifest is returned by the SPM dispatcher. It
		 * might be a better idea to simply forward this call to the
		 * SPM core and wash our hands completely.
		 */
		ret = MAKE_SPCI_VERSION(spmc_attrs.major_version,
					spmc_attrs.minor_version);
		SMC_RET8(handle, SPCI_SUCCESS_SMC32, SPCI_TARGET_INFO_MBZ, ret,
			 SPCI_PARAM_MBZ, SPCI_PARAM_MBZ, SPCI_PARAM_MBZ,
			 SPCI_PARAM_MBZ, SPCI_PARAM_MBZ);
		break; /* not reached */

	case SPCI_FEATURES:
		/*
		 * This is an optional interface. Do the minimal checks and
		 * forward to the SPM core which will handle it if implemented.
		 */

		/*
		 * Check if x1 holds a valid SPCI fid. This is an
		 * optimization.
		 */
		if (!is_spci_fid(x1)) {
			return spmd_spci_error_return(handle,
						      SPCI_ERROR_NOT_SUPPORTED);
		}

		/* Forward the SMC from the Normal world to the SPM core */
		if (!secure_origin) {
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, handle);
		} else {
			/*
			 * Return success if the call was from the Secure
			 * world, i.e. all SPCI functions are supported. This
			 * is essentially a nop.
			 */
			SMC_RET8(handle, SPCI_SUCCESS_SMC32, x1, x2, x3, x4,
				 SMC_GET_GP(handle, CTX_GPREG_X5),
				 SMC_GET_GP(handle, CTX_GPREG_X6),
				 SMC_GET_GP(handle, CTX_GPREG_X7));
		}

		break; /* not reached */

	case SPCI_RX_RELEASE:
	case SPCI_RXTX_MAP_SMC32:
	case SPCI_RXTX_MAP_SMC64:
	case SPCI_RXTX_UNMAP:
	case SPCI_MSG_RUN:
		/* This interface must be invoked only by the Normal world */
		if (secure_origin) {
			return spmd_spci_error_return(handle,
						      SPCI_ERROR_NOT_SUPPORTED);
		}

		/* Fall through to forward the call to the other world */

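	/*
	 * Illustrative flow (not prescribed by this file): a Normal world
	 * caller issues SPCI_MSG_SEND_DIRECT_REQ with the sender/receiver
	 * endpoint IDs in x1; the SPMD forwards it to the SPM core, which
	 * runs the target SP and later returns SPCI_MSG_SEND_DIRECT_RESP
	 * through the same path.
	 */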
	case SPCI_PARTITION_INFO_GET:
	case SPCI_MSG_SEND:
	case SPCI_MSG_SEND_DIRECT_REQ_SMC32:
	case SPCI_MSG_SEND_DIRECT_REQ_SMC64:
	case SPCI_MSG_SEND_DIRECT_RESP_SMC32:
	case SPCI_MSG_SEND_DIRECT_RESP_SMC64:
	case SPCI_MEM_DONATE_SMC32:
	case SPCI_MEM_DONATE_SMC64:
	case SPCI_MEM_LEND_SMC32:
	case SPCI_MEM_LEND_SMC64:
	case SPCI_MEM_SHARE_SMC32:
	case SPCI_MEM_SHARE_SMC64:
	case SPCI_MEM_RETRIEVE_REQ_SMC32:
	case SPCI_MEM_RETRIEVE_REQ_SMC64:
	case SPCI_MEM_RETRIEVE_RESP:
	case SPCI_MEM_RELINQUISH:
	case SPCI_MEM_RECLAIM:
	case SPCI_SUCCESS_SMC32:
	case SPCI_SUCCESS_SMC64:
		/*
		 * TODO: Assume that no requests originate from EL3 at the
		 * moment. This will change if an SP service is required in
		 * response to secure interrupts targeted to EL3. Until then,
		 * simply forward the call to the Normal world.
		 */
		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	case SPCI_MSG_WAIT:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU from the Secure world. If so, then indicate that
		 * the SPM core initialised successfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_RESET)) {
			spmd_spm_core_sync_exit(0);
		}

		/* Fall through to forward the call to the other world */

	case SPCI_MSG_YIELD:
		/* This interface must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_spci_error_return(handle,
						      SPCI_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, handle);
		break; /* not reached */

	default:
		WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
		return spmd_spci_error_return(handle, SPCI_ERROR_NOT_SUPPORTED);
	}
}