1 /* 2 * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved. 3 * 4 * SPDX-License-Identifier: BSD-3-Clause 5 */ 6 7 #include <assert.h> 8 #include <errno.h> 9 #include <inttypes.h> 10 #include <stdint.h> 11 #include <string.h> 12 13 #include <arch_helpers.h> 14 #include <arch_features.h> 15 #include <bl31/bl31.h> 16 #include <common/debug.h> 17 #include <common/runtime_svc.h> 18 #include <context.h> 19 #include <lib/el3_runtime/context_mgmt.h> 20 #include <lib/el3_runtime/pubsub.h> 21 #include <lib/gpt_rme/gpt_rme.h> 22 23 #include <lib/spinlock.h> 24 #include <lib/utils.h> 25 #include <lib/xlat_tables/xlat_tables_v2.h> 26 #include <plat/common/common_def.h> 27 #include <plat/common/platform.h> 28 #include <platform_def.h> 29 #include <services/gtsi_svc.h> 30 #include <services/rmi_svc.h> 31 #include <services/rmmd_svc.h> 32 #include <smccc_helpers.h> 33 #include <lib/extensions/sve.h> 34 #include "rmmd_initial_context.h" 35 #include "rmmd_private.h" 36 37 /******************************************************************************* 38 * RMM context information. 39 ******************************************************************************/ 40 rmmd_rmm_context_t rmm_context[PLATFORM_CORE_COUNT]; 41 42 /******************************************************************************* 43 * RMM entry point information. Discovered on the primary core and reused 44 * on secondary cores. 45 ******************************************************************************/ 46 static entry_point_info_t *rmm_ep_info; 47 48 /******************************************************************************* 49 * Static function declaration. 50 ******************************************************************************/ 51 static int32_t rmm_init(void); 52 53 /******************************************************************************* 54 * This function takes an RMM context pointer and performs a synchronous entry 55 * into it. 
 ******************************************************************************/
uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *rmm_ctx)
{
	uint64_t rc;

	assert(rmm_ctx != NULL);

	/* Make this CPU's Realm-world context the one entered on ERET. */
	cm_set_context(&(rmm_ctx->cpu_ctx), REALM);

	/* Save the current el1/el2 context before loading realm context. */
	cm_el1_sysregs_context_save(NON_SECURE);
	cm_el2_sysregs_context_save(NON_SECURE);

	/* Restore the realm context assigned above */
	cm_el1_sysregs_context_restore(REALM);
	cm_el2_sysregs_context_restore(REALM);
	cm_set_next_eret_context(REALM);

	/*
	 * Enter RMM. Control comes back here when the RMM calls
	 * rmmd_rmm_sync_exit(); rc is the value it passed.
	 */
	rc = rmmd_rmm_enter(&rmm_ctx->c_rt_ctx);

	/* Save realm context */
	cm_el1_sysregs_context_save(REALM);
	cm_el2_sysregs_context_save(REALM);

	/* Restore the el1/el2 context again. */
	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_el2_sysregs_context_restore(NON_SECURE);

	return rc;
}

/*******************************************************************************
 * This function returns to the place where rmmd_rmm_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void rmmd_rmm_sync_exit(uint64_t rc)
{
	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];

	/* Get context of the RMM in use by this CPU. */
	assert(cm_get_context(REALM) == &(ctx->cpu_ctx));

	/*
	 * The RMMD must have initiated the original request through a
	 * synchronous entry into RMM. Jump back to the original C runtime
	 * context with the value of rc in x0;
	 */
	rmmd_rmm_exit(ctx->c_rt_ctx, rc);

	/* rmmd_rmm_exit() never returns; reaching here is a fatal error. */
	panic();
}

/*
 * Seed the minimal EL2 system register state for the first entry into RMM:
 * the reset SPSR_EL2 and SCTLR_EL2 values.
 * NOTE(review): ">> 3" converts the CTX_* byte offsets into indices of a
 * 64-bit register array — assumes ctx_regs elements are 8 bytes; confirm
 * against the el2_sysregs_t definition.
 */
static void rmm_el2_context_init(el2_sysregs_t *regs)
{
	regs->ctx_regs[CTX_SPSR_EL2 >> 3] = REALM_SPSR_EL2;
	regs->ctx_regs[CTX_SCTLR_EL2 >> 3] = SCTLR_EL2_RES1;
}

/*******************************************************************************
 * Enable architecture extensions on first entry to Realm world.
 ******************************************************************************/
static void manage_extensions_realm(cpu_context_t *ctx)
{
#if ENABLE_SVE_FOR_NS
	/*
	 * Enable SVE and FPU in realm context when it is enabled for NS.
	 * Realm manager must ensure that the SVE and FPU register
	 * contexts are properly managed.
	 */
	sve_enable(ctx);
#else
	/*
	 * Disable SVE and FPU in realm context when it is disabled for NS.
	 */
	sve_disable(ctx);
#endif /* ENABLE_SVE_FOR_NS */
}

/*******************************************************************************
 * Jump to the RMM for the first time.
 * Returns 1 on success; never returns on failure (panics).
 ******************************************************************************/
static int32_t rmm_init(void)
{

	uint64_t rc;

	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];

	INFO("RMM init start.\n");
	/* RESET state makes the RMI handler treat the first RMM SMC as
	 * boot completion rather than a runtime request. */
	ctx->state = RMM_STATE_RESET;

	/* Enable architecture extensions */
	manage_extensions_realm(&ctx->cpu_ctx);

	/* Initialize RMM EL2 context. */
	rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx);

	/* First synchronous entry; rc is the value RMM reports back. */
	rc = rmmd_rmm_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("RMM initialisation failed 0x%" PRIx64 "\n", rc);
		panic();
	}

	ctx->state = RMM_STATE_IDLE;
	INFO("RMM init end.\n");

	return 1;
}

/*******************************************************************************
 * Load and read RMM manifest, setup RMM.
 ******************************************************************************/
int rmmd_setup(void)
{
	uint32_t ep_attr;
	unsigned int linear_id = plat_my_core_pos();
	rmmd_rmm_context_t *rmm_ctx = &rmm_context[linear_id];

	/* Make sure RME is supported. */
	assert(get_armv9_2_feat_rme_support() != 0U);

	rmm_ep_info = bl31_plat_get_next_image_ep_info(REALM);
	if (rmm_ep_info == NULL) {
		WARN("No RMM image provided by BL2 boot loader, Booting "
		     "device without RMM initialization. SMCs destined for "
		     "RMM will return SMC_UNK\n");
		return -ENOENT;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(rmm_ep_info->pc == RMM_BASE);

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = EP_REALM;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0U) {
		/* EL3 runs big-endian, so the RMM entry point must too. */
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(rmm_ep_info, PARAM_EP, VERSION_1, ep_attr);
	/* RMM starts at EL2, using SP_EL2, with all exceptions masked. */
	rmm_ep_info->spsr = SPSR_64(MODE_EL2,
				    MODE_SP_ELX,
				    DISABLE_ALL_EXCEPTIONS);

	/* Initialise RMM context with this entry point information */
	cm_setup_context(&rmm_ctx->cpu_ctx, rmm_ep_info);

	INFO("RMM setup done.\n");

	/* Register init function for deferred init (run later by BL31). */
	bl31_register_rmm_init(&rmm_init);

	return 0;
}

/*******************************************************************************
 * Forward SMC to the other security state.
 * x0..x4 are taken from the parameters; on the NS->Realm path x5-x7 are
 * copied from the caller's saved GP registers in 'handle'.
 ******************************************************************************/
static uint64_t rmmd_smc_forward(uint32_t src_sec_state,
				 uint32_t dst_sec_state, uint64_t x0,
				 uint64_t x1, uint64_t x2, uint64_t x3,
				 uint64_t x4, void *handle)
{
	/* Save incoming security state */
	cm_el1_sysregs_context_save(src_sec_state);
	cm_el2_sysregs_context_save(src_sec_state);

	/* Restore outgoing security state */
	cm_el1_sysregs_context_restore(dst_sec_state);
	cm_el2_sysregs_context_restore(dst_sec_state);
	cm_set_next_eret_context(dst_sec_state);

	/*
	 * As per SMCCCv1.1, we need to preserve x4 to x7 unless
	 * being used as return args. Hence we differentiate the
	 * onward and backward path. Support up to 8 args in the
	 * onward path and 4 args in return path.
	 */
	if (src_sec_state == NON_SECURE) {
		SMC_RET8(cm_get_context(dst_sec_state), x0, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));
	} else {
		SMC_RET4(cm_get_context(dst_sec_state), x0, x1, x2, x3);
	}
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for RMI.
 * Each call is either forwarded to the other security state or handled by the
 * RMM dispatcher.
 ******************************************************************************/
uint64_t rmmd_rmi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
			  uint64_t x3, uint64_t x4, void *cookie,
			  void *handle, uint64_t flags)
{
	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];
	uint32_t src_sec_state;

	/* Determine which security state this SMC originated from */
	src_sec_state = caller_sec_state(flags);

	/* RMI must not be invoked by the Secure world */
	if (src_sec_state == SMC_FROM_SECURE) {
		WARN("RMM: RMI invoked by secure world.\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/*
	 * Forward an RMI call from the Normal world to the Realm world as it
	 * is.
	 */
	if (src_sec_state == SMC_FROM_NON_SECURE) {
		VERBOSE("RMM: RMI call from non-secure world.\n");
		return rmmd_smc_forward(NON_SECURE, REALM, smc_fid,
					x1, x2, x3, x4, handle);
	}

	assert(src_sec_state == SMC_FROM_REALM);

	switch (smc_fid) {
	case RMI_RMM_REQ_COMPLETE:
		/*
		 * During boot (RMM_STATE_RESET) this SMC completes the
		 * pending synchronous entry instead of forwarding to NS;
		 * rmmd_rmm_sync_exit() does not return.
		 */
		if (ctx->state == RMM_STATE_RESET) {
			VERBOSE("RMM: running rmmd_rmm_sync_exit\n");
			rmmd_rmm_sync_exit(x1);
		}

		/*
		 * Arguments shift down by one on the return path: x1 becomes
		 * the forwarded x0, and the x4 slot (0) is unused since the
		 * backward path only returns x0-x3.
		 */
		return rmmd_smc_forward(REALM, NON_SECURE, x1,
					x2, x3, x4, 0, handle);

	default:
		WARN("RMM: Unsupported RMM call 0x%08x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}

/*******************************************************************************
 * This cpu has been turned on. Enter RMM to initialise R-EL2. Entry into RMM
 * is done after initialising minimal architectural state that guarantees safe
 * execution.
296 ******************************************************************************/ 297 static void *rmmd_cpu_on_finish_handler(const void *arg) 298 { 299 int32_t rc; 300 uint32_t linear_id = plat_my_core_pos(); 301 rmmd_rmm_context_t *ctx = &rmm_context[linear_id]; 302 303 ctx->state = RMM_STATE_RESET; 304 305 /* Initialise RMM context with this entry point information */ 306 cm_setup_context(&ctx->cpu_ctx, rmm_ep_info); 307 308 /* Enable architecture extensions */ 309 manage_extensions_realm(&ctx->cpu_ctx); 310 311 /* Initialize RMM EL2 context. */ 312 rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx); 313 314 rc = rmmd_rmm_sync_entry(ctx); 315 if (rc != 0) { 316 ERROR("RMM initialisation failed (%d) on CPU%d\n", rc, 317 linear_id); 318 panic(); 319 } 320 321 ctx->state = RMM_STATE_IDLE; 322 return NULL; 323 } 324 325 /* Subscribe to PSCI CPU on to initialize RMM on secondary */ 326 SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, rmmd_cpu_on_finish_handler); 327 328 static int gtsi_transition_granule(uint64_t pa, 329 unsigned int src_sec_state, 330 unsigned int target_pas) 331 { 332 int ret; 333 334 ret = gpt_transition_pas(pa, PAGE_SIZE_4KB, src_sec_state, target_pas); 335 336 /* Convert TF-A error codes into GTSI error codes */ 337 if (ret == -EINVAL) { 338 ERROR("[GTSI] Transition failed: invalid %s\n", "address"); 339 ERROR(" PA: 0x%" PRIx64 ", SRC: %d, PAS: %d\n", pa, 340 src_sec_state, target_pas); 341 ret = GRAN_TRANS_RET_BAD_ADDR; 342 } else if (ret == -EPERM) { 343 ERROR("[GTSI] Transition failed: invalid %s\n", "caller/PAS"); 344 ERROR(" PA: 0x%" PRIx64 ", SRC: %d, PAS: %d\n", pa, 345 src_sec_state, target_pas); 346 ret = GRAN_TRANS_RET_BAD_PAS; 347 } 348 349 return ret; 350 } 351 352 /******************************************************************************* 353 * This function handles all SMCs in the range reserved for GTF. 
354 ******************************************************************************/ 355 uint64_t rmmd_gtsi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, 356 uint64_t x3, uint64_t x4, void *cookie, 357 void *handle, uint64_t flags) 358 { 359 uint32_t src_sec_state; 360 361 /* Determine which security state this SMC originated from */ 362 src_sec_state = caller_sec_state(flags); 363 364 if (src_sec_state != SMC_FROM_REALM) { 365 WARN("RMM: GTF call originated from secure or normal world\n"); 366 SMC_RET1(handle, SMC_UNK); 367 } 368 369 switch (smc_fid) { 370 case SMC_ASC_MARK_REALM: 371 SMC_RET1(handle, gtsi_transition_granule(x1, SMC_FROM_REALM, 372 GPT_GPI_REALM)); 373 case SMC_ASC_MARK_NONSECURE: 374 SMC_RET1(handle, gtsi_transition_granule(x1, SMC_FROM_REALM, 375 GPT_GPI_NS)); 376 default: 377 WARN("RMM: Unsupported GTF call 0x%08x\n", smc_fid); 378 SMC_RET1(handle, SMC_UNK); 379 } 380 } 381