/*
 * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/pubsub.h>
#include <lib/gpt_rme/gpt_rme.h>

#include <lib/spinlock.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/gtsi_svc.h>
#include <services/rmi_svc.h>
#include <services/rmmd_svc.h>
#include <smccc_helpers.h>
#include "rmmd_initial_context.h"
#include "rmmd_private.h"

/*******************************************************************************
 * RMM context information. One context per CPU; each entry is indexed by
 * plat_my_core_pos() throughout this file.
 ******************************************************************************/
rmmd_rmm_context_t rmm_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * RMM entry point information. Discovered on the primary core (in
 * rmmd_setup()) and reused on secondary cores when they are brought up.
 ******************************************************************************/
static entry_point_info_t *rmm_ep_info;

/*******************************************************************************
 * Static function declaration.
 ******************************************************************************/
static int32_t rmm_init(void);

/*******************************************************************************
 * This function takes an RMM context pointer and performs a synchronous entry
 * into it.
 ******************************************************************************/
uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *rmm_ctx)
{
	uint64_t rc;

	assert(rmm_ctx != NULL);

	/* Make the Realm context the one ERET will target on this CPU. */
	cm_set_context(&(rmm_ctx->cpu_ctx), REALM);

	/* Save the current el1/el2 context before loading realm context. */
	cm_el1_sysregs_context_save(NON_SECURE);
	cm_el2_sysregs_context_save(NON_SECURE);

	/* Restore the realm context assigned above */
	cm_el1_sysregs_context_restore(REALM);
	cm_el2_sysregs_context_restore(REALM);
	cm_set_next_eret_context(REALM);

	/*
	 * Enter RMM. Returns when the RMM performs the matching
	 * rmmd_rmm_sync_exit(); rc is the value passed to that exit.
	 */
	rc = rmmd_rmm_enter(&rmm_ctx->c_rt_ctx);

	/* Save realm context */
	cm_el1_sysregs_context_save(REALM);
	cm_el2_sysregs_context_save(REALM);

	/* Restore the el1/el2 context again. */
	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_el2_sysregs_context_restore(NON_SECURE);

	return rc;
}

/*******************************************************************************
 * This function returns to the place where rmmd_rmm_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void rmmd_rmm_sync_exit(uint64_t rc)
{
	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];

	/* Get context of the RMM in use by this CPU. */
	assert(cm_get_context(REALM) == &(ctx->cpu_ctx));

	/*
	 * The RMMD must have initiated the original request through a
	 * synchronous entry into RMM. Jump back to the original C runtime
	 * context with the value of rc in x0;
	 */
	rmmd_rmm_exit(ctx->c_rt_ctx, rc);

	/* rmmd_rmm_exit never returns; reaching here is a fatal error. */
	panic();
}

/*
 * Seed the minimal EL2 system-register state the RMM needs on first entry.
 * The ">> 3" converts the byte offsets (CTX_*) into uint64_t array indices.
 */
static void rmm_el2_context_init(el2_sysregs_t *regs)
{
	regs->ctx_regs[CTX_SPSR_EL2 >> 3] = REALM_SPSR_EL2;
	regs->ctx_regs[CTX_SCTLR_EL2 >> 3] = SCTLR_EL2_RES1;
}

/*******************************************************************************
 * Jump to the RMM for the first time.
116 ******************************************************************************/ 117 static int32_t rmm_init(void) 118 { 119 120 uint64_t rc; 121 122 rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()]; 123 124 INFO("RMM init start.\n"); 125 ctx->state = RMM_STATE_RESET; 126 127 /* Initialize RMM EL2 context. */ 128 rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx); 129 130 rc = rmmd_rmm_sync_entry(ctx); 131 if (rc != 0ULL) { 132 ERROR("RMM initialisation failed 0x%" PRIx64 "\n", rc); 133 panic(); 134 } 135 136 ctx->state = RMM_STATE_IDLE; 137 INFO("RMM init end.\n"); 138 139 return 1; 140 } 141 142 /******************************************************************************* 143 * Load and read RMM manifest, setup RMM. 144 ******************************************************************************/ 145 int rmmd_setup(void) 146 { 147 uint32_t ep_attr; 148 unsigned int linear_id = plat_my_core_pos(); 149 rmmd_rmm_context_t *rmm_ctx = &rmm_context[linear_id]; 150 151 /* Make sure RME is supported. */ 152 assert(get_armv9_2_feat_rme_support() != 0U); 153 154 rmm_ep_info = bl31_plat_get_next_image_ep_info(REALM); 155 if (rmm_ep_info == NULL) { 156 WARN("No RMM image provided by BL2 boot loader, Booting " 157 "device without RMM initialization. 
SMCs destined for " 158 "RMM will return SMC_UNK\n"); 159 return -ENOENT; 160 } 161 162 /* Under no circumstances will this parameter be 0 */ 163 assert(rmm_ep_info->pc == RMM_BASE); 164 165 /* Initialise an entrypoint to set up the CPU context */ 166 ep_attr = EP_REALM; 167 if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0U) { 168 ep_attr |= EP_EE_BIG; 169 } 170 171 SET_PARAM_HEAD(rmm_ep_info, PARAM_EP, VERSION_1, ep_attr); 172 rmm_ep_info->spsr = SPSR_64(MODE_EL2, 173 MODE_SP_ELX, 174 DISABLE_ALL_EXCEPTIONS); 175 176 /* Initialise RMM context with this entry point information */ 177 cm_setup_context(&rmm_ctx->cpu_ctx, rmm_ep_info); 178 179 INFO("RMM setup done.\n"); 180 181 /* Register init function for deferred init. */ 182 bl31_register_rmm_init(&rmm_init); 183 184 return 0; 185 } 186 187 /******************************************************************************* 188 * Forward SMC to the other security state 189 ******************************************************************************/ 190 static uint64_t rmmd_smc_forward(uint32_t src_sec_state, 191 uint32_t dst_sec_state, uint64_t x0, 192 uint64_t x1, uint64_t x2, uint64_t x3, 193 uint64_t x4, void *handle) 194 { 195 /* Save incoming security state */ 196 cm_el1_sysregs_context_save(src_sec_state); 197 cm_el2_sysregs_context_save(src_sec_state); 198 199 /* Restore outgoing security state */ 200 cm_el1_sysregs_context_restore(dst_sec_state); 201 cm_el2_sysregs_context_restore(dst_sec_state); 202 cm_set_next_eret_context(dst_sec_state); 203 204 /* 205 * As per SMCCCv1.1, we need to preserve x4 to x7 unless 206 * being used as return args. Hence we differentiate the 207 * onward and backward path. Support upto 8 args in the 208 * onward path and 4 args in return path. 
209 */ 210 if (src_sec_state == NON_SECURE) { 211 SMC_RET8(cm_get_context(dst_sec_state), x0, x1, x2, x3, x4, 212 SMC_GET_GP(handle, CTX_GPREG_X5), 213 SMC_GET_GP(handle, CTX_GPREG_X6), 214 SMC_GET_GP(handle, CTX_GPREG_X7)); 215 } else { 216 SMC_RET4(cm_get_context(dst_sec_state), x0, x1, x2, x3); 217 } 218 } 219 220 /******************************************************************************* 221 * This function handles all SMCs in the range reserved for RMI. Each call is 222 * either forwarded to the other security state or handled by the RMM dispatcher 223 ******************************************************************************/ 224 uint64_t rmmd_rmi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, 225 uint64_t x3, uint64_t x4, void *cookie, 226 void *handle, uint64_t flags) 227 { 228 rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()]; 229 uint32_t src_sec_state; 230 231 /* Determine which security state this SMC originated from */ 232 src_sec_state = caller_sec_state(flags); 233 234 /* RMI must not be invoked by the Secure world */ 235 if (src_sec_state == SMC_FROM_SECURE) { 236 WARN("RMM: RMI invoked by secure world.\n"); 237 SMC_RET1(handle, SMC_UNK); 238 } 239 240 /* 241 * Forward an RMI call from the Normal world to the Realm world as it 242 * is. 
243 */ 244 if (src_sec_state == SMC_FROM_NON_SECURE) { 245 VERBOSE("RMM: RMI call from non-secure world.\n"); 246 return rmmd_smc_forward(NON_SECURE, REALM, smc_fid, 247 x1, x2, x3, x4, handle); 248 } 249 250 assert(src_sec_state == SMC_FROM_REALM); 251 252 switch (smc_fid) { 253 case RMI_RMM_REQ_COMPLETE: 254 if (ctx->state == RMM_STATE_RESET) { 255 VERBOSE("RMM: running rmmd_rmm_sync_exit\n"); 256 rmmd_rmm_sync_exit(x1); 257 } 258 259 return rmmd_smc_forward(REALM, NON_SECURE, x1, 260 x2, x3, x4, 0, handle); 261 262 default: 263 WARN("RMM: Unsupported RMM call 0x%08x\n", smc_fid); 264 SMC_RET1(handle, SMC_UNK); 265 } 266 } 267 268 /******************************************************************************* 269 * This cpu has been turned on. Enter RMM to initialise R-EL2. Entry into RMM 270 * is done after initialising minimal architectural state that guarantees safe 271 * execution. 272 ******************************************************************************/ 273 static void *rmmd_cpu_on_finish_handler(const void *arg) 274 { 275 int32_t rc; 276 uint32_t linear_id = plat_my_core_pos(); 277 rmmd_rmm_context_t *ctx = &rmm_context[linear_id]; 278 279 ctx->state = RMM_STATE_RESET; 280 281 /* Initialise RMM context with this entry point information */ 282 cm_setup_context(&ctx->cpu_ctx, rmm_ep_info); 283 284 /* Initialize RMM EL2 context. 
*/ 285 rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx); 286 287 rc = rmmd_rmm_sync_entry(ctx); 288 if (rc != 0) { 289 ERROR("RMM initialisation failed (%d) on CPU%d\n", rc, 290 linear_id); 291 panic(); 292 } 293 294 ctx->state = RMM_STATE_IDLE; 295 return NULL; 296 } 297 298 /* Subscribe to PSCI CPU on to initialize RMM on secondary */ 299 SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, rmmd_cpu_on_finish_handler); 300 301 static int gtsi_transition_granule(uint64_t pa, 302 unsigned int src_sec_state, 303 unsigned int target_pas) 304 { 305 int ret; 306 307 ret = gpt_transition_pas(pa, PAGE_SIZE_4KB, src_sec_state, target_pas); 308 309 /* Convert TF-A error codes into GTSI error codes */ 310 if (ret == -EINVAL) { 311 ERROR("[GTSI] Transition failed: invalid %s\n", "address"); 312 ERROR(" PA: 0x%" PRIx64 ", SRC: %d, PAS: %d\n", pa, 313 src_sec_state, target_pas); 314 ret = GRAN_TRANS_RET_BAD_ADDR; 315 } else if (ret == -EPERM) { 316 ERROR("[GTSI] Transition failed: invalid %s\n", "caller/PAS"); 317 ERROR(" PA: 0x%" PRIx64 ", SRC: %d, PAS: %d\n", pa, 318 src_sec_state, target_pas); 319 ret = GRAN_TRANS_RET_BAD_PAS; 320 } 321 322 return ret; 323 } 324 325 /******************************************************************************* 326 * This function handles all SMCs in the range reserved for GTF. 
327 ******************************************************************************/ 328 uint64_t rmmd_gtsi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, 329 uint64_t x3, uint64_t x4, void *cookie, 330 void *handle, uint64_t flags) 331 { 332 uint32_t src_sec_state; 333 334 /* Determine which security state this SMC originated from */ 335 src_sec_state = caller_sec_state(flags); 336 337 if (src_sec_state != SMC_FROM_REALM) { 338 WARN("RMM: GTF call originated from secure or normal world\n"); 339 SMC_RET1(handle, SMC_UNK); 340 } 341 342 switch (smc_fid) { 343 case SMC_ASC_MARK_REALM: 344 SMC_RET1(handle, gtsi_transition_granule(x1, SMC_FROM_REALM, 345 GPT_GPI_REALM)); 346 case SMC_ASC_MARK_NONSECURE: 347 SMC_RET1(handle, gtsi_transition_granule(x1, SMC_FROM_REALM, 348 GPT_GPI_NS)); 349 default: 350 WARN("RMM: Unsupported GTF call 0x%08x\n", smc_fid); 351 SMC_RET1(handle, SMC_UNK); 352 } 353 } 354