/*
 * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <errno.h>

#include <bl31/bl31.h>
#include <bl31/ehf.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/simd_ctx.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/platform.h>
#include <services/spm_mm_partition.h>
#include <services/spm_mm_svc.h>
#include <smccc_helpers.h>

#include "spm_common.h"
#include "spm_mm_private.h"

/*******************************************************************************
 * Secure Partition context information.
 ******************************************************************************/
static sp_context_t sp_ctx;

/*******************************************************************************
 * Set state of a Secure Partition context.
 ******************************************************************************/
void sp_state_set(sp_context_t *sp_ptr, sp_state_t state)
{
	spin_lock(&(sp_ptr->state_lock));
	sp_ptr->state = state;
	spin_unlock(&(sp_ptr->state_lock));
}

/*******************************************************************************
 * Wait until the state of a Secure Partition is the specified one and change it
 * to the desired state.
 ******************************************************************************/
void sp_state_wait_switch(sp_context_t *sp_ptr, sp_state_t from, sp_state_t to)
{
	int success = 0;

	while (success == 0) {
		spin_lock(&(sp_ptr->state_lock));

		if (sp_ptr->state == from) {
			sp_ptr->state = to;

			success = 1;
		}

		spin_unlock(&(sp_ptr->state_lock));
	}
}

/*******************************************************************************
 * Check if the state of a Secure Partition is the specified one and, if so,
 * change it to the desired state. Returns 0 on success, -1 on error.
 ******************************************************************************/
int sp_state_try_switch(sp_context_t *sp_ptr, sp_state_t from, sp_state_t to)
{
	int ret = -1;

	spin_lock(&(sp_ptr->state_lock));

	if (sp_ptr->state == from) {
		sp_ptr->state = to;

		ret = 0;
	}

	spin_unlock(&(sp_ptr->state_lock));

	return ret;
}

/*******************************************************************************
 * This function takes an SP context pointer and performs a synchronous entry
 * into it.
 ******************************************************************************/
static uint64_t spm_sp_synchronous_entry(sp_context_t *ctx)
{
	uint64_t rc;

	assert(ctx != NULL);

	/* Assign the context of the SP to this CPU */
	cm_set_context(&(ctx->cpu_ctx), SECURE);

	/* Restore the context assigned above */
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	/* Invalidate TLBs at EL1. */
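	/*
	 * Note: the SP's S-EL1 translation regime is owned by the SPM and may
	 * have changed since the previous entry (for instance through
	 * MM_SP_MEMORY_ATTRIBUTES_SET_AARCH64), so make sure no stale EL1
	 * translations are used once the partition starts executing.
	 */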
	tlbivmalle1();
	dsbish();

	/* Enter Secure Partition */
	rc = spm_secure_partition_enter(&ctx->c_rt_ctx);

	/* Save secure state */
	cm_el1_sysregs_context_save(SECURE);

	return rc;
}

/*******************************************************************************
 * This function returns to the place where spm_sp_synchronous_entry() was
 * called originally.
 ******************************************************************************/
__dead2 static void spm_sp_synchronous_exit(uint64_t rc)
{
	sp_context_t *ctx = &sp_ctx;

	/*
	 * The SPM must have initiated the original request through a
	 * synchronous entry into the secure partition. Jump back to the
	 * original C runtime context with the value of rc in x0.
	 */
	spm_secure_partition_exit(ctx->c_rt_ctx, rc);

	panic();
}

/*******************************************************************************
 * Jump to each Secure Partition for the first time.
 ******************************************************************************/
static int32_t spm_init(void)
{
	uint64_t rc;
	sp_context_t *ctx;

	INFO("Secure Partition init...\n");

	ctx = &sp_ctx;

	ctx->state = SP_STATE_RESET;

	rc = spm_sp_synchronous_entry(ctx);
	assert(rc == 0);

	ctx->state = SP_STATE_IDLE;

	INFO("Secure Partition initialized.\n");

	/* rc is 0 at this point, so report successful initialization (nonzero). */
	return !rc;
}

/*******************************************************************************
 * Initialize contexts of all Secure Partitions.
 ******************************************************************************/
int32_t spm_mm_setup(void)
{
	sp_context_t *ctx;

	/* Disable MMU at EL1 (initialized by BL2) */
	disable_mmu_icache_el1();

	/* Initialize context of the SP */
	INFO("Secure Partition context setup start...\n");

	ctx = &sp_ctx;

	/* Assign translation tables context. */
	ctx->xlat_ctx_handle = spm_get_sp_xlat_context();

	spm_sp_setup(ctx);

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&spm_init);

	INFO("Secure Partition setup done.\n");

	return 0;
}

/*******************************************************************************
 * Function to perform a call to a Secure Partition.
 ******************************************************************************/
uint64_t spm_mm_sp_call(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3)
{
	uint64_t rc;
	sp_context_t *sp_ptr = &sp_ctx;

#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	/*
	 * The SP runs to completion, so there is no need to restore the FP/SVE
	 * registers of the secure context. Save the FP/SVE registers of the
	 * non-secure context only.
	 */
	simd_ctx_save(NON_SECURE, false);
#endif /* CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS */

	/* Wait until the Secure Partition is idle and set it to busy. */
	sp_state_wait_switch(sp_ptr, SP_STATE_IDLE, SP_STATE_BUSY);

	/* Set values for registers on SP entry */
	cpu_context_t *cpu_ctx = &(sp_ptr->cpu_ctx);

	write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X0, smc_fid);
	write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X1, x1);
	write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X2, x2);
	write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X3, x3);

	/* Jump to the Secure Partition. */
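	/*
	 * The call below returns only once the partition has signalled
	 * completion with MM_SP_EVENT_COMPLETE_AARCH64, at which point
	 * spm_sp_synchronous_exit() hands the SP's return code back in rc.
	 */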
	rc = spm_sp_synchronous_entry(sp_ptr);

	/* Flag Secure Partition as idle. */
	assert(sp_ptr->state == SP_STATE_BUSY);
	sp_state_set(sp_ptr, SP_STATE_IDLE);

#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	/*
	 * The SP runs to completion, so there is no need to save the FP/SVE
	 * registers of the secure context. Restore the non-secure world FP/SVE
	 * registers only.
	 */
	simd_ctx_restore(NON_SECURE);
#endif /* CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS */

	return rc;
}

/*******************************************************************************
 * MM_COMMUNICATE handler
 ******************************************************************************/
static uint64_t mm_communicate(uint32_t smc_fid, uint64_t mm_cookie,
			       uint64_t comm_buffer_address,
			       uint64_t comm_size_address, void *handle)
{
	uint64_t rc;

	/* Cookie. Reserved for future use. It must be zero. */
	if (mm_cookie != 0U) {
		ERROR("MM_COMMUNICATE: cookie is not zero\n");
		SMC_RET1(handle, SPM_MM_INVALID_PARAMETER);
	}

	if (comm_buffer_address == 0U) {
		ERROR("MM_COMMUNICATE: comm_buffer_address is zero\n");
		SMC_RET1(handle, SPM_MM_INVALID_PARAMETER);
	}

	if (comm_size_address != 0U) {
		VERBOSE("MM_COMMUNICATE: comm_size_address is not 0 as recommended.\n");
	}

	/*
	 * The current secure partition design mandates
	 * - at any point, only a single core can be
	 *   executing in the secure partition.
	 * - a core cannot be preempted by an interrupt
	 *   while executing in the secure partition.
	 * Raise the running priority of the core to the
	 * interrupt level configured for the secure partition
	 * so as to block any interrupt from preempting this
	 * core.
	 */
	ehf_activate_priority(PLAT_SP_PRI);

	/* Save the Normal world context */
	cm_el1_sysregs_context_save(NON_SECURE);

	rc = spm_mm_sp_call(smc_fid, comm_buffer_address, comm_size_address,
			    plat_my_core_pos());

	/* Restore non-secure state */
	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	/*
	 * Exited from the secure partition. This core can take
	 * interrupts now.
	 */
	ehf_deactivate_priority(PLAT_SP_PRI);

	SMC_RET1(handle, rc);
}

/*******************************************************************************
 * Secure Partition Manager SMC handler.
 ******************************************************************************/
uint64_t spm_mm_smc_handler(uint32_t smc_fid,
			    uint64_t x1,
			    uint64_t x2,
			    uint64_t x3,
			    uint64_t x4,
			    void *cookie,
			    void *handle,
			    uint64_t flags)
{
	unsigned int ns;

	/* Determine which security state this SMC originated from */
	ns = is_caller_non_secure(flags);

	if (ns == SMC_FROM_SECURE) {

		/* Handle SMCs from Secure world. */

		assert(handle == cm_get_context(SECURE));

		/* Make next ERET jump to S-EL0 instead of S-EL1. */
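		/*
		 * ELR_EL1/SPSR_EL1 hold the S-EL0 return state captured when
		 * the partition's request trapped to S-EL1, so copying them
		 * into ELR_EL3/SPSR_EL3 makes the ERET resume the partition
		 * at S-EL0 directly.
		 */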
		cm_set_elr_spsr_el3(SECURE, read_elr_el1(), read_spsr_el1());

		switch (smc_fid) {

		case SPM_MM_VERSION_AARCH32:
			SMC_RET1(handle, SPM_MM_VERSION_COMPILED);

		case MM_SP_EVENT_COMPLETE_AARCH64:
			spm_sp_synchronous_exit(x1);

		case MM_SP_MEMORY_ATTRIBUTES_GET_AARCH64:
			INFO("Received MM_SP_MEMORY_ATTRIBUTES_GET_AARCH64 SMC\n");

			if (sp_ctx.state != SP_STATE_RESET) {
				WARN("MM_SP_MEMORY_ATTRIBUTES_GET_AARCH64 is available at boot time only\n");
				SMC_RET1(handle, SPM_MM_NOT_SUPPORTED);
			}
			SMC_RET1(handle,
				 spm_memory_attributes_get_smc_handler(
					 &sp_ctx, x1));

		case MM_SP_MEMORY_ATTRIBUTES_SET_AARCH64:
			INFO("Received MM_SP_MEMORY_ATTRIBUTES_SET_AARCH64 SMC\n");

			if (sp_ctx.state != SP_STATE_RESET) {
				WARN("MM_SP_MEMORY_ATTRIBUTES_SET_AARCH64 is available at boot time only\n");
				SMC_RET1(handle, SPM_MM_NOT_SUPPORTED);
			}
			SMC_RET1(handle,
				 spm_memory_attributes_set_smc_handler(
					 &sp_ctx, x1, x2, x3));

		default:
			break;
		}
	} else {

		/* Handle SMCs from Non-secure world. */

		assert(handle == cm_get_context(NON_SECURE));

		switch (smc_fid) {

		case MM_VERSION_AARCH32:
			SMC_RET1(handle, MM_VERSION_COMPILED);

		case MM_COMMUNICATE_AARCH32:
		case MM_COMMUNICATE_AARCH64:
			return mm_communicate(smc_fid, x1, x2, x3, handle);

		case MM_SP_MEMORY_ATTRIBUTES_GET_AARCH64:
		case MM_SP_MEMORY_ATTRIBUTES_SET_AARCH64:
			/* SMC interfaces reserved for secure callers. */
			SMC_RET1(handle, SPM_MM_NOT_SUPPORTED);

		default:
			break;
		}
	}

	SMC_RET1(handle, SMC_UNK);
}