/*
 * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch.h>
#include <arch_helpers.h>
#include <context.h>
#include <common/debug.h>
#include <lib/el3_runtime/context_mgmt.h>
#if HOB_LIST
#include <lib/hob/hob.h>
#include <lib/hob/hob_guid.h>
#include <lib/hob/mmram.h>
#include <lib/hob/mpinfo.h>
#endif
#if TRANSFER_LIST
#include <transfer_list.h>
#endif
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <platform_def.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <services/spm_mm_partition.h>

#include "spm_common.h"
#include "spm_mm_private.h"
#include "spm_shim_private.h"

#if HOB_LIST && TRANSFER_LIST
static struct efi_hob_handoff_info_table *build_sp_boot_hob_list(
		const spm_mm_boot_info_t *sp_boot_info, uint16_t *hob_table_size)
{
	int ret;
	struct efi_hob_handoff_info_table *hob_table;
	struct efi_guid ns_buf_guid = MM_NS_BUFFER_GUID;
	struct efi_guid mmram_resv_guid = MM_PEI_MMRAM_MEMORY_RESERVE_GUID;
	struct efi_mmram_descriptor *mmram_desc_data;
	uint16_t mmram_resv_data_size;
	struct efi_mmram_hob_descriptor_block *mmram_hob_desc_data;
	uint64_t hob_table_offset;

	hob_table_offset = sizeof(struct transfer_list_header) +
		sizeof(struct transfer_list_entry);

	*hob_table_size = 0U;

	hob_table = create_hob_list(sp_boot_info->sp_mem_base,
			sp_boot_info->sp_mem_limit - sp_boot_info->sp_mem_base,
			sp_boot_info->sp_shared_buf_base + hob_table_offset,
			sp_boot_info->sp_shared_buf_size);
	if (hob_table == NULL) {
		return NULL;
	}

	ret = create_fv_hob(hob_table, sp_boot_info->sp_image_base,
			sp_boot_info->sp_image_size);
	if (ret) {
		return NULL;
	}

	ret = create_guid_hob(hob_table, &ns_buf_guid,
			sizeof(struct efi_mmram_descriptor),
			(void **) &mmram_desc_data);
	if (ret) {
		return NULL;
	}

	mmram_desc_data->physical_start = sp_boot_info->sp_ns_comm_buf_base;
	mmram_desc_data->physical_size = sp_boot_info->sp_ns_comm_buf_size;
	mmram_desc_data->cpu_start = sp_boot_info->sp_ns_comm_buf_base;
	mmram_desc_data->region_state = EFI_CACHEABLE | EFI_ALLOCATED;

	mmram_resv_data_size = sizeof(struct efi_mmram_hob_descriptor_block) +
		sizeof(struct efi_mmram_descriptor) *
		sp_boot_info->num_sp_mem_regions;

	ret = create_guid_hob(hob_table, &mmram_resv_guid,
			mmram_resv_data_size, (void **) &mmram_hob_desc_data);
	if (ret) {
		return NULL;
	}

	*hob_table_size = hob_table->efi_free_memory_bottom -
		(efi_physical_address_t) hob_table;

	mmram_hob_desc_data->number_of_mm_reserved_regions = 4U;
	mmram_desc_data = &mmram_hob_desc_data->descriptor[0];

	/* The first region should be the SP image MM range. */
	mmram_desc_data[0].physical_start = sp_boot_info->sp_image_base;
	mmram_desc_data[0].physical_size = sp_boot_info->sp_image_size;
	mmram_desc_data[0].cpu_start = sp_boot_info->sp_image_base;
	mmram_desc_data[0].region_state = EFI_CACHEABLE | EFI_ALLOCATED;

	/* The second region should be the shared buffer MM range. */
	mmram_desc_data[1].physical_start = sp_boot_info->sp_shared_buf_base;
	mmram_desc_data[1].physical_size = sp_boot_info->sp_shared_buf_size;
	mmram_desc_data[1].cpu_start = sp_boot_info->sp_shared_buf_base;
	mmram_desc_data[1].region_state = EFI_CACHEABLE | EFI_ALLOCATED;

	/* NS buffer MM range. */
	mmram_desc_data[2].physical_start = sp_boot_info->sp_ns_comm_buf_base;
	mmram_desc_data[2].physical_size = sp_boot_info->sp_ns_comm_buf_size;
	mmram_desc_data[2].cpu_start = sp_boot_info->sp_ns_comm_buf_base;
	mmram_desc_data[2].region_state = EFI_CACHEABLE | EFI_ALLOCATED;

	/* Heap MM range. */
	mmram_desc_data[3].physical_start = sp_boot_info->sp_heap_base;
	mmram_desc_data[3].physical_size = sp_boot_info->sp_heap_size;
	mmram_desc_data[3].cpu_start = sp_boot_info->sp_heap_base;
	mmram_desc_data[3].region_state = EFI_CACHEABLE;

	return hob_table;
}
#endif
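
/*
 * Illustrative layout of the shared buffer as assembled by the helper above
 * and by spm_sp_setup() below. This is a sketch inferred from
 * hob_table_offset and the transfer list calls, not an authoritative
 * memory map:
 *
 *   sp_shared_buf_base
 *     +0                                struct transfer_list_header
 *     +sizeof(transfer_list_header)     struct transfer_list_entry
 *     +hob_table_offset                 HOB table (TL_TAG_HOB_LIST payload)
 *     ...                               remaining transfer list space
 *
 * The HOB table is created at hob_table_offset, which is where the payload
 * of the first transfer list entry is expected to land.
 */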

/* Setup context of the Secure Partition */
void spm_sp_setup(sp_context_t *sp_ctx)
{
	cpu_context_t *ctx = &(sp_ctx->cpu_ctx);
	u_register_t sctlr_el1_val;
	/* Pointer to the MP information from the platform port. */
	const spm_mm_boot_info_t *sp_boot_info =
			plat_get_secure_partition_boot_info(NULL);

#if HOB_LIST && TRANSFER_LIST
	struct efi_hob_handoff_info_table *hob_table;
	struct transfer_list_header *sp_boot_tl;
	struct transfer_list_entry *sp_boot_te;
	uint16_t hob_table_size;
#endif

	assert(sp_boot_info != NULL);

	/*
	 * Initialize CPU context
	 * ----------------------
	 */

	entry_point_info_t ep_info = {0};

	SET_PARAM_HEAD(&ep_info, PARAM_EP, VERSION_1, SECURE | EP_ST_ENABLE);

	/* Setup entrypoint and SPSR */
	ep_info.pc = sp_boot_info->sp_image_base;
	ep_info.spsr = SPSR_64(MODE_EL0, MODE_SP_EL0, DISABLE_ALL_EXCEPTIONS);

	/*
	 * X0: Virtual address of a buffer shared between EL3 and Secure EL0.
	 *     The buffer will be mapped in the Secure EL1 translation regime
	 *     with Normal IS WBWA attributes and RO data and Execute Never
	 *     instruction access permissions.
	 *
	 * X1: Size of the buffer in bytes
	 *
	 * X2: cookie value (Implementation Defined)
	 *
	 * X3: cookie value (Implementation Defined)
	 *
	 * X4 to X7 = 0
	 */
	ep_info.args.arg0 = sp_boot_info->sp_shared_buf_base;
	ep_info.args.arg1 = sp_boot_info->sp_shared_buf_size;
	ep_info.args.arg2 = PLAT_SPM_COOKIE_0;
	ep_info.args.arg3 = PLAT_SPM_COOKIE_1;

	cm_setup_context(ctx, &ep_info);

	/*
	 * SP_EL0: A non-zero value will indicate to the SP that the SPM has
	 * initialized the stack pointer for the current CPU through
	 * implementation defined means. The value will be 0 otherwise.
	 */
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_SP_EL0,
			sp_boot_info->sp_stack_base +
			sp_boot_info->sp_pcpu_stack_size);

	/*
	 * Setup translation tables
	 * ------------------------
	 */

#if ENABLE_ASSERTIONS

	/* Get max granularity supported by the platform. */
	unsigned int max_granule = xlat_arch_get_max_supported_granule_size();

	VERBOSE("Max translation granule size supported: %u KiB\n",
		max_granule / 1024U);

	unsigned int max_granule_mask = max_granule - 1U;

	/* Base must be aligned to the max granularity */
	assert((sp_boot_info->sp_ns_comm_buf_base & max_granule_mask) == 0);

	/* Size must be a multiple of the max granularity */
	assert((sp_boot_info->sp_ns_comm_buf_size & max_granule_mask) == 0);

#endif /* ENABLE_ASSERTIONS */
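
	/*
	 * Worked example of the alignment checks above (illustrative numbers
	 * only, not a platform requirement): if the platform reports a
	 * maximum granule of 64 KiB (max_granule = 0x10000), then
	 * max_granule_mask is 0xffff, and a buffer with
	 * sp_ns_comm_buf_base = 0xff600000 and sp_ns_comm_buf_size = 0x10000
	 * passes both assertions, since both values have their low 16 bits
	 * clear.
	 */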

	/* This region contains the exception vectors used at S-EL1. */
	const mmap_region_t sel1_exception_vectors =
		MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START,
				SPM_SHIM_EXCEPTIONS_SIZE,
				MT_CODE | MT_SECURE | MT_PRIVILEGED);
	mmap_add_region_ctx(sp_ctx->xlat_ctx_handle,
			    &sel1_exception_vectors);

	mmap_add_ctx(sp_ctx->xlat_ctx_handle,
		     plat_get_secure_partition_mmap(NULL));

	init_xlat_tables_ctx(sp_ctx->xlat_ctx_handle);

	/*
	 * MMU-related registers
	 * ---------------------
	 */
	xlat_ctx_t *xlat_ctx = sp_ctx->xlat_ctx_handle;

	uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];

	setup_mmu_cfg((uint64_t *)&mmu_cfg_params, 0, xlat_ctx->base_table,
		      xlat_ctx->pa_max_address, xlat_ctx->va_max_address,
		      EL1_EL0_REGIME);

	write_el1_ctx_common(get_el1_sysregs_ctx(ctx), mair_el1,
			mmu_cfg_params[MMU_CFG_MAIR]);

	write_ctx_tcr_el1_reg_errata(ctx, mmu_cfg_params[MMU_CFG_TCR]);

	write_el1_ctx_common(get_el1_sysregs_ctx(ctx), ttbr0_el1,
			mmu_cfg_params[MMU_CFG_TTBR0]);

	/* Setup SCTLR_EL1 */
	sctlr_el1_val = read_ctx_sctlr_el1_reg_errata(ctx);

	sctlr_el1_val |=
		/*SCTLR_EL1_RES1 |*/
		/* Don't trap DC CVAU, DC CIVAC, DC CVAC, DC CVAP, or IC IVAU */
		SCTLR_UCI_BIT |
		/* RW regions at xlat regime EL1&0 are forced to be XN. */
		SCTLR_WXN_BIT |
		/* Don't trap to EL1 execution of WFI or WFE at EL0. */
		SCTLR_NTWI_BIT | SCTLR_NTWE_BIT |
		/* Don't trap to EL1 accesses to CTR_EL0 from EL0. */
		SCTLR_UCT_BIT |
		/* Don't trap to EL1 execution of DC ZVA at EL0. */
		SCTLR_DZE_BIT |
		/* Enable SP Alignment check for EL0 */
		SCTLR_SA0_BIT |
		/* Don't change PSTATE.PAN on taking an exception to EL1 */
		SCTLR_SPAN_BIT |
		/* Allow cacheable data and instr. accesses to normal memory. */
		SCTLR_C_BIT | SCTLR_I_BIT |
		/* Enable MMU. */
		SCTLR_M_BIT
	;

	sctlr_el1_val &= ~(
		/* Explicit data accesses at EL0 are little-endian. */
		SCTLR_E0E_BIT |
		/*
		 * Alignment fault checking disabled when at EL1 and EL0 as
		 * the UEFI spec permits unaligned accesses.
		 */
		SCTLR_A_BIT |
		/* Accesses to DAIF from EL0 are trapped to EL1. */
		SCTLR_UMA_BIT
	);

	/* Store the initialised SCTLR_EL1 value in the cpu_context */
	write_ctx_sctlr_el1_reg_errata(ctx, sctlr_el1_val);

	/*
	 * Setup other system registers
	 * ----------------------------
	 */

	/* Shim Exception Vector Base Address */
	write_el1_ctx_common(get_el1_sysregs_ctx(ctx), vbar_el1,
			SPM_SHIM_EXCEPTIONS_PTR);

	write_el1_ctx_arch_timer(get_el1_sysregs_ctx(ctx), cntkctl_el1,
			EL0PTEN_BIT | EL0VTEN_BIT | EL0PCTEN_BIT | EL0VCTEN_BIT);

	/*
	 * FPEN: Allow the Secure Partition to access FP/SIMD registers.
	 * Note that SPM will not do any saving/restoring of these registers on
	 * behalf of the SP. This falls under the SP's responsibility.
	 * TTA: Enable access to trace registers.
	 * ZEN (v8.2): Trap SVE instructions and access to SVE registers.
	 */
	write_el1_ctx_common(get_el1_sysregs_ctx(ctx), cpacr_el1,
			CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_NONE));
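
	/*
	 * Summary sketch of the initial S-EL0 state programmed above, for the
	 * reader's reference; the authoritative values are the cpu_context
	 * writes themselves:
	 *
	 *   PC     = sp_image_base
	 *   SPSR   = AArch64 EL0t, all exceptions masked
	 *   SP_EL0 = sp_stack_base + sp_pcpu_stack_size
	 *   X0-X3  = shared buffer base/size and the two platform cookies
	 *            (overridden by the transfer list handoff arguments below
	 *            when HOB_LIST && TRANSFER_LIST are enabled)
	 *   MMU enabled with WXN, shim exception vectors at VBAR_EL1
	 */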

	/*
	 * Prepare information in buffer shared between EL3 and S-EL0
	 * ----------------------------------------------------------
	 */
#if HOB_LIST && TRANSFER_LIST
	sp_boot_tl = transfer_list_init((void *) sp_boot_info->sp_shared_buf_base,
			sp_boot_info->sp_shared_buf_size);
	assert(sp_boot_tl != NULL);

	hob_table = build_sp_boot_hob_list(sp_boot_info, &hob_table_size);
	assert(hob_table != NULL);

	transfer_list_update_checksum(sp_boot_tl);

	sp_boot_te = transfer_list_add(sp_boot_tl, TL_TAG_HOB_LIST,
			hob_table_size, hob_table);
	if (sp_boot_te == NULL) {
		ERROR("Failed to add HOB list to xfer list\n");
	}

	transfer_list_set_handoff_args(sp_boot_tl, &ep_info);

	transfer_list_dump(sp_boot_tl);

	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0,
			ep_info.args.arg0);
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X1,
			ep_info.args.arg1);
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X2,
			ep_info.args.arg2);
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X3,
			ep_info.args.arg3);
#else
	void *shared_buf_ptr = (void *) sp_boot_info->sp_shared_buf_base;

	/* Copy the boot information into the shared buffer with the SP. */
	assert((uintptr_t)shared_buf_ptr + sizeof(spm_mm_boot_info_t)
	       <= (sp_boot_info->sp_shared_buf_base +
		   sp_boot_info->sp_shared_buf_size));

	assert(sp_boot_info->sp_shared_buf_base <=
	       (UINTPTR_MAX - sp_boot_info->sp_shared_buf_size + 1));

	memcpy((void *) shared_buf_ptr, (const void *) sp_boot_info,
	       sizeof(spm_mm_boot_info_t));

	/* Pointer to the MP information from the platform port. */
	spm_mm_mp_info_t *sp_mp_info =
		((spm_mm_boot_info_t *) shared_buf_ptr)->mp_info;

	assert(sp_mp_info != NULL);

	/*
	 * Point the shared buffer MP information pointer to where the info will
	 * be populated, just after the boot info.
	 */
	((spm_mm_boot_info_t *) shared_buf_ptr)->mp_info =
		(spm_mm_mp_info_t *) ((uintptr_t)shared_buf_ptr
				      + sizeof(spm_mm_boot_info_t));

	/*
	 * Update the shared buffer pointer to where the MP information for the
	 * payload will be populated.
	 */
	shared_buf_ptr = ((spm_mm_boot_info_t *) shared_buf_ptr)->mp_info;

	/*
	 * Copy the CPU information into the shared buffer area after the boot
	 * information.
	 */
	assert(sp_boot_info->num_cpus <= PLATFORM_CORE_COUNT);

	assert((uintptr_t)shared_buf_ptr
	       <= (sp_boot_info->sp_shared_buf_base +
		   sp_boot_info->sp_shared_buf_size -
		   (sp_boot_info->num_cpus * sizeof(*sp_mp_info))));

	memcpy(shared_buf_ptr, (const void *) sp_mp_info,
	       sp_boot_info->num_cpus * sizeof(*sp_mp_info));

	/*
	 * Calculate the linear indices of cores in boot information for the
	 * secure partition and flag the primary CPU.
	 */
	sp_mp_info = (spm_mm_mp_info_t *) shared_buf_ptr;

	for (unsigned int index = 0; index < sp_boot_info->num_cpus; index++) {
		u_register_t mpidr = sp_mp_info[index].mpidr;

		sp_mp_info[index].linear_id = plat_core_pos_by_mpidr(mpidr);
		if (plat_my_core_pos() == sp_mp_info[index].linear_id)
			sp_mp_info[index].flags |= MP_INFO_FLAG_PRIMARY_CPU;
	}
#endif
}
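
/*
 * Illustrative call site (a sketch, not code from this file): the SPM MM
 * driver's boot-time setup is expected to obtain the secure partition
 * context, attach its translation context and then call spm_sp_setup().
 * spm_get_sp_xlat_context() is assumed to be the SPM MM helper that returns
 * the SP's xlat context; adjust to the actual driver entry point:
 *
 *   sp_context_t *ctx = &sp_ctx;
 *
 *   ctx->xlat_ctx_handle = spm_get_sp_xlat_context();
 *   spm_sp_setup(ctx);
 */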