/*
 * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <libfdt.h>

#include <platform_def.h>

#include <arch_features.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/desc_image_load.h>
#include <common/fdt_fixup.h>
#include <common/fdt_wrappers.h>
#include <lib/optee_utils.h>
#if TRANSFER_LIST
#include <lib/transfer_list.h>
#endif
#include <lib/utils.h>
#include <plat/common/platform.h>

#include "qemu_private.h"

#define MAP_BL2_TOTAL		MAP_REGION_FLAT(			\
					bl2_tzram_layout.total_base,	\
					bl2_tzram_layout.total_size,	\
					MT_MEMORY | MT_RW | EL3_PAS)

#define MAP_BL2_RO		MAP_REGION_FLAT(			\
					BL_CODE_BASE,			\
					BL_CODE_END - BL_CODE_BASE,	\
					MT_CODE | EL3_PAS),		\
				MAP_REGION_FLAT(			\
					BL_RO_DATA_BASE,		\
					BL_RO_DATA_END			\
						- BL_RO_DATA_BASE,	\
					MT_RO_DATA | EL3_PAS)

#if USE_COHERENT_MEM
#define MAP_BL_COHERENT_RAM	MAP_REGION_FLAT(			\
					BL_COHERENT_RAM_BASE,		\
					BL_COHERENT_RAM_END		\
						- BL_COHERENT_RAM_BASE,	\
					MT_DEVICE | MT_RW | EL3_PAS)
#endif

/* Data structure which holds the extents of the trusted SRAM for BL2 */
static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);
#if TRANSFER_LIST
static struct transfer_list_header *bl2_tl;
#endif

void bl2_early_platform_setup2(u_register_t arg0, u_register_t arg1,
			       u_register_t arg2, u_register_t arg3)
{
	meminfo_t *mem_layout = (void *)arg1;

	/* Initialize the console to provide early debug support */
	qemu_console_init();

	/* Setup the BL2 memory layout */
	bl2_tzram_layout = *mem_layout;

	plat_qemu_io_setup();
}

static void security_setup(void)
{
	/*
	 * This is where a TrustZone address space controller and other
	 * security-related peripherals would be configured.
	 */
}

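/*
 * Fix up the device tree preloaded at ARM_PRELOADED_DTB_BASE for the normal
 * world: add the PSCI node and CPU enable methods, reserve the RMM memory
 * region when RME is enabled and, when TRANSFER_LIST is enabled, record the
 * packed DTB in the firmware handoff transfer list.
 */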
static void update_dt(void)
{
#if TRANSFER_LIST
	struct transfer_list_entry *te;
#endif
	int ret;
	void *fdt = (void *)(uintptr_t)ARM_PRELOADED_DTB_BASE;

	ret = fdt_open_into(fdt, fdt, PLAT_QEMU_DT_MAX_SIZE);
	if (ret < 0) {
		ERROR("Invalid Device Tree at %p: error %d\n", fdt, ret);
		return;
	}

	if (dt_add_psci_node(fdt)) {
		ERROR("Failed to add PSCI Device Tree node\n");
		return;
	}

	if (dt_add_psci_cpu_enable_methods(fdt)) {
		ERROR("Failed to add PSCI cpu enable methods in Device Tree\n");
		return;
	}

#if ENABLE_RME
	if (fdt_add_reserved_memory(fdt, "rmm", REALM_DRAM_BASE,
				    REALM_DRAM_SIZE)) {
		ERROR("Failed to reserve RMM memory in Device Tree\n");
		return;
	}

	INFO("Reserved RMM memory [0x%lx, 0x%lx] in Device Tree\n",
	     (uintptr_t)REALM_DRAM_BASE,
	     (uintptr_t)REALM_DRAM_BASE + REALM_DRAM_SIZE - 1);
#endif

	ret = fdt_pack(fdt);
	if (ret < 0)
		ERROR("Failed to pack Device Tree at %p: error %d\n", fdt, ret);

#if TRANSFER_LIST
	/* Record the packed FDT as an entry in the transfer list */
	te = transfer_list_add(bl2_tl, TL_TAG_FDT, fdt_totalsize(fdt), fdt);
	if (!te) {
		ERROR("Failed to add FDT entry to Transfer List\n");
		return;
	}
#endif
}

void bl2_platform_setup(void)
{
#if TRANSFER_LIST
	bl2_tl = transfer_list_init((void *)(uintptr_t)FW_HANDOFF_BASE,
				    FW_HANDOFF_SIZE);
	if (!bl2_tl) {
		ERROR("Failed to initialize Transfer List at 0x%lx\n",
		      (unsigned long)FW_HANDOFF_BASE);
	}
#endif
	security_setup();
	update_dt();

	/* TODO Initialize timer */
}

void qemu_bl2_sync_transfer_list(void)
{
#if TRANSFER_LIST
	transfer_list_update_checksum(bl2_tl);
#endif
}

void bl2_plat_arch_setup(void)
{
	const mmap_region_t bl_regions[] = {
		MAP_BL2_TOTAL,
		MAP_BL2_RO,
#if USE_COHERENT_MEM
		MAP_BL_COHERENT_RAM,
#endif
#if ENABLE_RME
		MAP_RMM_DRAM,
		MAP_GPT_L0_REGION,
		MAP_GPT_L1_REGION,
#endif
		{0}
	};

	setup_page_tables(bl_regions, plat_qemu_get_mmap());

#if ENABLE_RME
	/* BL2 runs in EL3 when RME is enabled. */
	assert(get_armv9_2_feat_rme_support() != 0U);
	enable_mmu_el3(0);
#else /* ENABLE_RME */

#ifdef __aarch64__
	enable_mmu_el1(0);
#else
	enable_mmu_svc_mon(0);
#endif
#endif /* ENABLE_RME */
}

/*******************************************************************************
 * Gets SPSR for BL32 entry
 ******************************************************************************/
static uint32_t qemu_get_spsr_for_bl32_entry(void)
{
#ifdef __aarch64__
	/*
	 * The Secure Payload Dispatcher service is responsible for
	 * setting the SPSR prior to entry into the BL3-2 image.
	 */
	return 0;
#else
	return SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE,
			   DISABLE_ALL_EXCEPTIONS);
#endif
}

/*******************************************************************************
 * Gets SPSR for BL33 entry
 ******************************************************************************/
static uint32_t qemu_get_spsr_for_bl33_entry(void)
{
	uint32_t spsr;
#ifdef __aarch64__
	unsigned int mode;

	/* Figure out what mode we enter the non-secure world in */
	mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
#else
	spsr = SPSR_MODE32(MODE32_svc,
			   plat_get_ns_image_entrypoint() & 0x1,
			   SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
#endif
	return spsr;
}

#if defined(SPD_spmd) && SPMD_SPM_AT_SEL2
/*
 * Walk the "arm,sp" nodes in TB_FW_CONFIG and register each secure
 * partition package (name, UUID, load address) with the IO layer.
 */
static int load_sps_from_tb_fw_config(struct image_info *image_info)
{
	void *dtb = (void *)image_info->image_base;
	const char *compat_str = "arm,sp";
	const struct fdt_property *uuid;
	uint32_t load_addr;
	const char *name;
	int sp_node;
	int node;

	node = fdt_node_offset_by_compatible(dtb, -1, compat_str);
	if (node < 0) {
		ERROR("Can't find %s in TB_FW_CONFIG\n", compat_str);
		return -1;
	}

	fdt_for_each_subnode(sp_node, dtb, node) {
		name = fdt_get_name(dtb, sp_node, NULL);
		if (name == NULL) {
			ERROR("Can't get name of node in dtb\n");
			return -1;
		}
		uuid = fdt_get_property(dtb, sp_node, "uuid", NULL);
		if (uuid == NULL) {
			ERROR("Can't find property uuid in node %s\n", name);
			return -1;
		}
		if (fdt_read_uint32(dtb, sp_node, "load-address",
				    &load_addr) < 0) {
			ERROR("Can't read load-address in node %s\n", name);
			return -1;
		}
		if (qemu_io_register_sp_pkg(name, uuid->data, load_addr) < 0) {
			return -1;
		}
	}

	return 0;
}
#endif /* defined(SPD_spmd) && SPMD_SPM_AT_SEL2 */

static int qemu_bl2_handle_post_image_load(unsigned int image_id)
{
	int err = 0;
	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
	bl_mem_params_node_t *pager_mem_params = NULL;
	bl_mem_params_node_t *paged_mem_params = NULL;
#endif
#if defined(SPD_spmd)
	bl_mem_params_node_t *bl32_mem_params = NULL;
#endif
#if TRANSFER_LIST
	struct transfer_list_header *ns_tl = NULL;
	struct transfer_list_entry *te = NULL;
#endif

	assert(bl_mem_params);

	switch (image_id) {
	case BL32_IMAGE_ID:
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
		pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
		assert(pager_mem_params);

		paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
		assert(paged_mem_params);

		err = parse_optee_header(&bl_mem_params->ep_info,
					 &pager_mem_params->image_info,
					 &paged_mem_params->image_info);
		if (err != 0) {
			WARN("OPTEE header parse error.\n");
		}
#endif

#if defined(SPMC_OPTEE)
		/*
		 * Explicit zeroes to unused registers since they may have
		 * been populated by parse_optee_header() above.
		 *
		 * OP-TEE expects the system DTB in x2 and TOS_FW_CONFIG in
		 * x0; the latter is filled in below for TOS_FW_CONFIG_ID and
		 * applies to any other SPMC too.
		 */
		bl_mem_params->ep_info.args.arg2 = ARM_PRELOADED_DTB_BASE;
#elif defined(SPD_opteed)
		/*
		 * OP-TEE expects to receive the DTB address in x2.
		 * This will be copied into x2 by the dispatcher.
		 */
		bl_mem_params->ep_info.args.arg3 = ARM_PRELOADED_DTB_BASE;
#elif defined(AARCH32_SP_OPTEE)
		bl_mem_params->ep_info.args.arg0 =
					bl_mem_params->ep_info.args.arg1;
		bl_mem_params->ep_info.args.arg1 = 0;
		bl_mem_params->ep_info.args.arg2 = ARM_PRELOADED_DTB_BASE;
		bl_mem_params->ep_info.args.arg3 = 0;
#endif
		bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl32_entry();
		break;

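	/*
	 * The hand-off arguments for BL33 depend on the build options below:
	 * ARM_LINUX_KERNEL_AS_BL33 passes the DTB address in x0 and zeroes
	 * x1-x3, TRANSFER_LIST relocates the transfer list to non-secure
	 * memory and advertises it through the register convention set up
	 * below, and the default (legacy) hand-off passes the primary CPU
	 * MPID in arg0.
	 */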
	case BL33_IMAGE_ID:
#ifdef AARCH32_SP_OPTEE
		/* AArch32 only core: OP-TEE expects NSec EP in register LR */
		pager_mem_params = get_bl_mem_params_node(BL32_IMAGE_ID);
		assert(pager_mem_params);
		pager_mem_params->ep_info.lr_svc = bl_mem_params->ep_info.pc;
#endif

		bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl33_entry();

#if ARM_LINUX_KERNEL_AS_BL33
		/*
		 * According to the file ``Documentation/arm64/booting.txt`` of
		 * the Linux kernel tree, Linux expects the physical address of
		 * the device tree blob (DTB) in x0, while x1-x3 are reserved
		 * for future use and must be 0.
		 */
		bl_mem_params->ep_info.args.arg0 =
			(u_register_t)ARM_PRELOADED_DTB_BASE;
		bl_mem_params->ep_info.args.arg1 = 0U;
		bl_mem_params->ep_info.args.arg2 = 0U;
		bl_mem_params->ep_info.args.arg3 = 0U;
#elif TRANSFER_LIST
		if (bl2_tl) {
			/* Relocate the transfer list to pre-allocated NS memory */
			ns_tl = transfer_list_relocate(bl2_tl,
				(void *)(uintptr_t)FW_NS_HANDOFF_BASE,
				bl2_tl->max_size);
			if (!ns_tl) {
				ERROR("Relocate TL to 0x%lx failed\n",
				      (unsigned long)FW_NS_HANDOFF_BASE);
				return -1;
			}
			NOTICE("Transfer list handoff to BL33\n");
			transfer_list_dump(ns_tl);

			te = transfer_list_find(ns_tl, TL_TAG_FDT);

			bl_mem_params->ep_info.args.arg1 =
				TRANSFER_LIST_SIGNATURE |
				REGISTER_CONVENTION_VERSION_MASK;
			bl_mem_params->ep_info.args.arg3 = (uintptr_t)ns_tl;

			if (GET_RW(bl_mem_params->ep_info.spsr) == MODE_RW_32) {
				/* AArch32: the DTB address is passed in r2 */
				bl_mem_params->ep_info.args.arg0 = 0;
				bl_mem_params->ep_info.args.arg2 = te ?
					(uintptr_t)transfer_list_entry_data(te)
					: 0;
			} else {
				/* AArch64: the DTB address is passed in x0 */
				bl_mem_params->ep_info.args.arg0 = te ?
					(uintptr_t)transfer_list_entry_data(te)
					: 0;
				bl_mem_params->ep_info.args.arg2 = 0;
			}
		} else {
			/* Legacy handoff */
			bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
		}
#else
		/* BL33 expects to receive the primary CPU MPID (through r0) */
		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
#endif /* ARM_LINUX_KERNEL_AS_BL33 */

		break;
#ifdef SPD_spmd
#if SPMD_SPM_AT_SEL2
	case TB_FW_CONFIG_ID:
		err = load_sps_from_tb_fw_config(&bl_mem_params->image_info);
		break;
#endif
	case TOS_FW_CONFIG_ID:
		/* An SPMC expects TOS_FW_CONFIG in x0/r0 */
		bl32_mem_params = get_bl_mem_params_node(BL32_IMAGE_ID);
		bl32_mem_params->ep_info.args.arg0 =
			bl_mem_params->image_info.image_base;
		break;
#endif
	default:
		/* Do nothing in default case */
		break;
	}

	return err;
}

/*******************************************************************************
 * This function can be used by the platforms to update/use image
 * information for given `image_id`.
 ******************************************************************************/
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
	return qemu_bl2_handle_post_image_load(image_id);
}

uintptr_t plat_get_ns_image_entrypoint(void)
{
	return NS_IMAGE_OFFSET;
}