/*
 * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <libfdt.h>

#include <platform_def.h>

#include <arch_features.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/desc_image_load.h>
#include <common/fdt_fixup.h>
#include <common/fdt_wrappers.h>
#include <lib/optee_utils.h>
#include <lib/transfer_list.h>
#include <lib/utils.h>
#include <plat/common/platform.h>

#include "qemu_private.h"

#define MAP_BL2_TOTAL		MAP_REGION_FLAT(			\
					bl2_tzram_layout.total_base,	\
					bl2_tzram_layout.total_size,	\
					MT_MEMORY | MT_RW | EL3_PAS)

#define MAP_BL2_RO		MAP_REGION_FLAT(			\
					BL_CODE_BASE,			\
					BL_CODE_END - BL_CODE_BASE,	\
					MT_CODE | EL3_PAS),		\
				MAP_REGION_FLAT(			\
					BL_RO_DATA_BASE,		\
					BL_RO_DATA_END			\
						- BL_RO_DATA_BASE,	\
					MT_RO_DATA | EL3_PAS)

#if USE_COHERENT_MEM
#define MAP_BL_COHERENT_RAM	MAP_REGION_FLAT(			\
					BL_COHERENT_RAM_BASE,		\
					BL_COHERENT_RAM_END		\
						- BL_COHERENT_RAM_BASE,	\
					MT_DEVICE | MT_RW | EL3_PAS)
#endif

/* Data structure which holds the extents of the trusted SRAM for BL2 */
static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);
static struct transfer_list_header *bl2_tl;

void bl2_early_platform_setup2(u_register_t arg0, u_register_t arg1,
			       u_register_t arg2, u_register_t arg3)
{
	meminfo_t *mem_layout = (void *)arg1;

	/* Initialize the console to provide early debug support */
	qemu_console_init();

	/* Setup the BL2 memory layout */
	bl2_tzram_layout = *mem_layout;

	plat_qemu_io_setup();
}

static void security_setup(void)
{
	/*
	 * This is where a TrustZone address space controller and other
	 * security-related peripherals would be configured.
	 */
}

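/*
 * Fix up the preloaded device tree: add the PSCI node and CPU enable
 * methods, reserve the RMM region when RME is enabled, then repack the
 * FDT and, when TRANSFER_LIST is enabled, record it as a TL_TAG_FDT
 * entry in the transfer list.
 */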
static void update_dt(void)
{
#if TRANSFER_LIST
	struct transfer_list_entry *te;
#endif
	int ret;
	void *fdt = (void *)(uintptr_t)ARM_PRELOADED_DTB_BASE;

	ret = fdt_open_into(fdt, fdt, PLAT_QEMU_DT_MAX_SIZE);
	if (ret < 0) {
		ERROR("Invalid Device Tree at %p: error %d\n", fdt, ret);
		return;
	}

	if (dt_add_psci_node(fdt)) {
		ERROR("Failed to add PSCI Device Tree node\n");
		return;
	}

	if (dt_add_psci_cpu_enable_methods(fdt)) {
		ERROR("Failed to add PSCI cpu enable methods in Device Tree\n");
		return;
	}

#if ENABLE_RME
	if (fdt_add_reserved_memory(fdt, "rmm", REALM_DRAM_BASE,
				    REALM_DRAM_SIZE)) {
		ERROR("Failed to reserve RMM memory in Device Tree\n");
		return;
	}

	INFO("Reserved RMM memory [0x%lx, 0x%lx] in Device tree\n",
	     (uintptr_t)REALM_DRAM_BASE,
	     (uintptr_t)REALM_DRAM_BASE + REALM_DRAM_SIZE - 1);
#endif

	ret = fdt_pack(fdt);
	if (ret < 0)
		ERROR("Failed to pack Device Tree at %p: error %d\n", fdt, ret);

#if TRANSFER_LIST
	/* create a TE */
	te = transfer_list_add(bl2_tl, TL_TAG_FDT, fdt_totalsize(fdt), fdt);
	if (!te) {
		ERROR("Failed to add FDT entry to Transfer List\n");
		return;
	}
#endif
}

void bl2_platform_setup(void)
{
#if TRANSFER_LIST
	bl2_tl = transfer_list_init((void *)(uintptr_t)FW_HANDOFF_BASE,
				    FW_HANDOFF_SIZE);
	if (!bl2_tl) {
		ERROR("Failed to initialize Transfer List at 0x%lx\n",
		      (unsigned long)FW_HANDOFF_BASE);
	}
#endif
	security_setup();
	update_dt();

	/* TODO Initialize timer */
}

void qemu_bl2_sync_transfer_list(void)
{
#if TRANSFER_LIST
	transfer_list_update_checksum(bl2_tl);
#endif
}

void bl2_plat_arch_setup(void)
{
	const mmap_region_t bl_regions[] = {
		MAP_BL2_TOTAL,
		MAP_BL2_RO,
#if USE_COHERENT_MEM
		MAP_BL_COHERENT_RAM,
#endif
#if ENABLE_RME
		MAP_RMM_DRAM,
		MAP_GPT_L0_REGION,
		MAP_GPT_L1_REGION,
#endif
		{0}
	};

	setup_page_tables(bl_regions, plat_qemu_get_mmap());

#if ENABLE_RME
	/* BL2 runs in EL3 when RME enabled. */
	assert(is_feat_rme_present());
	enable_mmu_el3(0);
#else /* ENABLE_RME */

#ifdef __aarch64__
	enable_mmu_el1(0);
#else
	enable_mmu_svc_mon(0);
#endif
#endif /* ENABLE_RME */
}

/*******************************************************************************
 * Gets SPSR for BL32 entry
 ******************************************************************************/
static uint32_t qemu_get_spsr_for_bl32_entry(void)
{
#ifdef __aarch64__
	/*
	 * The Secure Payload Dispatcher service is responsible for
	 * setting the SPSR prior to entry into the BL3-2 image.
	 */
	return 0;
#else
	return SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE,
			   DISABLE_ALL_EXCEPTIONS);
#endif
}

/*******************************************************************************
 * Gets SPSR for BL33 entry
 ******************************************************************************/
static uint32_t qemu_get_spsr_for_bl33_entry(void)
{
	uint32_t spsr;
#ifdef __aarch64__
	unsigned int mode;

	/* Figure out what mode we enter the non-secure world in */
	mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
#else
	spsr = SPSR_MODE32(MODE32_svc,
			   plat_get_ns_image_entrypoint() & 0x1,
			   SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
#endif
	return spsr;
}

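/*
 * Parse the "arm,sp" node in TB_FW_CONFIG and register each secure
 * partition package (name, UUID and load address) with
 * qemu_io_register_sp_pkg() so the corresponding SP images can be
 * loaded.
 */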
#if defined(SPD_spmd) && SPMD_SPM_AT_SEL2
static int load_sps_from_tb_fw_config(struct image_info *image_info)
{
	void *dtb = (void *)image_info->image_base;
	const char *compat_str = "arm,sp";
	const struct fdt_property *uuid;
	uint32_t load_addr;
	const char *name;
	int sp_node;
	int node;

	node = fdt_node_offset_by_compatible(dtb, -1, compat_str);
	if (node < 0) {
		ERROR("Can't find %s in TB_FW_CONFIG\n", compat_str);
		return -1;
	}

	fdt_for_each_subnode(sp_node, dtb, node) {
		name = fdt_get_name(dtb, sp_node, NULL);
		if (name == NULL) {
			ERROR("Can't get name of node in dtb\n");
			return -1;
		}
		uuid = fdt_get_property(dtb, sp_node, "uuid", NULL);
		if (uuid == NULL) {
			ERROR("Can't find property uuid in node %s\n", name);
			return -1;
		}
		if (fdt_read_uint32(dtb, sp_node, "load-address",
				    &load_addr) < 0) {
			ERROR("Can't read load-address in node %s\n", name);
			return -1;
		}
		if (qemu_io_register_sp_pkg(name, uuid->data, load_addr) < 0) {
			return -1;
		}
	}

	return 0;
}
#endif /*defined(SPD_spmd) && SPMD_SPM_AT_SEL2*/

#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
static int handoff_pageable_part(uint64_t pagable_part)
{
#if TRANSFER_LIST
	struct transfer_list_entry *te;

	te = transfer_list_add(bl2_tl, TL_TAG_OPTEE_PAGABLE_PART,
			       sizeof(pagable_part), &pagable_part);
	if (!te) {
		INFO("Cannot add TE for pageable part\n");
		return -1;
	}
#endif
	return 0;
}
#endif

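/*
 * Per-image post-load hook: fills in entry point arguments and SPSR
 * values for BL31, BL32 and BL33, and handles the TB_FW/TOS_FW config
 * images when SPMD is enabled.
 */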
static int qemu_bl2_handle_post_image_load(unsigned int image_id)
{
	int err = 0;
	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
	bl_mem_params_node_t *pager_mem_params = NULL;
	bl_mem_params_node_t *paged_mem_params = NULL;
#endif
#if defined(SPD_spmd)
	bl_mem_params_node_t *bl32_mem_params = NULL;
#endif
#if TRANSFER_LIST
	struct transfer_list_header *ns_tl = NULL;
#endif

	assert(bl_mem_params);

	switch (image_id) {
#if TRANSFER_LIST
	case BL31_IMAGE_ID:
		/*
		 * arg0 is a bl_params_t reserved for bl31_early_platform_setup2;
		 * we just need arg1 and arg3 for BL31 to update the TL from S
		 * to NS memory before it exits.
		 */
#ifdef __aarch64__
		if (GET_RW(bl_mem_params->ep_info.spsr) == MODE_RW_64) {
			bl_mem_params->ep_info.args.arg1 =
				TRANSFER_LIST_HANDOFF_X1_VALUE(REGISTER_CONVENTION_VERSION);
		} else
#endif
		{
			bl_mem_params->ep_info.args.arg1 =
				TRANSFER_LIST_HANDOFF_R1_VALUE(REGISTER_CONVENTION_VERSION);
		}

		bl_mem_params->ep_info.args.arg3 = (uintptr_t)bl2_tl;
		break;
#endif
	case BL32_IMAGE_ID:
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
		pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
		assert(pager_mem_params);

		paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
		assert(paged_mem_params);

		err = parse_optee_header(&bl_mem_params->ep_info,
					 &pager_mem_params->image_info,
					 &paged_mem_params->image_info);
		if (err != 0) {
			WARN("OPTEE header parse error.\n");
		}

		/* add TL_TAG_OPTEE_PAGABLE_PART entry to the TL */
		if (handoff_pageable_part(bl_mem_params->ep_info.args.arg1)) {
			return -1;
		}
#endif

		INFO("Handoff to BL32\n");
		bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl32_entry();
		if (TRANSFER_LIST &&
		    transfer_list_set_handoff_args(bl2_tl,
						   &bl_mem_params->ep_info))
			break;

		INFO("Using default arguments\n");
#if defined(SPMC_OPTEE)
		/*
		 * Explicit zeroes to unused registers since they may have
		 * been populated by parse_optee_header() above.
		 *
		 * OP-TEE expects the system DTB in x2 and TOS_FW_CONFIG in x0;
		 * the latter is filled in below for TOS_FW_CONFIG_ID and
		 * applies to any other SPMC too.
		 */
		bl_mem_params->ep_info.args.arg2 = ARM_PRELOADED_DTB_BASE;
#elif defined(SPD_opteed)
		/*
		 * OP-TEE expects to receive the DTB address in x2.
		 * This will be copied into x2 by the dispatcher.
		 */
		bl_mem_params->ep_info.args.arg3 = ARM_PRELOADED_DTB_BASE;
#elif defined(AARCH32_SP_OPTEE)
		bl_mem_params->ep_info.args.arg0 =
			bl_mem_params->ep_info.args.arg1;
		bl_mem_params->ep_info.args.arg1 = 0;
		bl_mem_params->ep_info.args.arg2 = ARM_PRELOADED_DTB_BASE;
		bl_mem_params->ep_info.args.arg3 = 0;
#endif
		break;

	case BL33_IMAGE_ID:
#ifdef AARCH32_SP_OPTEE
		/* AArch32 only core: OP-TEE expects NSec EP in register LR */
		pager_mem_params = get_bl_mem_params_node(BL32_IMAGE_ID);
		assert(pager_mem_params);
		pager_mem_params->ep_info.lr_svc = bl_mem_params->ep_info.pc;
#endif

		bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl33_entry();

#if ARM_LINUX_KERNEL_AS_BL33
		/*
		 * According to the file ``Documentation/arm64/booting.txt`` of
		 * the Linux kernel tree, Linux expects the physical address of
		 * the device tree blob (DTB) in x0, while x1-x3 are reserved
		 * for future use and must be 0.
		 */
		bl_mem_params->ep_info.args.arg0 =
			(u_register_t)ARM_PRELOADED_DTB_BASE;
		bl_mem_params->ep_info.args.arg1 = 0U;
		bl_mem_params->ep_info.args.arg2 = 0U;
		bl_mem_params->ep_info.args.arg3 = 0U;
#elif TRANSFER_LIST
		if (bl2_tl) {
			/* Relocate the TL to pre-allocated NS memory */
			ns_tl = transfer_list_relocate(bl2_tl,
					(void *)(uintptr_t)FW_NS_HANDOFF_BASE,
					bl2_tl->max_size);
			if (!ns_tl) {
				ERROR("Relocate TL to 0x%lx failed\n",
				      (unsigned long)FW_NS_HANDOFF_BASE);
				return -1;
			}
		}

		INFO("Handoff to BL33\n");
		if (!transfer_list_set_handoff_args(ns_tl,
						    &bl_mem_params->ep_info)) {
			INFO("Invalid TL, fallback to default arguments\n");
			bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
		}
#else
		/* BL33 expects to receive the primary CPU MPID (through r0) */
		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
#endif /* ARM_LINUX_KERNEL_AS_BL33 */

		break;
#ifdef SPD_spmd
#if SPMD_SPM_AT_SEL2
	case TB_FW_CONFIG_ID:
		err = load_sps_from_tb_fw_config(&bl_mem_params->image_info);
		break;
#endif
	case TOS_FW_CONFIG_ID:
		/* An SPMC expects TOS_FW_CONFIG in x0/r0 */
		bl32_mem_params = get_bl_mem_params_node(BL32_IMAGE_ID);
		bl32_mem_params->ep_info.args.arg0 =
			bl_mem_params->image_info.image_base;
		break;
#endif
	default:
		/* Do nothing in default case */
		break;
	}

	return err;
}

/*******************************************************************************
 * This function can be used by the platforms to update/use image
 * information for given `image_id`.
 ******************************************************************************/
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
	return qemu_bl2_handle_post_image_load(image_id);
}

uintptr_t plat_get_ns_image_entrypoint(void)
{
	return NS_IMAGE_OFFSET;
}