/*
 * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <drivers/console.h>
#include <lib/debugfs.h>
#include <lib/extensions/ras.h>
#include <lib/fconf/fconf.h>
#include <lib/gpt_rme/gpt_rme.h>
#include <lib/mmio.h>
#if TRANSFER_LIST
#include <lib/transfer_list.h>
#endif
#include <lib/xlat_tables/xlat_tables_compat.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>
#include <platform_def.h>

static struct transfer_list_header *secure_tl __unused;
static struct transfer_list_header *ns_tl __unused;

/*
 * Placeholder variables for copying the arguments that have been passed to
 * BL31 from BL2.
 */
static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;
#if ENABLE_RME
static entry_point_info_t rmm_image_ep_info;
#endif

#if !RESET_TO_BL31
/*
 * Check that BL31_BASE is above ARM_FW_CONFIG_LIMIT. The reserved page
 * is required for SOC_FW_CONFIG/TOS_FW_CONFIG passed from BL2.
 */
#if TRANSFER_LIST
CASSERT(BL31_BASE >= PLAT_ARM_EL3_FW_HANDOFF_LIMIT, assert_bl31_base_overflows);
#else
CASSERT(BL31_BASE >= ARM_FW_CONFIG_LIMIT, assert_bl31_base_overflows);
#endif /* TRANSFER_LIST */
#endif /* RESET_TO_BL31 */

/* Weak definitions may be overridden in specific ARM standard platform */
#pragma weak bl31_early_platform_setup2
#pragma weak bl31_platform_setup
#pragma weak bl31_plat_arch_setup
#pragma weak bl31_plat_get_next_image_ep_info
#pragma weak bl31_plat_runtime_setup

#define MAP_BL31_TOTAL		MAP_REGION_FLAT(			\
					BL31_START,			\
					BL31_END - BL31_START,		\
					MT_MEMORY | MT_RW | EL3_PAS)
#if RECLAIM_INIT_CODE
IMPORT_SYM(unsigned long, __INIT_CODE_START__, BL_INIT_CODE_BASE);
IMPORT_SYM(unsigned long, __INIT_CODE_END__, BL_CODE_END_UNALIGNED);
IMPORT_SYM(unsigned long, __STACKS_END__, BL_STACKS_END_UNALIGNED);

#define	BL_INIT_CODE_END	((BL_CODE_END_UNALIGNED + PAGE_SIZE - 1) & \
					~(PAGE_SIZE - 1))
#define	BL_STACKS_END		((BL_STACKS_END_UNALIGNED + PAGE_SIZE - 1) & \
					~(PAGE_SIZE - 1))

#define MAP_BL_INIT_CODE	MAP_REGION_FLAT(			\
					BL_INIT_CODE_BASE,		\
					BL_INIT_CODE_END		\
						- BL_INIT_CODE_BASE,	\
					MT_CODE | EL3_PAS)
#endif

#if SEPARATE_NOBITS_REGION
#define MAP_BL31_NOBITS		MAP_REGION_FLAT(			\
					BL31_NOBITS_BASE,		\
					BL31_NOBITS_LIMIT		\
						- BL31_NOBITS_BASE,	\
					MT_MEMORY | MT_RW | EL3_PAS)

#endif
/*******************************************************************************
 * Return a pointer to the 'entry_point_info' structure of the next image for
 * the security state specified. BL33 corresponds to the non-secure image type
 * while BL32 corresponds to the secure image type. A NULL pointer is returned
 * if the image does not exist.
 ******************************************************************************/
struct entry_point_info *bl31_plat_get_next_image_ep_info(uint32_t type)
{
	entry_point_info_t *next_image_info;

	assert(sec_state_is_valid(type));
	if (type == NON_SECURE) {
#if TRANSFER_LIST && !RESET_TO_BL31
		next_image_info = transfer_list_set_handoff_args(
			ns_tl, &bl33_image_ep_info);
#else
		next_image_info = &bl33_image_ep_info;
#endif
	}
#if ENABLE_RME
	else if (type == REALM) {
		next_image_info = &rmm_image_ep_info;
	}
#endif
	else {
		next_image_info = &bl32_image_ep_info;
	}

	/*
	 * None of the images on the ARM development platforms can have 0x0
	 * as the entrypoint
	 */
	if (next_image_info->pc)
		return next_image_info;
	else
		return NULL;
}

/*******************************************************************************
 * Perform any BL31 early platform setup common to ARM standard platforms.
 * Here is an opportunity to copy parameters passed by the calling EL (S-EL1
 * in BL2 & EL3 in BL1) before they are lost (potentially). This needs to be
 * done before the MMU is initialized so that the memory layout can be used
 * while creating page tables. BL2 has flushed this information to memory, so
 * we are guaranteed to pick up good data.
 ******************************************************************************/
#if TRANSFER_LIST
void __init arm_bl31_early_platform_setup(u_register_t arg0, u_register_t arg1,
					  u_register_t arg2, u_register_t arg3)
{
#if RESET_TO_BL31
	/* Populate entry point information for BL33 */
	SET_PARAM_HEAD(&bl33_image_ep_info, PARAM_EP, VERSION_1, 0);
	/*
	 * Tell BL31 where the non-trusted software image
	 * is located and the entry state information
	 */
	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();

	bl33_image_ep_info.spsr = arm_get_spsr_for_bl33_entry();
	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);

	bl33_image_ep_info.args.arg0 = PLAT_ARM_TRANSFER_LIST_DTB_OFFSET;
	bl33_image_ep_info.args.arg1 =
		TRANSFER_LIST_HANDOFF_X1_VALUE(REGISTER_CONVENTION_VERSION);
	bl33_image_ep_info.args.arg3 = FW_NS_HANDOFF_BASE;
#else
	struct transfer_list_entry *te = NULL;
	struct entry_point_info *ep;

	secure_tl = (struct transfer_list_header *)arg3;

	/*
	 * Populate the global entry point structures used to execute subsequent
	 * images.
	 */
	while ((te = transfer_list_next(secure_tl, te)) != NULL) {
		ep = transfer_list_entry_data(te);

		if (te->tag_id == TL_TAG_EXEC_EP_INFO64) {
			switch (GET_SECURITY_STATE(ep->h.attr)) {
			case NON_SECURE:
				bl33_image_ep_info = *ep;
				break;
#if ENABLE_RME
			case REALM:
				rmm_image_ep_info = *ep;
				break;
#endif
			case SECURE:
				bl32_image_ep_info = *ep;
				break;
			default:
				ERROR("Unrecognized Image Security State %lu\n",
				      GET_SECURITY_STATE(ep->h.attr));
				panic();
			}
		}
	}
#endif /* RESET_TO_BL31 */
}
#else
void __init arm_bl31_early_platform_setup(void *from_bl2, uintptr_t soc_fw_config,
					  uintptr_t hw_config, void *plat_params_from_bl2)
{
	/* Initialize the console to provide early debug support */
	arm_console_boot_init();

#if RESET_TO_BL31
	/* There are no parameters from BL2 if BL31 is a reset vector */
	assert(from_bl2 == NULL);
	assert(plat_params_from_bl2 == NULL);

# ifdef BL32_BASE
	/* Populate entry point information for BL32 */
	SET_PARAM_HEAD(&bl32_image_ep_info,
		       PARAM_EP,
		       VERSION_1,
		       0);
	SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
	bl32_image_ep_info.pc = BL32_BASE;
	bl32_image_ep_info.spsr = arm_get_spsr_for_bl32_entry();

#if defined(SPD_spmd)
	/*
	 * SPM (Hafnium in the secure world) expects the SPM Core manifest base
	 * address in x0. In the !RESET_TO_BL31 case the manifest is loaded just
	 * after the base of non-shared SRAM (at a 4KB offset into SRAM). In the
	 * RESET_TO_BL31 case all non-shared SRAM is allocated to BL31, so keep
	 * the manifest in the last page to avoid overwriting it.
	 */
	bl32_image_ep_info.args.arg0 = ARM_TRUSTED_SRAM_BASE +
				PLAT_ARM_TRUSTED_SRAM_SIZE - PAGE_SIZE;
#endif

# endif /* BL32_BASE */

	/* Populate entry point information for BL33 */
	SET_PARAM_HEAD(&bl33_image_ep_info,
		       PARAM_EP,
		       VERSION_1,
		       0);
	/*
	 * Tell BL31 where the non-trusted software image
	 * is located and the entry state information
	 */
	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();

	bl33_image_ep_info.spsr = arm_get_spsr_for_bl33_entry();
	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);

#if ENABLE_RME
	/*
	 * Populate entry point information for RMM.
	 * Only PC needs to be set as other fields are determined by RMMD.
	 */
	rmm_image_ep_info.pc = RMM_BASE;
#endif /* ENABLE_RME */

#else /* RESET_TO_BL31 */

	/*
	 * In debug builds, we pass a special value in 'plat_params_from_bl2'
	 * to verify platform parameters from BL2 to BL31.
	 * In release builds, it's not used.
	 */
	assert(((unsigned long long)plat_params_from_bl2) ==
	       ARM_BL31_PLAT_PARAM_VAL);

	/*
	 * Check that the params passed from BL2 are not NULL.
	 */
	bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
	assert(params_from_bl2 != NULL);
	assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
	assert(params_from_bl2->h.version >= VERSION_2);

	bl_params_node_t *bl_params = params_from_bl2->head;

	/*
	 * Copy the BL33, BL32 and RMM (if present) entry point information.
	 * They are stored in Secure RAM, in BL2's address space.
	 */
	while (bl_params != NULL) {
		if (bl_params->image_id == BL32_IMAGE_ID) {
			bl32_image_ep_info = *bl_params->ep_info;
#if SPMC_AT_EL3
			/*
			 * Populate the BL32 image base, size and max limit in
			 * the entry point information, since there is no
			 * platform function to retrieve them in generic
			 * code. We choose arg2, arg3 and arg4 since the generic
			 * code uses arg1 for stashing the SP manifest size. The
			 * SPMC setup uses these arguments to update the SP
			 * manifest with the actual SP base address and size.
			 */
			bl32_image_ep_info.args.arg2 =
				bl_params->image_info->image_base;
			bl32_image_ep_info.args.arg3 =
				bl_params->image_info->image_size;
			bl32_image_ep_info.args.arg4 =
				bl_params->image_info->image_base +
				bl_params->image_info->image_max_size;
#endif
		}
#if ENABLE_RME
		else if (bl_params->image_id == RMM_IMAGE_ID) {
			rmm_image_ep_info = *bl_params->ep_info;
		}
#endif
		else if (bl_params->image_id == BL33_IMAGE_ID) {
			bl33_image_ep_info = *bl_params->ep_info;
		}

		bl_params = bl_params->next_params_info;
	}

	if (bl33_image_ep_info.pc == 0U)
		panic();
#if ENABLE_RME
	if (rmm_image_ep_info.pc == 0U)
		panic();
#endif
#endif /* RESET_TO_BL31 */

# if ARM_LINUX_KERNEL_AS_BL33
	/*
	 * According to the file ``Documentation/arm64/booting.txt`` of the
	 * Linux kernel tree, Linux expects the physical address of the device
	 * tree blob (DTB) in x0, while x1-x3 are reserved for future use and
	 * must be 0.
	 * Repurpose the option to load the Hafnium hypervisor in the normal
	 * world. It expects its manifest address in x0. The manifest is
	 * essentially the Linux DTS (passed to the primary VM) extended with
	 * 'hypervisor' and 'chosen' nodes specifying the hypervisor
	 * configuration.
	 */
#if RESET_TO_BL31
	bl33_image_ep_info.args.arg0 = (u_register_t)ARM_PRELOADED_DTB_BASE;
#else
	bl33_image_ep_info.args.arg0 = (u_register_t)hw_config;
#endif
	bl33_image_ep_info.args.arg1 = 0U;
	bl33_image_ep_info.args.arg2 = 0U;
	bl33_image_ep_info.args.arg3 = 0U;
# endif
}
#endif

/*******************************************************************************
 * Generic BL31 early platform setup hook. Forward the handoff arguments to the
 * Arm common implementation, then bring up interconnect coherency for the
 * primary CPU's cluster.
 ******************************************************************************/
void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
				u_register_t arg2, u_register_t arg3)
{
#if TRANSFER_LIST
	arm_bl31_early_platform_setup(arg0, arg1, arg2, arg3);
#else
	arm_bl31_early_platform_setup((void *)arg0, arg1, arg2, (void *)arg3);
#endif

	/*
	 * Initialize Interconnect for this cluster during cold boot.
	 * No need for locks as no other CPU is active.
	 */
	plat_arm_interconnect_init();

	/*
	 * Enable Interconnect coherency for the primary CPU's cluster.
	 * Earlier bootloader stages might already do this (e.g. Trusted
	 * Firmware's BL1 does it) but we can't assume so. There is no harm in
	 * executing this code twice anyway.
	 * Platform specific PSCI code will enable coherency for other
	 * clusters.
	 */
	plat_arm_interconnect_enter_coherency();
}

/*******************************************************************************
 * Perform any BL31 platform setup common to ARM standard platforms
 ******************************************************************************/
void arm_bl31_platform_setup(void)
{
	struct transfer_list_entry *te __unused;

#if TRANSFER_LIST && !RESET_TO_BL31
	/*
	 * Initialise the non-secure world transfer list; BL31 may modify the
	 * HW_CONFIG, so defer copying it until later.
	 */
	ns_tl = transfer_list_init((void *)FW_NS_HANDOFF_BASE,
				   PLAT_ARM_FW_HANDOFF_SIZE);

	if (ns_tl == NULL) {
		ERROR("Non-secure transfer list initialisation failed!");
		panic();
	}

#if !RESET_TO_BL2
	te = transfer_list_find(secure_tl, TL_TAG_FDT);
	assert(te != NULL);

	fconf_populate("HW_CONFIG", (uintptr_t)transfer_list_entry_data(te));
#endif /* !RESET_TO_BL2 */
#endif /* TRANSFER_LIST && !RESET_TO_BL31 */

	/* Initialize the GIC driver, cpu and distributor interfaces */
	plat_arm_gic_driver_init();
	plat_arm_gic_init();

#if RESET_TO_BL31
	/*
	 * Do initial security configuration to allow DRAM/device access
	 * (if earlier BL has not already done so).
	 */
	plat_arm_security_setup();

#if defined(PLAT_ARM_MEM_PROT_ADDR)
	arm_nor_psci_do_dyn_mem_protect();
#endif /* PLAT_ARM_MEM_PROT_ADDR */

#endif /* RESET_TO_BL31 */

	/* Enable and initialize the System level generic timer */
	mmio_write_32(ARM_SYS_CNTCTL_BASE + CNTCR_OFF,
		      CNTCR_FCREQ(0U) | CNTCR_EN);

	/* Allow access to the System counter timer module */
	arm_configure_sys_timer();

	/* Initialize power controller before setting up topology */
	plat_arm_pwrc_setup();

#if ENABLE_FEAT_RAS && FFH_SUPPORT
	ras_init();
#endif

#if USE_DEBUGFS
	debugfs_init();
#endif /* USE_DEBUGFS */
}

/*******************************************************************************
 * Perform any BL31 platform runtime setup prior to BL31 exit common to ARM
 * standard platforms
 ******************************************************************************/
void arm_bl31_plat_runtime_setup(void)
{
	struct transfer_list_entry *te __unused;

	/* Initialize the runtime console */
	arm_console_runtime_init();

#if TRANSFER_LIST && !RESET_TO_BL31
	te = transfer_list_find(secure_tl, TL_TAG_FDT);
	assert(te != NULL);

	te = transfer_list_add(ns_tl, TL_TAG_FDT, te->data_size,
			       transfer_list_entry_data(te));
	assert(te != NULL);

	/*
	 * BL31 is assumed to have added all the TEs required by BL33 at this
	 * stage. Flush the transfer list so that the updated data is visible
	 * to all observers, even if they access it with caching disabled.
	 */
	flush_dcache_range((uintptr_t)ns_tl, ns_tl->size);
#endif /* TRANSFER_LIST && !RESET_TO_BL31 */

#if RECLAIM_INIT_CODE
	arm_free_init_memory();
#endif

#if PLAT_RO_XLAT_TABLES
	arm_xlat_make_tables_readonly();
#endif
}

#if RECLAIM_INIT_CODE
/*
 * Make memory for image boot time code RW to reclaim it as stack for the
 * secondary cores, or RO where it cannot be reclaimed:
 *
 *          |-------- INIT SECTION --------|
 *  -----------------------------------------
 *  | CORE 0 | CORE 1 | CORE 2 |   EXTRA    |
 *  | STACK  | STACK  | STACK  |   SPACE    |
 *  -----------------------------------------
 *   <------------------------> <---------->
 *        MAKE RW AND XN            MAKE
 *          FOR STACKS            RO AND XN
 */
void arm_free_init_memory(void)
{
	int ret = 0;

	if (BL_STACKS_END < BL_INIT_CODE_END) {
		/* Reclaim some of the init section as stack if possible. */
		if (BL_INIT_CODE_BASE < BL_STACKS_END) {
			ret |= xlat_change_mem_attributes(BL_INIT_CODE_BASE,
					BL_STACKS_END - BL_INIT_CODE_BASE,
					MT_RW_DATA);
		}

		/* Make the rest of the init section read-only. */
		ret |= xlat_change_mem_attributes(BL_STACKS_END,
				BL_INIT_CODE_END - BL_STACKS_END,
				MT_RO_DATA);
	} else {
		/* The stacks cover the init section, so reclaim it all. */
		ret |= xlat_change_mem_attributes(BL_INIT_CODE_BASE,
				BL_INIT_CODE_END - BL_INIT_CODE_BASE,
				MT_RW_DATA);
	}

	if (ret != 0) {
		ERROR("Could not reclaim initialization code");
		panic();
	}
}
#endif

void __init bl31_platform_setup(void)
{
	arm_bl31_platform_setup();
}

void bl31_plat_runtime_setup(void)
{
	arm_bl31_plat_runtime_setup();
}

/*******************************************************************************
 * Perform the very early platform specific architectural setup shared between
 * ARM standard platforms. This only does basic initialization. Later
 * architectural setup (bl31_arch_setup()) does not do anything platform
 * specific.
 ******************************************************************************/
void __init arm_bl31_plat_arch_setup(void)
{
	const mmap_region_t bl_regions[] = {
		MAP_BL31_TOTAL,
#if ENABLE_RME
		ARM_MAP_L0_GPT_REGION,
#endif
#if RECLAIM_INIT_CODE
		MAP_BL_INIT_CODE,
#endif
#if SEPARATE_NOBITS_REGION
		MAP_BL31_NOBITS,
#endif
		ARM_MAP_BL_RO,
#if USE_ROMLIB
		ARM_MAP_ROMLIB_CODE,
		ARM_MAP_ROMLIB_DATA,
#endif
#if USE_COHERENT_MEM
		ARM_MAP_BL_COHERENT_RAM,
#endif
		{0}
	};

	setup_page_tables(bl_regions, plat_arm_get_mmap());

	enable_mmu_el3(0);

#if ENABLE_RME
	/*
	 * Initialise Granule Protection library and enable GPC for the primary
	 * processor. The tables have already been initialized by a previous BL
	 * stage, so there is no need to provide any PAS here. This function
	 * sets up pointers to those tables.
	 */
	if (gpt_runtime_init() < 0) {
		ERROR("gpt_runtime_init() failed!\n");
		panic();
	}
#endif /* ENABLE_RME */

	arm_setup_romlib();
}

void __init bl31_plat_arch_setup(void)
{
	arm_bl31_plat_arch_setup();
}