/*
 * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <drivers/console.h>
#include <lib/debugfs.h>
#include <lib/extensions/ras.h>
#include <lib/fconf/fconf.h>
#include <lib/gpt_rme/gpt_rme.h>
#include <lib/mmio.h>
#if TRANSFER_LIST
#include <lib/transfer_list.h>
#endif
#include <lib/xlat_tables/xlat_tables_compat.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>
#include <platform_def.h>

/*
 * Firmware handoff transfer lists (only used when TRANSFER_LIST=1):
 * secure_tl is the secure-world list handed over by BL2 (received via arg3 in
 * bl31_early_platform_setup2), ns_tl is the non-secure list BL31 creates at
 * FW_NS_HANDOFF_BASE for BL33.
 */
static struct transfer_list_header *secure_tl __unused;
static struct transfer_list_header *ns_tl __unused;

/*
 * Placeholder variables for copying the arguments that have been passed to
 * BL31 from BL2.
 */
static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;
#if ENABLE_RME
static entry_point_info_t rmm_image_ep_info;
#endif

#if !RESET_TO_BL31
/*
 * Check that BL31_BASE is above ARM_FW_CONFIG_LIMIT. The reserved page
 * is required for SOC_FW_CONFIG/TOS_FW_CONFIG passed from BL2.
 */
#if TRANSFER_LIST
CASSERT(BL31_BASE >= PLAT_ARM_EL3_FW_HANDOFF_LIMIT, assert_bl31_base_overflows);
#else
CASSERT(BL31_BASE >= ARM_FW_CONFIG_LIMIT, assert_bl31_base_overflows);
#endif /* TRANSFER_LIST */
#endif /* RESET_TO_BL31 */

/* Weak definitions may be overridden in specific ARM standard platform */
#pragma weak bl31_early_platform_setup2
#pragma weak bl31_platform_setup
#pragma weak bl31_plat_arch_setup
#pragma weak bl31_plat_get_next_image_ep_info
#pragma weak bl31_plat_runtime_setup

/* RW mapping covering the whole of the BL31 image. */
#define MAP_BL31_TOTAL		MAP_REGION_FLAT(			\
					BL31_START,			\
					BL31_END - BL31_START,		\
					MT_MEMORY | MT_RW | EL3_PAS)
#if RECLAIM_INIT_CODE
IMPORT_SYM(unsigned long, __INIT_CODE_START__, BL_INIT_CODE_BASE);
IMPORT_SYM(unsigned long, __INIT_CODE_END__, BL_CODE_END_UNALIGNED);
IMPORT_SYM(unsigned long, __STACKS_END__, BL_STACKS_END_UNALIGNED);

/* Linker symbols rounded up to the next page boundary. */
#define	BL_INIT_CODE_END	((BL_CODE_END_UNALIGNED + PAGE_SIZE - 1) & \
					~(PAGE_SIZE - 1))
#define	BL_STACKS_END		((BL_STACKS_END_UNALIGNED + PAGE_SIZE - 1) & \
					~(PAGE_SIZE - 1))

/* Executable mapping of the reclaimable init code region. */
#define MAP_BL_INIT_CODE	MAP_REGION_FLAT(			\
					BL_INIT_CODE_BASE,		\
					BL_INIT_CODE_END		\
						- BL_INIT_CODE_BASE,	\
					MT_CODE | EL3_PAS)
#endif

#if SEPARATE_NOBITS_REGION
/* RW mapping of the separate NOBITS (zero-initialised data) region. */
#define MAP_BL31_NOBITS		MAP_REGION_FLAT(			\
					BL31_NOBITS_BASE,		\
					BL31_NOBITS_LIMIT		\
						- BL31_NOBITS_BASE,	\
					MT_MEMORY | MT_RW | EL3_PAS)

#endif
/*******************************************************************************
 * Return a pointer to the 'entry_point_info' structure of the next image for the
 * security state specified. BL33 corresponds to the non-secure image type
 * while BL32 corresponds to the secure image type. A NULL pointer is returned
 * if the image does not exist.
94 ******************************************************************************/ 95 struct entry_point_info *bl31_plat_get_next_image_ep_info(uint32_t type) 96 { 97 entry_point_info_t *next_image_info; 98 99 assert(sec_state_is_valid(type)); 100 if (type == NON_SECURE) { 101 #if TRANSFER_LIST && !RESET_TO_BL31 102 next_image_info = transfer_list_set_handoff_args( 103 ns_tl, &bl33_image_ep_info); 104 #else 105 next_image_info = &bl33_image_ep_info; 106 #endif 107 } 108 #if ENABLE_RME 109 else if (type == REALM) { 110 next_image_info = &rmm_image_ep_info; 111 } 112 #endif 113 else { 114 next_image_info = &bl32_image_ep_info; 115 } 116 117 /* 118 * None of the images on the ARM development platforms can have 0x0 119 * as the entrypoint 120 */ 121 if (next_image_info->pc) 122 return next_image_info; 123 else 124 return NULL; 125 } 126 127 /******************************************************************************* 128 * Perform any BL31 early platform setup common to ARM standard platforms. 129 * Here is an opportunity to copy parameters passed by the calling EL (S-EL1 130 * in BL2 & EL3 in BL1) before they are lost (potentially). This needs to be 131 * done before the MMU is initialized so that the memory layout can be used 132 * while creating page tables. BL2 has flushed this information to memory, so 133 * we are guaranteed to pick up good data. 
 ******************************************************************************/
#if TRANSFER_LIST
void __init arm_bl31_early_platform_setup(u_register_t arg0, u_register_t arg1,
					  u_register_t arg2, u_register_t arg3)
{
#if RESET_TO_BL31
	/* Populate entry point information for BL33 */
	SET_PARAM_HEAD(&bl33_image_ep_info, PARAM_EP, VERSION_1, 0);
	/*
	 * Tell BL31 where the non-trusted software image
	 * is located and the entry state information
	 */
	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();

	bl33_image_ep_info.spsr = arm_get_spsr_for_bl33_entry();
	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);

	/*
	 * Register convention for transfer-list handoff to BL33:
	 * x0 = DTB address, x1 = handoff signature/version, x3 = TL base.
	 */
	bl33_image_ep_info.args.arg0 =
		FW_NS_HANDOFF_BASE + ARM_PRELOADED_DTB_OFFSET;
	bl33_image_ep_info.args.arg1 =
		TRANSFER_LIST_HANDOFF_X1_VALUE(REGISTER_CONVENTION_VERSION);
	bl33_image_ep_info.args.arg3 = FW_NS_HANDOFF_BASE;
#else
	struct transfer_list_entry *te = NULL;
	struct entry_point_info *ep;

	/* BL2 hands over the secure transfer list base in arg3. */
	secure_tl = (struct transfer_list_header *)arg3;

	/*
	 * Populate the global entry point structures used to execute subsequent
	 * images. Walk every entry in the secure transfer list and copy out
	 * each 64-bit entry-point record according to its security state.
	 */
	while ((te = transfer_list_next(secure_tl, te)) != NULL) {
		ep = transfer_list_entry_data(te);

		if (te->tag_id == TL_TAG_EXEC_EP_INFO64) {
			switch (GET_SECURITY_STATE(ep->h.attr)) {
			case NON_SECURE:
				bl33_image_ep_info = *ep;
				break;
#if ENABLE_RME
			case REALM:
				rmm_image_ep_info = *ep;
				break;
#endif
			case SECURE:
				bl32_image_ep_info = *ep;
				break;
			default:
				ERROR("Unrecognized Image Security State %lu\n",
				      GET_SECURITY_STATE(ep->h.attr));
				panic();
			}
		}
	}
#endif /* RESET_TO_BL31 */
}
#else
void __init arm_bl31_early_platform_setup(void *from_bl2, uintptr_t soc_fw_config,
				uintptr_t hw_config, void *plat_params_from_bl2)
{
	/* Initialize the console to provide early debug support */
	arm_console_boot_init();

#if RESET_TO_BL31
	/* There are no parameters from BL2 if BL31 is a reset vector */
	assert(from_bl2 == NULL);
	assert(plat_params_from_bl2 == NULL);

# ifdef BL32_BASE
	/* Populate entry point information for BL32 */
	SET_PARAM_HEAD(&bl32_image_ep_info,
		       PARAM_EP,
		       VERSION_1,
		       0);
	SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
	bl32_image_ep_info.pc = BL32_BASE;
	bl32_image_ep_info.spsr = arm_get_spsr_for_bl32_entry();

#if defined(SPD_spmd)
	/*
	 * SPM (hafnium in secure world) expects SPM Core manifest base address
	 * in x0, which in !RESET_TO_BL31 case loaded after base of non shared
	 * SRAM(after 4KB offset of SRAM). But in RESET_TO_BL31 case all non
	 * shared SRAM is allocated to BL31, so to avoid overwriting of manifest
	 * keep it in the last page.
	 */
	bl32_image_ep_info.args.arg0 = ARM_TRUSTED_SRAM_BASE +
				PLAT_ARM_TRUSTED_SRAM_SIZE - PAGE_SIZE;
#endif

# endif /* BL32_BASE */

	/* Populate entry point information for BL33 */
	SET_PARAM_HEAD(&bl33_image_ep_info,
		       PARAM_EP,
		       VERSION_1,
		       0);
	/*
	 * Tell BL31 where the non-trusted software image
	 * is located and the entry state information
	 */
	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();

	bl33_image_ep_info.spsr = arm_get_spsr_for_bl33_entry();
	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);

#if ENABLE_RME
	/*
	 * Populate entry point information for RMM.
	 * Only PC needs to be set as other fields are determined by RMMD.
	 */
	rmm_image_ep_info.pc = RMM_BASE;
#endif /* ENABLE_RME */

#else /* RESET_TO_BL31 */

	/*
	 * In debug builds, we pass a special value in 'plat_params_from_bl2'
	 * to verify platform parameters from BL2 to BL31.
	 * In release builds, it's not used.
	 */
	assert(((unsigned long long)plat_params_from_bl2) ==
	       ARM_BL31_PLAT_PARAM_VAL);

	/*
	 * Check params passed from BL2 should not be NULL,
	 */
	bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
	assert(params_from_bl2 != NULL);
	assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
	assert(params_from_bl2->h.version >= VERSION_2);

	bl_params_node_t *bl_params = params_from_bl2->head;

	/*
	 * Copy BL33, BL32 and RMM (if present), entry point information.
	 * They are stored in Secure RAM, in BL2's address space.
	 */
	while (bl_params != NULL) {
		if (bl_params->image_id == BL32_IMAGE_ID) {
			bl32_image_ep_info = *bl_params->ep_info;
#if SPMC_AT_EL3
			/*
			 * Populate the BL32 image base, size and max limit in
			 * the entry point information, since there is no
			 * platform function to retrieve them in generic
			 * code. We choose arg2, arg3 and arg4 since the generic
			 * code uses arg1 for stashing the SP manifest size. The
			 * SPMC setup uses these arguments to update SP manifest
			 * with actual SP's base address and it size.
			 */
			bl32_image_ep_info.args.arg2 =
				bl_params->image_info->image_base;
			bl32_image_ep_info.args.arg3 =
				bl_params->image_info->image_size;
			bl32_image_ep_info.args.arg4 =
				bl_params->image_info->image_base +
				bl_params->image_info->image_max_size;
#endif
		}
#if ENABLE_RME
		else if (bl_params->image_id == RMM_IMAGE_ID) {
			rmm_image_ep_info = *bl_params->ep_info;
		}
#endif
		else if (bl_params->image_id == BL33_IMAGE_ID) {
			bl33_image_ep_info = *bl_params->ep_info;
		}

		bl_params = bl_params->next_params_info;
	}

	/* BL33 (and RMM when RME is enabled) must have been found above. */
	if (bl33_image_ep_info.pc == 0U)
		panic();
#if ENABLE_RME
	if (rmm_image_ep_info.pc == 0U)
		panic();
#endif
#endif /* RESET_TO_BL31 */

# if ARM_LINUX_KERNEL_AS_BL33
	/*
	 * According to the file ``Documentation/arm64/booting.txt`` of the
	 * Linux kernel tree, Linux expects the physical address of the device
	 * tree blob (DTB) in x0, while x1-x3 are reserved for future use and
	 * must be 0.
	 * Repurpose the option to load Hafnium hypervisor in the normal world.
	 * It expects its manifest address in x0. This is essentially the linux
	 * dts (passed to the primary VM) by adding 'hypervisor' and chosen
	 * nodes specifying the Hypervisor configuration.
	 */
#if RESET_TO_BL31
	bl33_image_ep_info.args.arg0 = (u_register_t)ARM_PRELOADED_DTB_BASE;
#else
	bl33_image_ep_info.args.arg0 = (u_register_t)hw_config;
#endif
	bl33_image_ep_info.args.arg1 = 0U;
	bl33_image_ep_info.args.arg2 = 0U;
	bl33_image_ep_info.args.arg3 = 0U;
# endif
}
#endif

/*******************************************************************************
 * Weak default implementation of the generic BL31 early setup hook. Forwards
 * the raw arguments from the previous boot stage to the ARM-common helper
 * (casting to the legacy pointer signature when TRANSFER_LIST is disabled) and
 * then brings up interconnect coherency for the primary CPU's cluster.
 ******************************************************************************/
void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
				u_register_t arg2, u_register_t arg3)
{
#if TRANSFER_LIST
	arm_bl31_early_platform_setup(arg0, arg1, arg2, arg3);
#else
	arm_bl31_early_platform_setup((void *)arg0, arg1, arg2, (void *)arg3);
#endif

	/*
	 * Initialize Interconnect for this cluster during cold boot.
	 * No need for locks as no other CPU is active.
	 */
	plat_arm_interconnect_init();

	/*
	 * Enable Interconnect coherency for the primary CPU's cluster.
	 * Earlier bootloader stages might already do this (e.g. Trusted
	 * Firmware's BL1 does it) but we can't assume so. There is no harm in
	 * executing this code twice anyway.
	 * Platform specific PSCI code will enable coherency for other
	 * clusters.
	 */
	plat_arm_interconnect_enter_coherency();
}

/*******************************************************************************
 * Perform any BL31 platform setup common to ARM standard platforms
 ******************************************************************************/
void arm_bl31_platform_setup(void)
{
	struct transfer_list_entry *te __unused;

#if TRANSFER_LIST && !RESET_TO_BL31
	/*
	 * Initialise the non-secure world tl, BL31 may modify the HW_CONFIG so
	 * defer copying it until later (see arm_bl31_plat_runtime_setup()).
	 */
	ns_tl = transfer_list_init((void *)FW_NS_HANDOFF_BASE,
				   PLAT_ARM_FW_HANDOFF_SIZE);

	if (ns_tl == NULL) {
		ERROR("Non-secure transfer list initialisation failed!");
		panic();
	}

#if !RESET_TO_BL2
	/* The HW_CONFIG FDT is expected in the secure transfer list. */
	te = transfer_list_find(secure_tl, TL_TAG_FDT);
	assert(te != NULL);

	fconf_populate("HW_CONFIG", (uintptr_t)transfer_list_entry_data(te));
#endif /* !RESET_TO_BL2 */
#endif /* TRANSFER_LIST && !RESET_TO_BL31 */

	/* Initialize the GIC driver, cpu and distributor interfaces */
	plat_arm_gic_driver_init();
	plat_arm_gic_init();

#if RESET_TO_BL31
	/*
	 * Do initial security configuration to allow DRAM/device access
	 * (if earlier BL has not already done so).
	 */
	plat_arm_security_setup();

#if defined(PLAT_ARM_MEM_PROT_ADDR)
	arm_nor_psci_do_dyn_mem_protect();
#endif /* PLAT_ARM_MEM_PROT_ADDR */

#endif /* RESET_TO_BL31 */

	/* Enable and initialize the System level generic timer */
	mmio_write_32(ARM_SYS_CNTCTL_BASE + CNTCR_OFF,
		      CNTCR_FCREQ(0U) | CNTCR_EN);

	/* Allow access to the System counter timer module */
	arm_configure_sys_timer();

	/* Initialize power controller before setting up topology */
	plat_arm_pwrc_setup();

#if ENABLE_FEAT_RAS && FFH_SUPPORT
	ras_init();
#endif

#if USE_DEBUGFS
	debugfs_init();
#endif /* USE_DEBUGFS */
}

/*******************************************************************************
 * Perform any BL31 platform runtime setup prior to BL31 exit common to ARM
 * standard platforms
 ******************************************************************************/
void arm_bl31_plat_runtime_setup(void)
{
	struct transfer_list_entry *te __unused;
	/* Initialize the runtime console */
	arm_console_runtime_init();

#if TRANSFER_LIST && !RESET_TO_BL31
	/* Copy the (possibly updated) HW_CONFIG FDT from the secure TL. */
	te = transfer_list_find(secure_tl, TL_TAG_FDT);
	assert(te != NULL);

	te = transfer_list_add(ns_tl, TL_TAG_FDT, te->data_size,
			       transfer_list_entry_data(te));
	assert(te != NULL);

	/*
	 * We assume BL31 has added all TE's required by BL33 at this stage, ensure
	 * that data is visible to all observers by performing a flush operation, so
	 * they can access the updated data even if caching is not enabled.
	 */
	flush_dcache_range((uintptr_t)ns_tl, ns_tl->size);
#endif /* TRANSFER_LIST && !RESET_TO_BL31 */

#if RECLAIM_INIT_CODE
	arm_free_init_memory();
#endif

#if PLAT_RO_XLAT_TABLES
	arm_xlat_make_tables_readonly();
#endif
}

#if RECLAIM_INIT_CODE
/*
 * Make memory for image boot time code RW to reclaim it as stack for the
 * secondary cores, or RO where it cannot be reclaimed:
 *
 *            |-------- INIT SECTION --------|
 *  -----------------------------------------
 *  |  CORE 0 |  CORE 1 |  CORE 2 |  EXTRA  |
 *  |  STACK  |  STACK  |  STACK  |  SPACE  |
 *  -----------------------------------------
 *             <------------------> <------->
 *                MAKE RW AND XN       MAKE
 *                  FOR STACKS       RO AND XN
 */
void arm_free_init_memory(void)
{
	int ret = 0;

	if (BL_STACKS_END < BL_INIT_CODE_END) {
		/* Reclaim some of the init section as stack if possible. */
		if (BL_INIT_CODE_BASE < BL_STACKS_END) {
			ret |= xlat_change_mem_attributes(BL_INIT_CODE_BASE,
					BL_STACKS_END - BL_INIT_CODE_BASE,
					MT_RW_DATA);
		}
		/* Make the rest of the init section read-only. */
		ret |= xlat_change_mem_attributes(BL_STACKS_END,
				BL_INIT_CODE_END - BL_STACKS_END,
				MT_RO_DATA);
	} else {
		/* The stacks cover the init section, so reclaim it all. */
		ret |= xlat_change_mem_attributes(BL_INIT_CODE_BASE,
				BL_INIT_CODE_END - BL_INIT_CODE_BASE,
				MT_RW_DATA);
	}

	if (ret != 0) {
		ERROR("Could not reclaim initialization code");
		panic();
	}
}
#endif

/* Weak default: delegate platform setup to the ARM-common implementation. */
void __init bl31_platform_setup(void)
{
	arm_bl31_platform_setup();
}

/* Weak default: delegate runtime setup to the ARM-common implementation. */
void bl31_plat_runtime_setup(void)
{
	arm_bl31_plat_runtime_setup();
}

/*******************************************************************************
 * Perform the very early platform specific architectural setup shared between
 * ARM standard platforms. This only does basic initialization. Later
 * architectural setup (bl31_arch_setup()) does not do anything platform
 * specific.
 ******************************************************************************/
void __init arm_bl31_plat_arch_setup(void)
{
	/* Memory regions to map before turning on the MMU. */
	const mmap_region_t bl_regions[] = {
		MAP_BL31_TOTAL,
#if ENABLE_RME
		ARM_MAP_L0_GPT_REGION,
#endif
#if RECLAIM_INIT_CODE
		MAP_BL_INIT_CODE,
#endif
#if SEPARATE_NOBITS_REGION
		MAP_BL31_NOBITS,
#endif
		ARM_MAP_BL_RO,
#if USE_ROMLIB
		ARM_MAP_ROMLIB_CODE,
		ARM_MAP_ROMLIB_DATA,
#endif
#if USE_COHERENT_MEM
		ARM_MAP_BL_COHERENT_RAM,
#endif
		{0}
	};

	setup_page_tables(bl_regions, plat_arm_get_mmap());

	enable_mmu_el3(0);

#if ENABLE_RME
#if RESET_TO_BL31
	/* initialize GPT only when RME is enabled. */
	assert(is_feat_rme_present());

	/* Initialise and enable granule protection after MMU. */
	arm_gpt_setup();
#endif /* RESET_TO_BL31 */
	/*
	 * Initialise Granule Protection library and enable GPC for the primary
	 * processor. The tables have already been initialized by a previous BL
	 * stage, so there is no need to provide any PAS here. This function
	 * sets up pointers to those tables.
	 */
	if (gpt_runtime_init() < 0) {
		ERROR("gpt_runtime_init() failed!\n");
		panic();
	}
#endif /* ENABLE_RME */

	arm_setup_romlib();
}

/* Weak default: delegate architectural setup to the ARM-common version. */
void __init bl31_plat_arch_setup(void)
{
	arm_bl31_plat_arch_setup();
}