/*
 * Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <drivers/console.h>
#include <lib/debugfs.h>
#include <lib/extensions/ras.h>
#include <lib/fconf/fconf.h>
#include <lib/gpt_rme/gpt_rme.h>
#include <lib/mmio.h>
#include <services/lfa_svc.h>
#if TRANSFER_LIST
#include <transfer_list.h>
#endif
#include <lib/xlat_tables/xlat_tables_compat.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>
#include <platform_def.h>

/* Transfer lists used for the firmware handoff from BL2 (secure) and to BL33 (non-secure) */
struct transfer_list_header *secure_tl;
struct transfer_list_header *ns_tl __unused;

#if USE_GIC_DRIVER == 3
const uintptr_t gicr_base_addrs[2] = {
	PLAT_ARM_GICR_BASE,	/* GICR Base address of the primary CPU */
	0U			/* Zero Termination */
};
#endif

/*
 * Placeholder variables for copying the arguments that have been passed to
 * BL31 from BL2.
 */
static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;

#if ENABLE_RME
static entry_point_info_t rmm_image_ep_info;
#if (RME_GPT_BITLOCK_BLOCK == 0)
#define BITLOCK_BASE	UL(0)
#define BITLOCK_SIZE	UL(0)
#else
/*
 * Number of bitlock_t entries in bitlocks array for PLAT_ARM_PPS
 * with RME_GPT_BITLOCK_BLOCK * 512MB per bitlock.
 */
#if (PLAT_ARM_PPS > (RME_GPT_BITLOCK_BLOCK * SZ_512M * UL(8)))
#define BITLOCKS_NUM	(PLAT_ARM_PPS) /			\
			(RME_GPT_BITLOCK_BLOCK * SZ_512M * UL(8))
#else
#define BITLOCKS_NUM	U(1)
#endif
/*
 * Bitlocks array
 */
static bitlock_t gpt_bitlock[BITLOCKS_NUM];
#define BITLOCK_BASE	(uintptr_t)gpt_bitlock
#define BITLOCK_SIZE	sizeof(gpt_bitlock)
#endif /* RME_GPT_BITLOCK_BLOCK */
#endif /* ENABLE_RME */

#if !RESET_TO_BL31
/*
 * Check that BL31_BASE is above ARM_FW_CONFIG_LIMIT. The reserved page
 * is required for SOC_FW_CONFIG/TOS_FW_CONFIG passed from BL2.
 */
#if TRANSFER_LIST
CASSERT(BL31_BASE >= PLAT_ARM_EL3_FW_HANDOFF_LIMIT, assert_bl31_base_overflows);
#else
CASSERT(BL31_BASE >= ARM_FW_CONFIG_LIMIT, assert_bl31_base_overflows);
#endif /* TRANSFER_LIST */
#endif /* RESET_TO_BL31 */

/* Weak definitions may be overridden in specific ARM standard platform */
#pragma weak bl31_early_platform_setup2
#pragma weak bl31_platform_setup
#pragma weak bl31_plat_arch_setup
#pragma weak bl31_plat_get_next_image_ep_info
#pragma weak bl31_plat_runtime_setup

#define MAP_BL31_TOTAL		MAP_REGION_FLAT(			\
					BL31_START,			\
					BL31_END - BL31_START,		\
					MT_MEMORY | MT_RW | EL3_PAS)
#if RECLAIM_INIT_CODE
IMPORT_SYM(unsigned long, __INIT_CODE_START__, BL_INIT_CODE_BASE);
IMPORT_SYM(unsigned long, __INIT_CODE_END__, BL_CODE_END_UNALIGNED);
IMPORT_SYM(unsigned long, __STACKS_END__, BL_STACKS_END_UNALIGNED);

#define	BL_INIT_CODE_END	((BL_CODE_END_UNALIGNED + PAGE_SIZE - 1) & \
					~(PAGE_SIZE - 1))
#define	BL_STACKS_END		((BL_STACKS_END_UNALIGNED + PAGE_SIZE - 1) & \
					~(PAGE_SIZE - 1))

#define MAP_BL_INIT_CODE	MAP_REGION_FLAT(			\
					BL_INIT_CODE_BASE,		\
					BL_INIT_CODE_END		\
						- BL_INIT_CODE_BASE,	\
					MT_CODE | EL3_PAS)
#endif

#if SEPARATE_NOBITS_REGION
#define MAP_BL31_NOBITS		MAP_REGION_FLAT(			\
					BL31_NOBITS_BASE,		\
					BL31_NOBITS_LIMIT		\
						- BL31_NOBITS_BASE,	\
					MT_MEMORY | MT_RW | EL3_PAS)

#endif
/*******************************************************************************
 * Return a pointer to the 'entry_point_info' structure of the next image for
 * the security state specified. BL33 corresponds to the non-secure image type
 * while BL32 corresponds to the secure image type. A NULL pointer is returned
 * if the image does not exist.
 ******************************************************************************/
struct entry_point_info *bl31_plat_get_next_image_ep_info(uint32_t type)
{
	entry_point_info_t *next_image_info;

	assert(sec_state_is_valid(type));
	if (type == NON_SECURE) {
#if TRANSFER_LIST && !RESET_TO_BL31
		next_image_info = transfer_list_set_handoff_args(
			ns_tl, &bl33_image_ep_info);
#else
		next_image_info = &bl33_image_ep_info;
#endif
	}
#if ENABLE_RME
	else if (type == REALM) {
#if LFA_SUPPORT
		if (lfa_is_prime_complete(RMM_IMAGE_ID)) {
			rmm_image_ep_info.pc =
				RMM_BASE + RMM_BANK_SIZE;
		}
#endif /* LFA_SUPPORT */
		next_image_info = &rmm_image_ep_info;
	}
#endif
	else {
#if TRANSFER_LIST && !RESET_TO_BL31
		next_image_info = transfer_list_set_handoff_args(
			secure_tl, &bl32_image_ep_info);
#else
		next_image_info = &bl32_image_ep_info;
#endif
	}

	/*
	 * None of the images on the ARM development platforms can have 0x0
	 * as the entrypoint
	 */
	if (next_image_info->pc)
		return next_image_info;
	else
		return NULL;
}

/*******************************************************************************
 * Perform any BL31 early platform setup common to ARM standard platforms.
 * Here is an opportunity to copy parameters passed by the calling EL (S-EL1
 * in BL2 & EL3 in BL1) before they are lost (potentially). This needs to be
 * done before the MMU is initialized so that the memory layout can be used
 * while creating page tables. BL2 has flushed this information to memory, so
 * we are guaranteed to pick up good data.
 ******************************************************************************/
void __init arm_bl31_early_platform_setup(u_register_t arg0, u_register_t arg1,
					  u_register_t arg2, u_register_t arg3)
{
#if TRANSFER_LIST
#if RESET_TO_BL31
	/* Populate entry point information for BL33 */
	SET_PARAM_HEAD(&bl33_image_ep_info, PARAM_EP, VERSION_1, 0);
	/*
	 * Tell BL31 where the non-trusted software image
	 * is located and the entry state information
	 */
	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();

	bl33_image_ep_info.spsr = arm_get_spsr(BL33_IMAGE_ID);
	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);

	bl33_image_ep_info.args.arg0 = PLAT_ARM_TRANSFER_LIST_DTB_OFFSET;
	bl33_image_ep_info.args.arg1 =
		TRANSFER_LIST_HANDOFF_X1_VALUE(REGISTER_CONVENTION_VERSION);
	bl33_image_ep_info.args.arg3 = FW_NS_HANDOFF_BASE;
#else
	struct transfer_list_entry *te = NULL;
	struct entry_point_info *ep;

	secure_tl = (struct transfer_list_header *)arg3;

	/*
	 * Populate the global entry point structures used to execute
	 * subsequent images.
	 */
	while ((te = transfer_list_next(secure_tl, te)) != NULL) {
		ep = transfer_list_entry_data(te);

		if (te->tag_id == TL_TAG_EXEC_EP_INFO64) {
			switch (GET_SECURITY_STATE(ep->h.attr)) {
			case NON_SECURE:
				bl33_image_ep_info = *ep;
				break;
#if ENABLE_RME
			case REALM:
				rmm_image_ep_info = *ep;
				break;
#endif
			case SECURE:
				bl32_image_ep_info = *ep;
				break;
			default:
				ERROR("Unrecognized Image Security State %lu\n",
				      GET_SECURITY_STATE(ep->h.attr));
				panic();
			}
		}
	}
#endif /* RESET_TO_BL31 */
#else /* (!TRANSFER_LIST) */
#if RESET_TO_BL31
	/* If BL31 is a reset vector, the parameters must be ignored */
	(void)arg0;
	(void)arg1;
	(void)arg2;
	(void)arg3;

# ifdef BL32_BASE
	/* Populate entry point information for BL32 */
	SET_PARAM_HEAD(&bl32_image_ep_info,
		       PARAM_EP,
		       VERSION_1,
		       0);
	SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
	bl32_image_ep_info.pc = BL32_BASE;
	bl32_image_ep_info.spsr = arm_get_spsr(BL32_IMAGE_ID);

#if defined(SPD_spmd)
	bl32_image_ep_info.args.arg0 = ARM_SPMC_MANIFEST_BASE;
#endif

# endif /* BL32_BASE */

	/* Populate entry point information for BL33 */
	SET_PARAM_HEAD(&bl33_image_ep_info,
		       PARAM_EP,
		       VERSION_1,
		       0);
	/*
	 * Tell BL31 where the non-trusted software image
	 * is located and the entry state information
	 */
	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();
	bl33_image_ep_info.spsr = arm_get_spsr(BL33_IMAGE_ID);
	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);

#if ENABLE_RME
	/*
	 * Populate entry point information for RMM.
	 * Only PC needs to be set as other fields are determined by RMMD.
	 */
	rmm_image_ep_info.pc = RMM_BASE;
#endif /* ENABLE_RME */
#else /* RESET_TO_BL31 */
	/*
	 * In debug builds, we pass a special value in 'arg3'
	 * to verify platform parameters from BL2 to BL31.
	 * In release builds, it's not used.
	 */
#if DEBUG
	assert(((uintptr_t)arg3) == ARM_BL31_PLAT_PARAM_VAL);
#endif

	/*
	 * Check that the parameters passed from BL2 are not NULL.
	 */
	bl_params_t *params_from_bl2 = (bl_params_t *)(uintptr_t)arg0;
	assert(params_from_bl2 != NULL);
	assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
	assert(params_from_bl2->h.version >= VERSION_2);

	bl_params_node_t *bl_params = params_from_bl2->head;

	/*
	 * Copy the entry point information for BL33, BL32 and RMM (if
	 * present). They are stored in Secure RAM, in BL2's address space.
	 */
	while (bl_params != NULL) {
		if (bl_params->image_id == BL32_IMAGE_ID) {
			bl32_image_ep_info = *bl_params->ep_info;
#if SPMC_AT_EL3
			/*
			 * Populate the BL32 image base, size and max limit in
			 * the entry point information, since there is no
			 * platform function to retrieve them in generic
			 * code. We choose arg2, arg3 and arg4 since the generic
			 * code uses arg1 for stashing the SP manifest size. The
			 * SPMC setup uses these arguments to update the SP
			 * manifest with the actual SP's base address and its
			 * size.
			 */
			bl32_image_ep_info.args.arg2 =
				bl_params->image_info->image_base;
			bl32_image_ep_info.args.arg3 =
				bl_params->image_info->image_size;
			bl32_image_ep_info.args.arg4 =
				bl_params->image_info->image_base +
				bl_params->image_info->image_max_size;
#endif
		}
#if ENABLE_RME
		else if (bl_params->image_id == RMM_IMAGE_ID) {
			rmm_image_ep_info = *bl_params->ep_info;
		}
#endif
		else if (bl_params->image_id == BL33_IMAGE_ID) {
			bl33_image_ep_info = *bl_params->ep_info;
		}

		bl_params = bl_params->next_params_info;
	}

	if (bl33_image_ep_info.pc == 0U)
		panic();
#if ENABLE_RME
	if (rmm_image_ep_info.pc == 0U)
		panic();
#endif
#endif /* RESET_TO_BL31 */

#if USE_KERNEL_DT_CONVENTION
	/*
	 * Only use the default DT base address if TF-A has not supplied one.
	 * This can occur when the DT is side-loaded and its memory location
	 * is unknown (e.g., RESET_TO_BL31).
	 */

	if (bl33_image_ep_info.args.arg0 == 0U) {
		bl33_image_ep_info.args.arg0 = HW_CONFIG_BASE;
	}

#if ARM_LINUX_KERNEL_AS_BL33
	bl33_image_ep_info.args.arg1 = 0U;
	bl33_image_ep_info.args.arg2 = 0U;
	bl33_image_ep_info.args.arg3 = 0U;
#endif
#endif
#endif /* TRANSFER_LIST */
}

void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
				u_register_t arg2, u_register_t arg3)
{
	/* Initialize the console to provide early debug support */
	arm_console_boot_init();

	arm_bl31_early_platform_setup(arg0, arg1, arg2, arg3);

	/*
	 * Initialize Interconnect for this cluster during cold boot.
	 * No need for locks as no other CPU is active.
	 */
	plat_arm_interconnect_init();

	/*
	 * Enable Interconnect coherency for the primary CPU's cluster.
	 * Earlier bootloader stages might already do this (e.g. Trusted
	 * Firmware's BL1 does it) but we can't assume so. There is no harm in
	 * executing this code twice anyway.
	 * Platform specific PSCI code will enable coherency for other
	 * clusters.
	 */
	plat_arm_interconnect_enter_coherency();
}

/*******************************************************************************
 * Perform any BL31 platform setup common to ARM standard platforms
 ******************************************************************************/
void arm_bl31_platform_setup(void)
{
	struct transfer_list_entry *te __unused;

#if TRANSFER_LIST && !RESET_TO_BL31
	ns_tl = transfer_list_init((void *)FW_NS_HANDOFF_BASE,
				   PLAT_ARM_FW_HANDOFF_SIZE);
	if (ns_tl == NULL) {
		ERROR("Non-secure transfer list initialisation failed!\n");
		panic();
	}
	/* BL31 may modify the HW_CONFIG so defer copying it until later. */
	te = transfer_list_find(secure_tl, TL_TAG_FDT);
	assert(te != NULL);

	/*
	 * A pre-existing assumption is that FCONF is unsupported with
	 * RESET_TO_BL2 and RESET_TO_BL31. In the case of RESET_TO_BL31 this
	 * makes sense because there isn't a prior stage to load the device
	 * tree, but the reasoning for RESET_TO_BL2 is less clear. For the
	 * moment, hardware properties that would normally be derived from the
	 * DT are statically defined.
	 */
#if !RESET_TO_BL2
	fconf_populate("HW_CONFIG", (uintptr_t)transfer_list_entry_data(te));
#endif

	te = transfer_list_add(ns_tl, TL_TAG_FDT, te->data_size,
			       transfer_list_entry_data(te));
	assert(te != NULL);

	te = transfer_list_find(secure_tl, TL_TAG_TPM_EVLOG);
	if (te != NULL) {
		te = transfer_list_add(ns_tl, TL_TAG_TPM_EVLOG, te->data_size,
				       transfer_list_entry_data(te));
		if (te == NULL) {
			ERROR("Failed to load event log in Non-Secure transfer list\n");
			panic();
		}
	}
#endif /* TRANSFER_LIST && !RESET_TO_BL31 */

#if RESET_TO_BL31
	/*
	 * Do initial security configuration to allow DRAM/device access
	 * (if earlier BL has not already done so).
	 */
	plat_arm_security_setup();

#if defined(PLAT_ARM_MEM_PROT_ADDR)
	arm_nor_psci_do_dyn_mem_protect();
#endif /* PLAT_ARM_MEM_PROT_ADDR */

#endif /* RESET_TO_BL31 */

	/* Enable and initialize the System level generic timer */
	mmio_write_32(ARM_SYS_CNTCTL_BASE + CNTCR_OFF,
		      CNTCR_FCREQ(0U) | CNTCR_EN);

	/* Allow access to the System counter timer module */
	arm_configure_sys_timer();

	/* Initialize power controller before setting up topology */
	plat_arm_pwrc_setup();

#if ENABLE_FEAT_RAS && FFH_SUPPORT
	ras_init();
#endif

#if USE_DEBUGFS
	debugfs_init();
#endif /* USE_DEBUGFS */

#if USE_GIC_DRIVER == 3
	gic_set_gicr_frames(gicr_base_addrs);
#endif
}

/*******************************************************************************
 * Perform any BL31 platform runtime setup prior to BL31 exit common to ARM
 * standard platforms
 ******************************************************************************/
void arm_bl31_plat_runtime_setup(void)
{
	struct transfer_list_entry *te __unused;
	/* Initialize the runtime console */
	arm_console_runtime_init();

#if TRANSFER_LIST && !RESET_TO_BL31
	/*
	 * At this stage BL31 is assumed to have added all TEs required by
	 * BL33. Flush the transfer list so that the updated data is visible
	 * to all observers, even if caching is not enabled.
	 */
	flush_dcache_range((uintptr_t)ns_tl, ns_tl->size);
#endif /* TRANSFER_LIST && !RESET_TO_BL31 */

#if RECLAIM_INIT_CODE
	arm_free_init_memory();
#endif

#if PLAT_RO_XLAT_TABLES
	arm_xlat_make_tables_readonly();
#endif
}

#if RECLAIM_INIT_CODE
/*
 * Make memory for image boot time code RW to reclaim it as stack for the
 * secondary cores, or RO where it cannot be reclaimed:
 *
 *            |-------- INIT SECTION --------|
 *  ------------------------------------------
 *  | CORE 0  | CORE 1  | CORE 2  |  EXTRA   |
 *  | STACK   | STACK   | STACK   |  SPACE   |
 *  ------------------------------------------
 *   <---------------------------> <-------->
 *         MAKE RW AND XN             MAKE
 *          FOR STACKS             RO AND XN
 */
void arm_free_init_memory(void)
{
	int ret = 0;

	if (BL_STACKS_END < BL_INIT_CODE_END) {
		/* Reclaim some of the init section as stack if possible. */
		if (BL_INIT_CODE_BASE < BL_STACKS_END) {
			ret |= xlat_change_mem_attributes(BL_INIT_CODE_BASE,
					BL_STACKS_END - BL_INIT_CODE_BASE,
					MT_RW_DATA);
		}
		/* Make the rest of the init section read-only. */
		ret |= xlat_change_mem_attributes(BL_STACKS_END,
				BL_INIT_CODE_END - BL_STACKS_END,
				MT_RO_DATA);
	} else {
		/* The stacks cover the init section, so reclaim it all. */
		ret |= xlat_change_mem_attributes(BL_INIT_CODE_BASE,
				BL_INIT_CODE_END - BL_INIT_CODE_BASE,
				MT_RW_DATA);
	}

	if (ret != 0) {
		ERROR("Could not reclaim initialization code");
		panic();
	}
}
#endif

void __init bl31_platform_setup(void)
{
	arm_bl31_platform_setup();
}

void bl31_plat_runtime_setup(void)
{
	arm_bl31_plat_runtime_setup();
}

/*******************************************************************************
 * Perform the very early platform specific architectural setup shared between
 * ARM standard platforms. This only does basic initialization. Later
 * architectural setup (bl31_arch_setup()) does not do anything platform
 * specific.
 ******************************************************************************/
void __init arm_bl31_plat_arch_setup(void)
{
	const mmap_region_t bl_regions[] = {
		MAP_BL31_TOTAL,
#if ENABLE_RME
		ARM_MAP_L0_GPT_REGION,
#endif
#if RECLAIM_INIT_CODE
		MAP_BL_INIT_CODE,
#endif
#if SEPARATE_NOBITS_REGION
		MAP_BL31_NOBITS,
#endif
		ARM_MAP_BL_RO,
#if USE_ROMLIB
		ARM_MAP_ROMLIB_CODE,
		ARM_MAP_ROMLIB_DATA,
#endif
#if USE_COHERENT_MEM
		ARM_MAP_BL_COHERENT_RAM,
#endif
		{0}
	};

	setup_page_tables(bl_regions, plat_arm_get_mmap());

	enable_mmu_el3(0);

#if ENABLE_RME
#if RESET_TO_BL31
	/* Initialize GPT only when RME is enabled. */
	assert(is_feat_rme_present());

	/* Initialise and enable granule protection after MMU. */
	arm_gpt_setup();
#endif /* RESET_TO_BL31 */
	/*
	 * Initialise Granule Protection library and enable GPC for the primary
	 * processor. The tables have already been initialized by a previous BL
	 * stage, so there is no need to provide any PAS here. This function
	 * sets up pointers to those tables.
	 */
	if (gpt_runtime_init(BITLOCK_BASE, BITLOCK_SIZE) < 0) {
		ERROR("gpt_runtime_init() failed!\n");
		panic();
	}
#endif /* ENABLE_RME */

	arm_setup_romlib();
}

void __init bl31_plat_arch_setup(void)
{
	arm_bl31_plat_arch_setup();
}