/*
 * Copyright (c) 2015-2026, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <drivers/console.h>
#include <lib/debugfs.h>
#include <lib/extensions/ras.h>
#include <lib/fconf/fconf.h>
#include <lib/gpt_rme/gpt_rme.h>
#include <lib/mmio.h>
#include <services/lfa_svc.h>
#if TRANSFER_LIST
#include <transfer_list.h>
#endif
#include <lib/xlat_tables/xlat_tables_compat.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/arm/common/plat_arm_lfa_components.h>
#include <plat/common/platform.h>
#include <platform_def.h>

struct transfer_list_header *secure_tl;
struct transfer_list_header *ns_tl __unused;

#if USE_GIC_DRIVER == 3
uintptr_t arm_gicr_base_addrs[2] = {
	PLAT_ARM_GICR_BASE,	/* GICR Base address of the primary CPU */
	0U			/* Zero Termination */
};
#endif

/*
 * Placeholder variables for copying the arguments that have been passed to
 * BL31 from BL2.
 */
static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;

#if ENABLE_RME
static entry_point_info_t rmm_image_ep_info;
#if (RME_GPT_BITLOCK_BLOCK == 0)
#define BITLOCK_BASE	UL(0)
#define BITLOCK_SIZE	UL(0)
#else
/*
 * Number of bitlock_t entries in bitlocks array for PLAT_ARM_PPS
 * with RME_GPT_BITLOCK_BLOCK * 512MB per bitlock.
 */
#if (PLAT_ARM_PPS > (RME_GPT_BITLOCK_BLOCK * SZ_512M * UL(8)))
#define BITLOCKS_NUM	(PLAT_ARM_PPS) /				\
			(RME_GPT_BITLOCK_BLOCK * SZ_512M * UL(8))
#else
#define BITLOCKS_NUM	U(1)
#endif
/*
 * Bitlocks array
 */
static bitlock_t gpt_bitlock[BITLOCKS_NUM];
#define BITLOCK_BASE	(uintptr_t)gpt_bitlock
#define BITLOCK_SIZE	sizeof(gpt_bitlock)
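
/*
 * Sizing example with illustrative values (not a platform requirement):
 * assuming an 8-bit bitlock_t, RME_GPT_BITLOCK_BLOCK = 1 and a PLAT_ARM_PPS
 * of 64GB, each bitlock_t entry covers 1 * 512MB * 8 = 4GB, so
 * BITLOCKS_NUM = 64GB / 4GB = 16 entries in gpt_bitlock.
 */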
#endif /* RME_GPT_BITLOCK_BLOCK */
#endif /* ENABLE_RME */

#if !RESET_TO_BL31
/*
 * Check that BL31_BASE is above ARM_FW_CONFIG_LIMIT. The reserved page
 * is required for SOC_FW_CONFIG/TOS_FW_CONFIG passed from BL2.
 */
#if TRANSFER_LIST
CASSERT(BL31_BASE >= PLAT_ARM_EL3_FW_HANDOFF_LIMIT, assert_bl31_base_overflows);
#else
CASSERT(BL31_BASE >= ARM_FW_CONFIG_LIMIT, assert_bl31_base_overflows);
#endif /* TRANSFER_LIST */
#endif /* RESET_TO_BL31 */

/* Weak definitions may be overridden in specific ARM standard platforms */
#pragma weak bl31_early_platform_setup2
#pragma weak bl31_platform_setup
#pragma weak bl31_plat_arch_setup
#pragma weak bl31_plat_get_next_image_ep_info
#pragma weak bl31_plat_runtime_setup

#define MAP_BL31_TOTAL		MAP_REGION_FLAT(			\
					BL31_START,			\
					BL31_END - BL31_START,		\
					MT_MEMORY | MT_RW | EL3_PAS |	\
					MT_CAP_LD_ST_TRACK)

#if RECLAIM_INIT_CODE
IMPORT_SYM(unsigned long, __INIT_CODE_START__, BL_INIT_CODE_BASE);
IMPORT_SYM(unsigned long, __INIT_CODE_END__, BL_CODE_END_UNALIGNED);
IMPORT_SYM(unsigned long, __STACKS_END__, BL_STACKS_END_UNALIGNED);

/* Round the init code and stack end addresses up to a page boundary. */
#define BL_INIT_CODE_END	((BL_CODE_END_UNALIGNED + PAGE_SIZE - 1) & \
					~(PAGE_SIZE - 1))
#define BL_STACKS_END		((BL_STACKS_END_UNALIGNED + PAGE_SIZE - 1) & \
					~(PAGE_SIZE - 1))

#define MAP_BL_INIT_CODE	MAP_REGION_FLAT(			\
					BL_INIT_CODE_BASE,		\
					BL_INIT_CODE_END		\
						- BL_INIT_CODE_BASE,	\
					MT_CODE | EL3_PAS)
#endif

#if SEPARATE_NOBITS_REGION
#define MAP_BL31_NOBITS		MAP_REGION_FLAT(			\
					BL31_NOBITS_BASE,		\
					BL31_NOBITS_LIMIT		\
						- BL31_NOBITS_BASE,	\
					MT_MEMORY | MT_RW | EL3_PAS)
#endif

/*******************************************************************************
 * Return a pointer to the 'entry_point_info' structure of the next image for
 * the security state specified. BL33 corresponds to the non-secure image type
 * while BL32 corresponds to the secure image type. A NULL pointer is returned
 * if the image does not exist.
 ******************************************************************************/
struct entry_point_info *bl31_plat_get_next_image_ep_info(uint32_t type)
{
	entry_point_info_t *next_image_info;

	assert(sec_state_is_valid(type));

	if (type == NON_SECURE) {
#if TRANSFER_LIST && !RESET_TO_BL31
		next_image_info = transfer_list_set_handoff_args(
			ns_tl, &bl33_image_ep_info);
#else
		next_image_info = &bl33_image_ep_info;
#endif
	}
#if ENABLE_RME
	else if (type == REALM) {
#if LFA_SUPPORT
		if (lfa_is_prime_complete(LFA_RMM_COMPONENT)) {
			rmm_image_ep_info.pc = RMM_BASE + RMM_BANK_SIZE;
		}
#endif /* LFA_SUPPORT */
		next_image_info = &rmm_image_ep_info;
	}
#endif
	else {
#if TRANSFER_LIST && !RESET_TO_BL31
		next_image_info = transfer_list_set_handoff_args(
			secure_tl, &bl32_image_ep_info);
#else
		next_image_info = &bl32_image_ep_info;
#endif
	}

	/*
	 * None of the images on the ARM development platforms can have 0x0
	 * as the entrypoint.
	 */
	if (next_image_info->pc)
		return next_image_info;
	else
		return NULL;
}
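
/*
 * Note: the generic BL31 exit path (bl31_prepare_next_image_entry()) and the
 * SPD/RMMD setup code are the typical callers of the hook above; a NULL
 * return tells them that no image of the requested security state was loaded.
 */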

/*******************************************************************************
 * Perform any BL31 early platform setup common to ARM standard platforms.
 * Here is an opportunity to copy parameters passed by the calling EL (S-EL1
 * in BL2 & EL3 in BL1) before they are lost (potentially). This needs to be
 * done before the MMU is initialized so that the memory layout can be used
 * while creating page tables. BL2 has flushed this information to memory, so
 * we are guaranteed to pick up good data.
 ******************************************************************************/
void __init arm_bl31_early_platform_setup(u_register_t arg0, u_register_t arg1,
					  u_register_t arg2, u_register_t arg3)
{
#if TRANSFER_LIST
#if RESET_TO_BL31
	/* Populate entry point information for BL33 */
	SET_PARAM_HEAD(&bl33_image_ep_info, PARAM_EP, VERSION_1, 0);

	/*
	 * Tell BL31 where the non-trusted software image is located and what
	 * its entry state information is.
	 */
	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();

	bl33_image_ep_info.spsr = arm_get_spsr(BL33_IMAGE_ID);
	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);

	bl33_image_ep_info.args.arg0 = PLAT_ARM_TRANSFER_LIST_DTB_OFFSET;
	bl33_image_ep_info.args.arg1 =
		TRANSFER_LIST_HANDOFF_X1_VALUE(REGISTER_CONVENTION_VERSION);
	bl33_image_ep_info.args.arg3 = FW_NS_HANDOFF_BASE;
#else
	struct transfer_list_entry *te = NULL;
	struct entry_point_info *ep;

	secure_tl = (struct transfer_list_header *)arg3;

	/*
	 * Populate the global entry point structures used to execute
	 * subsequent images.
	 */
	while ((te = transfer_list_next(secure_tl, te)) != NULL) {
		ep = transfer_list_entry_data(te);

		if (te->tag_id == TL_TAG_EXEC_EP_INFO64) {
			switch (GET_SECURITY_STATE(ep->h.attr)) {
			case NON_SECURE:
				bl33_image_ep_info = *ep;
				break;
#if ENABLE_RME
			case REALM:
				rmm_image_ep_info = *ep;
				break;
#endif
			case SECURE:
				bl32_image_ep_info = *ep;
				break;
			default:
				ERROR("Unrecognized Image Security State %lu\n",
				      GET_SECURITY_STATE(ep->h.attr));
				panic();
			}
		}
	}
#endif /* RESET_TO_BL31 */
#else /* (!TRANSFER_LIST) */
#if RESET_TO_BL31
	/* If BL31 is a reset vector, the parameters must be ignored */
	(void)arg0;
	(void)arg1;
	(void)arg2;
	(void)arg3;

# ifdef BL32_BASE
	/* Populate entry point information for BL32 */
	SET_PARAM_HEAD(&bl32_image_ep_info,
		       PARAM_EP,
		       VERSION_1,
		       0);
	SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
	bl32_image_ep_info.pc = BL32_BASE;
	bl32_image_ep_info.spsr = arm_get_spsr(BL32_IMAGE_ID);

#if defined(SPD_spmd)
	bl32_image_ep_info.args.arg0 = ARM_SPMC_MANIFEST_BASE;
#endif

# endif /* BL32_BASE */

	/* Populate entry point information for BL33 */
	SET_PARAM_HEAD(&bl33_image_ep_info,
		       PARAM_EP,
		       VERSION_1,
		       0);

	/*
	 * Tell BL31 where the non-trusted software image is located and what
	 * its entry state information is.
	 */
	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();
	bl33_image_ep_info.spsr = arm_get_spsr(BL33_IMAGE_ID);
	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);

#if ENABLE_RME
	/*
	 * Populate entry point information for RMM.
	 * Only PC needs to be set, as other fields are determined by RMMD.
	 */
	rmm_image_ep_info.pc = RMM_BASE;
#endif /* ENABLE_RME */
#else /* RESET_TO_BL31 */
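	/*
	 * Legacy (non-transfer-list) handoff: BL2 passes a bl_params_t list
	 * in arg0 describing the images it has loaded. The loop below walks
	 * this list to capture the BL32, BL33 and (if present) RMM entry
	 * point information.
	 */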
	/*
	 * In debug builds, we pass a special value in 'arg3' to verify the
	 * platform parameters handed from BL2 to BL31. In release builds,
	 * it is not used.
	 */
#if DEBUG
	assert(((uintptr_t)arg3) == ARM_BL31_PLAT_PARAM_VAL);
#endif

	/* The parameters passed from BL2 must not be NULL. */
	bl_params_t *params_from_bl2 = (bl_params_t *)(uintptr_t)arg0;
	assert(params_from_bl2 != NULL);
	assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
	assert(params_from_bl2->h.version >= VERSION_2);

	bl_params_node_t *bl_params = params_from_bl2->head;

	/*
	 * Copy the entry point information for BL33, BL32 and RMM (if
	 * present). They are stored in Secure RAM, in BL2's address space.
	 */
	while (bl_params != NULL) {
		if (bl_params->image_id == BL32_IMAGE_ID) {
			bl32_image_ep_info = *bl_params->ep_info;
#if SPMC_AT_EL3
			/*
			 * Populate the BL32 image base, size and max limit in
			 * the entry point information, since there is no
			 * platform function to retrieve them in generic code.
			 * We choose arg2, arg3 and arg4 since the generic
			 * code uses arg1 for stashing the SP manifest size.
			 * The SPMC setup uses these arguments to update the
			 * SP manifest with the actual SP base address and
			 * size.
			 */
			bl32_image_ep_info.args.arg2 =
				bl_params->image_info->image_base;
			bl32_image_ep_info.args.arg3 =
				bl_params->image_info->image_size;
			bl32_image_ep_info.args.arg4 =
				bl_params->image_info->image_base +
				bl_params->image_info->image_max_size;
#endif
		}
#if ENABLE_RME
		else if (bl_params->image_id == RMM_IMAGE_ID) {
			rmm_image_ep_info = *bl_params->ep_info;
		}
#endif
		else if (bl_params->image_id == BL33_IMAGE_ID) {
			bl33_image_ep_info = *bl_params->ep_info;
		}

		bl_params = bl_params->next_params_info;
	}

	if (bl33_image_ep_info.pc == 0U)
		panic();
#if ENABLE_RME
	if (rmm_image_ep_info.pc == 0U)
		panic();
#endif
#endif /* RESET_TO_BL31 */

#if USE_KERNEL_DT_CONVENTION
	/*
	 * Only use the default DT base address if TF-A has not supplied one.
	 * This can occur when the DT is side-loaded and its memory location
	 * is unknown (e.g., RESET_TO_BL31).
	 */
	if (bl33_image_ep_info.args.arg0 == 0U) {
		bl33_image_ep_info.args.arg0 = HW_CONFIG_BASE;
	}

#if ARM_LINUX_KERNEL_AS_BL33
	bl33_image_ep_info.args.arg1 = 0U;
	bl33_image_ep_info.args.arg2 = 0U;
	bl33_image_ep_info.args.arg3 = 0U;
#endif
#endif
#endif /* TRANSFER_LIST */
}
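
/*******************************************************************************
 * Default BL31 early platform setup handler for ARM standard platforms: bring
 * up the boot console, copy the handoff arguments and, when hardware-assisted
 * coherency is not available, initialise interconnect coherency for the
 * primary CPU's cluster.
 ******************************************************************************/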
void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
				u_register_t arg2, u_register_t arg3)
{
	/* Initialize the console to provide early debug support */
	arm_console_boot_init();

	arm_bl31_early_platform_setup(arg0, arg1, arg2, arg3);

#if !HW_ASSISTED_COHERENCY
	/*
	 * Initialize the interconnect for this cluster during cold boot.
	 * No need for locks as no other CPU is active.
	 */
	plat_arm_interconnect_init();

	/*
	 * Enable interconnect coherency for the primary CPU's cluster.
	 * Earlier bootloader stages might already do this (e.g. Trusted
	 * Firmware's BL1 does it) but we can't assume so. There is no harm in
	 * executing this code twice anyway.
	 * Platform specific PSCI code will enable coherency for other
	 * clusters.
	 */
	plat_arm_interconnect_enter_coherency();
#endif
}

/*******************************************************************************
 * Perform any BL31 platform setup common to ARM standard platforms
 ******************************************************************************/
void arm_bl31_platform_setup(void)
{
	struct transfer_list_entry *te __unused;

#if TRANSFER_LIST && !RESET_TO_BL31
	ns_tl = transfer_list_init((void *)FW_NS_HANDOFF_BASE,
				   PLAT_ARM_FW_HANDOFF_SIZE);
	if (ns_tl == NULL) {
		ERROR("Non-secure transfer list initialisation failed!\n");
		panic();
	}

	/* BL31 may modify the HW_CONFIG so defer copying it until later. */
	te = transfer_list_find(secure_tl, TL_TAG_FDT);
	assert(te != NULL);

	/*
	 * A pre-existing assumption is that FCONF is unsupported with
	 * RESET_TO_BL2 and RESET_TO_BL31. In the case of RESET_TO_BL31 this
	 * makes sense because there isn't a prior stage to load the device
	 * tree, but the reasoning for RESET_TO_BL2 is less clear. For the
	 * moment, hardware properties that would normally be derived from
	 * the DT are statically defined.
	 */
#if !RESET_TO_BL2
	fconf_populate("HW_CONFIG", (uintptr_t)transfer_list_entry_data(te));
#endif

	te = transfer_list_add(ns_tl, TL_TAG_FDT, te->data_size,
			       transfer_list_entry_data(te));
	assert(te != NULL);

	te = transfer_list_find(secure_tl, TL_TAG_TPM_EVLOG);
	if (te != NULL) {
		te = transfer_list_add(ns_tl, TL_TAG_TPM_EVLOG, te->data_size,
				       transfer_list_entry_data(te));
		if (te == NULL) {
			ERROR("Failed to load event log in Non-Secure transfer list\n");
			panic();
		}
	}
#endif /* TRANSFER_LIST && !RESET_TO_BL31 */

#if RESET_TO_BL31
	/*
	 * Do initial security configuration to allow DRAM/device access
	 * (if an earlier BL has not already done so).
	 */
	plat_arm_security_setup();

#if defined(PLAT_ARM_MEM_PROT_ADDR)
	arm_nor_psci_do_dyn_mem_protect();
#endif /* PLAT_ARM_MEM_PROT_ADDR */

#endif /* RESET_TO_BL31 */

	/* Enable and initialize the System level generic timer */
	mmio_write_32(ARM_SYS_CNTCTL_BASE + CNTCR_OFF,
		      CNTCR_FCREQ(0U) | CNTCR_EN);

	/* Allow access to the System counter timer module */
	arm_configure_sys_timer();

	/* Initialize the power controller before setting up the topology */
	plat_arm_pwrc_setup();

#if FFH_SUPPORT
	if (is_feat_ras_supported()) {
		ras_init();
	}
#endif

#if USE_DEBUGFS
	debugfs_init();
#endif /* USE_DEBUGFS */
}

/*******************************************************************************
 * Perform any BL31 platform runtime setup prior to BL31 exit common to ARM
 * standard platforms
 ******************************************************************************/
void arm_bl31_plat_runtime_setup(void)
{
	struct transfer_list_entry *te __unused;

	/* Initialize the runtime console */
	arm_console_runtime_init();

#if TRANSFER_LIST && !RESET_TO_BL31
	/*
	 * BL31 is assumed to have added all the transfer entries required by
	 * BL33 by this stage. Flush the transfer list so that the updated
	 * data is visible to all observers, even those accessing it with
	 * caching disabled.
	 */
	flush_dcache_range((uintptr_t)ns_tl, ns_tl->size);
#endif /* TRANSFER_LIST && !RESET_TO_BL31 */

#if RECLAIM_INIT_CODE
	arm_free_init_memory();
#endif

#if PLAT_RO_XLAT_TABLES
	arm_xlat_make_tables_readonly();
#endif
}

#if RECLAIM_INIT_CODE
/*
 * Make the memory used for boot-time (init) code RW so that it can be
 * reclaimed as stack space for the secondary cores, or RO where it cannot be
 * reclaimed:
 *
 *           |---------- INIT SECTION -----------|
 * -----------------------------------------------
 * | CORE 0  | CORE 1  | CORE 2  |     EXTRA     |
 * | STACK   | STACK   | STACK   |     SPACE     |
 * -----------------------------------------------
 *           <------------------><--------------->
 *              MAKE RW AND XN         MAKE
 *                FOR STACKS         RO AND XN
 */
void arm_free_init_memory(void)
{
	int ret = 0;

	if (BL_STACKS_END < BL_INIT_CODE_END) {
		/* Reclaim some of the init section as stack if possible. */
		if (BL_INIT_CODE_BASE < BL_STACKS_END) {
			ret |= xlat_change_mem_attributes(BL_INIT_CODE_BASE,
					BL_STACKS_END - BL_INIT_CODE_BASE,
					MT_RW_DATA);
		}
		/* Make the rest of the init section read-only. */
		ret |= xlat_change_mem_attributes(BL_STACKS_END,
				BL_INIT_CODE_END - BL_STACKS_END,
				MT_RO_DATA);
	} else {
		/* The stacks cover the init section, so reclaim it all. */
		ret |= xlat_change_mem_attributes(BL_INIT_CODE_BASE,
				BL_INIT_CODE_END - BL_INIT_CODE_BASE,
				MT_RW_DATA);
	}

	if (ret != 0) {
		ERROR("Could not reclaim initialization code");
		panic();
	}
}
#endif

void __init bl31_platform_setup(void)
{
	arm_bl31_platform_setup();

#if USE_GIC_DRIVER == 3
	gic_set_gicr_frames(arm_gicr_base_addrs);
#endif
}

void bl31_plat_runtime_setup(void)
{
	arm_bl31_plat_runtime_setup();
}

/*******************************************************************************
 * Perform the very early platform specific architectural setup shared between
 * ARM standard platforms. This only does basic initialization. Later
 * architectural setup (bl31_arch_setup()) does not do anything platform
 * specific.
 ******************************************************************************/
void __init arm_bl31_plat_arch_setup(void)
{
	const mmap_region_t bl_regions[] = {
		MAP_BL31_TOTAL,
#if ENABLE_RME
		ARM_MAP_L0_GPT_REGION,
#endif
#if RECLAIM_INIT_CODE
		MAP_BL_INIT_CODE,
#endif
#if SEPARATE_NOBITS_REGION
		MAP_BL31_NOBITS,
#endif
		ARM_MAP_BL_RO,
#if USE_ROMLIB
		ARM_MAP_ROMLIB_CODE,
		ARM_MAP_ROMLIB_DATA,
#endif
#if USE_COHERENT_MEM
		ARM_MAP_BL_COHERENT_RAM,
#endif
		{0}
	};

	setup_page_tables(bl_regions, plat_arm_get_mmap());

	enable_mmu_el3(0);

#if ENABLE_RME
#if RESET_TO_BL31
	/* Initialize the GPT only when RME is enabled. */
	assert(is_feat_rme_present());

	/* Initialise and enable granule protection after the MMU is on. */
	arm_gpt_setup();
#endif /* RESET_TO_BL31 */
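	/*
	 * Note: BITLOCK_BASE and BITLOCK_SIZE (defined at the top of this
	 * file) describe the gpt_bitlock array handed to the GPT library
	 * below so that it can serialise granule state changes. When
	 * RME_GPT_BITLOCK_BLOCK is 0, both are passed as zero, i.e. no
	 * fine-grained bitlocks are provided.
	 */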
	/*
	 * Initialise Granule Protection library and enable GPC for the
	 * primary processor. The tables have already been initialized by a
	 * previous BL stage, so there is no need to provide any PAS here.
	 * This function sets up pointers to those tables.
	 */
	if (gpt_runtime_init(BITLOCK_BASE, BITLOCK_SIZE) < 0) {
		ERROR("gpt_runtime_init() failed!\n");
		panic();
	}
#endif /* ENABLE_RME */

	arm_setup_romlib();
}

void __init bl31_plat_arch_setup(void)
{
	arm_bl31_plat_arch_setup();
}