/*
 * Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <platform_def.h>

#include <arch_features.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/desc_image_load.h>
#include <drivers/generic_delay_timer.h>
#include <drivers/partition/partition.h>
#include <lib/fconf/fconf.h>
#include <lib/fconf/fconf_dyn_cfg_getter.h>
#include <lib/gpt_rme/gpt_rme.h>
#if TRANSFER_LIST
#include <lib/transfer_list.h>
#endif
#ifdef SPD_opteed
#include <lib/optee_utils.h>
#endif
#include <lib/utils.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>

/* Data structure which holds the extents of the trusted SRAM for BL2 */
static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);

/*
 * Base address of fw_config received from BL1. Only consumed on the
 * non-TRANSFER_LIST path (see bl2_plat_arch_setup), hence __unused.
 */
static uintptr_t config_base __unused;

/*
 * Check that BL2_BASE is above ARM_FW_CONFIG_LIMIT. This reserved page is
 * for `meminfo_t` data structure and fw_configs passed from BL1.
 */
#if TRANSFER_LIST
CASSERT(BL2_BASE >= PLAT_ARM_EL3_FW_HANDOFF_BASE + PLAT_ARM_FW_HANDOFF_SIZE,
	assert_bl2_base_overflows);
#elif !RESET_TO_BL2
CASSERT(BL2_BASE >= ARM_FW_CONFIG_LIMIT, assert_bl2_base_overflows);
#endif /* TRANSFER_LIST */

/* Weak definitions may be overridden in specific ARM standard platform */
#pragma weak bl2_early_platform_setup2
#pragma weak bl2_platform_setup
#pragma weak bl2_plat_arch_setup
#pragma weak bl2_plat_sec_mem_layout

/*
 * Flat mapping of BL2's entire view of trusted SRAM, RW, in the EL3
 * physical address space. Relies on bl2_tzram_layout being populated
 * during early platform setup, before the page tables are created.
 */
#define MAP_BL2_TOTAL		MAP_REGION_FLAT(			\
					bl2_tzram_layout.total_base,	\
					bl2_tzram_layout.total_size,	\
					MT_MEMORY | MT_RW | EL3_PAS)

#pragma weak arm_bl2_plat_handle_post_image_load

/* Secure transfer list used for boot-stage hand-off when TRANSFER_LIST=1 */
struct transfer_list_header *secure_tl __unused;

/*******************************************************************************
 * BL1 has passed the extents of the trusted SRAM that should be visible to BL2
 * in x0. This memory layout is sitting at the base of the free trusted SRAM.
 * Copy it to a safe location before its reclaimed by later BL2 functionality.
 ******************************************************************************/
void arm_bl2_early_platform_setup(u_register_t arg0, u_register_t arg1,
				  u_register_t arg2, u_register_t arg3)
{
	struct transfer_list_entry *te __unused;
	int __maybe_unused ret;

	/* Initialize the console to provide early debug support */
	arm_console_boot_init();

#if TRANSFER_LIST
	/* BL1 hands the secure transfer list to BL2 in arg3. */
	secure_tl = (struct transfer_list_header *)arg3;

	/* The SRAM layout is carried as a TL entry; it must be present. */
	te = transfer_list_find(secure_tl, TL_TAG_SRAM_LAYOUT64);
	assert(te != NULL);

	/* Copy it out, then drop the entry so the TL space can be reclaimed. */
	bl2_tzram_layout = *(meminfo_t *)transfer_list_entry_data(te);
	transfer_list_rem(secure_tl, te);
#else
	/* Legacy hand-off: arg0 = fw_config base, arg1 = meminfo_t pointer. */
	config_base = (uintptr_t)arg0;

	/* Setup the BL2 memory layout */
	bl2_tzram_layout = *(meminfo_t *)arg1;
#endif /* TRANSFER_LIST */

	/* Initialise the IO layer and register platform IO devices */
	plat_arm_io_setup();

	/* Load partition table */
#if ARM_GPT_SUPPORT
	ret = gpt_partition_init();
	if (ret != 0) {
		ERROR("GPT partition initialisation failed!\n");
		panic();
	}

#endif /* ARM_GPT_SUPPORT */
}

/*
 * Default (weak) BL2 early setup: common ARM setup plus the generic
 * delay timer, which later stages rely on for timeouts.
 */
void bl2_early_platform_setup2(u_register_t arg0, u_register_t arg1,
			       u_register_t arg2, u_register_t arg3)
{
	arm_bl2_early_platform_setup(arg0, arg1, arg2, arg3);

	generic_delay_timer_init();
}

/*
 * Perform BL2 preload setup. Currently we initialise the dynamic
 * configuration here.
 */
void bl2_plat_preload_setup(void)
{
#if TRANSFER_LIST
	/* Assume the secure TL hasn't been initialised if BL2 is running at EL3.
	 */
#if RESET_TO_BL2
	/* No BL1 ran, so create (or validate) the secure TL ourselves. */
	secure_tl = transfer_list_ensure((void *)PLAT_ARM_EL3_FW_HANDOFF_BASE,
					 PLAT_ARM_FW_HANDOFF_SIZE);

	if (secure_tl == NULL) {
		ERROR("Secure transfer list initialisation failed!\n");
		panic();
	}
#endif
	arm_transfer_list_dyn_cfg_init(secure_tl);
#else
#if ARM_FW_CONFIG_LOAD_ENABLE
	arm_bl2_el3_plat_config_load();
#endif /* ARM_FW_CONFIG_LOAD_ENABLE */
	arm_bl2_dyn_cfg_init();
#endif

#if ARM_GPT_SUPPORT && !PSA_FWU_SUPPORT
	/* Always use the FIP from bank 0 */
	arm_set_fip_addr(0U);
#endif /* ARM_GPT_SUPPORT && !PSA_FWU_SUPPORT */
}

/*
 * Perform ARM standard platform setup.
 */
void arm_bl2_platform_setup(void)
{
#if !ENABLE_RME
	/*
	 * Initialize the secure environment. With RME this is deferred to
	 * arm_bl2_plat_arch_setup(), before GPT initialisation.
	 */
	plat_arm_security_setup();
#endif

#if defined(PLAT_ARM_MEM_PROT_ADDR)
	arm_nor_psci_do_static_mem_protect();
#endif
}

/* Default (weak) BL2 platform setup: just the common ARM setup. */
void bl2_platform_setup(void)
{
	arm_bl2_platform_setup();
}

/*******************************************************************************
 * Perform the very early platform specific architectural setup here.
 * When RME is enabled the secure environment is initialised before
 * initialising and enabling Granule Protection.
 * This function initialises the MMU in a quick and dirty way.
 ******************************************************************************/
void arm_bl2_plat_arch_setup(void)
{
#if USE_COHERENT_MEM
	/* Ensure ARM platforms don't use coherent memory in BL2.
	 */
	assert((BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE) == 0U);
#endif

	const mmap_region_t bl_regions[] = {
		MAP_BL2_TOTAL,
		ARM_MAP_BL_RO,
#if USE_ROMLIB
		ARM_MAP_ROMLIB_CODE,
		ARM_MAP_ROMLIB_DATA,
#endif
#if !TRANSFER_LIST
		/* Config region only exists on the legacy fw_config path. */
		ARM_MAP_BL_CONFIG_REGION,
#endif /* TRANSFER_LIST */
#if ENABLE_RME
		ARM_MAP_L0_GPT_REGION,
#endif
		{ 0 }
	};

#if ENABLE_RME
	/* Initialise the secure environment before enabling the GPT. */
	plat_arm_security_setup();
#endif
	setup_page_tables(bl_regions, plat_arm_get_mmap());

#ifdef __aarch64__
#if ENABLE_RME
	/* BL2 runs in EL3 when RME enabled. */
	assert(is_feat_rme_present());
	enable_mmu_el3(0);

	/* Initialise and enable granule protection after MMU. */
	arm_gpt_setup();
#else
	enable_mmu_el1(0);
#endif
#else
	enable_mmu_svc_mon(0);
#endif

	arm_setup_romlib();
}

/*
 * Default (weak) BL2 arch setup: common ARM arch setup, then either
 * publish the crypto heap info via the secure TL or populate fconf from
 * the configs loaded by BL1.
 */
void bl2_plat_arch_setup(void)
{
	const struct dyn_cfg_dtb_info_t *tb_fw_config_info __unused;
	struct transfer_list_entry *te __unused;
	arm_bl2_plat_arch_setup();

#if TRANSFER_LIST
#if CRYPTO_SUPPORT
	/* Heap info is consumed here; remove the entry once read. */
	te = arm_transfer_list_set_heap_info(secure_tl);
	transfer_list_rem(secure_tl, te);
#endif /* CRYPTO_SUPPORT */
#else
	/* Fill the properties struct with the info from the config dtb */
	fconf_populate("FW_CONFIG", config_base);

	/* TB_FW_CONFIG was also loaded by BL1 */
	tb_fw_config_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TB_FW_CONFIG_ID);
	assert(tb_fw_config_info != NULL);

	fconf_populate("TB_FW", tb_fw_config_info->config_addr);
#endif /* TRANSFER_LIST */
}

/*
 * Common post-image-load handling for the standard ARM images. Fills in
 * each image's entry point state (SPSR, boot args) after it has been
 * loaded. Returns 0 on success or a negative/non-zero error from the
 * image-specific handler.
 */
int arm_bl2_handle_post_image_load(unsigned int image_id)
{
	int err = 0;
	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
#ifdef SPD_opteed
	bl_mem_params_node_t *pager_mem_params = NULL;
	bl_mem_params_node_t *paged_mem_params = NULL;
#endif

	assert(bl_mem_params != NULL);

	switch (image_id) {
#ifdef __aarch64__
	case BL32_IMAGE_ID:
#ifdef SPD_opteed
		/*
		 * OP-TEE is split into pager/paged images; their descriptors
		 * must exist so the OP-TEE header can be parsed into them.
		 */
		pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
		assert(pager_mem_params);

		paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
		assert(paged_mem_params);

		err = parse_optee_header(&bl_mem_params->ep_info,
					 &pager_mem_params->image_info,
					 &paged_mem_params->image_info);
		if (err != 0) {
			/* Non-fatal: err is propagated to the caller. */
			WARN("OPTEE header parse error.\n");
		}
#endif
		bl_mem_params->ep_info.spsr = arm_get_spsr_for_bl32_entry();
		break;
#endif

	case BL33_IMAGE_ID:
		/* BL33 expects to receive the primary CPU MPID (through r0) */
		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
		bl_mem_params->ep_info.spsr = arm_get_spsr_for_bl33_entry();
		break;

#ifdef SCP_BL2_BASE
	case SCP_BL2_IMAGE_ID:
		/* The subsequent handling of SCP_BL2 is platform specific */
		err = plat_arm_bl2_handle_scp_bl2(&bl_mem_params->image_info);
		if (err) {
			WARN("Failure in platform-specific handling of SCP_BL2 image.\n");
		}
		break;
#endif
	default:
		/* Do nothing in default case */
		break;
	}

	return err;
}

/*******************************************************************************
 * This function can be used by the platforms to update/use image
 * information for given `image_id`.
 ******************************************************************************/
int arm_bl2_plat_handle_post_image_load(unsigned int image_id)
{
#if defined(SPD_spmd) && BL2_ENABLE_SP_LOAD
	/* For Secure Partitions we don't need post processing */
	if ((image_id >= (MAX_NUMBER_IDS - MAX_SP_IDS)) &&
	    (image_id < MAX_NUMBER_IDS)) {
		return 0;
	}
#endif

#if TRANSFER_LIST
	if (image_id == HW_CONFIG_ID) {
		/* Refresh the now stale checksum following loading of HW_CONFIG into the TL.
		 */
		transfer_list_update_checksum(secure_tl);
	}
#endif /* TRANSFER_LIST */

	return arm_bl2_handle_post_image_load(image_id);
}

/*
 * Record the next image's entry point in the secure transfer list and
 * populate its entry-point info from the TL contents, so the hand-off
 * arguments reach the next boot stage via the transfer list.
 */
void arm_bl2_setup_next_ep_info(bl_mem_params_node_t *next_param_node)
{
	entry_point_info_t *ep __unused;
	ep = transfer_list_set_handoff_args(secure_tl,
					    &next_param_node->ep_info);
	assert(ep != NULL);

	arm_transfer_list_populate_ep_info(next_param_node, secure_tl);
}