/*
 * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <platform_def.h>

#include <arch_features.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/desc_image_load.h>
#include <drivers/generic_delay_timer.h>
#include <drivers/partition/partition.h>
#include <lib/fconf/fconf.h>
#include <lib/fconf/fconf_dyn_cfg_getter.h>
#include <lib/gpt_rme/gpt_rme.h>
#if TRANSFER_LIST
#include <lib/transfer_list.h>
#endif
#ifdef SPD_opteed
#include <lib/optee_utils.h>
#endif
#include <lib/utils.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>

/* Data structure which holds the extents of the trusted SRAM for BL2 */
static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);

/* Base address of fw_config received from BL1 */
static uintptr_t config_base __unused;

/*
 * Check that BL2_BASE is above ARM_FW_CONFIG_LIMIT (or, with TRANSFER_LIST,
 * above the EL3 firmware handoff area). This reserved region holds the
 * `meminfo_t` data structure and the fw_configs passed from BL1.
 */
#if TRANSFER_LIST
CASSERT(BL2_BASE >= PLAT_ARM_EL3_FW_HANDOFF_BASE + PLAT_ARM_FW_HANDOFF_SIZE,
        assert_bl2_base_overflows);
#elif !RESET_TO_BL2
CASSERT(BL2_BASE >= ARM_FW_CONFIG_LIMIT, assert_bl2_base_overflows);
#endif /* TRANSFER_LIST */

/* Weak definitions may be overridden in a specific ARM standard platform */
#pragma weak bl2_early_platform_setup2
#pragma weak bl2_platform_setup
#pragma weak bl2_plat_arch_setup
#pragma weak bl2_plat_sec_mem_layout

#if ENABLE_RME
#define MAP_BL2_TOTAL		MAP_REGION_FLAT(			\
					bl2_tzram_layout.total_base,	\
					bl2_tzram_layout.total_size,	\
					MT_MEMORY | MT_RW | MT_ROOT)
#else
#define MAP_BL2_TOTAL		MAP_REGION_FLAT(			\
					bl2_tzram_layout.total_base,	\
					bl2_tzram_layout.total_size,	\
					MT_MEMORY | MT_RW | MT_SECURE)
#endif /* ENABLE_RME */

#pragma weak arm_bl2_plat_handle_post_image_load
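
/*
 * Transfer lists used for the firmware handoff when TRANSFER_LIST is enabled:
 * secure_tl tracks the handoff data shared between secure world images,
 * ns_tl the data handed to the normal world.
 */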
static struct transfer_list_header *secure_tl __unused;
static struct transfer_list_header *ns_tl __unused;

/*******************************************************************************
 * BL1 has passed the extents of the trusted SRAM that should be visible to BL2
 * in x0. This memory layout is sitting at the base of the free trusted SRAM.
 * Copy it to a safe location before it is reclaimed by later BL2 functionality.
 ******************************************************************************/
void arm_bl2_early_platform_setup(uintptr_t fw_config,
				  struct meminfo *mem_layout)
{
	struct transfer_list_entry *te __unused;
	int __maybe_unused ret;

	/* Initialize the console to provide early debug support */
	arm_console_boot_init();

#if TRANSFER_LIST
	/* TODO: modify the prototype of this function, fw_config != bl2_tl */
	secure_tl = (struct transfer_list_header *)fw_config;

	te = transfer_list_find(secure_tl, TL_TAG_SRAM_LAYOUT64);
	assert(te != NULL);

	bl2_tzram_layout = *(meminfo_t *)transfer_list_entry_data(te);
	transfer_list_rem(secure_tl, te);
#else
	config_base = fw_config;

	/* Setup the BL2 memory layout */
	bl2_tzram_layout = *mem_layout;
#endif

	/* Initialise the IO layer and register platform IO devices */
	plat_arm_io_setup();

	/* Load the partition table */
#if ARM_GPT_SUPPORT
	ret = gpt_partition_init();
	if (ret != 0) {
		ERROR("GPT partition initialisation failed!\n");
		panic();
	}
#endif /* ARM_GPT_SUPPORT */
}
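
/*
 * arg0 holds the fw_config address (the secure transfer list when
 * TRANSFER_LIST is enabled) and arg1 the trusted SRAM layout handed over by
 * the previous boot stage; arg2 and arg3 are unused here.
 */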
void bl2_early_platform_setup2(u_register_t arg0, u_register_t arg1,
			       u_register_t arg2, u_register_t arg3)
{
	arm_bl2_early_platform_setup((uintptr_t)arg0, (meminfo_t *)arg1);

	generic_delay_timer_init();
}

/*
 * Perform BL2 preload setup. Currently we initialise the dynamic
 * configuration here.
 */
void bl2_plat_preload_setup(void)
{
#if TRANSFER_LIST
	/* Assume the secure TL hasn't been initialised if BL2 is running at EL3. */
#if RESET_TO_BL2
	secure_tl = transfer_list_init((void *)PLAT_ARM_EL3_FW_HANDOFF_BASE,
				       PLAT_ARM_FW_HANDOFF_SIZE);

	if (secure_tl == NULL) {
		ERROR("Secure transfer list initialisation failed!\n");
		panic();
	}
#endif

	arm_transfer_list_dyn_cfg_init(secure_tl);
#else
#if ARM_FW_CONFIG_LOAD_ENABLE
	arm_bl2_el3_plat_config_load();
#endif /* ARM_FW_CONFIG_LOAD_ENABLE */
	arm_bl2_dyn_cfg_init();
#endif

#if ARM_GPT_SUPPORT && !PSA_FWU_SUPPORT
	/* Always use the FIP from bank 0 */
	arm_set_fip_addr(0U);
#endif /* ARM_GPT_SUPPORT && !PSA_FWU_SUPPORT */
}

/*
 * Perform ARM standard platform setup.
 */
void arm_bl2_platform_setup(void)
{
#if !ENABLE_RME
	/* Initialize the secure environment */
	plat_arm_security_setup();
#endif

#if defined(PLAT_ARM_MEM_PROT_ADDR)
	arm_nor_psci_do_static_mem_protect();
#endif
}

void bl2_platform_setup(void)
{
	arm_bl2_platform_setup();
}

/*******************************************************************************
 * Perform the very early platform-specific architectural setup here.
 * When RME is enabled the secure environment is initialised before
 * initialising and enabling Granule Protection.
 * This function initialises the MMU in a quick and dirty way.
 ******************************************************************************/
void arm_bl2_plat_arch_setup(void)
{
#if USE_COHERENT_MEM
	/* Ensure ARM platforms don't use coherent memory in BL2. */
	assert((BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE) == 0U);
#endif

	const mmap_region_t bl_regions[] = {
		MAP_BL2_TOTAL,
		ARM_MAP_BL_RO,
#if USE_ROMLIB
		ARM_MAP_ROMLIB_CODE,
		ARM_MAP_ROMLIB_DATA,
#endif
#if !TRANSFER_LIST
		ARM_MAP_BL_CONFIG_REGION,
#endif /* TRANSFER_LIST */
#if ENABLE_RME
		ARM_MAP_L0_GPT_REGION,
#endif
		{ 0 }
	};

#if ENABLE_RME
	/* Initialise the secure environment */
	plat_arm_security_setup();
#endif
	setup_page_tables(bl_regions, plat_arm_get_mmap());

#ifdef __aarch64__
#if ENABLE_RME
	/* BL2 runs in EL3 when RME is enabled. */
	assert(is_feat_rme_present());
	enable_mmu_el3(0);

	/* Initialise and enable granule protection after the MMU. */
	arm_gpt_setup();
#else
	enable_mmu_el1(0);
#endif
#else
	enable_mmu_svc_mon(0);
#endif

	arm_setup_romlib();
}

void bl2_plat_arch_setup(void)
{
	const struct dyn_cfg_dtb_info_t *tb_fw_config_info __unused;
	struct transfer_list_entry *te __unused;

	arm_bl2_plat_arch_setup();

#if TRANSFER_LIST
	te = transfer_list_find(secure_tl, TL_TAG_TB_FW_CONFIG);
	assert(te != NULL);

	fconf_populate("TB_FW", (uintptr_t)transfer_list_entry_data(te));
	transfer_list_rem(secure_tl, te);
#else
	/* Fill the properties struct with the info from the config dtb */
	fconf_populate("FW_CONFIG", config_base);

	/* TB_FW_CONFIG was also loaded by BL1 */
	tb_fw_config_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TB_FW_CONFIG_ID);
	assert(tb_fw_config_info != NULL);

	fconf_populate("TB_FW", tb_fw_config_info->config_addr);
#endif
}

int arm_bl2_handle_post_image_load(unsigned int image_id)
{
	int err = 0;
	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
#ifdef SPD_opteed
	bl_mem_params_node_t *pager_mem_params = NULL;
	bl_mem_params_node_t *paged_mem_params = NULL;
#endif
	assert(bl_mem_params != NULL);

	switch (image_id) {
#ifdef __aarch64__
	case BL32_IMAGE_ID:
#ifdef SPD_opteed
		pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
		assert(pager_mem_params != NULL);

		paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
		assert(paged_mem_params != NULL);

		err = parse_optee_header(&bl_mem_params->ep_info,
					 &pager_mem_params->image_info,
					 &paged_mem_params->image_info);
		if (err != 0) {
			WARN("OPTEE header parse error.\n");
		}
#endif
		bl_mem_params->ep_info.spsr = arm_get_spsr_for_bl32_entry();
		break;
#endif

	case BL33_IMAGE_ID:
		/* BL33 expects to receive the primary CPU MPID (through r0) */
		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
		bl_mem_params->ep_info.spsr = arm_get_spsr_for_bl33_entry();
		break;

#ifdef SCP_BL2_BASE
	case SCP_BL2_IMAGE_ID:
		/* The subsequent handling of SCP_BL2 is platform specific */
		err = plat_arm_bl2_handle_scp_bl2(&bl_mem_params->image_info);
		if (err != 0) {
			WARN("Failure in platform-specific handling of SCP_BL2 image.\n");
		}
		break;
#endif
	default:
		/* Do nothing in default case */
		break;
	}

	return err;
}

/*******************************************************************************
 * This function can be used by the platforms to update/use image
 * information for the given `image_id`.
 ******************************************************************************/
int arm_bl2_plat_handle_post_image_load(unsigned int image_id)
{
#if defined(SPD_spmd) && BL2_ENABLE_SP_LOAD
	/* For Secure Partitions we don't need post-processing */
	if ((image_id >= (MAX_NUMBER_IDS - MAX_SP_IDS)) &&
	    (image_id < MAX_NUMBER_IDS)) {
		return 0;
	}
#endif

#if TRANSFER_LIST
	if (image_id == HW_CONFIG_ID) {
		/*
		 * Refresh the now-stale checksum following the loading of
		 * HW_CONFIG into the TL.
		 */
		transfer_list_update_checksum(secure_tl);
	}
#endif /* TRANSFER_LIST */

	return arm_bl2_handle_post_image_load(image_id);
}
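
/*
 * Set up the entry point info for the next image using the secure transfer
 * list, following the firmware handoff register conventions.
 */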
void arm_bl2_setup_next_ep_info(bl_mem_params_node_t *next_param_node)
{
	entry_point_info_t *ep __unused;

	ep = transfer_list_set_handoff_args(secure_tl,
					    &next_param_node->ep_info);
	assert(ep != NULL);

	arm_transfer_list_populate_ep_info(next_param_node, secure_tl);
}