/*
 * Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <platform_def.h>

#include <arch_features.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/desc_image_load.h>
#include <drivers/generic_delay_timer.h>
#include <drivers/partition/partition.h>
#include <lib/fconf/fconf.h>
#include <lib/fconf/fconf_dyn_cfg_getter.h>
#include <lib/gpt_rme/gpt_rme.h>
#if TRANSFER_LIST
#include <transfer_list.h>
#endif
#ifdef SPD_opteed
#include <lib/optee_utils.h>
#endif
#include <lib/utils.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>

/* Data structure which holds the extents of the trusted SRAM for BL2 */
static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);

/* Base address of fw_config received from BL1 */
static uintptr_t config_base __unused;

#if ARM_GPT_SUPPORT
/* FIXME: should be removed once the transfer list version is updated */
#define TL_TAG_GPT_ERROR_INFO	0x109
/*
 * Record GPT corruption information so that it can be reported to BL32.
 */
static void arm_set_gpt_corruption(uintptr_t gpt_corrupted_info_ptr, uint8_t flags)
{
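	/* Accumulate the corruption flags in the shared info byte. */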
	*(uint8_t *)gpt_corrupted_info_ptr |= flags;
}

static void arm_get_gpt_corruption(uintptr_t log_address, uint8_t gpt_corrupted_info)
{
#if TRANSFER_LIST
	/* Convey this information to BL32 via the transfer list. */
	struct transfer_list_entry *te = transfer_list_add(
		(struct transfer_list_header *)log_address,
		TL_TAG_GPT_ERROR_INFO,
		sizeof(gpt_corrupted_info),
		(void *)&gpt_corrupted_info);
	if (te == NULL) {
		ERROR("Failed to log GPT corruption info in transfer list\n");
	}
#endif /* TRANSFER_LIST */
}

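/*
 * Platform hooks registered with the GPT partition code so that any
 * corruption detected while parsing the partition table can be recorded
 * and logged.
 */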
static struct plat_log_gpt_corrupted arm_log_gpt_corruption = {
	.gpt_corrupted_info = 0U,
	.plat_set_gpt_corruption = arm_set_gpt_corruption,
	.plat_log_gpt_corruption = arm_get_gpt_corruption,
};
#endif /* ARM_GPT_SUPPORT */

/*
 * Check that BL2_BASE is above ARM_FW_CONFIG_LIMIT. This reserved page is
 * for the `meminfo_t` data structure and fw_configs passed from BL1.
 */
#if TRANSFER_LIST
CASSERT(BL2_BASE >= PLAT_ARM_EL3_FW_HANDOFF_BASE + PLAT_ARM_FW_HANDOFF_SIZE,
	assert_bl2_base_overflows);
#elif !RESET_TO_BL2
CASSERT(BL2_BASE >= ARM_FW_CONFIG_LIMIT, assert_bl2_base_overflows);
#endif /* TRANSFER_LIST */

/* Weak definitions may be overridden in specific ARM standard platforms */
#pragma weak bl2_early_platform_setup2
#pragma weak bl2_platform_setup
#pragma weak bl2_plat_arch_setup
#pragma weak bl2_plat_sec_mem_layout

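/* Map the whole of the trusted SRAM visible to BL2 as read-write memory. */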
#define MAP_BL2_TOTAL		MAP_REGION_FLAT(			\
					bl2_tzram_layout.total_base,	\
					bl2_tzram_layout.total_size,	\
					MT_MEMORY | MT_RW | EL3_PAS)

#pragma weak arm_bl2_plat_handle_post_image_load

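/* Secure transfer list used to hand information over to subsequent images. */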
struct transfer_list_header *secure_tl __unused;

/*******************************************************************************
 * BL1 has passed the extents of the trusted SRAM that should be visible to BL2
 * in x0. This memory layout is sitting at the base of the free trusted SRAM.
 * Copy it to a safe location before it is reclaimed by later BL2 functionality.
 ******************************************************************************/
void arm_bl2_early_platform_setup(u_register_t arg0, u_register_t arg1,
				  u_register_t arg2, u_register_t arg3)
{
	struct transfer_list_entry *te __unused;
	int __maybe_unused ret;

	/* Initialize the console to provide early debug support */
	arm_console_boot_init();

#if RESET_TO_BL2
	/*
	 * Allow BL2 to see the whole Trusted RAM. This is determined
	 * statically since we cannot rely on BL1 passing this information
	 * in the RESET_TO_BL2 case.
	 */
	bl2_tzram_layout.total_base = ARM_BL_RAM_BASE;
	bl2_tzram_layout.total_size = ARM_BL_RAM_SIZE;
#else /* !RESET_TO_BL2 */
#if TRANSFER_LIST
	secure_tl = (struct transfer_list_header *)arg3;

	te = transfer_list_find(secure_tl, TL_TAG_SRAM_LAYOUT);
	assert(te != NULL);

	bl2_tzram_layout = *(meminfo_t *)transfer_list_entry_data(te);
	transfer_list_rem(secure_tl, te);
#else /* !TRANSFER_LIST */
	config_base = (uintptr_t)arg0;

	/* Setup the BL2 memory layout */
	bl2_tzram_layout = *(meminfo_t *)arg1;
#endif /* TRANSFER_LIST */
#endif /* RESET_TO_BL2 */

	/* Initialise the IO layer and register platform IO devices */
	plat_arm_io_setup();

	/* Load partition table */
#if ARM_GPT_SUPPORT
	plat_setup_log_gpt_corrupted(&arm_log_gpt_corruption);

	ret = gpt_partition_init();
	if (ret != 0) {
		ERROR("GPT partition initialisation failed!\n");
		panic();
	}

#if TRANSFER_LIST && !RESET_TO_BL2
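	/*
	 * Record any corruption flags collected during GPT parsing in the
	 * secure transfer list so that they reach later boot stages.
	 */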
	plat_log_gpt_ptr->plat_log_gpt_corruption((uintptr_t)secure_tl,
						  plat_log_gpt_ptr->gpt_corrupted_info);
#endif /* TRANSFER_LIST && !RESET_TO_BL2 */

#endif /* ARM_GPT_SUPPORT */
}

void bl2_early_platform_setup2(u_register_t arg0, u_register_t arg1,
			       u_register_t arg2, u_register_t arg3)
{
	arm_bl2_early_platform_setup(arg0, arg1, arg2, arg3);

#if RESET_TO_BL2 && !defined(HW_ASSISTED_COHERENCY)
	/*
	 * Initialize Interconnect for this cluster during cold boot.
	 * No need for locks as no other CPU is active.
	 */
	plat_arm_interconnect_init();

	/* Enable Interconnect coherency for the primary CPU's cluster. */
	plat_arm_interconnect_enter_coherency();
#endif
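	/* Initialise the generic delay timer so that delay services are available. */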
	generic_delay_timer_init();
}

#if ARM_FW_CONFIG_LOAD_ENABLE && !TRANSFER_LIST
/*******************************************************************************
 * FW CONFIG load function for BL2 when RESET_TO_BL2=1 &&
 * ARM_FW_CONFIG_LOAD_ENABLE=1
 ******************************************************************************/
static void arm_bl2_plat_config_load(void)
{
	int ret;
	const struct dyn_cfg_dtb_info_t *fw_config_info;

	/* Set global DTB info for fixed fw_config information */
	set_config_info(ARM_FW_CONFIG_BASE, ~0UL, ARM_FW_CONFIG_MAX_SIZE,
			FW_CONFIG_ID);

	/*
	 * Fill the device tree information struct with the info from the
	 * config dtb.
	 */
	ret = fconf_load_config(FW_CONFIG_ID);
	if (ret < 0) {
		ERROR("Loading of FW_CONFIG failed %d\n", ret);
		plat_error_handler(ret);
	}

	/*
	 * FW_CONFIG loaded successfully. Check that the FW_CONFIG device tree
	 * parsing is successful.
	 */
	fw_config_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, FW_CONFIG_ID);
	if (fw_config_info == NULL) {
		ret = -1;
		ERROR("Invalid FW_CONFIG address\n");
		plat_error_handler(ret);
	}
	ret = fconf_populate_dtb_registry(fw_config_info->config_addr);
	if (ret < 0) {
		ERROR("Parsing of FW_CONFIG failed %d\n", ret);
		plat_error_handler(ret);
	}
}
#endif /* ARM_FW_CONFIG_LOAD_ENABLE && !TRANSFER_LIST */

/*
 * Perform BL2 preload setup. Currently we initialise the dynamic
 * configuration here.
 */
void bl2_plat_preload_setup(void)
{
#if ARM_GPT_SUPPORT && !PSA_FWU_SUPPORT
	/*
	 * Find FIP in GPT before FW Config load.
	 * Always use the FIP from bank 0.
	 */
	arm_set_fip_addr(0U);
#endif /* ARM_GPT_SUPPORT && !PSA_FWU_SUPPORT */

#if TRANSFER_LIST
	/* Assume the secure TL hasn't been initialised if BL2 is running at EL3. */
#if RESET_TO_BL2
	secure_tl = transfer_list_ensure((void *)PLAT_ARM_EL3_FW_HANDOFF_BASE,
					 PLAT_ARM_FW_HANDOFF_SIZE);

	if (secure_tl == NULL) {
		ERROR("Secure transfer list initialisation failed!\n");
		panic();
	}
#endif /* RESET_TO_BL2 */
	arm_transfer_list_dyn_cfg_init(secure_tl);
#else /* !TRANSFER_LIST */
#if ARM_FW_CONFIG_LOAD_ENABLE
	arm_bl2_plat_config_load();
#endif /* ARM_FW_CONFIG_LOAD_ENABLE */
	arm_bl2_dyn_cfg_init();
#endif /* TRANSFER_LIST */
}

/*
 * Perform ARM standard platform setup.
 */
void arm_bl2_platform_setup(void)
{
#if !ENABLE_RME
	/* Initialize the secure environment */
	plat_arm_security_setup();
#endif

#if defined(PLAT_ARM_MEM_PROT_ADDR)
	arm_nor_psci_do_static_mem_protect();
#endif
}

void bl2_platform_setup(void)
{
	arm_bl2_platform_setup();
}

/*******************************************************************************
 * Perform the very early platform specific architectural setup here.
 * When RME is enabled the secure environment is initialised before
 * initialising and enabling Granule Protection.
 * This function initialises the MMU in a quick and dirty way.
 ******************************************************************************/
static void arm_bl2_plat_arch_setup(void)
{
#if USE_COHERENT_MEM
	/* Ensure ARM platforms don't use coherent memory in BL2. */
	assert((BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE) == 0U);
#endif

	const mmap_region_t bl_regions[] = {
		MAP_BL2_TOTAL,
		ARM_MAP_BL_RO,
#if USE_ROMLIB
		ARM_MAP_ROMLIB_CODE,
		ARM_MAP_ROMLIB_DATA,
#endif
#if !TRANSFER_LIST
		ARM_MAP_BL_CONFIG_REGION,
#endif /* !TRANSFER_LIST */
#if ENABLE_RME
		ARM_MAP_L0_GPT_REGION,
#endif
		{ 0 }
	};

#if ENABLE_RME
	/* Initialise the secure environment */
	plat_arm_security_setup();
#endif

	setup_page_tables(bl_regions, plat_arm_get_mmap());

#ifdef __aarch64__
	/* BL2 runs at EL3 in case RESET_TO_BL2 or ENABLE_RME is set */
#if BL2_RUNS_AT_EL3
	enable_mmu_el3(0);
#else
	enable_mmu_el1(0);
#endif /* BL2_RUNS_AT_EL3 */

#if ENABLE_RME
	/* Initialise and enable granule protection after MMU. */
	assert(is_feat_rme_present());
	arm_gpt_setup();
#endif /* ENABLE_RME */

#else /* !__aarch64__ */
	enable_mmu_svc_mon(0);
#endif /* __aarch64__ */

	arm_setup_romlib();
}

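/*
 * Perform the very early architectural setup for BL2 and then process the
 * information handed over by BL1: heap information for the crypto module via
 * the transfer list, or the FW_CONFIG/TB_FW_CONFIG device trees otherwise.
 */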
void bl2_plat_arch_setup(void)
{
	const struct dyn_cfg_dtb_info_t *tb_fw_config_info __unused;
	struct transfer_list_entry *te __unused;

	arm_bl2_plat_arch_setup();

#if TRANSFER_LIST
#if CRYPTO_SUPPORT
	te = arm_transfer_list_set_heap_info(secure_tl);
	transfer_list_rem(secure_tl, te);
#endif /* CRYPTO_SUPPORT */
#else

	/*
	 * In the RESET_TO_BL2 case, bl2_plat_preload_setup handles loading
	 * FW_CONFIG when ARM_FW_CONFIG_LOAD_ENABLE=1.
	 */
#if !RESET_TO_BL2
	/* Fill the properties struct with the info from the config dtb */
	fconf_populate("FW_CONFIG", config_base);

	/* TB_FW_CONFIG was also loaded by BL1 */
	tb_fw_config_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TB_FW_CONFIG_ID);
	assert(tb_fw_config_info != NULL);

	fconf_populate("TB_FW", tb_fw_config_info->config_addr);
#endif /* !RESET_TO_BL2 */
#endif /* TRANSFER_LIST */
}

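/*
 * Common post image load handling: parse the OP-TEE header for BL32, program
 * the SPSR for BL32/BL33 and hand SCP_BL2 over to platform specific code.
 */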
int arm_bl2_handle_post_image_load(unsigned int image_id)
{
	int err = 0;
	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
#ifdef SPD_opteed
	bl_mem_params_node_t *pager_mem_params = NULL;
	bl_mem_params_node_t *paged_mem_params = NULL;
#endif
	assert(bl_mem_params != NULL);

	switch (image_id) {
#ifdef __aarch64__
	case BL32_IMAGE_ID:
#ifdef SPD_opteed
		pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
		assert(pager_mem_params);

		paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
		assert(paged_mem_params);

		err = parse_optee_header(&bl_mem_params->ep_info,
					 &pager_mem_params->image_info,
					 &paged_mem_params->image_info);
		if (err != 0) {
			WARN("OPTEE header parse error.\n");
		}
#endif
		bl_mem_params->ep_info.spsr = arm_get_spsr(BL32_IMAGE_ID);
		break;
#endif

	case BL33_IMAGE_ID:
#if !USE_KERNEL_DT_CONVENTION
		/* BL33 expects to receive the primary CPU MPID (through r0) */
		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
#endif /* !USE_KERNEL_DT_CONVENTION */
		bl_mem_params->ep_info.spsr = arm_get_spsr(BL33_IMAGE_ID);
		break;

#ifdef SCP_BL2_BASE
	case SCP_BL2_IMAGE_ID:
		/* The subsequent handling of SCP_BL2 is platform specific */
		err = plat_arm_bl2_handle_scp_bl2(&bl_mem_params->image_info);
		if (err) {
			WARN("Failure in platform-specific handling of SCP_BL2 image.\n");
		}
		break;
#endif
	default:
		/* Do nothing in default case */
		break;
	}

	return err;
}

/*******************************************************************************
 * This function can be used by the platforms to update/use image
 * information for a given `image_id`.
 ******************************************************************************/
int arm_bl2_plat_handle_post_image_load(unsigned int image_id)
{
#if defined(SPD_spmd) && BL2_ENABLE_SP_LOAD
	/* For Secure Partitions we don't need post processing */
	if ((image_id >= (MAX_NUMBER_IDS - MAX_SP_IDS)) &&
	    (image_id < MAX_NUMBER_IDS)) {
		return 0;
	}
#endif

#if TRANSFER_LIST
	if (image_id == HW_CONFIG_ID || image_id == TOS_FW_CONFIG_ID) {
		/*
		 * Refresh the now stale checksum following loading of
		 * HW_CONFIG or TOS_FW_CONFIG into the TL.
		 */
		transfer_list_update_checksum(secure_tl);
	}
#endif /* TRANSFER_LIST */

	return arm_bl2_handle_post_image_load(image_id);
}

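/*
 * Finalise the secure transfer list and register the hand-off arguments for
 * the next image described by `next_param_node` when TRANSFER_LIST is enabled.
 */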
void arm_bl2_setup_next_ep_info(bl_mem_params_node_t *next_param_node)
{
	entry_point_info_t *ep __unused;

#if TRANSFER_LIST
	/*
	 * Information might have been added to the TL before this (e.g. the
	 * event log), so make sure the checksum is up to date.
	 */
	transfer_list_update_checksum(secure_tl);

	ep = transfer_list_set_handoff_args(secure_tl,
					    &next_param_node->ep_info);
	assert(ep != NULL);

	arm_transfer_list_populate_ep_info(next_param_node, secure_tl);
#endif
}