xref: /rk3399_ARM-atf/plat/arm/common/arm_bl2_setup.c (revision 52a502f90320b4c1dc44b32cb7b18271b75525eb)
/*
 * Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <platform_def.h>

#include <arch_features.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/desc_image_load.h>
#include <drivers/generic_delay_timer.h>
#include <drivers/partition/partition.h>
#include <lib/fconf/fconf.h>
#include <lib/fconf/fconf_dyn_cfg_getter.h>
#include <lib/gpt_rme/gpt_rme.h>
#if TRANSFER_LIST
#include <transfer_list.h>
#endif
#ifdef SPD_opteed
#include <lib/optee_utils.h>
#endif
#include <lib/utils.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>

/* Data structure which holds the extents of the trusted SRAM for BL2 */
static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);

/* Base address of fw_config received from BL1 */
static uintptr_t config_base __unused;

#if ARM_GPT_SUPPORT
/* FIXME: should be removed once the transfer list version is updated. */
#define TL_TAG_GPT_ERROR_INFO	0x109

/*
 * Record GPT corruption flags so that the corruption can later be reported
 * to BL32.
 */
static void arm_set_gpt_corruption(uintptr_t gpt_corrupted_info_ptr, uint8_t flags)
{
	*(uint8_t *)gpt_corrupted_info_ptr |= flags;
}

static void arm_get_gpt_corruption(uintptr_t log_address, uint8_t gpt_corrupted_info)
{
#if TRANSFER_LIST
	/* Convey this information to the next boot stage via the secure TL. */
	struct transfer_list_entry *te = transfer_list_add(
		(struct transfer_list_header *)log_address,
		TL_TAG_GPT_ERROR_INFO,
		sizeof(gpt_corrupted_info),
		(void *)&gpt_corrupted_info);
	if (te == NULL) {
		ERROR("Failed to log GPT corruption info in transfer list\n");
	}
#endif /* TRANSFER_LIST */
}

static struct plat_log_gpt_corrupted arm_log_gpt_corruption = {
	.gpt_corrupted_info = 0U,
	.plat_set_gpt_corruption = arm_set_gpt_corruption,
	.plat_log_gpt_corruption = arm_get_gpt_corruption,
};
#endif /* ARM_GPT_SUPPORT */
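
/*
 * Note: a later stage that receives the secure transfer list can retrieve the
 * corruption flags recorded above. A minimal sketch (assuming it has the
 * received TL pointer `tl` and defines the same tag value) might look like:
 *
 *	struct transfer_list_entry *te =
 *		transfer_list_find(tl, TL_TAG_GPT_ERROR_INFO);
 *	if (te != NULL) {
 *		uint8_t flags = *(uint8_t *)transfer_list_entry_data(te);
 *		... act on the recorded corruption flags ...
 *	}
 */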

/*
 * Check that BL2_BASE does not overlap the memory reserved below it: the EL3
 * firmware handoff area when TRANSFER_LIST is enabled, otherwise the page
 * below ARM_FW_CONFIG_LIMIT holding the `meminfo_t` data structure and the
 * fw_configs passed from BL1.
 */
#if TRANSFER_LIST
CASSERT(BL2_BASE >= PLAT_ARM_EL3_FW_HANDOFF_BASE + PLAT_ARM_FW_HANDOFF_SIZE,
	assert_bl2_base_overflows);
#elif !RESET_TO_BL2
CASSERT(BL2_BASE >= ARM_FW_CONFIG_LIMIT, assert_bl2_base_overflows);
#endif /* TRANSFER_LIST */

/* Weak definitions may be overridden in specific ARM standard platforms */
#pragma weak bl2_early_platform_setup2
#pragma weak bl2_platform_setup
#pragma weak bl2_plat_arch_setup
#pragma weak bl2_plat_sec_mem_layout

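/*
 * MAP_BL2_TOTAL maps the whole extent of trusted SRAM handed to BL2 (as
 * described by bl2_tzram_layout) as normal read-write memory.
 */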
#define MAP_BL2_TOTAL		MAP_REGION_FLAT(			\
					bl2_tzram_layout.total_base,	\
					bl2_tzram_layout.total_size,	\
					MT_MEMORY | MT_RW | EL3_PAS)

#pragma weak arm_bl2_plat_handle_post_image_load

struct transfer_list_header *secure_tl __unused;
/*******************************************************************************
 * BL1 has passed the extents of the trusted SRAM that should be visible to BL2
 * through its boot arguments. This memory layout is sitting at the base of the
 * free trusted SRAM. Copy it to a safe location before it is reclaimed by
 * later BL2 functionality.
 ******************************************************************************/
void arm_bl2_early_platform_setup(u_register_t arg0, u_register_t arg1,
				  u_register_t arg2, u_register_t arg3)
{
	struct transfer_list_entry *te __unused;
	int __maybe_unused ret;

	/* Initialize the console to provide early debug support */
	arm_console_boot_init();

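	/*
	 * Hand-off convention, as consumed below: with TRANSFER_LIST, BL1
	 * passes the secure transfer list in arg3 and the SRAM layout is read
	 * from its TL_TAG_SRAM_LAYOUT entry; otherwise arg0 carries the
	 * fw_config base and arg1 points to the meminfo_t describing BL2's
	 * view of trusted SRAM.
	 */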
#if TRANSFER_LIST
	secure_tl = (struct transfer_list_header *)arg3;

	te = transfer_list_find(secure_tl, TL_TAG_SRAM_LAYOUT);
	assert(te != NULL);

	bl2_tzram_layout = *(meminfo_t *)transfer_list_entry_data(te);
	transfer_list_rem(secure_tl, te);
#else
	config_base = (uintptr_t)arg0;

	/* Setup the BL2 memory layout */
	bl2_tzram_layout = *(meminfo_t *)arg1;
#endif /* TRANSFER_LIST */

	/* Initialise the IO layer and register platform IO devices */
	plat_arm_io_setup();

	/* Load partition table */
#if ARM_GPT_SUPPORT
	plat_setup_log_gpt_corrupted(&arm_log_gpt_corruption);

	ret = gpt_partition_init();
	if (ret != 0) {
		ERROR("GPT partition initialisation failed!\n");
		panic();
	}

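	/*
	 * Record the GPT corruption flags gathered so far in the secure
	 * transfer list so the next stage can act on them.
	 */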
#if TRANSFER_LIST
	plat_log_gpt_ptr->plat_log_gpt_corruption((uintptr_t)secure_tl,
						   plat_log_gpt_ptr->gpt_corrupted_info);
#endif	/* TRANSFER_LIST */

#endif /* ARM_GPT_SUPPORT */
}

void bl2_early_platform_setup2(u_register_t arg0, u_register_t arg1,
			       u_register_t arg2, u_register_t arg3)
{
	arm_bl2_early_platform_setup(arg0, arg1, arg2, arg3);

	generic_delay_timer_init();
}

/*
 * Perform BL2 preload setup. Currently we initialise the dynamic
 * configuration here.
 */
void bl2_plat_preload_setup(void)
{
#if ARM_GPT_SUPPORT && !PSA_FWU_SUPPORT
	/*
	 * Find FIP in GPT before FW Config load.
	 * Always use the FIP from bank 0.
	 */
	arm_set_fip_addr(0U);
#endif /* ARM_GPT_SUPPORT && !PSA_FWU_SUPPORT */

#if TRANSFER_LIST
/* Assume the secure TL hasn't been initialised if BL2 is running at EL3. */
#if RESET_TO_BL2
	secure_tl = transfer_list_ensure((void *)PLAT_ARM_EL3_FW_HANDOFF_BASE,
					 PLAT_ARM_FW_HANDOFF_SIZE);

	if (secure_tl == NULL) {
		ERROR("Secure transfer list initialisation failed!\n");
		panic();
	}
#endif
	arm_transfer_list_dyn_cfg_init(secure_tl);
#else
#if ARM_FW_CONFIG_LOAD_ENABLE
	arm_bl2_el3_plat_config_load();
#endif /* ARM_FW_CONFIG_LOAD_ENABLE */
	arm_bl2_dyn_cfg_init();
#endif
}

/*
 * Perform ARM standard platform setup.
 */
void arm_bl2_platform_setup(void)
{
#if !ENABLE_RME
	/* Initialize the secure environment */
	plat_arm_security_setup();
#endif

#if defined(PLAT_ARM_MEM_PROT_ADDR)
	arm_nor_psci_do_static_mem_protect();
#endif
}

void bl2_platform_setup(void)
{
	arm_bl2_platform_setup();
}

/*******************************************************************************
 * Perform the very early platform-specific architectural setup here.
 * When RME is enabled the secure environment is initialised before
 * initialising and enabling Granule Protection.
 * This function initialises the MMU in a quick and dirty way.
 ******************************************************************************/
void arm_bl2_plat_arch_setup(void)
{
#if USE_COHERENT_MEM
	/* Ensure ARM platforms don't use coherent memory in BL2. */
	assert((BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE) == 0U);
#endif

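	/*
	 * Regions mapped by BL2: its own SRAM extent (MAP_BL2_TOTAL), the
	 * common read-only region (ARM_MAP_BL_RO) and, depending on the build
	 * options, the ROMLIB, config and L0 GPT regions. The list is
	 * zero-terminated.
	 */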
	const mmap_region_t bl_regions[] = {
		MAP_BL2_TOTAL,
		ARM_MAP_BL_RO,
#if USE_ROMLIB
		ARM_MAP_ROMLIB_CODE,
		ARM_MAP_ROMLIB_DATA,
#endif
#if !TRANSFER_LIST
		ARM_MAP_BL_CONFIG_REGION,
#endif /* TRANSFER_LIST */
#if ENABLE_RME
		ARM_MAP_L0_GPT_REGION,
#endif
		{ 0 }
	};

#if ENABLE_RME
	/* Initialise the secure environment */
	plat_arm_security_setup();
#endif
	setup_page_tables(bl_regions, plat_arm_get_mmap());

#ifdef __aarch64__
#if ENABLE_RME
	/* BL2 runs in EL3 when RME is enabled. */
	assert(is_feat_rme_present());
	enable_mmu_el3(0);

	/* Initialise and enable granule protection after the MMU is enabled. */
	arm_gpt_setup();
#else
	enable_mmu_el1(0);
#endif
#else
	enable_mmu_svc_mon(0);
#endif

	arm_setup_romlib();
}

void bl2_plat_arch_setup(void)
{
	const struct dyn_cfg_dtb_info_t *tb_fw_config_info __unused;
	struct transfer_list_entry *te __unused;

	arm_bl2_plat_arch_setup();

#if TRANSFER_LIST
#if CRYPTO_SUPPORT
	te = arm_transfer_list_set_heap_info(secure_tl);
	transfer_list_rem(secure_tl, te);
#endif /* CRYPTO_SUPPORT */
#else
	/* Fill the properties struct with the info from the config dtb */
	fconf_populate("FW_CONFIG", config_base);

	/* TB_FW_CONFIG was also loaded by BL1 */
	tb_fw_config_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TB_FW_CONFIG_ID);
	assert(tb_fw_config_info != NULL);

	fconf_populate("TB_FW", tb_fw_config_info->config_addr);
#endif /* TRANSFER_LIST */
}

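/*
 * Common Arm post-image-load handling: parse the OP-TEE header for BL32 when
 * SPD_opteed is enabled, program the SPSR for BL32/BL33, pass the primary CPU
 * MPID to BL33 (unless the kernel DT convention is used) and hand SCP_BL2 over
 * to the platform-specific handler.
 */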
int arm_bl2_handle_post_image_load(unsigned int image_id)
{
	int err = 0;
	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
#ifdef SPD_opteed
	bl_mem_params_node_t *pager_mem_params = NULL;
	bl_mem_params_node_t *paged_mem_params = NULL;
#endif
	assert(bl_mem_params != NULL);

	switch (image_id) {
#ifdef __aarch64__
	case BL32_IMAGE_ID:
#ifdef SPD_opteed
		pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
		assert(pager_mem_params != NULL);

		paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
		assert(paged_mem_params != NULL);

		err = parse_optee_header(&bl_mem_params->ep_info,
				&pager_mem_params->image_info,
				&paged_mem_params->image_info);
		if (err != 0) {
			WARN("OPTEE header parse error.\n");
		}
#endif
		bl_mem_params->ep_info.spsr = arm_get_spsr(BL32_IMAGE_ID);
		break;
#endif

	case BL33_IMAGE_ID:
#if !USE_KERNEL_DT_CONVENTION
		/* BL33 expects to receive the primary CPU MPID (through r0) */
		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
#endif /* !USE_KERNEL_DT_CONVENTION */
		bl_mem_params->ep_info.spsr = arm_get_spsr(BL33_IMAGE_ID);
		break;

#ifdef SCP_BL2_BASE
	case SCP_BL2_IMAGE_ID:
		/* The subsequent handling of SCP_BL2 is platform specific */
		err = plat_arm_bl2_handle_scp_bl2(&bl_mem_params->image_info);
		if (err != 0) {
			WARN("Failure in platform-specific handling of SCP_BL2 image.\n");
		}
		break;
#endif
	default:
		/* Do nothing in default case */
		break;
	}

	return err;
}

/*******************************************************************************
 * This function can be used by platforms to update/use image information
 * for a given `image_id`.
 ******************************************************************************/
int arm_bl2_plat_handle_post_image_load(unsigned int image_id)
{
#if defined(SPD_spmd) && BL2_ENABLE_SP_LOAD
	/* For Secure Partitions we don't need post processing */
	if ((image_id >= (MAX_NUMBER_IDS - MAX_SP_IDS)) &&
		(image_id < MAX_NUMBER_IDS)) {
		return 0;
	}
#endif

#if TRANSFER_LIST
	if (image_id == HW_CONFIG_ID || image_id == TOS_FW_CONFIG_ID) {
		/*
		 * Refresh the now stale checksum following loading of
		 * HW_CONFIG or TOS_FW_CONFIG into the TL.
		 */
		transfer_list_update_checksum(secure_tl);
	}
#endif /* TRANSFER_LIST */

	return arm_bl2_handle_post_image_load(image_id);
}

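/*
 * Prepare the hand-off to the next image: refresh the transfer list checksum,
 * set the standard TL hand-off arguments in the image's entry point info and
 * populate the remaining per-image entry point information.
 */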
void arm_bl2_setup_next_ep_info(bl_mem_params_node_t *next_param_node)
{
	entry_point_info_t *ep __unused;

#if TRANSFER_LIST
	/*
	 * Information might have been added to the TL before this point
	 * (e.g. the event log), so make sure the checksum is up to date.
	 */
	transfer_list_update_checksum(secure_tl);

	ep = transfer_list_set_handoff_args(secure_tl,
					    &next_param_node->ep_info);
	assert(ep != NULL);

	arm_transfer_list_populate_ep_info(next_param_node, secure_tl);
#endif
}
388