/* xref: /rk3399_ARM-atf/plat/arm/common/arm_bl31_setup.c (revision 1b7f51ea1662810dea4112a543f2309fe44fdca6) */
/*
 * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <drivers/console.h>
#include <lib/debugfs.h>
#include <lib/extensions/ras.h>
#include <lib/fconf/fconf.h>
#include <lib/gpt_rme/gpt_rme.h>
#include <lib/mmio.h>
#if TRANSFER_LIST
#include <lib/transfer_list.h>
#endif
#include <lib/xlat_tables/xlat_tables_compat.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>
#include <platform_def.h>

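/*
 * When TRANSFER_LIST is enabled, this caches the secure transfer list header
 * handed over by BL2; bl31_plat_arch_setup() looks up the HW_CONFIG entry in
 * it once the MMU has been enabled.
 */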
static struct transfer_list_header *secure_tl __unused;
/*
 * Placeholder variables for copying the arguments that have been passed to
 * BL31 from BL2.
 */
static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;
#if ENABLE_RME
static entry_point_info_t rmm_image_ep_info;
#endif

#if !RESET_TO_BL31
/*
 * Check that BL31_BASE is above ARM_FW_CONFIG_LIMIT. The reserved page
 * is required for SOC_FW_CONFIG/TOS_FW_CONFIG passed from BL2.
 */
#if TRANSFER_LIST
CASSERT(BL31_BASE >= PLAT_ARM_EL3_FW_HANDOFF_LIMIT, assert_bl31_base_overflows);
#else
CASSERT(BL31_BASE >= ARM_FW_CONFIG_LIMIT, assert_bl31_base_overflows);
#endif /* TRANSFER_LIST */
#endif /* RESET_TO_BL31 */

/* Weak definitions may be overridden by a specific ARM standard platform. */
#pragma weak bl31_early_platform_setup2
#pragma weak bl31_platform_setup
#pragma weak bl31_plat_arch_setup
#pragma weak bl31_plat_get_next_image_ep_info
#pragma weak bl31_plat_runtime_setup

#define MAP_BL31_TOTAL		MAP_REGION_FLAT(			\
					BL31_START,			\
					BL31_END - BL31_START,		\
					MT_MEMORY | MT_RW | EL3_PAS)
#if RECLAIM_INIT_CODE
IMPORT_SYM(unsigned long, __INIT_CODE_START__, BL_INIT_CODE_BASE);
IMPORT_SYM(unsigned long, __INIT_CODE_END__, BL_CODE_END_UNALIGNED);
IMPORT_SYM(unsigned long, __STACKS_END__, BL_STACKS_END_UNALIGNED);

#define	BL_INIT_CODE_END	((BL_CODE_END_UNALIGNED + PAGE_SIZE - 1) & \
					~(PAGE_SIZE - 1))
#define	BL_STACKS_END		((BL_STACKS_END_UNALIGNED + PAGE_SIZE - 1) & \
					~(PAGE_SIZE - 1))
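/*
 * The linker-provided end symbols above are not necessarily page-aligned, so
 * they are rounded up to the next page boundary before being used to describe
 * whole-page mmap regions.
 */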

#define MAP_BL_INIT_CODE	MAP_REGION_FLAT(			\
					BL_INIT_CODE_BASE,		\
					BL_INIT_CODE_END		\
						- BL_INIT_CODE_BASE,	\
					MT_CODE | EL3_PAS)
#endif

#if SEPARATE_NOBITS_REGION
#define MAP_BL31_NOBITS		MAP_REGION_FLAT(			\
					BL31_NOBITS_BASE,		\
					BL31_NOBITS_LIMIT		\
						- BL31_NOBITS_BASE,	\
					MT_MEMORY | MT_RW | EL3_PAS)

#endif
/*******************************************************************************
 * Return a pointer to the 'entry_point_info' structure of the next image for
 * the security state specified. BL33 corresponds to the non-secure image type
 * while BL32 corresponds to the secure image type. A NULL pointer is returned
 * if the image does not exist.
 ******************************************************************************/
struct entry_point_info *bl31_plat_get_next_image_ep_info(uint32_t type)
{
	entry_point_info_t *next_image_info;

	assert(sec_state_is_valid(type));
	if (type == NON_SECURE) {
		next_image_info = &bl33_image_ep_info;
	}
#if ENABLE_RME
	else if (type == REALM) {
		next_image_info = &rmm_image_ep_info;
	}
#endif
	else {
		next_image_info = &bl32_image_ep_info;
	}

	/*
	 * None of the images on the ARM development platforms can have 0x0
	 * as the entrypoint.
	 */
	if (next_image_info->pc)
		return next_image_info;
	else
		return NULL;
}

/*******************************************************************************
 * Perform any BL31 early platform setup common to ARM standard platforms.
 * Here is an opportunity to copy parameters passed by the calling EL (S-EL1
 * in BL2 & EL3 in BL1) before they are potentially lost. This needs to be
 * done before the MMU is initialized so that the memory layout can be used
 * while creating page tables. BL2 has flushed this information to memory, so
 * we are guaranteed to pick up good data.
 ******************************************************************************/
#if TRANSFER_LIST
void __init arm_bl31_early_platform_setup(u_register_t arg0, u_register_t arg1,
					  u_register_t arg2, u_register_t arg3)
{
#if RESET_TO_BL31
	/* Populate entry point information for BL33 */
	SET_PARAM_HEAD(&bl33_image_ep_info, PARAM_EP, VERSION_1, 0);
	/*
	 * Tell BL31 where the non-trusted software image is located and what
	 * its entry state is.
	 */
	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();

	bl33_image_ep_info.spsr = arm_get_spsr_for_bl33_entry();
	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);

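	/*
	 * Entry arguments for BL33 follow the Firmware Handoff register
	 * convention: x0 points at the preloaded DTB within the non-secure
	 * handoff area, x1 carries the transfer list signature and register
	 * convention version, and x3 holds the non-secure transfer list base.
	 */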
	bl33_image_ep_info.args.arg0 =
		FW_NS_HANDOFF_BASE + ARM_PRELOADED_DTB_OFFSET;
	bl33_image_ep_info.args.arg1 = TRANSFER_LIST_SIGNATURE |
				       REGISTER_CONVENTION_VERSION_MASK;
	bl33_image_ep_info.args.arg3 = FW_NS_HANDOFF_BASE;
#else
	struct transfer_list_entry *te = NULL;
	struct entry_point_info *ep;

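	/* BL2 hands over the secure transfer list base address in arg3. */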
	secure_tl = (struct transfer_list_header *)arg3;

	/*
	 * Populate the global entry point structures used to execute subsequent
	 * images.
	 */
	while ((te = transfer_list_next(secure_tl, te)) != NULL) {
		ep = transfer_list_entry_data(te);

		if (te->tag_id == TL_TAG_EXEC_EP_INFO64) {
			switch (GET_SECURITY_STATE(ep->h.attr)) {
			case NON_SECURE:
				bl33_image_ep_info = *ep;
				break;
#if ENABLE_RME
			case REALM:
				rmm_image_ep_info = *ep;
				break;
#endif
			case SECURE:
				bl32_image_ep_info = *ep;
				break;
			default:
				ERROR("Unrecognized Image Security State %lu\n",
				      GET_SECURITY_STATE(ep->h.attr));
				panic();
			}
		}
	}
#endif /* RESET_TO_BL31 */
}
#else
void __init arm_bl31_early_platform_setup(void *from_bl2, uintptr_t soc_fw_config,
				uintptr_t hw_config, void *plat_params_from_bl2)
{
	/* Initialize the console to provide early debug support */
	arm_console_boot_init();

#if RESET_TO_BL31
	/* There are no parameters from BL2 if BL31 is a reset vector */
	assert(from_bl2 == NULL);
	assert(plat_params_from_bl2 == NULL);

# ifdef BL32_BASE
	/* Populate entry point information for BL32 */
	SET_PARAM_HEAD(&bl32_image_ep_info,
				PARAM_EP,
				VERSION_1,
				0);
	SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
	bl32_image_ep_info.pc = BL32_BASE;
	bl32_image_ep_info.spsr = arm_get_spsr_for_bl32_entry();

#if defined(SPD_spmd)
	/*
	 * SPM (Hafnium in the secure world) expects the SPM Core manifest base
	 * address in x0. In the !RESET_TO_BL31 case the manifest is loaded just
	 * after the base of non-shared SRAM (at a 4KB offset into SRAM). In the
	 * RESET_TO_BL31 case all non-shared SRAM is allocated to BL31, so keep
	 * the manifest in the last page to avoid overwriting it.
	 */
	bl32_image_ep_info.args.arg0 = ARM_TRUSTED_SRAM_BASE +
				PLAT_ARM_TRUSTED_SRAM_SIZE - PAGE_SIZE;
#endif

# endif /* BL32_BASE */

	/* Populate entry point information for BL33 */
	SET_PARAM_HEAD(&bl33_image_ep_info,
				PARAM_EP,
				VERSION_1,
				0);
	/*
	 * Tell BL31 where the non-trusted software image is located and what
	 * its entry state is.
	 */
	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();

	bl33_image_ep_info.spsr = arm_get_spsr_for_bl33_entry();
	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);

#if ENABLE_RME
	/*
	 * Populate entry point information for RMM.
	 * Only PC needs to be set as other fields are determined by RMMD.
	 */
	rmm_image_ep_info.pc = RMM_BASE;
#endif /* ENABLE_RME */

#else /* RESET_TO_BL31 */

	/*
	 * In debug builds, we pass a special value in 'plat_params_from_bl2'
	 * to verify platform parameters from BL2 to BL31.
	 * In release builds, it's not used.
	 */
	assert(((unsigned long long)plat_params_from_bl2) ==
		ARM_BL31_PLAT_PARAM_VAL);

	/* Check that the params passed from BL2 are not NULL. */
	bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
	assert(params_from_bl2 != NULL);
	assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
	assert(params_from_bl2->h.version >= VERSION_2);

	bl_params_node_t *bl_params = params_from_bl2->head;

	/*
	 * Copy BL33, BL32 and RMM (if present) entry point information.
	 * They are stored in Secure RAM, in BL2's address space.
	 */
	while (bl_params != NULL) {
		if (bl_params->image_id == BL32_IMAGE_ID) {
			bl32_image_ep_info = *bl_params->ep_info;
#if SPMC_AT_EL3
			/*
			 * Populate the BL32 image base, size and max limit in
			 * the entry point information, since there is no
			 * platform function to retrieve them in generic
			 * code. We choose arg2, arg3 and arg4 since the generic
			 * code uses arg1 for stashing the SP manifest size. The
			 * SPMC setup uses these arguments to update the SP
			 * manifest with the actual SP base address and size.
			 */
			bl32_image_ep_info.args.arg2 =
				bl_params->image_info->image_base;
			bl32_image_ep_info.args.arg3 =
				bl_params->image_info->image_size;
			bl32_image_ep_info.args.arg4 =
				bl_params->image_info->image_base +
				bl_params->image_info->image_max_size;
#endif
		}
#if ENABLE_RME
		else if (bl_params->image_id == RMM_IMAGE_ID) {
			rmm_image_ep_info = *bl_params->ep_info;
		}
#endif
		else if (bl_params->image_id == BL33_IMAGE_ID) {
			bl33_image_ep_info = *bl_params->ep_info;
		}

		bl_params = bl_params->next_params_info;
	}

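	/* BL33, and RMM when RME is enabled, must have been loaded by BL2. */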
	if (bl33_image_ep_info.pc == 0U)
		panic();
#if ENABLE_RME
	if (rmm_image_ep_info.pc == 0U)
		panic();
#endif
#endif /* RESET_TO_BL31 */

# if ARM_LINUX_KERNEL_AS_BL33
	/*
	 * According to ``Documentation/arm64/booting.txt`` in the Linux kernel
	 * tree, Linux expects the physical address of the device tree blob
	 * (DTB) in x0, while x1-x3 are reserved for future use and must be 0.
	 * The same option is also used to load the Hafnium hypervisor in the
	 * normal world; it expects its manifest address in x0. That manifest
	 * is essentially the Linux DTS (passed to the primary VM) extended
	 * with 'hypervisor' and 'chosen' nodes specifying the hypervisor
	 * configuration.
	 */
#if RESET_TO_BL31
	bl33_image_ep_info.args.arg0 = (u_register_t)ARM_PRELOADED_DTB_BASE;
#else
	bl33_image_ep_info.args.arg0 = (u_register_t)hw_config;
#endif
	bl33_image_ep_info.args.arg1 = 0U;
	bl33_image_ep_info.args.arg2 = 0U;
	bl33_image_ep_info.args.arg3 = 0U;
# endif
}
#endif

void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
		u_register_t arg2, u_register_t arg3)
{
#if TRANSFER_LIST
	arm_bl31_early_platform_setup(arg0, arg1, arg2, arg3);
#else
	arm_bl31_early_platform_setup((void *)arg0, arg1, arg2, (void *)arg3);
#endif

	/*
	 * Initialize Interconnect for this cluster during cold boot.
	 * No need for locks as no other CPU is active.
	 */
	plat_arm_interconnect_init();

	/*
	 * Enable Interconnect coherency for the primary CPU's cluster.
	 * Earlier bootloader stages might already do this (e.g. Trusted
	 * Firmware's BL1 does it) but we can't assume so. There is no harm in
	 * executing this code twice anyway.
	 * Platform specific PSCI code will enable coherency for other
	 * clusters.
	 */
	plat_arm_interconnect_enter_coherency();
}

/*******************************************************************************
 * Perform any BL31 platform setup common to ARM standard platforms.
 ******************************************************************************/
void arm_bl31_platform_setup(void)
{
	/* Initialize the GIC driver, cpu and distributor interfaces */
	plat_arm_gic_driver_init();
	plat_arm_gic_init();

#if RESET_TO_BL31
	/*
	 * Do initial security configuration to allow DRAM/device access
	 * (if earlier BL has not already done so).
	 */
	plat_arm_security_setup();

#if defined(PLAT_ARM_MEM_PROT_ADDR)
	arm_nor_psci_do_dyn_mem_protect();
#endif /* PLAT_ARM_MEM_PROT_ADDR */

#endif /* RESET_TO_BL31 */

	/* Enable and initialize the System level generic timer */
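	/*
	 * CNTCR_EN starts the counter; CNTCR_FCREQ(0) requests the base
	 * frequency, i.e. entry 0 of the counter's frequency modes table.
	 */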
	mmio_write_32(ARM_SYS_CNTCTL_BASE + CNTCR_OFF,
			CNTCR_FCREQ(0U) | CNTCR_EN);

	/* Allow access to the System counter timer module */
	arm_configure_sys_timer();

	/* Initialize power controller before setting up topology */
	plat_arm_pwrc_setup();

#if ENABLE_FEAT_RAS && FFH_SUPPORT
	ras_init();
#endif

#if USE_DEBUGFS
	debugfs_init();
#endif /* USE_DEBUGFS */
}

/*******************************************************************************
 * Perform any BL31 platform runtime setup prior to BL31 exit common to ARM
 * standard platforms.
 ******************************************************************************/
void arm_bl31_plat_runtime_setup(void)
{
	/* Initialize the runtime console */
	arm_console_runtime_init();

#if RECLAIM_INIT_CODE
	arm_free_init_memory();
#endif

#if PLAT_RO_XLAT_TABLES
	arm_xlat_make_tables_readonly();
#endif
}

#if RECLAIM_INIT_CODE
/*
 * Make the memory holding the image's boot-time (init) code RW so it can be
 * reclaimed as stack space for the secondary cores, or RO where it cannot be
 * reclaimed:
 *
 *            |-------- INIT SECTION --------|
 *  -----------------------------------------
 * |  CORE 0  |  CORE 1  |  CORE 2  | EXTRA  |
 * |  STACK   |  STACK   |  STACK   | SPACE  |
 *  -----------------------------------------
 *             <-------------------> <------>
 *                MAKE RW AND XN       MAKE
 *                  FOR STACKS       RO AND XN
 */
void arm_free_init_memory(void)
{
	int ret = 0;

	if (BL_STACKS_END < BL_INIT_CODE_END) {
		/* Reclaim some of the init section as stack if possible. */
		if (BL_INIT_CODE_BASE < BL_STACKS_END) {
			ret |= xlat_change_mem_attributes(BL_INIT_CODE_BASE,
					BL_STACKS_END - BL_INIT_CODE_BASE,
					MT_RW_DATA);
		}
		/* Make the rest of the init section read-only. */
		ret |= xlat_change_mem_attributes(BL_STACKS_END,
				BL_INIT_CODE_END - BL_STACKS_END,
				MT_RO_DATA);
	} else {
		/* The stacks cover the init section, so reclaim it all. */
		ret |= xlat_change_mem_attributes(BL_INIT_CODE_BASE,
				BL_INIT_CODE_END - BL_INIT_CODE_BASE,
				MT_RW_DATA);
	}

	if (ret != 0) {
		ERROR("Could not reclaim initialization code\n");
		panic();
	}
}
#endif

void __init bl31_platform_setup(void)
{
	arm_bl31_platform_setup();
}

void bl31_plat_runtime_setup(void)
{
	arm_bl31_plat_runtime_setup();
}

/*******************************************************************************
 * Perform the very early platform specific architectural setup shared between
 * ARM standard platforms. This only does basic initialization. Later
 * architectural setup (bl31_arch_setup()) does not do anything platform
 * specific.
 ******************************************************************************/
void __init arm_bl31_plat_arch_setup(void)
{
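	/*
	 * Memory regions that BL31 itself needs mapped; which entries are
	 * present depends on the build options below. Platform specific
	 * regions are added via plat_arm_get_mmap().
	 */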
	const mmap_region_t bl_regions[] = {
		MAP_BL31_TOTAL,
#if ENABLE_RME
		ARM_MAP_L0_GPT_REGION,
#endif
#if RECLAIM_INIT_CODE
		MAP_BL_INIT_CODE,
#endif
#if SEPARATE_NOBITS_REGION
		MAP_BL31_NOBITS,
#endif
		ARM_MAP_BL_RO,
#if USE_ROMLIB
		ARM_MAP_ROMLIB_CODE,
		ARM_MAP_ROMLIB_DATA,
#endif
#if USE_COHERENT_MEM
		ARM_MAP_BL_COHERENT_RAM,
#endif
		{0}
	};

	setup_page_tables(bl_regions, plat_arm_get_mmap());

	enable_mmu_el3(0);

#if ENABLE_RME
	/*
	 * Initialise Granule Protection library and enable GPC for the primary
	 * processor. The tables have already been initialized by a previous BL
	 * stage, so there is no need to provide any PAS here. This function
	 * sets up pointers to those tables.
	 */
	if (gpt_runtime_init() < 0) {
		ERROR("gpt_runtime_init() failed!\n");
		panic();
	}
#endif /* ENABLE_RME */

	arm_setup_romlib();
}

void __init bl31_plat_arch_setup(void)
{
	struct transfer_list_entry *te __unused;

	arm_bl31_plat_arch_setup();

#if TRANSFER_LIST && !(RESET_TO_BL2 || RESET_TO_BL31)
	te = transfer_list_find(secure_tl, TL_TAG_FDT);
	assert(te != NULL);

	/* Populate HW_CONFIG device tree with the mapped address */
	fconf_populate("HW_CONFIG", (uintptr_t)transfer_list_entry_data(te));
#endif
}
531