xref: /rk3399_ARM-atf/plat/arm/common/arm_bl31_setup.c (revision 06f3c7058c42a9f1a9f7df75ea2de71a000855e8)
/*
 * Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <drivers/console.h>
#include <lib/debugfs.h>
#include <lib/extensions/ras.h>
#include <lib/fconf/fconf.h>
#include <lib/gpt_rme/gpt_rme.h>
#include <lib/mmio.h>
#if TRANSFER_LIST
#include <lib/transfer_list.h>
#endif
#include <lib/xlat_tables/xlat_tables_compat.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>
#include <platform_def.h>

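/* Secure and non-secure transfer lists used for the firmware handoff. */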
struct transfer_list_header *secure_tl;
struct transfer_list_header *ns_tl __unused;

/*
 * Placeholder variables for copying the arguments that have been passed to
 * BL31 from BL2.
 */
static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;

#if ENABLE_RME
static entry_point_info_t rmm_image_ep_info;
#if (RME_GPT_BITLOCK_BLOCK == 0)
#define BITLOCK_BASE	UL(0)
#define BITLOCK_SIZE	UL(0)
#else
/*
 * Number of bitlock_t entries in the bitlocks array required to cover
 * PLAT_ARM_PPS, with each bit protecting RME_GPT_BITLOCK_BLOCK * 512MB of
 * the protected physical space.
 */
#if (PLAT_ARM_PPS > (RME_GPT_BITLOCK_BLOCK * SZ_512M * UL(8)))
#define BITLOCKS_NUM	(PLAT_ARM_PPS) /	\
			(RME_GPT_BITLOCK_BLOCK * SZ_512M * UL(8))
#else
#define BITLOCKS_NUM	U(1)
#endif
/*
 * Bitlocks array
 */
static bitlock_t gpt_bitlock[BITLOCKS_NUM];
#define BITLOCK_BASE	(uintptr_t)gpt_bitlock
#define BITLOCK_SIZE	sizeof(gpt_bitlock)
#endif /* RME_GPT_BITLOCK_BLOCK */
#endif /* ENABLE_RME */

#if !RESET_TO_BL31
/*
 * Check that BL31_BASE is above ARM_FW_CONFIG_LIMIT. The reserved page
 * is required for SOC_FW_CONFIG/TOS_FW_CONFIG passed from BL2.
 */
#if TRANSFER_LIST
CASSERT(BL31_BASE >= PLAT_ARM_EL3_FW_HANDOFF_LIMIT, assert_bl31_base_overflows);
#else
CASSERT(BL31_BASE >= ARM_FW_CONFIG_LIMIT, assert_bl31_base_overflows);
#endif /* TRANSFER_LIST */
#endif /* RESET_TO_BL31 */

/* Weak definitions may be overridden in specific ARM standard platforms */
#pragma weak bl31_early_platform_setup2
#pragma weak bl31_platform_setup
#pragma weak bl31_plat_arch_setup
#pragma weak bl31_plat_get_next_image_ep_info
#pragma weak bl31_plat_runtime_setup

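/* Map the whole BL31 image as normal read-write memory in the EL3 PAS. */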
#define MAP_BL31_TOTAL		MAP_REGION_FLAT(			\
					BL31_START,			\
					BL31_END - BL31_START,		\
					MT_MEMORY | MT_RW | EL3_PAS)
#if RECLAIM_INIT_CODE
IMPORT_SYM(unsigned long, __INIT_CODE_START__, BL_INIT_CODE_BASE);
IMPORT_SYM(unsigned long, __INIT_CODE_END__, BL_CODE_END_UNALIGNED);
IMPORT_SYM(unsigned long, __STACKS_END__, BL_STACKS_END_UNALIGNED);

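/*
 * Round the unaligned end symbols up to the next page boundary so the init
 * code and stack regions can later be remapped with page granularity.
 */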
#define	BL_INIT_CODE_END	((BL_CODE_END_UNALIGNED + PAGE_SIZE - 1) & \
					~(PAGE_SIZE - 1))
#define	BL_STACKS_END		((BL_STACKS_END_UNALIGNED + PAGE_SIZE - 1) & \
					~(PAGE_SIZE - 1))

#define MAP_BL_INIT_CODE	MAP_REGION_FLAT(			\
					BL_INIT_CODE_BASE,		\
					BL_INIT_CODE_END		\
						- BL_INIT_CODE_BASE,	\
					MT_CODE | EL3_PAS)
#endif

#if SEPARATE_NOBITS_REGION
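/*
 * With SEPARATE_NOBITS_REGION, the NOBITS sections of BL31 are linked in a
 * separate region and need their own read-write mapping.
 */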
#define MAP_BL31_NOBITS		MAP_REGION_FLAT(			\
					BL31_NOBITS_BASE,		\
					BL31_NOBITS_LIMIT		\
						- BL31_NOBITS_BASE,	\
					MT_MEMORY | MT_RW | EL3_PAS)

#endif
/*******************************************************************************
 * Return a pointer to the 'entry_point_info' structure of the next image for the
 * security state specified. BL33 corresponds to the non-secure image type
 * while BL32 corresponds to the secure image type. A NULL pointer is returned
 * if the image does not exist.
 ******************************************************************************/
struct entry_point_info *bl31_plat_get_next_image_ep_info(uint32_t type)
{
	entry_point_info_t *next_image_info;

	assert(sec_state_is_valid(type));
	if (type == NON_SECURE) {
#if TRANSFER_LIST && !RESET_TO_BL31
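		/*
		 * transfer_list_set_handoff_args() fills in the handoff
		 * arguments for the next image from the transfer list before
		 * returning its entry point information.
		 */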
		next_image_info = transfer_list_set_handoff_args(
			ns_tl, &bl33_image_ep_info);
#else
		next_image_info = &bl33_image_ep_info;
#endif
	}
#if ENABLE_RME
	else if (type == REALM) {
		next_image_info = &rmm_image_ep_info;
	}
#endif
	else {
#if TRANSFER_LIST && !RESET_TO_BL31
		next_image_info = transfer_list_set_handoff_args(
			secure_tl, &bl32_image_ep_info);
#else
		next_image_info = &bl32_image_ep_info;
#endif
	}

	/*
	 * None of the images on the ARM development platforms can have 0x0
	 * as the entrypoint.
	 */
	if (next_image_info->pc)
		return next_image_info;
	else
		return NULL;
}

/*******************************************************************************
 * Perform any BL31 early platform setup common to ARM standard platforms.
 * This is an opportunity to copy parameters passed by the calling EL (S-EL1
 * in the case of BL2, EL3 in the case of BL1) before they are potentially
 * lost. This needs to be done before the MMU is initialized so that the
 * memory layout can be used while creating page tables. BL2 has flushed this
 * information to memory, so we are guaranteed to pick up good data.
 ******************************************************************************/
void __init arm_bl31_early_platform_setup(u_register_t arg0, u_register_t arg1,
					  u_register_t arg2, u_register_t arg3)
{
#if TRANSFER_LIST
#if RESET_TO_BL31
	/* Populate entry point information for BL33 */
	SET_PARAM_HEAD(&bl33_image_ep_info, PARAM_EP, VERSION_1, 0);
	/*
	 * Tell BL31 where the non-trusted software image is located and
	 * provide its entry state information.
	 */
	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();

	bl33_image_ep_info.spsr = arm_get_spsr_for_bl33_entry();
	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);

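	/*
	 * Handoff registers for BL33: x0 carries the DTB offset within the
	 * transfer list, x1 the handoff signature and register convention
	 * version, and x3 the base of the non-secure transfer list.
	 */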
	bl33_image_ep_info.args.arg0 = PLAT_ARM_TRANSFER_LIST_DTB_OFFSET;
	bl33_image_ep_info.args.arg1 =
		TRANSFER_LIST_HANDOFF_X1_VALUE(REGISTER_CONVENTION_VERSION);
	bl33_image_ep_info.args.arg3 = FW_NS_HANDOFF_BASE;
#else
	struct transfer_list_entry *te = NULL;
	struct entry_point_info *ep;

	secure_tl = (struct transfer_list_header *)arg3;

	/*
	 * Populate the global entry point structures used to execute subsequent
	 * images.
	 */
	while ((te = transfer_list_next(secure_tl, te)) != NULL) {
		ep = transfer_list_entry_data(te);

		if (te->tag_id == TL_TAG_EXEC_EP_INFO64) {
			switch (GET_SECURITY_STATE(ep->h.attr)) {
			case NON_SECURE:
				bl33_image_ep_info = *ep;
				break;
#if ENABLE_RME
			case REALM:
				rmm_image_ep_info = *ep;
				break;
#endif
			case SECURE:
				bl32_image_ep_info = *ep;
				break;
			default:
				ERROR("Unrecognized Image Security State %lu\n",
				      GET_SECURITY_STATE(ep->h.attr));
				panic();
			}
		}
	}
#endif /* RESET_TO_BL31 */
#else /* (!TRANSFER_LIST) */
#if RESET_TO_BL31
	/* There are no parameters from BL2 if BL31 is a reset vector */
	assert((uintptr_t)arg0 == 0U);
	assert((uintptr_t)arg3 == 0U);

# ifdef BL32_BASE
	/* Populate entry point information for BL32 */
	SET_PARAM_HEAD(&bl32_image_ep_info,
				PARAM_EP,
				VERSION_1,
				0);
	SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
	bl32_image_ep_info.pc = BL32_BASE;
	bl32_image_ep_info.spsr = arm_get_spsr_for_bl32_entry();

#if defined(SPD_spmd)
	bl32_image_ep_info.args.arg0 = ARM_SPMC_MANIFEST_BASE;
#endif

# endif /* BL32_BASE */

	/* Populate entry point information for BL33 */
	SET_PARAM_HEAD(&bl33_image_ep_info,
				PARAM_EP,
				VERSION_1,
				0);
	/*
	 * Tell BL31 where the non-trusted software image is located and
	 * provide its entry state information.
	 */
	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();

	bl33_image_ep_info.spsr = arm_get_spsr_for_bl33_entry();
	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);

#if ENABLE_RME
	/*
	 * Populate entry point information for RMM.
	 * Only PC needs to be set as other fields are determined by RMMD.
	 */
	rmm_image_ep_info.pc = RMM_BASE;
#endif /* ENABLE_RME */
#else /* RESET_TO_BL31 */
	/*
	 * In debug builds, we pass a special value in 'arg3'
	 * to verify platform parameters from BL2 to BL31.
	 * In release builds, it's not used.
	 */
#if DEBUG
	assert(((uintptr_t)arg3) == ARM_BL31_PLAT_PARAM_VAL);
#endif

	/*
	 * Check that the params passed from BL2 are not NULL.
	 */
	bl_params_t *params_from_bl2 = (bl_params_t *)(uintptr_t)arg0;
	assert(params_from_bl2 != NULL);
	assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
	assert(params_from_bl2->h.version >= VERSION_2);

	bl_params_node_t *bl_params = params_from_bl2->head;

	/*
	 * Copy the BL33, BL32 and RMM (if present) entry point information.
	 * It is stored in Secure RAM, in BL2's address space.
	 */
	while (bl_params != NULL) {
		if (bl_params->image_id == BL32_IMAGE_ID) {
			bl32_image_ep_info = *bl_params->ep_info;
#if SPMC_AT_EL3
			/*
			 * Populate the BL32 image base, size and max limit in
			 * the entry point information, since there is no
			 * platform function to retrieve them in generic
			 * code. We choose arg2, arg3 and arg4 since the generic
			 * code uses arg1 for stashing the SP manifest size. The
			 * SPMC setup uses these arguments to update the SP
			 * manifest with the actual SP base address and its size.
			 */
			bl32_image_ep_info.args.arg2 =
				bl_params->image_info->image_base;
			bl32_image_ep_info.args.arg3 =
				bl_params->image_info->image_size;
			bl32_image_ep_info.args.arg4 =
				bl_params->image_info->image_base +
				bl_params->image_info->image_max_size;
#endif
		}
#if ENABLE_RME
		else if (bl_params->image_id == RMM_IMAGE_ID) {
			rmm_image_ep_info = *bl_params->ep_info;
		}
#endif
		else if (bl_params->image_id == BL33_IMAGE_ID) {
			bl33_image_ep_info = *bl_params->ep_info;
		}

		bl_params = bl_params->next_params_info;
	}

	if (bl33_image_ep_info.pc == 0U)
		panic();
#if ENABLE_RME
	if (rmm_image_ep_info.pc == 0U)
		panic();
#endif
#endif /* RESET_TO_BL31 */

#if ARM_LINUX_KERNEL_AS_BL33
	/*
	 * According to the file ``Documentation/arm64/booting.txt`` of the
	 * Linux kernel tree, Linux expects the physical address of the device
	 * tree blob (DTB) in x0, while x1-x3 are reserved for future use and
	 * must be 0.
	 * The option is also repurposed to load the Hafnium hypervisor in the
	 * normal world. Hafnium expects its manifest address in x0; the
	 * manifest is essentially the Linux DTS (passed to the primary VM)
	 * extended with 'hypervisor' and 'chosen' nodes that specify the
	 * hypervisor configuration.
	 */
#if RESET_TO_BL31
	bl33_image_ep_info.args.arg0 = (u_register_t)ARM_PRELOADED_DTB_BASE;
#else
	bl33_image_ep_info.args.arg0 = arg2;
#endif /* RESET_TO_BL31 */
	bl33_image_ep_info.args.arg1 = 0U;
	bl33_image_ep_info.args.arg2 = 0U;
	bl33_image_ep_info.args.arg3 = 0U;
#endif /* ARM_LINUX_KERNEL_AS_BL33 */
#endif /* TRANSFER_LIST */
}

void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
		u_register_t arg2, u_register_t arg3)
{
	arm_bl31_early_platform_setup(arg0, arg1, arg2, arg3);

	/*
	 * Initialize Interconnect for this cluster during cold boot.
	 * No need for locks as no other CPU is active.
	 */
	plat_arm_interconnect_init();

	/*
	 * Enable Interconnect coherency for the primary CPU's cluster.
	 * Earlier bootloader stages might already do this (e.g. Trusted
	 * Firmware's BL1 does it) but we can't assume so. There is no harm in
	 * executing this code twice anyway.
	 * Platform specific PSCI code will enable coherency for other
	 * clusters.
	 */
	plat_arm_interconnect_enter_coherency();
}

/*******************************************************************************
 * Perform any BL31 platform setup common to ARM standard platforms
 ******************************************************************************/
void arm_bl31_platform_setup(void)
{
	struct transfer_list_entry *te __unused;

#if TRANSFER_LIST && !RESET_TO_BL31
	ns_tl = transfer_list_init((void *)FW_NS_HANDOFF_BASE,
				   PLAT_ARM_FW_HANDOFF_SIZE);
	if (ns_tl == NULL) {
		ERROR("Non-secure transfer list initialisation failed!\n");
		panic();
	}
	/* BL31 may modify the HW_CONFIG so defer copying it until later. */
	te = transfer_list_find(secure_tl, TL_TAG_FDT);
	assert(te != NULL);

	/*
	 * A pre-existing assumption is that FCONF is unsupported with
	 * RESET_TO_BL2 and RESET_TO_BL31. In the case of RESET_TO_BL31 this
	 * makes sense because there is no prior stage to load the device tree,
	 * but the reasoning for RESET_TO_BL2 is less clear. For the moment,
	 * hardware properties that would normally be derived from the DT are
	 * statically defined.
	 */
#if !RESET_TO_BL2
	fconf_populate("HW_CONFIG", (uintptr_t)transfer_list_entry_data(te));
#endif

	te = transfer_list_add(ns_tl, TL_TAG_FDT, te->data_size,
			       transfer_list_entry_data(te));
	assert(te != NULL);

	te = transfer_list_find(secure_tl, TL_TAG_TPM_EVLOG);
	if (te != NULL) {
		te = transfer_list_add(ns_tl, TL_TAG_TPM_EVLOG, te->data_size,
				       transfer_list_entry_data(te));
		if (te == NULL) {
			ERROR("Failed to load event log in Non-Secure transfer list\n");
			panic();
		}
	}
#endif /* TRANSFER_LIST && !RESET_TO_BL31 */

#if RESET_TO_BL31
	/*
	 * Do initial security configuration to allow DRAM/device access
	 * (if an earlier BL stage has not already done so).
	 */
	plat_arm_security_setup();

#if defined(PLAT_ARM_MEM_PROT_ADDR)
	arm_nor_psci_do_dyn_mem_protect();
#endif /* PLAT_ARM_MEM_PROT_ADDR */

#endif /* RESET_TO_BL31 */

	/* Enable and initialize the System level generic timer */
	mmio_write_32(ARM_SYS_CNTCTL_BASE + CNTCR_OFF,
			CNTCR_FCREQ(0U) | CNTCR_EN);

	/* Allow access to the System counter timer module */
	arm_configure_sys_timer();

	/* Initialize power controller before setting up topology */
	plat_arm_pwrc_setup();

#if ENABLE_FEAT_RAS && FFH_SUPPORT
	ras_init();
#endif

#if USE_DEBUGFS
	debugfs_init();
#endif /* USE_DEBUGFS */
}

/*******************************************************************************
 * Perform any BL31 platform runtime setup prior to BL31 exit common to ARM
 * standard platforms
 ******************************************************************************/
void arm_bl31_plat_runtime_setup(void)
{
	struct transfer_list_entry *te __unused;

	/* Initialize the runtime console */
	arm_console_runtime_init();

#if TRANSFER_LIST && !RESET_TO_BL31
	/*
	 * By this stage BL31 is assumed to have added all of the TEs required
	 * by BL33. Flush the transfer list so that the updated data is visible
	 * to all observers, even if they access it with caching disabled.
	 */
	flush_dcache_range((uintptr_t)ns_tl, ns_tl->size);
#endif /* TRANSFER_LIST && !RESET_TO_BL31 */

#if RECLAIM_INIT_CODE
	arm_free_init_memory();
#endif

#if PLAT_RO_XLAT_TABLES
	arm_xlat_make_tables_readonly();
#endif
}

#if RECLAIM_INIT_CODE
/*
 * Make memory for image boot time code RW to reclaim it as stack for the
 * secondary cores, or RO where it cannot be reclaimed:
 *
 *            |-------- INIT SECTION --------|
 *  -----------------------------------------
 * |  CORE 0  |  CORE 1  |  CORE 2  | EXTRA  |
 * |  STACK   |  STACK   |  STACK   | SPACE  |
 *  -----------------------------------------
 *             <-------------------> <------>
 *                MAKE RW AND XN       MAKE
 *                  FOR STACKS       RO AND XN
 */
void arm_free_init_memory(void)
{
	int ret = 0;

	if (BL_STACKS_END < BL_INIT_CODE_END) {
		/* Reclaim some of the init section as stack if possible. */
		if (BL_INIT_CODE_BASE < BL_STACKS_END) {
			ret |= xlat_change_mem_attributes(BL_INIT_CODE_BASE,
					BL_STACKS_END - BL_INIT_CODE_BASE,
					MT_RW_DATA);
		}
		/* Make the rest of the init section read-only. */
		ret |= xlat_change_mem_attributes(BL_STACKS_END,
				BL_INIT_CODE_END - BL_STACKS_END,
				MT_RO_DATA);
	} else {
		/* The stacks cover the init section, so reclaim it all. */
		ret |= xlat_change_mem_attributes(BL_INIT_CODE_BASE,
				BL_INIT_CODE_END - BL_INIT_CODE_BASE,
				MT_RW_DATA);
	}

	if (ret != 0) {
		ERROR("Could not reclaim initialization code\n");
		panic();
	}
}
#endif

void __init bl31_platform_setup(void)
{
	arm_bl31_platform_setup();
}

void bl31_plat_runtime_setup(void)
{
	arm_bl31_plat_runtime_setup();
}

/*******************************************************************************
 * Perform the very early platform specific architectural setup shared between
 * ARM standard platforms. This only does basic initialization. Later
 * architectural setup (bl31_arch_setup()) does not do anything platform
 * specific.
 ******************************************************************************/
void __init arm_bl31_plat_arch_setup(void)
{
	const mmap_region_t bl_regions[] = {
		MAP_BL31_TOTAL,
#if ENABLE_RME
		ARM_MAP_L0_GPT_REGION,
#endif
#if RECLAIM_INIT_CODE
		MAP_BL_INIT_CODE,
#endif
#if SEPARATE_NOBITS_REGION
		MAP_BL31_NOBITS,
#endif
		ARM_MAP_BL_RO,
#if USE_ROMLIB
		ARM_MAP_ROMLIB_CODE,
		ARM_MAP_ROMLIB_DATA,
#endif
#if USE_COHERENT_MEM
		ARM_MAP_BL_COHERENT_RAM,
#endif
		{0}
	};

	setup_page_tables(bl_regions, plat_arm_get_mmap());

	enable_mmu_el3(0);

#if ENABLE_RME
#if RESET_TO_BL31
	/* Initialise the GPT only when RME is implemented. */
	assert(is_feat_rme_present());

	/* Initialise and enable granule protection after the MMU is on. */
	arm_gpt_setup();
#endif /* RESET_TO_BL31 */
	/*
	 * Initialise the Granule Protection library and enable GPC for the
	 * primary processor. The tables have already been initialised, either
	 * by a previous BL stage or by arm_gpt_setup() above, so there is no
	 * need to provide any PAS here. This function sets up pointers to
	 * those tables.
	 */
	if (gpt_runtime_init(BITLOCK_BASE, BITLOCK_SIZE) < 0) {
		ERROR("gpt_runtime_init() failed!\n");
		panic();
	}
#endif /* ENABLE_RME */

	arm_setup_romlib();
}

void __init bl31_plat_arch_setup(void)
{
	arm_bl31_plat_arch_setup();
}
588