1 /*
2 * Copyright (c) 2015-2026, Arm Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #include <assert.h>
8
9 #include <arch.h>
10 #include <arch_features.h>
11 #include <arch_helpers.h>
12 #include <common/bl_common.h>
13 #include <common/debug.h>
14 #include <drivers/console.h>
15 #include <lib/debugfs.h>
16 #include <lib/extensions/ras.h>
17 #include <lib/fconf/fconf.h>
18 #include <lib/gpt_rme/gpt_rme.h>
19 #include <lib/mmio.h>
20 #include <services/lfa_svc.h>
21 #if TRANSFER_LIST
22 #include <transfer_list.h>
23 #endif
24 #include <lib/xlat_tables/xlat_tables_compat.h>
25 #include <plat/arm/common/plat_arm.h>
26 #include <plat/arm/common/plat_arm_lfa_components.h>
27 #include <plat/common/platform.h>
28 #include <platform_def.h>
29
/*
 * Handoff transfer lists: secure_tl is received from BL2; ns_tl is created
 * by BL31 for the non-secure world and only referenced when TRANSFER_LIST
 * is enabled, hence __unused.
 */
struct transfer_list_header *secure_tl;
struct transfer_list_header *ns_tl __unused;

/*
 * Placeholder variables for copying the arguments that have been passed to
 * BL31 from BL2.
 */
static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;

#if ENABLE_RMM
/* Entry point information for the Realm Management Monitor image. */
static entry_point_info_t rmm_image_ep_info;
#endif

#if ENABLE_FEAT_RME && (RME_GPT_BITLOCK_BLOCK != 0)
/*
 * Number of bitlock_t entries in bitlocks array for PLAT_ARM_PPS
 * with RME_GPT_BITLOCK_BLOCK * 512MB per bitlock.
 */
#if (PLAT_ARM_PPS > (RME_GPT_BITLOCK_BLOCK * SZ_512M * UL(8)))
#define BITLOCKS_NUM	(PLAT_ARM_PPS) /			\
			(RME_GPT_BITLOCK_BLOCK * SZ_512M * UL(8))
#else
#define BITLOCKS_NUM	U(1)
#endif
/*
 * Bitlocks array
 */
static bitlock_t gpt_bitlock[BITLOCKS_NUM];
#define BITLOCK_BASE	(uintptr_t)gpt_bitlock
#define BITLOCK_SIZE	sizeof(gpt_bitlock)
#else /* !(ENABLE_FEAT_RME && (RME_GPT_BITLOCK_BLOCK != 0)) */
/* No bitlocks are used: pass a zero base/size to the GPT library. */
#define BITLOCK_BASE	UL(0)
#define BITLOCK_SIZE	UL(0)
#endif /* ENABLE_FEAT_RME && (RME_GPT_BITLOCK_BLOCK != 0) */
65
#if !RESET_TO_BL31
/*
 * Check that BL31_BASE is above ARM_FW_CONFIG_LIMIT. The reserved page
 * is required for SOC_FW_CONFIG/TOS_FW_CONFIG passed from BL2.
 */
#if TRANSFER_LIST
CASSERT(BL31_BASE >= PLAT_ARM_EL3_FW_HANDOFF_LIMIT, assert_bl31_base_overflows);
#else
CASSERT(BL31_BASE >= ARM_FW_CONFIG_LIMIT, assert_bl31_base_overflows);
#endif /* TRANSFER_LIST */
#endif /* RESET_TO_BL31 */

/* Weak definitions may be overridden in specific ARM standard platform */
#pragma weak bl31_early_platform_setup2
#pragma weak bl31_platform_setup
#pragma weak bl31_plat_arch_setup
#pragma weak bl31_plat_get_next_image_ep_info
#pragma weak bl31_plat_runtime_setup

/* Read-write mapping of the whole BL31 image, used for the page tables. */
#define MAP_BL31_TOTAL		MAP_REGION_FLAT(			\
					BL31_START,			\
					BL31_END - BL31_START,		\
					MT_MEMORY | MT_RW | EL3_PAS |	\
					MT_CAP_LD_ST_TRACK)

#if RECLAIM_INIT_CODE
IMPORT_SYM(unsigned long, __INIT_CODE_START__, BL_INIT_CODE_BASE);
IMPORT_SYM(unsigned long, __INIT_CODE_END__, BL_CODE_END_UNALIGNED);
IMPORT_SYM(unsigned long, __STACKS_END__, BL_STACKS_END_UNALIGNED);

/* Round the linker-provided end addresses up to a page boundary. */
#define	BL_INIT_CODE_END	((BL_CODE_END_UNALIGNED + PAGE_SIZE - 1) & \
					~(PAGE_SIZE - 1))
#define	BL_STACKS_END		((BL_STACKS_END_UNALIGNED + PAGE_SIZE - 1) & \
					~(PAGE_SIZE - 1))

/* Executable mapping of the reclaimable init-code section. */
#define MAP_BL_INIT_CODE	MAP_REGION_FLAT(			\
					BL_INIT_CODE_BASE,		\
					BL_INIT_CODE_END		\
						- BL_INIT_CODE_BASE,	\
					MT_CODE | EL3_PAS)
#endif

#if SEPARATE_NOBITS_REGION
/* RW mapping for the separately-placed NOBITS (ZI) region. */
#define MAP_BL31_NOBITS		MAP_REGION_FLAT(			\
					BL31_NOBITS_BASE,		\
					BL31_NOBITS_LIMIT		\
						- BL31_NOBITS_BASE,	\
					MT_MEMORY | MT_RW | EL3_PAS)

#endif
116 /*******************************************************************************
117 * Return a pointer to the 'entry_point_info' structure of the next image for the
118 * security state specified. BL33 corresponds to the non-secure image type
119 * while BL32 corresponds to the secure image type. A NULL pointer is returned
120 * if the image does not exist.
121 ******************************************************************************/
bl31_plat_get_next_image_ep_info(uint32_t type)122 struct entry_point_info *bl31_plat_get_next_image_ep_info(uint32_t type)
123 {
124 entry_point_info_t *next_image_info;
125
126 assert(sec_state_is_valid(type));
127 if (type == NON_SECURE) {
128 #if TRANSFER_LIST && !RESET_TO_BL31
129 next_image_info = transfer_list_set_handoff_args(
130 ns_tl, &bl33_image_ep_info);
131 #else
132 next_image_info = &bl33_image_ep_info;
133 #endif
134 }
135 #if ENABLE_RMM
136 else if (type == REALM) {
137 #if LFA_SUPPORT
138 if (lfa_is_prime_complete(LFA_RMM_COMPONENT)) {
139 rmm_image_ep_info.pc =
140 RMM_BASE + RMM_BANK_SIZE;
141 }
142 #endif /* LFA_SUPPORT */
143 next_image_info = &rmm_image_ep_info;
144 }
145 #endif
146 else {
147 #if TRANSFER_LIST && !RESET_TO_BL31
148 next_image_info = transfer_list_set_handoff_args(
149 secure_tl, &bl32_image_ep_info);
150 #else
151 next_image_info = &bl32_image_ep_info;
152 #endif
153 }
154
155 /*
156 * None of the images on the ARM development platforms can have 0x0
157 * as the entrypoint
158 */
159 if (next_image_info->pc)
160 return next_image_info;
161 else
162 return NULL;
163 }
164
165 /*******************************************************************************
166 * Perform any BL31 early platform setup common to ARM standard platforms.
167 * Here is an opportunity to copy parameters passed by the calling EL (S-EL1
168 * in BL2 & EL3 in BL1) before they are lost (potentially). This needs to be
169 * done before the MMU is initialized so that the memory layout can be used
170 * while creating page tables. BL2 has flushed this information to memory, so
171 * we are guaranteed to pick up good data.
172 ******************************************************************************/
void __init arm_bl31_early_platform_setup(u_register_t arg0, u_register_t arg1,
					  u_register_t arg2, u_register_t arg3)
{
#if TRANSFER_LIST
#if RESET_TO_BL31
	/* Populate entry point information for BL33 */
	SET_PARAM_HEAD(&bl33_image_ep_info, PARAM_EP, VERSION_1, 0);
	/*
	 * Tell BL31 where the non-trusted software image
	 * is located and the entry state information
	 */
	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();

	bl33_image_ep_info.spsr = arm_get_spsr(BL33_IMAGE_ID);
	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);

	/* Register-convention arguments for a transfer-list handoff. */
	bl33_image_ep_info.args.arg0 = PLAT_ARM_TRANSFER_LIST_DTB_OFFSET;
	bl33_image_ep_info.args.arg1 =
		TRANSFER_LIST_HANDOFF_X1_VALUE(REGISTER_CONVENTION_VERSION);
	bl33_image_ep_info.args.arg3 = FW_NS_HANDOFF_BASE;
#else
	struct transfer_list_entry *te = NULL;
	struct entry_point_info *ep;

	/* arg3 carries the base of the secure transfer list from BL2. */
	secure_tl = (struct transfer_list_header *)arg3;

	/*
	 * Populate the global entry point structures used to execute subsequent
	 * images.
	 */
	while ((te = transfer_list_next(secure_tl, te)) != NULL) {
		ep = transfer_list_entry_data(te);

		if (te->tag_id == TL_TAG_EXEC_EP_INFO64) {
			switch (GET_SECURITY_STATE(ep->h.attr)) {
			case NON_SECURE:
				bl33_image_ep_info = *ep;
				break;
#if ENABLE_RMM
			case REALM:
				rmm_image_ep_info = *ep;
				break;
#endif
			case SECURE:
				bl32_image_ep_info = *ep;
				break;
			default:
				ERROR("Unrecognized Image Security State %lu\n",
				      GET_SECURITY_STATE(ep->h.attr));
				panic();
			}
		}
	}
#endif /* RESET_TO_BL31 */
#else /* (!TRANSFER_LIST) */
#if RESET_TO_BL31
	/* If BL31 is a reset vector, the parameters must be ignored */
	(void)arg0;
	(void)arg1;
	(void)arg2;
	(void)arg3;

# ifdef BL32_BASE
	/* Populate entry point information for BL32 */
	SET_PARAM_HEAD(&bl32_image_ep_info,
		       PARAM_EP,
		       VERSION_1,
		       0);
	SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
	bl32_image_ep_info.pc = BL32_BASE;
	bl32_image_ep_info.spsr = arm_get_spsr(BL32_IMAGE_ID);

#if defined(SPD_spmd)
	/* SPMD expects the SPMC manifest address in arg0. */
	bl32_image_ep_info.args.arg0 = ARM_SPMC_MANIFEST_BASE;
#endif

# endif /* BL32_BASE */

	/* Populate entry point information for BL33 */
	SET_PARAM_HEAD(&bl33_image_ep_info,
		       PARAM_EP,
		       VERSION_1,
		       0);
	/*
	 * Tell BL31 where the non-trusted software image
	 * is located and the entry state information
	 */
	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();
	bl33_image_ep_info.spsr = arm_get_spsr(BL33_IMAGE_ID);
	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);

#if ENABLE_RMM
	/*
	 * Populate entry point information for RMM.
	 * Only PC needs to be set as other fields are determined by RMMD.
	 */
	rmm_image_ep_info.pc = RMM_BASE;
#endif /* ENABLE_RMM */
#else /* RESET_TO_BL31 */
	/*
	 * In debug builds, we pass a special value in 'arg3'
	 * to verify platform parameters from BL2 to BL31.
	 * In release builds, it's not used.
	 */
#if DEBUG
	assert(((uintptr_t)arg3) == ARM_BL31_PLAT_PARAM_VAL);
#endif

	/*
	 * Check params passed from BL2 should not be NULL,
	 */
	bl_params_t *params_from_bl2 = (bl_params_t *)(uintptr_t)arg0;
	assert(params_from_bl2 != NULL);
	assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
	assert(params_from_bl2->h.version >= VERSION_2);

	bl_params_node_t *bl_params = params_from_bl2->head;

	/*
	 * Copy BL33, BL32 and RMM (if present), entry point information.
	 * They are stored in Secure RAM, in BL2's address space.
	 */
	while (bl_params != NULL) {
		if (bl_params->image_id == BL32_IMAGE_ID) {
			bl32_image_ep_info = *bl_params->ep_info;
#if SPMC_AT_EL3
			/*
			 * Populate the BL32 image base, size and max limit in
			 * the entry point information, since there is no
			 * platform function to retrieve them in generic
			 * code. We choose arg2, arg3 and arg4 since the generic
			 * code uses arg1 for stashing the SP manifest size. The
			 * SPMC setup uses these arguments to update SP manifest
			 * with actual SP's base address and it size.
			 */
			bl32_image_ep_info.args.arg2 =
				bl_params->image_info->image_base;
			bl32_image_ep_info.args.arg3 =
				bl_params->image_info->image_size;
			bl32_image_ep_info.args.arg4 =
				bl_params->image_info->image_base +
				bl_params->image_info->image_max_size;
#endif
		}
#if ENABLE_RMM
		else if (bl_params->image_id == RMM_IMAGE_ID) {
			rmm_image_ep_info = *bl_params->ep_info;
		}
#endif
		else if (bl_params->image_id == BL33_IMAGE_ID) {
			bl33_image_ep_info = *bl_params->ep_info;
		}

		bl_params = bl_params->next_params_info;
	}

	/* A bootable BL33 (and RMM, if enabled) entry point is mandatory. */
	if (bl33_image_ep_info.pc == 0U)
		panic();
#if ENABLE_RMM
	if (rmm_image_ep_info.pc == 0U)
		panic();
#endif
#endif /* RESET_TO_BL31 */

#if USE_KERNEL_DT_CONVENTION
	/*
	 * Only use the default DT base address if TF-A has not supplied one.
	 * This can occur when the DT is side-loaded and its memory location
	 * is unknown (e.g., RESET_TO_BL31).
	 */

	if (bl33_image_ep_info.args.arg0 == 0U) {
		bl33_image_ep_info.args.arg0 = HW_CONFIG_BASE;
	}

#if ARM_LINUX_KERNEL_AS_BL33
	/* Per the arm64 Linux boot protocol, x1-x3 are reserved (zero). */
	bl33_image_ep_info.args.arg1 = 0U;
	bl33_image_ep_info.args.arg2 = 0U;
	bl33_image_ep_info.args.arg3 = 0U;
#endif
#endif
#endif /* TRANSFER_LIST */
}
356
void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
				u_register_t arg2, u_register_t arg3)
{
	/*
	 * Bring up the boot console first so that everything that follows
	 * can emit early debug output.
	 */
	arm_console_boot_init();

	/* Capture the hand-off arguments received from the previous stage. */
	arm_bl31_early_platform_setup(arg0, arg1, arg2, arg3);

#if !HW_ASSISTED_COHERENCY
	/*
	 * Cold-boot interconnect initialisation for this cluster. No locking
	 * is required because no other CPU is running yet.
	 */
	plat_arm_interconnect_init();

	/*
	 * Enter coherency for the primary CPU's cluster. An earlier boot
	 * stage (e.g. Trusted Firmware's BL1) may already have done this,
	 * but that cannot be assumed and repeating it is harmless. The
	 * platform PSCI code takes care of the remaining clusters.
	 */
	plat_arm_interconnect_enter_coherency();
#endif
}
383
384 /*******************************************************************************
385 * Perform any BL31 platform setup common to ARM standard platforms
386 ******************************************************************************/
arm_bl31_platform_setup(void)387 void arm_bl31_platform_setup(void)
388 {
389 #if RESET_TO_BL31
390 /*
391 * Do initial security configuration to allow DRAM/device access
392 * (if earlier BL has not already done so).
393 */
394 plat_arm_security_setup();
395
396 #if defined(PLAT_ARM_MEM_PROT_ADDR)
397 arm_nor_psci_do_dyn_mem_protect();
398 #endif /* PLAT_ARM_MEM_PROT_ADDR */
399
400 #endif /* RESET_TO_BL31 */
401
402 /* Enable and initialize the System level generic timer */
403 mmio_write_32(ARM_SYS_CNTCTL_BASE + CNTCR_OFF,
404 CNTCR_FCREQ(0U) | CNTCR_EN);
405
406 /* Allow access to the System counter timer module */
407 arm_configure_sys_timer();
408
409 /* Initialize power controller before setting up topology */
410 plat_arm_pwrc_setup();
411
412 #if FFH_SUPPORT
413 if (is_feat_ras_supported()) {
414 ras_init();
415 }
416 #endif
417
418 #if USE_DEBUGFS
419 debugfs_init();
420 #endif /* USE_DEBUGFS */
421 }
422
423 /*******************************************************************************
424 * Perform any BL31 platform runtime setup prior to BL31 exit common to ARM
425 * standard platforms
426 ******************************************************************************/
void arm_bl31_plat_runtime_setup(void)
{
	/*
	 * Note: the previously declared local 'struct transfer_list_entry *te'
	 * was never referenced in this function and has been removed.
	 */

	/* Initialize the runtime console */
	arm_console_runtime_init();

#if TRANSFER_LIST && !RESET_TO_BL31
	/*
	 * We assume BL31 has added all TE's required by BL33 at this stage, ensure
	 * that data is visible to all observers by performing a flush operation, so
	 * they can access the updated data even if caching is not enabled.
	 */
	flush_dcache_range((uintptr_t)ns_tl, ns_tl->size);
#endif /* TRANSFER_LIST && !RESET_TO_BL31 */

#if RECLAIM_INIT_CODE
	/* Boot-time init code is no longer needed past this point. */
	arm_free_init_memory();
#endif

#if PLAT_RO_XLAT_TABLES
	/* Lock down the translation tables now that setup is complete. */
	arm_xlat_make_tables_readonly();
#endif
}
450
451 #if RECLAIM_INIT_CODE
452 /*
453 * Make memory for image boot time code RW to reclaim it as stack for the
454 * secondary cores, or RO where it cannot be reclaimed:
455 *
456 * |-------- INIT SECTION --------|
457 * -----------------------------------------
458 * | CORE 0 | CORE 1 | CORE 2 | EXTRA |
459 * | STACK | STACK | STACK | SPACE |
460 * -----------------------------------------
461 * <-------------------> <------>
462 * MAKE RW AND XN MAKE
463 * FOR STACKS RO AND XN
464 */
arm_free_init_memory(void)465 void arm_free_init_memory(void)
466 {
467 int ret = 0;
468
469 if (BL_STACKS_END < BL_INIT_CODE_END) {
470 /* Reclaim some of the init section as stack if possible. */
471 if (BL_INIT_CODE_BASE < BL_STACKS_END) {
472 ret |= xlat_change_mem_attributes(BL_INIT_CODE_BASE,
473 BL_STACKS_END - BL_INIT_CODE_BASE,
474 MT_RW_DATA);
475 }
476 /* Make the rest of the init section read-only. */
477 ret |= xlat_change_mem_attributes(BL_STACKS_END,
478 BL_INIT_CODE_END - BL_STACKS_END,
479 MT_RO_DATA);
480 } else {
481 /* The stacks cover the init section, so reclaim it all. */
482 ret |= xlat_change_mem_attributes(BL_INIT_CODE_BASE,
483 BL_INIT_CODE_END - BL_INIT_CODE_BASE,
484 MT_RW_DATA);
485 }
486
487 if (ret != 0) {
488 ERROR("Could not reclaim initialization code");
489 panic();
490 }
491 }
492 #endif
493
bl31_platform_setup(void)494 void __init bl31_platform_setup(void)
495 {
496 arm_bl31_platform_setup();
497 }
498
/* Weak default: delegate to the common ARM runtime setup. */
void bl31_plat_runtime_setup(void)
{
	arm_bl31_plat_runtime_setup();
}
503
504 /*******************************************************************************
505 * Perform the very early platform specific architectural setup shared between
506 * ARM standard platforms. This only does basic initialization. Later
507 * architectural setup (bl31_arch_setup()) does not do anything platform
508 * specific.
509 ******************************************************************************/
void __init arm_bl31_plat_arch_setup(void)
{
	/* Memory regions to map for BL31's own execution. */
	const mmap_region_t bl_regions[] = {
		MAP_BL31_TOTAL,
#if ENABLE_FEAT_RME
		ARM_MAP_L0_GPT_REGION,
#endif
#if RECLAIM_INIT_CODE
		MAP_BL_INIT_CODE,
#endif
#if SEPARATE_NOBITS_REGION
		MAP_BL31_NOBITS,
#endif
		ARM_MAP_BL_RO,
#if USE_ROMLIB
		ARM_MAP_ROMLIB_CODE,
		ARM_MAP_ROMLIB_DATA,
#endif
#if USE_COHERENT_MEM
		ARM_MAP_BL_COHERENT_RAM,
#endif
		{0}
	};

	setup_page_tables(bl_regions, plat_arm_get_mmap());

	/* Turn the MMU on before any GPT or transfer-list accesses below. */
	enable_mmu_el3(0);

	/*
	 * Initialise Granule Protection library and enable GPC for the primary
	 * processor. The tables have already been initialized by a previous BL
	 * stage, so there is no need to provide any PAS here. This function
	 * sets up pointers to those tables.
	 *
	 * Although FEAT_RME supports feature detection, a build with
	 * ENABLE_FEAT_RME=0 and -O0 (no optimization) fails due to undefined
	 * reference to gpt library calls as the compiler doesn't optimise the
	 * check done using is_feat_rme_supported(). So calls to gpt library
	 * are gated using ENABLE_FEAT_RME.
	 */
#if ENABLE_FEAT_RME
	if (is_feat_rme_supported()) {
		assert(is_feat_rme_present());

#if RESET_TO_BL31
		/* Initialise and enable granule protection after MMU. */
		arm_gpt_setup();
#endif /* RESET_TO_BL31 */

		if (gpt_runtime_init(BITLOCK_BASE, BITLOCK_SIZE) < 0) {
			ERROR("gpt_runtime_init() failed!\n");
			panic();
		}
	}
#endif /* ENABLE_FEAT_RME */

	arm_setup_romlib();

	/* Only referenced when TRANSFER_LIST is enabled, hence __unused. */
	struct transfer_list_entry *te __unused;

#if TRANSFER_LIST && !RESET_TO_BL31
	/* Create the non-secure transfer list handed off to BL33. */
	ns_tl = transfer_list_init((void *)FW_NS_HANDOFF_BASE,
				   PLAT_ARM_FW_HANDOFF_SIZE);
	if (ns_tl == NULL) {
		ERROR("Non-secure transfer list initialisation failed!\n");
		panic();
	}
	/* BL31 may modify the HW_CONFIG so defer copying it until later. */
	te = transfer_list_find(secure_tl, TL_TAG_FDT);
	assert(te != NULL);

	/* Populate HW_CONFIG device tree from transfer list entry */
	fconf_populate("HW_CONFIG", (uintptr_t)transfer_list_entry_data(te));

	/* Copy the HW_CONFIG FDT into the non-secure transfer list. */
	te = transfer_list_add(ns_tl, TL_TAG_FDT, te->data_size,
			       transfer_list_entry_data(te));
	assert(te != NULL);

	/* If BL2 passed a TPM event log, forward it to the NS world too. */
	te = transfer_list_find(secure_tl, TL_TAG_TPM_EVLOG);
	if (te != NULL) {
		te = transfer_list_add(ns_tl, TL_TAG_TPM_EVLOG, te->data_size,
				       transfer_list_entry_data(te));
		if (te == NULL) {
			ERROR("Failed to load event log in Non-Secure transfer list\n");
			panic();
		}
	}
#endif /* TRANSFER_LIST && !RESET_TO_BL31 */

}
600
bl31_plat_arch_setup(void)601 void __init bl31_plat_arch_setup(void)
602 {
603 arm_bl31_plat_arch_setup();
604
605 #if USE_GIC_DRIVER == 3
606 gic_set_gicr_frames(arm_gicr_base_addrs);
607 #endif
608 }
609