/*
 * Copyright (c) 2015-2026, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <drivers/console.h>
#include <lib/debugfs.h>
#include <lib/extensions/ras.h>
#include <lib/fconf/fconf.h>
#include <lib/gpt_rme/gpt_rme.h>
#include <lib/mmio.h>
#include <services/lfa_svc.h>
#if TRANSFER_LIST
#include <transfer_list.h>
#endif
#include <lib/xlat_tables/xlat_tables_compat.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/arm/common/plat_arm_lfa_components.h>
#include <plat/common/platform.h>
#include <platform_def.h>

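/*
 * Handoff transfer lists: the secure list is received from BL2 (via 'arg3' in
 * arm_bl31_early_platform_setup()) and the non-secure list is created at
 * FW_NS_HANDOFF_BASE for BL33 when TRANSFER_LIST is enabled.
 */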
struct transfer_list_header *secure_tl;
struct transfer_list_header *ns_tl __unused;

#if USE_GIC_DRIVER == 3
uintptr_t arm_gicr_base_addrs[2] = {
	PLAT_ARM_GICR_BASE,	/* GICR Base address of the primary CPU */
	0U			/* Zero Termination */
};
#endif

/*
 * Placeholder variables for copying the arguments that have been passed to
 * BL31 from BL2.
 */
static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;

#if ENABLE_RME
static entry_point_info_t rmm_image_ep_info;
#if (RME_GPT_BITLOCK_BLOCK == 0)
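/*
 * A block size of 0 disables the bitlocks array: no lock storage is passed
 * to the GPT library below.
 */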
#define BITLOCK_BASE	UL(0)
#define BITLOCK_SIZE	UL(0)
#else
/*
 * Number of bitlock_t entries in bitlocks array for PLAT_ARM_PPS
 * with RME_GPT_BITLOCK_BLOCK * 512MB per bitlock.
 */
#if (PLAT_ARM_PPS > (RME_GPT_BITLOCK_BLOCK * SZ_512M * UL(8)))
#define BITLOCKS_NUM	(PLAT_ARM_PPS) /		\
			(RME_GPT_BITLOCK_BLOCK * SZ_512M * UL(8))
#else
#define BITLOCKS_NUM	U(1)
#endif
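/*
 * Worked example (illustrative values): the divisor above implies 8 bits of
 * lock per bitlock_t entry, each bit covering RME_GPT_BITLOCK_BLOCK * 512MB.
 * With RME_GPT_BITLOCK_BLOCK = 1, a PLAT_ARM_PPS of 64GB therefore needs
 * 64GB / (1 * 512MB * 8) = 16 entries.
 */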
/*
 * Bitlocks array
 */
static bitlock_t gpt_bitlock[BITLOCKS_NUM];
#define BITLOCK_BASE	(uintptr_t)gpt_bitlock
#define BITLOCK_SIZE	sizeof(gpt_bitlock)
#endif /* RME_GPT_BITLOCK_BLOCK */
#endif /* ENABLE_RME */

#if !RESET_TO_BL31
/*
 * Check that BL31_BASE is above ARM_FW_CONFIG_LIMIT. The reserved page
 * is required for SOC_FW_CONFIG/TOS_FW_CONFIG passed from BL2.
 */
#if TRANSFER_LIST
CASSERT(BL31_BASE >= PLAT_ARM_EL3_FW_HANDOFF_LIMIT, assert_bl31_base_overflows);
#else
CASSERT(BL31_BASE >= ARM_FW_CONFIG_LIMIT, assert_bl31_base_overflows);
#endif /* TRANSFER_LIST */
#endif /* RESET_TO_BL31 */

/* Weak definitions may be overridden in specific ARM standard platforms */
#pragma weak bl31_early_platform_setup2
#pragma weak bl31_platform_setup
#pragma weak bl31_plat_arch_setup
#pragma weak bl31_plat_get_next_image_ep_info
#pragma weak bl31_plat_runtime_setup

#define MAP_BL31_TOTAL		MAP_REGION_FLAT(			\
					BL31_START,			\
					BL31_END - BL31_START,		\
					MT_MEMORY | MT_RW | EL3_PAS |	\
					MT_CAP_LD_ST_TRACK)

#if RECLAIM_INIT_CODE
IMPORT_SYM(unsigned long, __INIT_CODE_START__, BL_INIT_CODE_BASE);
IMPORT_SYM(unsigned long, __INIT_CODE_END__, BL_CODE_END_UNALIGNED);
IMPORT_SYM(unsigned long, __STACKS_END__, BL_STACKS_END_UNALIGNED);

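/* Round the unaligned end symbols up to the next page boundary. */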
#define BL_INIT_CODE_END	((BL_CODE_END_UNALIGNED + PAGE_SIZE - 1) & \
					~(PAGE_SIZE - 1))
#define BL_STACKS_END		((BL_STACKS_END_UNALIGNED + PAGE_SIZE - 1) & \
					~(PAGE_SIZE - 1))

#define MAP_BL_INIT_CODE	MAP_REGION_FLAT(			\
					BL_INIT_CODE_BASE,		\
					BL_INIT_CODE_END		\
						- BL_INIT_CODE_BASE,	\
					MT_CODE | EL3_PAS)
#endif

#if SEPARATE_NOBITS_REGION
#define MAP_BL31_NOBITS		MAP_REGION_FLAT(			\
					BL31_NOBITS_BASE,		\
					BL31_NOBITS_LIMIT		\
						- BL31_NOBITS_BASE,	\
					MT_MEMORY | MT_RW | EL3_PAS)

#endif
/*******************************************************************************
 * Return a pointer to the 'entry_point_info' structure of the next image for
 * the security state specified. BL33 corresponds to the non-secure image type
 * while BL32 corresponds to the secure image type. A NULL pointer is returned
 * if the image does not exist.
 ******************************************************************************/
struct entry_point_info *bl31_plat_get_next_image_ep_info(uint32_t type)
{
	entry_point_info_t *next_image_info;

	assert(sec_state_is_valid(type));
	if (type == NON_SECURE) {
#if TRANSFER_LIST && !RESET_TO_BL31
		next_image_info = transfer_list_set_handoff_args(
			ns_tl, &bl33_image_ep_info);
#else
		next_image_info = &bl33_image_ep_info;
#endif
	}
#if ENABLE_RME
	else if (type == REALM) {
#if LFA_SUPPORT
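		/*
		 * If Live Firmware Activation has finished priming a new RMM
		 * image, enter RMM at the alternate bank rather than the
		 * currently active one.
		 */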
		if (lfa_is_prime_complete(LFA_RMM_COMPONENT)) {
			rmm_image_ep_info.pc =
				RMM_BASE + RMM_BANK_SIZE;
		}
#endif /* LFA_SUPPORT */
		next_image_info = &rmm_image_ep_info;
	}
#endif
	else {
#if TRANSFER_LIST && !RESET_TO_BL31
		next_image_info = transfer_list_set_handoff_args(
			secure_tl, &bl32_image_ep_info);
#else
		next_image_info = &bl32_image_ep_info;
#endif
	}

	/*
	 * None of the images on the ARM development platforms can have 0x0
	 * as the entrypoint.
	 */
	if (next_image_info->pc)
		return next_image_info;
	else
		return NULL;
}

/*******************************************************************************
 * Perform any BL31 early platform setup common to ARM standard platforms.
 * Here is an opportunity to copy parameters passed by the calling EL (S-EL1
 * in BL2 & EL3 in BL1) before they are potentially lost. This needs to be
 * done before the MMU is initialized so that the memory layout can be used
 * while creating page tables. BL2 has flushed this information to memory, so
 * we are guaranteed to pick up good data.
 ******************************************************************************/
void __init arm_bl31_early_platform_setup(u_register_t arg0, u_register_t arg1,
					  u_register_t arg2, u_register_t arg3)
{
#if TRANSFER_LIST
#if RESET_TO_BL31
	/* Populate entry point information for BL33 */
	SET_PARAM_HEAD(&bl33_image_ep_info, PARAM_EP, VERSION_1, 0);
	/*
	 * Tell BL31 where the non-trusted software image
	 * is located and the entry state information.
	 */
	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();

	bl33_image_ep_info.spsr = arm_get_spsr(BL33_IMAGE_ID);
	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);

	bl33_image_ep_info.args.arg0 = PLAT_ARM_TRANSFER_LIST_DTB_OFFSET;
	bl33_image_ep_info.args.arg1 =
		TRANSFER_LIST_HANDOFF_X1_VALUE(REGISTER_CONVENTION_VERSION);
	bl33_image_ep_info.args.arg3 = FW_NS_HANDOFF_BASE;
#else
	struct transfer_list_entry *te = NULL;
	struct entry_point_info *ep;

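	/* BL2 hands over the base of the secure transfer list in 'arg3'. */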
	secure_tl = (struct transfer_list_header *)arg3;

	/*
	 * Populate the global entry point structures used to execute
	 * subsequent images.
	 */
	while ((te = transfer_list_next(secure_tl, te)) != NULL) {
		ep = transfer_list_entry_data(te);

		if (te->tag_id == TL_TAG_EXEC_EP_INFO64) {
			switch (GET_SECURITY_STATE(ep->h.attr)) {
			case NON_SECURE:
				bl33_image_ep_info = *ep;
				break;
#if ENABLE_RME
			case REALM:
				rmm_image_ep_info = *ep;
				break;
#endif
			case SECURE:
				bl32_image_ep_info = *ep;
				break;
			default:
				ERROR("Unrecognized Image Security State %lu\n",
				      GET_SECURITY_STATE(ep->h.attr));
				panic();
			}
		}
	}
#endif /* RESET_TO_BL31 */
#else /* (!TRANSFER_LIST) */
#if RESET_TO_BL31
	/* If BL31 is a reset vector, the parameters must be ignored */
	(void)arg0;
	(void)arg1;
	(void)arg2;
	(void)arg3;

# ifdef BL32_BASE
	/* Populate entry point information for BL32 */
	SET_PARAM_HEAD(&bl32_image_ep_info,
		       PARAM_EP,
		       VERSION_1,
		       0);
	SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
	bl32_image_ep_info.pc = BL32_BASE;
	bl32_image_ep_info.spsr = arm_get_spsr(BL32_IMAGE_ID);

#if defined(SPD_spmd)
	bl32_image_ep_info.args.arg0 = ARM_SPMC_MANIFEST_BASE;
#endif

# endif /* BL32_BASE */

	/* Populate entry point information for BL33 */
	SET_PARAM_HEAD(&bl33_image_ep_info,
		       PARAM_EP,
		       VERSION_1,
		       0);
	/*
	 * Tell BL31 where the non-trusted software image
	 * is located and the entry state information.
	 */
	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();
	bl33_image_ep_info.spsr = arm_get_spsr(BL33_IMAGE_ID);
	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);

#if ENABLE_RME
	/*
	 * Populate entry point information for RMM.
	 * Only PC needs to be set as other fields are determined by RMMD.
	 */
	rmm_image_ep_info.pc = RMM_BASE;
#endif /* ENABLE_RME */
#else /* RESET_TO_BL31 */
	/*
	 * In debug builds, we pass a special value in 'arg3'
	 * to verify platform parameters from BL2 to BL31.
	 * In release builds, it's not used.
	 */
#if DEBUG
	assert(((uintptr_t)arg3) == ARM_BL31_PLAT_PARAM_VAL);
#endif

	/*
	 * Check that the params passed from BL2 are not NULL.
	 */
	bl_params_t *params_from_bl2 = (bl_params_t *)(uintptr_t)arg0;
	assert(params_from_bl2 != NULL);
	assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
	assert(params_from_bl2->h.version >= VERSION_2);

	bl_params_node_t *bl_params = params_from_bl2->head;

	/*
	 * Copy BL33, BL32 and RMM (if present) entry point information.
	 * They are stored in Secure RAM, in BL2's address space.
	 */
	while (bl_params != NULL) {
		if (bl_params->image_id == BL32_IMAGE_ID) {
			bl32_image_ep_info = *bl_params->ep_info;
#if SPMC_AT_EL3
			/*
			 * Populate the BL32 image base, size and max limit in
			 * the entry point information, since there is no
			 * platform function to retrieve them in generic
			 * code. We choose arg2, arg3 and arg4 since the generic
			 * code uses arg1 for stashing the SP manifest size. The
			 * SPMC setup uses these arguments to update the SP
			 * manifest with the actual SP base address and size.
			 */
			bl32_image_ep_info.args.arg2 =
				bl_params->image_info->image_base;
			bl32_image_ep_info.args.arg3 =
				bl_params->image_info->image_size;
			bl32_image_ep_info.args.arg4 =
				bl_params->image_info->image_base +
				bl_params->image_info->image_max_size;
#endif
		}
#if ENABLE_RME
		else if (bl_params->image_id == RMM_IMAGE_ID) {
			rmm_image_ep_info = *bl_params->ep_info;
		}
#endif
		else if (bl_params->image_id == BL33_IMAGE_ID) {
			bl33_image_ep_info = *bl_params->ep_info;
		}

		bl_params = bl_params->next_params_info;
	}

	if (bl33_image_ep_info.pc == 0U)
		panic();
#if ENABLE_RME
	if (rmm_image_ep_info.pc == 0U)
		panic();
#endif
#endif /* RESET_TO_BL31 */

#if USE_KERNEL_DT_CONVENTION
	/*
	 * Only use the default DT base address if TF-A has not supplied one.
	 * This can occur when the DT is side-loaded and its memory location
	 * is unknown (e.g., RESET_TO_BL31).
	 */

	if (bl33_image_ep_info.args.arg0 == 0U) {
		bl33_image_ep_info.args.arg0 = HW_CONFIG_BASE;
	}

#if ARM_LINUX_KERNEL_AS_BL33
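	/*
	 * The arm64 Linux boot protocol requires registers x1-x3 to be zero
	 * when the kernel is entered directly as BL33.
	 */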
	bl33_image_ep_info.args.arg1 = 0U;
	bl33_image_ep_info.args.arg2 = 0U;
	bl33_image_ep_info.args.arg3 = 0U;
#endif
#endif
#endif /* TRANSFER_LIST */
}

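/*******************************************************************************
 * Default (weak) implementation: initialise the boot console, then run the
 * common ARM early platform setup.
 ******************************************************************************/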
void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
				u_register_t arg2, u_register_t arg3)
{
	/* Initialize the console to provide early debug support */
	arm_console_boot_init();

	arm_bl31_early_platform_setup(arg0, arg1, arg2, arg3);

#if !HW_ASSISTED_COHERENCY
	/*
	 * Initialize Interconnect for this cluster during cold boot.
	 * No need for locks as no other CPU is active.
	 */
	plat_arm_interconnect_init();

	/*
	 * Enable Interconnect coherency for the primary CPU's cluster.
	 * Earlier bootloader stages might already do this (e.g. Trusted
	 * Firmware's BL1 does it) but we can't assume so. There is no harm in
	 * executing this code twice anyway.
	 * Platform specific PSCI code will enable coherency for other
	 * clusters.
	 */
	plat_arm_interconnect_enter_coherency();
#endif
}

/*******************************************************************************
 * Perform any BL31 platform setup common to ARM standard platforms
 ******************************************************************************/
void arm_bl31_platform_setup(void)
{
	struct transfer_list_entry *te __unused;

#if TRANSFER_LIST && !RESET_TO_BL31
	ns_tl = transfer_list_init((void *)FW_NS_HANDOFF_BASE,
				   PLAT_ARM_FW_HANDOFF_SIZE);
	if (ns_tl == NULL) {
		ERROR("Non-secure transfer list initialisation failed!\n");
		panic();
	}
	/* BL31 may modify the HW_CONFIG so defer copying it until later. */
	te = transfer_list_find(secure_tl, TL_TAG_FDT);
	assert(te != NULL);

	/* Populate HW_CONFIG device tree from transfer list entry */
	fconf_populate("HW_CONFIG", (uintptr_t)transfer_list_entry_data(te));

	te = transfer_list_add(ns_tl, TL_TAG_FDT, te->data_size,
			       transfer_list_entry_data(te));
	assert(te != NULL);

	te = transfer_list_find(secure_tl, TL_TAG_TPM_EVLOG);
	if (te != NULL) {
		te = transfer_list_add(ns_tl, TL_TAG_TPM_EVLOG, te->data_size,
				       transfer_list_entry_data(te));
		if (te == NULL) {
			ERROR("Failed to load event log in Non-Secure transfer list\n");
			panic();
		}
	}
#endif /* TRANSFER_LIST && !RESET_TO_BL31 */

#if RESET_TO_BL31
	/*
	 * Do initial security configuration to allow DRAM/device access
	 * (if earlier BL has not already done so).
	 */
	plat_arm_security_setup();

#if defined(PLAT_ARM_MEM_PROT_ADDR)
	arm_nor_psci_do_dyn_mem_protect();
#endif /* PLAT_ARM_MEM_PROT_ADDR */

#endif /* RESET_TO_BL31 */

	/* Enable and initialize the System level generic timer */
	mmio_write_32(ARM_SYS_CNTCTL_BASE + CNTCR_OFF,
		      CNTCR_FCREQ(0U) | CNTCR_EN);

	/* Allow access to the System counter timer module */
	arm_configure_sys_timer();

	/* Initialize power controller before setting up topology */
	plat_arm_pwrc_setup();

#if FFH_SUPPORT
	if (is_feat_ras_supported()) {
		ras_init();
	}
#endif

#if USE_DEBUGFS
	debugfs_init();
#endif /* USE_DEBUGFS */
}

/*******************************************************************************
 * Perform any BL31 platform runtime setup prior to BL31 exit common to ARM
 * standard platforms
 ******************************************************************************/
void arm_bl31_plat_runtime_setup(void)
{
	struct transfer_list_entry *te __unused;
	/* Initialize the runtime console */
	arm_console_runtime_init();

#if TRANSFER_LIST && !RESET_TO_BL31
	/*
	 * BL31 is assumed to have added all transfer entries required by BL33
	 * by this stage. Flush the transfer list so the data is visible to
	 * all observers, even those accessing it with caching disabled.
	 */
	flush_dcache_range((uintptr_t)ns_tl, ns_tl->size);
#endif /* TRANSFER_LIST && !RESET_TO_BL31 */

#if RECLAIM_INIT_CODE
	arm_free_init_memory();
#endif

#if PLAT_RO_XLAT_TABLES
	arm_xlat_make_tables_readonly();
#endif
}

#if RECLAIM_INIT_CODE
/*
 * Make memory for image boot time code RW to reclaim it as stack for the
 * secondary cores, or RO where it cannot be reclaimed:
 *
 *            |-------- INIT SECTION --------|
 *  -----------------------------------------
 * |  CORE 0  |  CORE 1  |  CORE 2  | EXTRA  |
 * |  STACK   |  STACK   |  STACK   | SPACE  |
 *  -----------------------------------------
 * <-------------------> <------>
 *     MAKE RW AND XN       MAKE
 *       FOR STACKS        RO AND XN
 */
void arm_free_init_memory(void)
{
	int ret = 0;

	if (BL_STACKS_END < BL_INIT_CODE_END) {
		/* Reclaim some of the init section as stack if possible. */
		if (BL_INIT_CODE_BASE < BL_STACKS_END) {
			ret |= xlat_change_mem_attributes(BL_INIT_CODE_BASE,
					BL_STACKS_END - BL_INIT_CODE_BASE,
					MT_RW_DATA);
		}
		/* Make the rest of the init section read-only. */
		ret |= xlat_change_mem_attributes(BL_STACKS_END,
				BL_INIT_CODE_END - BL_STACKS_END,
				MT_RO_DATA);
	} else {
		/* The stacks cover the init section, so reclaim it all. */
		ret |= xlat_change_mem_attributes(BL_INIT_CODE_BASE,
				BL_INIT_CODE_END - BL_INIT_CODE_BASE,
				MT_RW_DATA);
	}

	if (ret != 0) {
		ERROR("Could not reclaim initialization code\n");
		panic();
	}
}
#endif

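/*******************************************************************************
 * Default (weak) BL31 setup hooks: run the common ARM implementations and,
 * when the GICv3 driver is in use, register the redistributor frames.
 ******************************************************************************/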
void __init bl31_platform_setup(void)
{
	arm_bl31_platform_setup();

#if USE_GIC_DRIVER == 3
	gic_set_gicr_frames(arm_gicr_base_addrs);
#endif
}

void bl31_plat_runtime_setup(void)
{
	arm_bl31_plat_runtime_setup();
}

/*******************************************************************************
 * Perform the very early platform specific architectural setup shared between
 * ARM standard platforms. This only does basic initialization. Later
 * architectural setup (bl31_arch_setup()) does not do anything platform
 * specific.
 ******************************************************************************/
void __init arm_bl31_plat_arch_setup(void)
{
	const mmap_region_t bl_regions[] = {
		MAP_BL31_TOTAL,
#if ENABLE_RME
		ARM_MAP_L0_GPT_REGION,
#endif
#if RECLAIM_INIT_CODE
		MAP_BL_INIT_CODE,
#endif
#if SEPARATE_NOBITS_REGION
		MAP_BL31_NOBITS,
#endif
		ARM_MAP_BL_RO,
#if USE_ROMLIB
		ARM_MAP_ROMLIB_CODE,
		ARM_MAP_ROMLIB_DATA,
#endif
#if USE_COHERENT_MEM
		ARM_MAP_BL_COHERENT_RAM,
#endif
		{0}
	};

	setup_page_tables(bl_regions, plat_arm_get_mmap());

	enable_mmu_el3(0);

#if ENABLE_RME
#if RESET_TO_BL31
	/* Initialize GPT only when RME is enabled. */
	assert(is_feat_rme_present());

	/* Initialise and enable granule protection after MMU. */
	arm_gpt_setup();
#endif /* RESET_TO_BL31 */
	/*
	 * Initialise Granule Protection library and enable GPC for the primary
	 * processor. The tables have already been initialized by a previous BL
	 * stage, so there is no need to provide any PAS here. This function
	 * sets up pointers to those tables.
	 */
	if (gpt_runtime_init(BITLOCK_BASE, BITLOCK_SIZE) < 0) {
		ERROR("gpt_runtime_init() failed!\n");
		panic();
	}
#endif /* ENABLE_RME */

	arm_setup_romlib();
}

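/*******************************************************************************
 * Default (weak) architectural setup: defer to the common ARM implementation
 * above.
 ******************************************************************************/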
void __init bl31_plat_arch_setup(void)
{
	arm_bl31_plat_arch_setup();
}