/*
 * Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <drivers/console.h>
#include <lib/debugfs.h>
#include <lib/extensions/ras.h>
#include <lib/fconf/fconf.h>
#include <lib/gpt_rme/gpt_rme.h>
#include <lib/mmio.h>
#include <services/lfa_svc.h>
#if TRANSFER_LIST
#include <transfer_list.h>
#endif
#include <lib/xlat_tables/xlat_tables_compat.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/arm/common/plat_arm_lfa_components.h>
#include <plat/common/platform.h>
#include <platform_def.h>

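/*
 * Pointers to the secure and non-secure transfer lists used for the firmware
 * handoff (only populated when TRANSFER_LIST is enabled).
 */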
struct transfer_list_header *secure_tl;
struct transfer_list_header *ns_tl __unused;

#if USE_GIC_DRIVER == 3
uintptr_t arm_gicr_base_addrs[2] = {
        PLAT_ARM_GICR_BASE,     /* GICR Base address of the primary CPU */
        0U                      /* Zero Termination */
};
#endif

/*
 * Placeholder variables for copying the arguments that have been passed to
 * BL31 from BL2.
 */
static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;

#if ENABLE_RME
static entry_point_info_t rmm_image_ep_info;
#if (RME_GPT_BITLOCK_BLOCK == 0)
#define BITLOCK_BASE    UL(0)
#define BITLOCK_SIZE    UL(0)
#else
/*
 * Number of bitlock_t entries in bitlocks array for PLAT_ARM_PPS
 * with RME_GPT_BITLOCK_BLOCK * 512MB per bitlock.
 */
#if (PLAT_ARM_PPS > (RME_GPT_BITLOCK_BLOCK * SZ_512M * UL(8)))
#define BITLOCKS_NUM    (PLAT_ARM_PPS) / \
                        (RME_GPT_BITLOCK_BLOCK * SZ_512M * UL(8))
#else
#define BITLOCKS_NUM    U(1)
#endif
/*
 * Bitlocks array
 */
static bitlock_t gpt_bitlock[BITLOCKS_NUM];
#define BITLOCK_BASE    (uintptr_t)gpt_bitlock
#define BITLOCK_SIZE    sizeof(gpt_bitlock)
#endif /* RME_GPT_BITLOCK_BLOCK */
#endif /* ENABLE_RME */

#if !RESET_TO_BL31
/*
 * Check that BL31_BASE is above ARM_FW_CONFIG_LIMIT. The reserved page
 * is required for SOC_FW_CONFIG/TOS_FW_CONFIG passed from BL2.
 */
#if TRANSFER_LIST
CASSERT(BL31_BASE >= PLAT_ARM_EL3_FW_HANDOFF_LIMIT, assert_bl31_base_overflows);
#else
CASSERT(BL31_BASE >= ARM_FW_CONFIG_LIMIT, assert_bl31_base_overflows);
#endif /* TRANSFER_LIST */
#endif /* RESET_TO_BL31 */

/* Weak definitions may be overridden in specific ARM standard platform */
#pragma weak bl31_early_platform_setup2
#pragma weak bl31_platform_setup
#pragma weak bl31_plat_arch_setup
#pragma weak bl31_plat_get_next_image_ep_info
#pragma weak bl31_plat_runtime_setup

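/* Map the whole BL31 image as read-write normal memory in the EL3 PAS. */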
#define MAP_BL31_TOTAL          MAP_REGION_FLAT(                        \
                                        BL31_START,                     \
                                        BL31_END - BL31_START,          \
                                        MT_MEMORY | MT_RW | EL3_PAS)
#if RECLAIM_INIT_CODE
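/* Boundaries of the init code and stacks, as laid out by the linker script. */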
IMPORT_SYM(unsigned long, __INIT_CODE_START__, BL_INIT_CODE_BASE);
IMPORT_SYM(unsigned long, __INIT_CODE_END__, BL_CODE_END_UNALIGNED);
IMPORT_SYM(unsigned long, __STACKS_END__, BL_STACKS_END_UNALIGNED);

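/* Round the init code and stack end addresses up to a page boundary. */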
#define BL_INIT_CODE_END        ((BL_CODE_END_UNALIGNED + PAGE_SIZE - 1) & \
                                        ~(PAGE_SIZE - 1))
#define BL_STACKS_END           ((BL_STACKS_END_UNALIGNED + PAGE_SIZE - 1) & \
                                        ~(PAGE_SIZE - 1))

#define MAP_BL_INIT_CODE        MAP_REGION_FLAT(                        \
                                        BL_INIT_CODE_BASE,              \
                                        BL_INIT_CODE_END                \
                                                - BL_INIT_CODE_BASE,    \
                                        MT_CODE | EL3_PAS)
#endif

#if SEPARATE_NOBITS_REGION
#define MAP_BL31_NOBITS         MAP_REGION_FLAT(                        \
                                        BL31_NOBITS_BASE,               \
                                        BL31_NOBITS_LIMIT               \
                                                - BL31_NOBITS_BASE,     \
                                        MT_MEMORY | MT_RW | EL3_PAS)

#endif
/*******************************************************************************
 * Return a pointer to the 'entry_point_info' structure of the next image for the
 * security state specified. BL33 corresponds to the non-secure image type
 * while BL32 corresponds to the secure image type. A NULL pointer is returned
 * if the image does not exist.
 ******************************************************************************/
struct entry_point_info *bl31_plat_get_next_image_ep_info(uint32_t type)
{
        entry_point_info_t *next_image_info;

        assert(sec_state_is_valid(type));
        if (type == NON_SECURE) {
#if TRANSFER_LIST && !RESET_TO_BL31
                next_image_info = transfer_list_set_handoff_args(
                        ns_tl, &bl33_image_ep_info);
#else
                next_image_info = &bl33_image_ep_info;
#endif
        }
#if ENABLE_RME
        else if (type == REALM) {
#if LFA_SUPPORT
                if (lfa_is_prime_complete(LFA_RMM_COMPONENT)) {
                        rmm_image_ep_info.pc =
                                RMM_BASE + RMM_BANK_SIZE;
                }
#endif /* LFA_SUPPORT */
                next_image_info = &rmm_image_ep_info;
        }
#endif
        else {
#if TRANSFER_LIST && !RESET_TO_BL31
                next_image_info = transfer_list_set_handoff_args(
                        secure_tl, &bl32_image_ep_info);
#else
                next_image_info = &bl32_image_ep_info;
#endif
        }

        /*
         * None of the images on the ARM development platforms can have 0x0
         * as the entrypoint
         */
        if (next_image_info->pc)
                return next_image_info;
        else
                return NULL;
}

/*******************************************************************************
 * Perform any BL31 early platform setup common to ARM standard platforms.
 * Here is an opportunity to copy parameters passed by the calling EL (S-EL1
 * in BL2 & EL3 in BL1) before they are lost (potentially). This needs to be
 * done before the MMU is initialized so that the memory layout can be used
 * while creating page tables. BL2 has flushed this information to memory, so
 * we are guaranteed to pick up good data.
 ******************************************************************************/
void __init arm_bl31_early_platform_setup(u_register_t arg0, u_register_t arg1,
                                          u_register_t arg2, u_register_t arg3)
{
#if TRANSFER_LIST
#if RESET_TO_BL31
        /* Populate entry point information for BL33 */
        SET_PARAM_HEAD(&bl33_image_ep_info, PARAM_EP, VERSION_1, 0);
        /*
         * Tell BL31 where the non-trusted software image
         * is located and the entry state information
         */
        bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();

        bl33_image_ep_info.spsr = arm_get_spsr(BL33_IMAGE_ID);
        SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);

        bl33_image_ep_info.args.arg0 = PLAT_ARM_TRANSFER_LIST_DTB_OFFSET;
        bl33_image_ep_info.args.arg1 =
                TRANSFER_LIST_HANDOFF_X1_VALUE(REGISTER_CONVENTION_VERSION);
        bl33_image_ep_info.args.arg3 = FW_NS_HANDOFF_BASE;
#else
        struct transfer_list_entry *te = NULL;
        struct entry_point_info *ep;

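        /* BL2 passes the address of the secure transfer list in arg3. */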
        secure_tl = (struct transfer_list_header *)arg3;

        /*
         * Populate the global entry point structures used to execute subsequent
         * images.
         */
        while ((te = transfer_list_next(secure_tl, te)) != NULL) {
                ep = transfer_list_entry_data(te);

                if (te->tag_id == TL_TAG_EXEC_EP_INFO64) {
                        switch (GET_SECURITY_STATE(ep->h.attr)) {
                        case NON_SECURE:
                                bl33_image_ep_info = *ep;
                                break;
#if ENABLE_RME
                        case REALM:
                                rmm_image_ep_info = *ep;
                                break;
#endif
                        case SECURE:
                                bl32_image_ep_info = *ep;
                                break;
                        default:
                                ERROR("Unrecognized Image Security State %lu\n",
                                      GET_SECURITY_STATE(ep->h.attr));
                                panic();
                        }
                }
        }
#endif /* RESET_TO_BL31 */
#else /* (!TRANSFER_LIST) */
#if RESET_TO_BL31
        /* If BL31 is a reset vector, the parameters must be ignored */
        (void)arg0;
        (void)arg1;
        (void)arg2;
        (void)arg3;

# ifdef BL32_BASE
        /* Populate entry point information for BL32 */
        SET_PARAM_HEAD(&bl32_image_ep_info,
                       PARAM_EP,
                       VERSION_1,
                       0);
        SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
        bl32_image_ep_info.pc = BL32_BASE;
        bl32_image_ep_info.spsr = arm_get_spsr(BL32_IMAGE_ID);

#if defined(SPD_spmd)
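        /* Pass the SPMC manifest base address to the SPMC via its first argument. */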
        bl32_image_ep_info.args.arg0 = ARM_SPMC_MANIFEST_BASE;
#endif

# endif /* BL32_BASE */

        /* Populate entry point information for BL33 */
        SET_PARAM_HEAD(&bl33_image_ep_info,
                       PARAM_EP,
                       VERSION_1,
                       0);
        /*
         * Tell BL31 where the non-trusted software image
         * is located and the entry state information
         */
        bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();
        bl33_image_ep_info.spsr = arm_get_spsr(BL33_IMAGE_ID);
        SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);

#if ENABLE_RME
        /*
         * Populate entry point information for RMM.
         * Only PC needs to be set as other fields are determined by RMMD.
         */
        rmm_image_ep_info.pc = RMM_BASE;
#endif /* ENABLE_RME */
#else /* RESET_TO_BL31 */
        /*
         * In debug builds, we pass a special value in 'arg3'
         * to verify platform parameters from BL2 to BL31.
         * In release builds, it's not used.
         */
#if DEBUG
        assert(((uintptr_t)arg3) == ARM_BL31_PLAT_PARAM_VAL);
#endif

        /*
         * Check that the parameters passed from BL2 are not NULL and are valid.
         */
        bl_params_t *params_from_bl2 = (bl_params_t *)(uintptr_t)arg0;
        assert(params_from_bl2 != NULL);
        assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
        assert(params_from_bl2->h.version >= VERSION_2);

        bl_params_node_t *bl_params = params_from_bl2->head;

        /*
         * Copy the BL33, BL32 and RMM (if present) entry point information.
         * They are stored in Secure RAM, in BL2's address space.
         */
        while (bl_params != NULL) {
                if (bl_params->image_id == BL32_IMAGE_ID) {
                        bl32_image_ep_info = *bl_params->ep_info;
#if SPMC_AT_EL3
                        /*
                         * Populate the BL32 image base, size and max limit in
                         * the entry point information, since there is no
                         * platform function to retrieve them in generic
                         * code. We choose arg2, arg3 and arg4 since the generic
                         * code uses arg1 for stashing the SP manifest size. The
                         * SPMC setup uses these arguments to update the SP
                         * manifest with the actual SP base address and its size.
                         */
                        bl32_image_ep_info.args.arg2 =
                                bl_params->image_info->image_base;
                        bl32_image_ep_info.args.arg3 =
                                bl_params->image_info->image_size;
                        bl32_image_ep_info.args.arg4 =
                                bl_params->image_info->image_base +
                                bl_params->image_info->image_max_size;
#endif
                }
#if ENABLE_RME
                else if (bl_params->image_id == RMM_IMAGE_ID) {
                        rmm_image_ep_info = *bl_params->ep_info;
                }
#endif
                else if (bl_params->image_id == BL33_IMAGE_ID) {
                        bl33_image_ep_info = *bl_params->ep_info;
                }

                bl_params = bl_params->next_params_info;
        }

        if (bl33_image_ep_info.pc == 0U)
                panic();
#if ENABLE_RME
        if (rmm_image_ep_info.pc == 0U)
                panic();
#endif
#endif /* RESET_TO_BL31 */

#if USE_KERNEL_DT_CONVENTION
        /*
         * Only use the default DT base address if TF-A has not supplied one.
         * This can occur when the DT is side-loaded and its memory location
         * is unknown (e.g., RESET_TO_BL31).
         */

        if (bl33_image_ep_info.args.arg0 == 0U) {
                bl33_image_ep_info.args.arg0 = HW_CONFIG_BASE;
        }

#if ARM_LINUX_KERNEL_AS_BL33
        bl33_image_ep_info.args.arg1 = 0U;
        bl33_image_ep_info.args.arg2 = 0U;
        bl33_image_ep_info.args.arg3 = 0U;
#endif
#endif
#endif /* TRANSFER_LIST */
}

void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
                                u_register_t arg2, u_register_t arg3)
{
        /* Initialize the console to provide early debug support */
        arm_console_boot_init();

        arm_bl31_early_platform_setup(arg0, arg1, arg2, arg3);

#if !HW_ASSISTED_COHERENCY
        /*
         * Initialize Interconnect for this cluster during cold boot.
         * No need for locks as no other CPU is active.
         */
        plat_arm_interconnect_init();

        /*
         * Enable Interconnect coherency for the primary CPU's cluster.
         * Earlier bootloader stages might already do this (e.g. Trusted
         * Firmware's BL1 does it) but we can't assume so. There is no harm in
         * executing this code twice anyway.
         * Platform specific PSCI code will enable coherency for other
         * clusters.
         */
        plat_arm_interconnect_enter_coherency();
#endif
}

/*******************************************************************************
 * Perform any BL31 platform setup common to ARM standard platforms
 ******************************************************************************/
void arm_bl31_platform_setup(void)
{
        struct transfer_list_entry *te __unused;

#if TRANSFER_LIST && !RESET_TO_BL31
        ns_tl = transfer_list_init((void *)FW_NS_HANDOFF_BASE,
                                   PLAT_ARM_FW_HANDOFF_SIZE);
        if (ns_tl == NULL) {
                ERROR("Non-secure transfer list initialisation failed!\n");
                panic();
        }
        /* BL31 may modify the HW_CONFIG so defer copying it until later. */
        te = transfer_list_find(secure_tl, TL_TAG_FDT);
        assert(te != NULL);

        /*
         * A pre-existing assumption is that FCONF is unsupported with
         * RESET_TO_BL2 and RESET_TO_BL31. In the case of RESET_TO_BL31 this
         * makes sense because there isn't a prior stage to load the device
         * tree, but the reasoning for RESET_TO_BL2 is less clear. For the
         * moment, hardware properties that would normally be derived from the
         * DT are statically defined.
         */
#if !RESET_TO_BL2
        fconf_populate("HW_CONFIG", (uintptr_t)transfer_list_entry_data(te));
#endif

        te = transfer_list_add(ns_tl, TL_TAG_FDT, te->data_size,
                               transfer_list_entry_data(te));
        assert(te != NULL);

        te = transfer_list_find(secure_tl, TL_TAG_TPM_EVLOG);
        if (te != NULL) {
                te = transfer_list_add(ns_tl, TL_TAG_TPM_EVLOG, te->data_size,
                                       transfer_list_entry_data(te));
                if (te == NULL) {
                        ERROR("Failed to load event log in Non-Secure transfer list\n");
                        panic();
                }
        }
#endif /* TRANSFER_LIST && !RESET_TO_BL31 */

#if RESET_TO_BL31
        /*
         * Do initial security configuration to allow DRAM/device access
         * (if earlier BL has not already done so).
         */
        plat_arm_security_setup();

#if defined(PLAT_ARM_MEM_PROT_ADDR)
        arm_nor_psci_do_dyn_mem_protect();
#endif /* PLAT_ARM_MEM_PROT_ADDR */

#endif /* RESET_TO_BL31 */

        /* Enable and initialize the System level generic timer */
        mmio_write_32(ARM_SYS_CNTCTL_BASE + CNTCR_OFF,
                      CNTCR_FCREQ(0U) | CNTCR_EN);

        /* Allow access to the System counter timer module */
        arm_configure_sys_timer();

        /* Initialize power controller before setting up topology */
        plat_arm_pwrc_setup();

#if ENABLE_FEAT_RAS && FFH_SUPPORT
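        /* Set up RAS error handling when firmware-first handling is enabled. */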
        ras_init();
#endif

#if USE_DEBUGFS
        debugfs_init();
#endif /* USE_DEBUGFS */
}

/*******************************************************************************
 * Perform any BL31 platform runtime setup prior to BL31 exit common to ARM
 * standard platforms
 ******************************************************************************/
void arm_bl31_plat_runtime_setup(void)
{
        struct transfer_list_entry *te __unused;
        /* Initialize the runtime console */
        arm_console_runtime_init();

#if TRANSFER_LIST && !RESET_TO_BL31
        /*
         * All TEs required by BL33 are assumed to have been added by this
         * stage. Flush the transfer list so the data is visible to all
         * observers, even those accessing it with caching disabled.
         */
        flush_dcache_range((uintptr_t)ns_tl, ns_tl->size);
#endif /* TRANSFER_LIST && !RESET_TO_BL31 */

#if RECLAIM_INIT_CODE
        arm_free_init_memory();
#endif

#if PLAT_RO_XLAT_TABLES
        arm_xlat_make_tables_readonly();
#endif
}

#if RECLAIM_INIT_CODE
/*
 * Make memory for image boot time code RW to reclaim it as stack for the
 * secondary cores, or RO where it cannot be reclaimed:
 *
 *            |-------- INIT SECTION --------|
 *  -----------------------------------------
 *  |  CORE 0 | CORE 1 | CORE 2 |   EXTRA    |
 *  |  STACK  | STACK  | STACK  |   SPACE    |
 *  -----------------------------------------
 *             <-------------------> <------>
 *                MAKE RW AND XN       MAKE
 *                 FOR STACKS        RO AND XN
 */
void arm_free_init_memory(void)
{
        int ret = 0;

        if (BL_STACKS_END < BL_INIT_CODE_END) {
                /* Reclaim some of the init section as stack if possible. */
                if (BL_INIT_CODE_BASE < BL_STACKS_END) {
                        ret |= xlat_change_mem_attributes(BL_INIT_CODE_BASE,
                                        BL_STACKS_END - BL_INIT_CODE_BASE,
                                        MT_RW_DATA);
                }
                /* Make the rest of the init section read-only. */
                ret |= xlat_change_mem_attributes(BL_STACKS_END,
                                BL_INIT_CODE_END - BL_STACKS_END,
                                MT_RO_DATA);
        } else {
                /* The stacks cover the init section, so reclaim it all. */
                ret |= xlat_change_mem_attributes(BL_INIT_CODE_BASE,
                                BL_INIT_CODE_END - BL_INIT_CODE_BASE,
                                MT_RW_DATA);
        }

        if (ret != 0) {
                ERROR("Could not reclaim initialization code\n");
                panic();
        }
}
#endif

void __init bl31_platform_setup(void)
{
        arm_bl31_platform_setup();

#if USE_GIC_DRIVER == 3
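        /* Hand the list of GICR frame base addresses to the GIC driver. */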
        gic_set_gicr_frames(arm_gicr_base_addrs);
#endif
}

void bl31_plat_runtime_setup(void)
{
        arm_bl31_plat_runtime_setup();
}

/*******************************************************************************
 * Perform the very early platform specific architectural setup shared between
 * ARM standard platforms. This only does basic initialization. Later
 * architectural setup (bl31_arch_setup()) does not do anything platform
 * specific.
 ******************************************************************************/
void __init arm_bl31_plat_arch_setup(void)
{
        const mmap_region_t bl_regions[] = {
                MAP_BL31_TOTAL,
#if ENABLE_RME
                ARM_MAP_L0_GPT_REGION,
#endif
#if RECLAIM_INIT_CODE
                MAP_BL_INIT_CODE,
#endif
#if SEPARATE_NOBITS_REGION
                MAP_BL31_NOBITS,
#endif
                ARM_MAP_BL_RO,
#if USE_ROMLIB
                ARM_MAP_ROMLIB_CODE,
                ARM_MAP_ROMLIB_DATA,
#endif
#if USE_COHERENT_MEM
                ARM_MAP_BL_COHERENT_RAM,
#endif
                {0}
        };

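        /* Build the EL3 translation tables for these regions and enable the MMU. */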
        setup_page_tables(bl_regions, plat_arm_get_mmap());

        enable_mmu_el3(0);

#if ENABLE_RME
#if RESET_TO_BL31
        /* initialize GPT only when RME is enabled. */
        assert(is_feat_rme_present());

        /* Initialise and enable granule protection after MMU. */
        arm_gpt_setup();
#endif /* RESET_TO_BL31 */
        /*
         * Initialise Granule Protection library and enable GPC for the primary
         * processor. The tables have already been initialized by a previous BL
         * stage, so there is no need to provide any PAS here. This function
         * sets up pointers to those tables.
         */
        if (gpt_runtime_init(BITLOCK_BASE, BITLOCK_SIZE) < 0) {
                ERROR("gpt_runtime_init() failed!\n");
                panic();
        }
#endif /* ENABLE_RME */

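        /* Set up the ROM library, if one is in use (USE_ROMLIB). */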
        arm_setup_romlib();
}

void __init bl31_plat_arch_setup(void)
{
        arm_bl31_plat_arch_setup();
}
