1 /*
2 * Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #include <assert.h>
8 #include <string.h>
9
10 #include <libfdt.h>
11
12 #include <platform_def.h>
13
14 #include <arch_features.h>
15 #include <arch_helpers.h>
16 #include <common/bl_common.h>
17 #include <common/debug.h>
18 #include <common/desc_image_load.h>
19 #include <common/fdt_fixup.h>
20 #include <common/fdt_wrappers.h>
21 #include <lib/optee_utils.h>
22 #if TRANSFER_LIST
23 #include <transfer_list.h>
24 #endif
25 #include <lib/utils.h>
26 #include <plat/common/platform.h>
27
28 #include "qemu_private.h"
29
/*
 * Read/write mapping covering the whole of BL2's trusted SRAM, as
 * described by the bl2_tzram_layout meminfo received from BL1.
 */
#define MAP_BL2_TOTAL		MAP_REGION_FLAT(			\
					bl2_tzram_layout.total_base,	\
					bl2_tzram_layout.total_size,	\
					MT_MEMORY | MT_RW | EL3_PAS)

/*
 * Re-map BL2's own code as execute/read-only and its RO data as
 * non-executable read-only, overriding the RW attributes of
 * MAP_BL2_TOTAL for those ranges.
 */
#define MAP_BL2_RO		MAP_REGION_FLAT(			\
					BL_CODE_BASE,			\
					BL_CODE_END - BL_CODE_BASE,	\
					MT_CODE | EL3_PAS),		\
				MAP_REGION_FLAT(			\
					BL_RO_DATA_BASE,		\
					BL_RO_DATA_END			\
						- BL_RO_DATA_BASE,	\
					MT_RO_DATA | EL3_PAS)

#if USE_COHERENT_MEM
/* Coherent (non-cacheable) RAM shared with other bootloader stages. */
#define MAP_BL_COHERENT_RAM	MAP_REGION_FLAT(			\
					BL_COHERENT_RAM_BASE,		\
					BL_COHERENT_RAM_END		\
						- BL_COHERENT_RAM_BASE,	\
					MT_DEVICE | MT_RW | EL3_PAS)
#endif
52
/* Data structure which holds the extents of the trusted SRAM for BL2 */
static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);
/* Secure-world Transfer List used for hand-off; NULL unless TRANSFER_LIST. */
static struct transfer_list_header __maybe_unused *bl2_tl;
56
/*
 * Early platform setup for BL2. arg1 carries a pointer to the meminfo
 * structure describing the trusted SRAM available to BL2; the remaining
 * arguments are unused on this platform.
 */
void bl2_early_platform_setup2(u_register_t arg0, u_register_t arg1,
			       u_register_t arg2, u_register_t arg3)
{
	/* Bring the console up first so everything below can log. */
	qemu_console_init();

	/* Take a private copy of the BL2 memory layout handed over by BL1. */
	bl2_tzram_layout = *(meminfo_t *)arg1;

	/* Register the IO devices used to load subsequent images. */
	plat_qemu_io_setup();
}
70
static void security_setup(void)
{
	/*
	 * Placeholder: on real hardware a TrustZone address space
	 * controller and other security-related peripherals would be
	 * programmed here. QEMU requires no such configuration.
	 */
}
78
update_dt(void)79 static void update_dt(void)
80 {
81 #if TRANSFER_LIST
82 struct transfer_list_entry *te;
83 #endif
84 int ret;
85 void *fdt = (void *)(uintptr_t)ARM_PRELOADED_DTB_BASE;
86 void *dst = plat_qemu_dt_runtime_address();
87
88 ret = fdt_open_into(fdt, dst, PLAT_QEMU_DT_MAX_SIZE);
89 if (ret < 0) {
90 ERROR("Invalid Device Tree at %p: error %d\n", fdt, ret);
91 return;
92 }
93
94 if (dt_add_psci_node(fdt)) {
95 ERROR("Failed to add PSCI Device Tree node\n");
96 return;
97 }
98
99 if (dt_add_psci_cpu_enable_methods(fdt)) {
100 ERROR("Failed to add PSCI cpu enable methods in Device Tree\n");
101 return;
102 }
103
104 #if ENABLE_RME
105 if (fdt_add_reserved_memory(fdt, "rmm", REALM_DRAM_BASE,
106 REALM_DRAM_SIZE)) {
107 ERROR("Failed to reserve RMM memory in Device Tree\n");
108 return;
109 }
110
111 INFO("Reserved RMM memory [0x%lx, 0x%lx] in Device tree\n",
112 (uintptr_t)REALM_DRAM_BASE,
113 (uintptr_t)REALM_DRAM_BASE + REALM_DRAM_SIZE - 1);
114 #endif
115
116 ret = fdt_pack(fdt);
117 if (ret < 0)
118 ERROR("Failed to pack Device Tree at %p: error %d\n", fdt, ret);
119
120 #if TRANSFER_LIST
121 /* create a TE */
122 te = transfer_list_add(bl2_tl, TL_TAG_FDT, fdt_totalsize(fdt), fdt);
123 if (!te) {
124 ERROR("Failed to add FDT entry to Transfer List\n");
125 return;
126 }
127 #endif
128 }
129
/*
 * Main BL2 platform setup: initialise the secure Transfer List (when
 * enabled), configure security peripherals and fix up the device tree.
 */
void bl2_platform_setup(void)
{
#if TRANSFER_LIST
	bl2_tl = transfer_list_init((void *)(uintptr_t)FW_HANDOFF_BASE,
				    FW_HANDOFF_SIZE);
	if (bl2_tl == NULL) {
		/* Non-fatal: later TL operations simply become no-ops. */
		ERROR("Failed to initialize Transfer List at 0x%lx\n",
		      (unsigned long)FW_HANDOFF_BASE);
	}
#endif
	security_setup();
	update_dt();

	/* TODO Initialize timer */
}
145
/*
 * Recompute the secure Transfer List checksum after out-of-band updates
 * (e.g. entries added by image loading). No-op unless TRANSFER_LIST.
 */
void qemu_bl2_sync_transfer_list(void)
{
#if TRANSFER_LIST
	transfer_list_update_checksum(bl2_tl);
#endif
}
152
/*
 * Architecture setup for BL2: build the page tables for BL2's own
 * image regions (plus RMM/GPT regions when RME is enabled) and turn
 * on the MMU at the exception level BL2 runs at.
 */
void bl2_plat_arch_setup(void)
{
	const mmap_region_t bl_regions[] = {
		MAP_BL2_TOTAL,
		MAP_BL2_RO,
#if USE_COHERENT_MEM
		MAP_BL_COHERENT_RAM,
#endif
#if ENABLE_RME
		MAP_RMM_DRAM,
		MAP_GPT_L0_REGION,
		MAP_GPT_L1_REGION,
#endif
		{0}
	};

	setup_page_tables(bl_regions, plat_qemu_get_mmap());

#if ENABLE_RME
	/* BL2 runs in EL3 when RME enabled. */
	assert(is_feat_rme_present());
	enable_mmu_el3(0);
#else /* ENABLE_RME */

#ifdef __aarch64__
	/* Without RME, AArch64 BL2 runs in S-EL1. */
	enable_mmu_el1(0);
#else
	/* AArch32 BL2 runs in Secure SVC/Monitor mode. */
	enable_mmu_svc_mon(0);
#endif
#endif /* ENABLE_RME */
}
184
185 /*******************************************************************************
186 * Gets SPSR for BL32 entry
187 ******************************************************************************/
static uint32_t qemu_get_spsr_for_bl32_entry(void)
{
#ifdef __aarch64__
	/*
	 * The Secure Payload Dispatcher service is responsible for
	 * setting the SPSR prior to entry into the BL3-2 image.
	 */
	return 0;
#else
	/* AArch32: enter BL32 directly in Secure SVC mode, ARM state. */
	return SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE,
			   DISABLE_ALL_EXCEPTIONS);
#endif
}
201
202 /*******************************************************************************
203 * Gets SPSR for BL33 entry
204 ******************************************************************************/
qemu_get_spsr_for_bl33_entry(void)205 static uint32_t qemu_get_spsr_for_bl33_entry(void)
206 {
207 uint32_t spsr;
208 #ifdef __aarch64__
209 unsigned int mode;
210
211 /* Figure out what mode we enter the non-secure world in */
212 mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;
213
214 /*
215 * TODO: Consider the possibility of specifying the SPSR in
216 * the FIP ToC and allowing the platform to have a say as
217 * well.
218 */
219 spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
220 #else
221 spsr = SPSR_MODE32(MODE32_svc,
222 plat_get_ns_image_entrypoint() & 0x1,
223 SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
224 #endif
225 return spsr;
226 }
227
228 #if defined(SPD_spmd) && SPMD_SPM_AT_SEL2
load_sps_from_tb_fw_config(struct image_info * image_info)229 static int load_sps_from_tb_fw_config(struct image_info *image_info)
230 {
231 void *dtb = (void *)image_info->image_base;
232 const char *compat_str = "arm,sp";
233 const struct fdt_property *uuid;
234 uint32_t load_addr;
235 const char *name;
236 int sp_node;
237 int node;
238
239 node = fdt_node_offset_by_compatible(dtb, -1, compat_str);
240 if (node < 0) {
241 ERROR("Can't find %s in TB_FW_CONFIG", compat_str);
242 return -1;
243 }
244
245 fdt_for_each_subnode(sp_node, dtb, node) {
246 name = fdt_get_name(dtb, sp_node, NULL);
247 if (name == NULL) {
248 ERROR("Can't get name of node in dtb\n");
249 return -1;
250 }
251 uuid = fdt_get_property(dtb, sp_node, "uuid", NULL);
252 if (uuid == NULL) {
253 ERROR("Can't find property uuid in node %s", name);
254 return -1;
255 }
256 if (fdt_read_uint32(dtb, sp_node, "load-address",
257 &load_addr) < 0) {
258 ERROR("Can't read load-address in node %s", name);
259 return -1;
260 }
261 if (qemu_io_register_sp_pkg(name, uuid->data, load_addr) < 0) {
262 return -1;
263 }
264 }
265
266 return 0;
267 }
268 #endif /*defined(SPD_spmd) && SPMD_SPM_AT_SEL2*/
269
270 #if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
/*
 * Record the base address of OP-TEE's pageable part in the secure
 * Transfer List so OP-TEE can locate it after hand-off.
 *
 * pagable_part: base address of the paged image (sic: upstream spelling).
 * Returns 0 on success (or when TRANSFER_LIST is disabled), -1 if the
 * entry could not be added.
 */
static int handoff_pageable_part(uint64_t pagable_part)
{
#if TRANSFER_LIST
	struct transfer_list_entry *te;

	te = transfer_list_add(bl2_tl, TL_TAG_OPTEE_PAGABLE_PART,
			       sizeof(pagable_part), &pagable_part);
	if (!te) {
		INFO("Cannot add TE for pageable part\n");
		return -1;
	}
#endif
	return 0;
}
285 #endif
286
/*
 * Post-load fix-ups for each image BL2 loads: populate the entry-point
 * arguments and SPSR that the image expects on entry. Returns 0 on
 * success or a negative value on failure.
 *
 * The behaviour per image is heavily build-config dependent
 * (TRANSFER_LIST, the OP-TEE SPDs, SPMD) — see the guards below.
 */
static int qemu_bl2_handle_post_image_load(unsigned int image_id)
{
	int err = 0;
	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
	bl_mem_params_node_t *pager_mem_params = NULL;
	bl_mem_params_node_t *paged_mem_params = NULL;
	image_info_t *paged_image_info = NULL;
#endif
#if defined(SPD_spmd)
	bl_mem_params_node_t *bl32_mem_params = NULL;
#endif
#if TRANSFER_LIST
	struct transfer_list_header *ns_tl = NULL;
#endif

	assert(bl_mem_params);

	switch (image_id) {
#if TRANSFER_LIST
	case BL31_IMAGE_ID:
		/*
		 * arg0 is a bl_params_t reserved for bl31_early_platform_setup2
		 * we just need arg1 and arg3 for BL31 to update the TL from S
		 * to NS memory before it exits
		 */
#ifdef __aarch64__
		if (GET_RW(bl_mem_params->ep_info.spsr) == MODE_RW_64) {
			bl_mem_params->ep_info.args.arg1 =
				TRANSFER_LIST_HANDOFF_X1_VALUE(REGISTER_CONVENTION_VERSION);
		} else
#endif
		{
			/* AArch32 register-convention signature goes in r1. */
			bl_mem_params->ep_info.args.arg1 =
				TRANSFER_LIST_HANDOFF_R1_VALUE(REGISTER_CONVENTION_VERSION);
		}

		bl_mem_params->ep_info.args.arg3 = (uintptr_t)bl2_tl;
		break;
#endif
	case BL32_IMAGE_ID:
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
		/* OP-TEE splits into pager (EXTRA1) and paged (EXTRA2) parts. */
		pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
		assert(pager_mem_params);

#if !defined(SPMC_OPTEE)
		paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
		assert(paged_mem_params);
#endif
		if (paged_mem_params)
			paged_image_info = &paged_mem_params->image_info;

		/* Parse the OP-TEE header; rewrites ep_info/image_info. */
		err = parse_optee_header(&bl_mem_params->ep_info,
					 &pager_mem_params->image_info,
					 paged_image_info);
		if (err != 0) {
			WARN("OPTEE header parse error.\n");
		}

		/*
		 * Only add TL_TAG_OPTEE_PAGABLE_PART entry to the TL if
		 * the paged image has a size.
		 */
		if (paged_image_info && paged_image_info->image_size &&
		    handoff_pageable_part(paged_image_info->image_base)) {
			return -1;
		}
#endif

		INFO("Handoff to BL32\n");
		bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl32_entry();
#if TRANSFER_LIST
		/* Prefer the TL hand-off convention when the TL is usable. */
		if (transfer_list_set_handoff_args(bl2_tl,
						   &bl_mem_params->ep_info))
			break;
#endif
		INFO("Using default arguments\n");
#if defined(SPMC_OPTEE)
		/*
		 * Explicit zeroes to unused registers since they may have
		 * been populated by parse_optee_header() above.
		 *
		 * OP-TEE expects system DTB in x2 and TOS_FW_CONFIG in x0,
		 * the latter is filled in below for TOS_FW_CONFIG_ID and
		 * applies to any other SPMC too.
		 */
		bl_mem_params->ep_info.args.arg2 = ARM_PRELOADED_DTB_BASE;
#elif defined(SPD_opteed)
		/*
		 * OP-TEE expect to receive DTB address in x2.
		 * This will be copied into x2 by dispatcher.
		 */
		bl_mem_params->ep_info.args.arg3 = ARM_PRELOADED_DTB_BASE;
#elif defined(AARCH32_SP_OPTEE)
		bl_mem_params->ep_info.args.arg0 =
			bl_mem_params->ep_info.args.arg1;
		bl_mem_params->ep_info.args.arg1 = 0;
		bl_mem_params->ep_info.args.arg2 = ARM_PRELOADED_DTB_BASE;
		bl_mem_params->ep_info.args.arg3 = 0;
#endif
		break;

	case BL33_IMAGE_ID:
#ifdef AARCH32_SP_OPTEE
		/* AArch32 only core: OP-TEE expects NSec EP in register LR */
		pager_mem_params = get_bl_mem_params_node(BL32_IMAGE_ID);
		assert(pager_mem_params);
		pager_mem_params->ep_info.lr_svc = bl_mem_params->ep_info.pc;
#endif

		bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl33_entry();

#if ARM_LINUX_KERNEL_AS_BL33
		/*
		 * According to the file ``Documentation/arm64/booting.txt`` of
		 * the Linux kernel tree, Linux expects the physical address of
		 * the device tree blob (DTB) in x0, while x1-x3 are reserved
		 * for future use and must be 0.
		 */
		bl_mem_params->ep_info.args.arg0 =
			(u_register_t)ARM_PRELOADED_DTB_BASE;
		bl_mem_params->ep_info.args.arg1 = 0U;
		bl_mem_params->ep_info.args.arg2 = 0U;
		bl_mem_params->ep_info.args.arg3 = 0U;
#elif TRANSFER_LIST
		if (bl2_tl) {
			/* relocate the tl to pre-allocate NS memory */
			ns_tl = transfer_list_relocate(bl2_tl,
					(void *)(uintptr_t)FW_NS_HANDOFF_BASE,
					bl2_tl->max_size);
			if (!ns_tl) {
				ERROR("Relocate TL to 0x%lx failed\n",
				      (unsigned long)FW_NS_HANDOFF_BASE);
				return -1;
			}
		}

		INFO("Handoff to BL33\n");
		if (!transfer_list_set_handoff_args(ns_tl,
						    &bl_mem_params->ep_info)) {
			INFO("Invalid TL, fallback to default arguments\n");
			bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
		}
#else
		/* BL33 expects to receive the primary CPU MPID (through r0) */
		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
#endif /* ARM_LINUX_KERNEL_AS_BL33 */

		break;
#ifdef SPD_spmd
#if SPMD_SPM_AT_SEL2
	case TB_FW_CONFIG_ID:
		/* Register the SP packages described by TB_FW_CONFIG. */
		err = load_sps_from_tb_fw_config(&bl_mem_params->image_info);
		break;
#endif
	case TOS_FW_CONFIG_ID:
		/* An SPMC expects TOS_FW_CONFIG in x0/r0 */
		bl32_mem_params = get_bl_mem_params_node(BL32_IMAGE_ID);
		bl32_mem_params->ep_info.args.arg0 =
			bl_mem_params->image_info.image_base;
		break;
#endif
	default:
		/* Do nothing in default case */
		break;
	}

	return err;
}
456
457 /*******************************************************************************
458 * This function can be used by the platforms to update/use image
459 * information for given `image_id`.
460 ******************************************************************************/
/* Generic-framework hook: delegate to the QEMU-specific handler above. */
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
	return qemu_bl2_handle_post_image_load(image_id);
}
465
/* Entry point of the non-secure (BL33) image on this platform. */
uintptr_t plat_get_ns_image_entrypoint(void)
{
	return NS_IMAGE_OFFSET;
}
470