/*
 * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <libfdt.h>

#include <platform_def.h>

#include <arch_features.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/desc_image_load.h>
#include <common/fdt_fixup.h>
#include <common/fdt_wrappers.h>
#include <lib/optee_utils.h>
#include <lib/transfer_list.h>
#include <lib/utils.h>
#include <plat/common/platform.h>
#if ENABLE_RME
#include <qemu_pas_def.h>
#endif

#include "qemu_private.h"
#define MAP_BL2_TOTAL		MAP_REGION_FLAT(			\
					bl2_tzram_layout.total_base,	\
					bl2_tzram_layout.total_size,	\
					MT_MEMORY | MT_RW | EL3_PAS)

#define MAP_BL2_RO		MAP_REGION_FLAT(			\
					BL_CODE_BASE,			\
					BL_CODE_END - BL_CODE_BASE,	\
					MT_CODE | EL3_PAS),		\
				MAP_REGION_FLAT(			\
					BL_RO_DATA_BASE,		\
					BL_RO_DATA_END			\
						- BL_RO_DATA_BASE,	\
					MT_RO_DATA | EL3_PAS)

#if USE_COHERENT_MEM
#define MAP_BL_COHERENT_RAM	MAP_REGION_FLAT(			\
					BL_COHERENT_RAM_BASE,		\
					BL_COHERENT_RAM_END		\
						- BL_COHERENT_RAM_BASE,	\
					MT_DEVICE | MT_RW | EL3_PAS)
#endif

/* Data structure which holds the extents of the trusted SRAM for BL2 */
static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);
static struct transfer_list_header *bl2_tl;

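/*
 * BL2 early platform setup: copy the trusted SRAM layout handed over by the
 * previous stage (arg1), bring up the console for early debug output and set
 * up the IO layer used to load the subsequent images.
 */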
void bl2_early_platform_setup2(u_register_t arg0, u_register_t arg1,
			       u_register_t arg2, u_register_t arg3)
{
	meminfo_t *mem_layout = (void *)arg1;

	/* Initialize the console to provide early debug support */
	qemu_console_init();

	/* Setup the BL2 memory layout */
	bl2_tzram_layout = *mem_layout;

	plat_qemu_io_setup();
}

static void security_setup(void)
{
	/*
	 * This is where a TrustZone address space controller and other
	 * security-related peripherals would be configured.
	 */
}
static void update_dt(void)
{
#if TRANSFER_LIST
	struct transfer_list_entry *te;
#endif
	int ret;
	void *fdt = (void *)(uintptr_t)ARM_PRELOADED_DTB_BASE;

	ret = fdt_open_into(fdt, fdt, PLAT_QEMU_DT_MAX_SIZE);
	if (ret < 0) {
		ERROR("Invalid Device Tree at %p: error %d\n", fdt, ret);
		return;
	}

	if (dt_add_psci_node(fdt)) {
		ERROR("Failed to add PSCI Device Tree node\n");
		return;
	}

	if (dt_add_psci_cpu_enable_methods(fdt)) {
		ERROR("Failed to add PSCI cpu enable methods in Device Tree\n");
		return;
	}

#if ENABLE_RME
	if (fdt_add_reserved_memory(fdt, "rmm", REALM_DRAM_BASE,
				    REALM_DRAM_SIZE)) {
		ERROR("Failed to reserve RMM memory in Device Tree\n");
		return;
	}

	INFO("Reserved RMM memory [0x%lx, 0x%lx] in Device tree\n",
	     (uintptr_t)REALM_DRAM_BASE,
	     (uintptr_t)REALM_DRAM_BASE + REALM_DRAM_SIZE - 1);
#endif

	ret = fdt_pack(fdt);
	if (ret < 0)
		ERROR("Failed to pack Device Tree at %p: error %d\n", fdt, ret);

#if TRANSFER_LIST
	/* create a TE */
	te = transfer_list_add(bl2_tl, TL_TAG_FDT, fdt_totalsize(fdt), fdt);
	if (!te) {
		ERROR("Failed to add FDT entry to Transfer List\n");
		return;
	}
#endif
}

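/*
 * BL2 platform setup: initialize the transfer list in the firmware handoff
 * area (when TRANSFER_LIST is enabled), configure security peripherals and
 * update the device tree.
 */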
void bl2_platform_setup(void)
{
#if TRANSFER_LIST
	bl2_tl = transfer_list_init((void *)(uintptr_t)FW_HANDOFF_BASE,
				    FW_HANDOFF_SIZE);
	if (!bl2_tl) {
		ERROR("Failed to initialize Transfer List at 0x%lx\n",
		      (unsigned long)FW_HANDOFF_BASE);
	}
#endif
	security_setup();
	update_dt();

	/* TODO Initialize timer */
}

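/* Recompute the transfer list checksum after its contents have been updated. */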
void qemu_bl2_sync_transfer_list(void)
{
#if TRANSFER_LIST
	transfer_list_update_checksum(bl2_tl);
#endif
}

#if ENABLE_RME
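/*
 * Set up the Granule Protection Tables: build the L0/L1 tables describing
 * the platform's PAS layout, then enable granule protection checks.
 */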
static void bl2_plat_gpt_setup(void)
{
	/*
	 * The GPT library might modify the gpt regions structure to optimize
	 * the layout, so the array cannot be constant.
	 */
	pas_region_t pas_regions[] = {
		QEMU_PAS_ROOT,
		QEMU_PAS_SECURE,
		QEMU_PAS_GPTS,
		QEMU_PAS_NS0,
		QEMU_PAS_REALM,
		QEMU_PAS_NS1,
	};

	/*
	 * Initialize the entire protected space to GPT_GPI_ANY. With each L0
	 * entry covering 1GB (currently the only supported option), covering
	 * 256TB of RAM (48-bit PA) would require a 2MB L0 region. At the
	 * moment we use an 8KB table, which covers 1TB of RAM (40-bit PA).
	 */
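	/*
	 * Sizing check (assuming 64-bit L0 descriptors): 1TB / 1GB per entry
	 * = 1024 entries, i.e. 8KB of table; 256TB / 1GB = 256K entries,
	 * i.e. 2MB, matching the figures above.
	 */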
	if (gpt_init_l0_tables(GPCCR_PPS_1TB, PLAT_QEMU_L0_GPT_BASE,
			       PLAT_QEMU_L0_GPT_SIZE) < 0) {
		ERROR("gpt_init_l0_tables() failed!\n");
		panic();
	}

	/* Carve out defined PAS ranges. */
	if (gpt_init_pas_l1_tables(GPCCR_PGS_4K,
				   PLAT_QEMU_L1_GPT_BASE,
				   PLAT_QEMU_L1_GPT_SIZE,
				   pas_regions,
				   (unsigned int)(sizeof(pas_regions) /
						  sizeof(pas_region_t))) < 0) {
		ERROR("gpt_init_pas_l1_tables() failed!\n");
		panic();
	}

	INFO("Enabling Granule Protection Checks\n");
	if (gpt_enable() < 0) {
		ERROR("gpt_enable() failed!\n");
		panic();
	}
}
#endif

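/*
 * BL2 architectural setup: build the page tables for the BL2 image and
 * platform regions and enable the MMU. When RME is enabled, BL2 runs at EL3
 * and granule protection is initialized once the MMU is on.
 */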
void bl2_plat_arch_setup(void)
{
	const mmap_region_t bl_regions[] = {
		MAP_BL2_TOTAL,
		MAP_BL2_RO,
#if USE_COHERENT_MEM
		MAP_BL_COHERENT_RAM,
#endif
#if ENABLE_RME
		MAP_RMM_DRAM,
		MAP_GPT_L0_REGION,
		MAP_GPT_L1_REGION,
#endif
		{0}
	};

	setup_page_tables(bl_regions, plat_qemu_get_mmap());

#if ENABLE_RME
	/* BL2 runs in EL3 when RME enabled. */
	assert(is_feat_rme_present());
	enable_mmu_el3(0);

	/* Initialise and enable granule protection after MMU. */
	bl2_plat_gpt_setup();
#else /* ENABLE_RME */

#ifdef __aarch64__
	enable_mmu_el1(0);
#else
	enable_mmu_svc_mon(0);
#endif
#endif /* ENABLE_RME */
}

/*******************************************************************************
 * Gets SPSR for BL32 entry
 ******************************************************************************/
static uint32_t qemu_get_spsr_for_bl32_entry(void)
{
#ifdef __aarch64__
	/*
	 * The Secure Payload Dispatcher service is responsible for
	 * setting the SPSR prior to entry into the BL3-2 image.
	 */
	return 0;
#else
	return SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE,
			   DISABLE_ALL_EXCEPTIONS);
#endif
}

/*******************************************************************************
 * Gets SPSR for BL33 entry
 ******************************************************************************/
static uint32_t qemu_get_spsr_for_bl33_entry(void)
{
	uint32_t spsr;
#ifdef __aarch64__
	unsigned int mode;

	/* Figure out what mode we enter the non-secure world in */
	mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
#else
	spsr = SPSR_MODE32(MODE32_svc,
		    plat_get_ns_image_entrypoint() & 0x1,
		    SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
#endif
	return spsr;
}

#if defined(SPD_spmd) && SPMD_SPM_AT_SEL2
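/*
 * Walk the "arm,sp" node in TB_FW_CONFIG and register each listed Secure
 * Partition package (name, UUID, load address) with the IO layer so that it
 * can be loaded later.
 */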
static int load_sps_from_tb_fw_config(struct image_info *image_info)
{
	void *dtb = (void *)image_info->image_base;
	const char *compat_str = "arm,sp";
	const struct fdt_property *uuid;
	uint32_t load_addr;
	const char *name;
	int sp_node;
	int node;

	node = fdt_node_offset_by_compatible(dtb, -1, compat_str);
	if (node < 0) {
		ERROR("Can't find %s in TB_FW_CONFIG\n", compat_str);
		return -1;
	}

	fdt_for_each_subnode(sp_node, dtb, node) {
		name = fdt_get_name(dtb, sp_node, NULL);
		if (name == NULL) {
			ERROR("Can't get name of node in dtb\n");
			return -1;
		}
		uuid = fdt_get_property(dtb, sp_node, "uuid", NULL);
		if (uuid == NULL) {
			ERROR("Can't find property uuid in node %s\n", name);
			return -1;
		}
		if (fdt_read_uint32(dtb, sp_node, "load-address",
				    &load_addr) < 0) {
			ERROR("Can't read load-address in node %s\n", name);
			return -1;
		}
		if (qemu_io_register_sp_pkg(name, uuid->data, load_addr) < 0) {
			return -1;
		}
	}

	return 0;
}
#endif /*defined(SPD_spmd) && SPMD_SPM_AT_SEL2*/

#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
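/*
 * Record the address of the OP-TEE pageable part as a transfer list entry;
 * returns success without doing anything when TRANSFER_LIST is disabled.
 */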
static int handoff_pageable_part(uint64_t pagable_part)
{
#if TRANSFER_LIST
	struct transfer_list_entry *te;

	te = transfer_list_add(bl2_tl, TL_TAG_OPTEE_PAGABLE_PART,
			       sizeof(pagable_part), &pagable_part);
	if (!te) {
		INFO("Cannot add TE for pageable part\n");
		return -1;
	}
#endif
	return 0;
}
#endif

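/*
 * Post-image-load hook: patch the entry point information of each image
 * (SPSR, boot arguments, transfer list handoff) once it has been loaded.
 */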
static int qemu_bl2_handle_post_image_load(unsigned int image_id)
{
	int err = 0;
	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
	bl_mem_params_node_t *pager_mem_params = NULL;
	bl_mem_params_node_t *paged_mem_params = NULL;
#endif
#if defined(SPD_spmd)
	bl_mem_params_node_t *bl32_mem_params = NULL;
#endif
#if TRANSFER_LIST
	struct transfer_list_header *ns_tl = NULL;
#endif

	assert(bl_mem_params);

	switch (image_id) {
#if TRANSFER_LIST
	case BL31_IMAGE_ID:
		/*
		 * arg0 is a bl_params_t reserved for bl31_early_platform_setup2;
		 * we just need arg1 and arg3 for BL31 to update the TL from S
		 * to NS memory before it exits.
		 */
		bl_mem_params->ep_info.args.arg1 =
			TRANSFER_LIST_SIGNATURE |
			REGISTER_CONVENTION_VERSION_MASK;
		bl_mem_params->ep_info.args.arg3 = (uintptr_t)bl2_tl;
		break;
#endif
	case BL32_IMAGE_ID:
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
		pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
		assert(pager_mem_params);

		paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
		assert(paged_mem_params);

		err = parse_optee_header(&bl_mem_params->ep_info,
					 &pager_mem_params->image_info,
					 &paged_mem_params->image_info);
		if (err != 0) {
			WARN("OPTEE header parse error.\n");
		}

		/* add TL_TAG_OPTEE_PAGABLE_PART entry to the TL */
		if (handoff_pageable_part(bl_mem_params->ep_info.args.arg1)) {
			return -1;
		}
#endif

		INFO("Handoff to BL32\n");
		bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl32_entry();
		if (TRANSFER_LIST &&
			transfer_list_set_handoff_args(bl2_tl,
				&bl_mem_params->ep_info))
			break;

		INFO("Using default arguments\n");
#if defined(SPMC_OPTEE)
		/*
		 * Explicit zeroes to unused registers since they may have
		 * been populated by parse_optee_header() above.
		 *
		 * OP-TEE expects the system DTB in x2 and TOS_FW_CONFIG in x0;
		 * the latter is filled in below for TOS_FW_CONFIG_ID and
		 * applies to any other SPMC too.
		 */
		bl_mem_params->ep_info.args.arg2 = ARM_PRELOADED_DTB_BASE;
#elif defined(SPD_opteed)
		/*
		 * OP-TEE expects to receive the DTB address in x2.
		 * This will be copied into x2 by the dispatcher.
		 */
		bl_mem_params->ep_info.args.arg3 = ARM_PRELOADED_DTB_BASE;
#elif defined(AARCH32_SP_OPTEE)
		bl_mem_params->ep_info.args.arg0 =
					bl_mem_params->ep_info.args.arg1;
		bl_mem_params->ep_info.args.arg1 = 0;
		bl_mem_params->ep_info.args.arg2 = ARM_PRELOADED_DTB_BASE;
		bl_mem_params->ep_info.args.arg3 = 0;
#endif
		break;

	case BL33_IMAGE_ID:
#ifdef AARCH32_SP_OPTEE
		/* AArch32-only core: OP-TEE expects the NSec EP in register LR */
		pager_mem_params = get_bl_mem_params_node(BL32_IMAGE_ID);
		assert(pager_mem_params);
		pager_mem_params->ep_info.lr_svc = bl_mem_params->ep_info.pc;
#endif

		bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl33_entry();

#if ARM_LINUX_KERNEL_AS_BL33
		/*
		 * According to the file ``Documentation/arm64/booting.txt`` of
		 * the Linux kernel tree, Linux expects the physical address of
		 * the device tree blob (DTB) in x0, while x1-x3 are reserved
		 * for future use and must be 0.
		 */
		bl_mem_params->ep_info.args.arg0 =
			(u_register_t)ARM_PRELOADED_DTB_BASE;
		bl_mem_params->ep_info.args.arg1 = 0U;
		bl_mem_params->ep_info.args.arg2 = 0U;
		bl_mem_params->ep_info.args.arg3 = 0U;
#elif TRANSFER_LIST
		if (bl2_tl) {
			/* Relocate the TL to the pre-allocated NS memory */
			ns_tl = transfer_list_relocate(bl2_tl,
					(void *)(uintptr_t)FW_NS_HANDOFF_BASE,
					bl2_tl->max_size);
			if (!ns_tl) {
				ERROR("Relocate TL to 0x%lx failed\n",
					(unsigned long)FW_NS_HANDOFF_BASE);
				return -1;
			}
		}

		INFO("Handoff to BL33\n");
		if (!transfer_list_set_handoff_args(ns_tl,
						    &bl_mem_params->ep_info)) {
			INFO("Invalid TL, fallback to default arguments\n");
			bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
		}
#else
		/* BL33 expects to receive the primary CPU MPID (through r0) */
		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
#endif /* ARM_LINUX_KERNEL_AS_BL33 */

		break;
#ifdef SPD_spmd
#if SPMD_SPM_AT_SEL2
	case TB_FW_CONFIG_ID:
		err = load_sps_from_tb_fw_config(&bl_mem_params->image_info);
		break;
#endif
	case TOS_FW_CONFIG_ID:
		/* An SPMC expects TOS_FW_CONFIG in x0/r0 */
		bl32_mem_params = get_bl_mem_params_node(BL32_IMAGE_ID);
		bl32_mem_params->ep_info.args.arg0 =
					bl_mem_params->image_info.image_base;
		break;
#endif
	default:
		/* Do nothing in default case */
		break;
	}

	return err;
}

/*******************************************************************************
 * This function can be used by the platforms to update/use image
 * information for given `image_id`.
 ******************************************************************************/
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
	return qemu_bl2_handle_post_image_load(image_id);
}

uintptr_t plat_get_ns_image_entrypoint(void)
{
	return NS_IMAGE_OFFSET;
}