xref: /rk3399_ARM-atf/plat/qemu/common/qemu_bl2_setup.c (revision 7f152ea6856c7780424ec3e92b181d805a314f43)
1 /*
2  * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
#include <assert.h>
#include <string.h>

#include <libfdt.h>

#include <platform_def.h>

#include <arch_features.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/desc_image_load.h>
#include <common/fdt_fixup.h>
#include <common/fdt_wrappers.h>
#include <lib/optee_utils.h>
#include <lib/transfer_list.h>
#include <lib/utils.h>
#include <lib/utils_def.h>
#include <plat/common/platform.h>
#if ENABLE_RME
#include <qemu_pas_def.h>
#endif

#include "qemu_private.h"
30 
/*
 * Flat RW mapping of the whole trusted SRAM region described by
 * bl2_tzram_layout (filled in by bl2_early_platform_setup2()).
 */
#define MAP_BL2_TOTAL		MAP_REGION_FLAT(			\
					bl2_tzram_layout.total_base,	\
					bl2_tzram_layout.total_size,	\
					MT_MEMORY | MT_RW | EL3_PAS)

/*
 * Re-map BL2's own footprint with tighter permissions: code as
 * executable (MT_CODE) and read-only data as MT_RO_DATA. Note this
 * macro expands to TWO mmap_region_t entries.
 */
#define MAP_BL2_RO		MAP_REGION_FLAT(			\
					BL_CODE_BASE,			\
					BL_CODE_END - BL_CODE_BASE,	\
					MT_CODE | EL3_PAS),		\
				MAP_REGION_FLAT(			\
					BL_RO_DATA_BASE,		\
					BL_RO_DATA_END			\
						- BL_RO_DATA_BASE,	\
					MT_RO_DATA | EL3_PAS)

#if USE_COHERENT_MEM
/* Coherent RAM is mapped as device memory so it is never cached. */
#define MAP_BL_COHERENT_RAM	MAP_REGION_FLAT(			\
					BL_COHERENT_RAM_BASE,		\
					BL_COHERENT_RAM_END		\
						- BL_COHERENT_RAM_BASE,	\
					MT_DEVICE | MT_RW | EL3_PAS)
#endif
53 
54 /* Data structure which holds the extents of the trusted SRAM for BL2 */
55 static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);
56 static struct transfer_list_header *bl2_tl;
57 
58 void bl2_early_platform_setup2(u_register_t arg0, u_register_t arg1,
59 			       u_register_t arg2, u_register_t arg3)
60 {
61 	meminfo_t *mem_layout = (void *)arg1;
62 
63 	/* Initialize the console to provide early debug support */
64 	qemu_console_init();
65 
66 	/* Setup the BL2 memory layout */
67 	bl2_tzram_layout = *mem_layout;
68 
69 	plat_qemu_io_setup();
70 }
71 
static void security_setup(void)
{
	/*
	 * Placeholder: a TrustZone address space controller and other
	 * security-related peripherals would be configured here. QEMU
	 * needs no such programming, so this is intentionally empty.
	 */
}
79 
/*
 * Patch the preloaded device tree in place: add PSCI nodes and CPU
 * enable methods, reserve the RMM memory when RME is enabled, then
 * repack the blob and (when TRANSFER_LIST is enabled) publish it as a
 * Transfer List entry. Failures are logged and abort the remaining
 * updates; none of them is fatal to boot.
 */
static void update_dt(void)
{
#if TRANSFER_LIST
	struct transfer_list_entry *te;
#endif
	int ret;
	/* The DTB was preloaded at a fixed address by the VMM/loader. */
	void *fdt = (void *)(uintptr_t)ARM_PRELOADED_DTB_BASE;

	/* Re-open the blob in place with room to grow for the new nodes. */
	ret = fdt_open_into(fdt, fdt, PLAT_QEMU_DT_MAX_SIZE);
	if (ret < 0) {
		ERROR("Invalid Device Tree at %p: error %d\n", fdt, ret);
		return;
	}

	if (dt_add_psci_node(fdt)) {
		ERROR("Failed to add PSCI Device Tree node\n");
		return;
	}

	if (dt_add_psci_cpu_enable_methods(fdt)) {
		ERROR("Failed to add PSCI cpu enable methods in Device Tree\n");
		return;
	}

#if ENABLE_RME
	/* Keep the normal world from using the Realm DRAM carve-out. */
	if (fdt_add_reserved_memory(fdt, "rmm", REALM_DRAM_BASE,
				    REALM_DRAM_SIZE)) {
		ERROR("Failed to reserve RMM memory in Device Tree\n");
		return;
	}

	INFO("Reserved RMM memory [0x%lx, 0x%lx] in Device tree\n",
	     (uintptr_t)REALM_DRAM_BASE,
	     (uintptr_t)REALM_DRAM_BASE + REALM_DRAM_SIZE - 1);
#endif

	/* Shrink the blob back to its minimal size after edits. */
	ret = fdt_pack(fdt);
	if (ret < 0)
		ERROR("Failed to pack Device Tree at %p: error %d\n", fdt, ret);

#if TRANSFER_LIST
	/* create a TE */
	te = transfer_list_add(bl2_tl, TL_TAG_FDT, fdt_totalsize(fdt), fdt);
	if (!te) {
		ERROR("Failed to add FDT entry to Transfer List\n");
		return;
	}
#endif
}
129 
void bl2_platform_setup(void)
{
#if TRANSFER_LIST
	/*
	 * Create the secure Transfer List used to hand data (FDT, OP-TEE
	 * pageable part, ...) to later boot stages. On failure bl2_tl
	 * stays NULL and setup continues; the error is only logged.
	 */
	bl2_tl = transfer_list_init((void *)(uintptr_t)FW_HANDOFF_BASE,
				    FW_HANDOFF_SIZE);
	if (!bl2_tl) {
		ERROR("Failed to initialize Transfer List at 0x%lx\n",
		      (unsigned long)FW_HANDOFF_BASE);
	}
#endif
	security_setup();
	update_dt();

	/* TODO Initialize timer */
}
145 
/*
 * Recompute the Transfer List checksum after out-of-band modifications
 * so later consumers see a consistent list. No-op without TRANSFER_LIST.
 */
void qemu_bl2_sync_transfer_list(void)
{
#if TRANSFER_LIST
	transfer_list_update_checksum(bl2_tl);
#endif
}
152 
#if ENABLE_RME
/*
 * Set up and enable Granule Protection Tables (GPT): initialise the L0
 * tables over the whole protected space, carve the platform-defined PAS
 * regions into L1 tables, then switch on granule protection checks.
 * Panics on any GPT library failure since RME cannot work without it.
 */
static void bl2_plat_gpt_setup(void)
{
	/*
	 * The GPT library might modify the gpt regions structure to optimize
	 * the layout, so the array cannot be constant.
	 */
	pas_region_t pas_regions[] = {
		QEMU_PAS_ROOT,
		QEMU_PAS_SECURE,
		QEMU_PAS_GPTS,
		QEMU_PAS_NS0,
		QEMU_PAS_REALM,
		QEMU_PAS_NS1,
	};

	/*
	 * Initialize entire protected space to GPT_GPI_ANY. With each L0 entry
	 * covering 1GB (currently the only supported option), then covering
	 * 256TB of RAM (48-bit PA) would require a 2MB L0 region. At the
	 * moment we use a 8KB table, which covers 1TB of RAM (40-bit PA).
	 */
	if (gpt_init_l0_tables(GPCCR_PPS_1TB, PLAT_QEMU_L0_GPT_BASE,
			       PLAT_QEMU_L0_GPT_SIZE +
			       PLAT_QEMU_GPT_BITLOCK_SIZE) < 0) {
		ERROR("gpt_init_l0_tables() failed!\n");
		panic();
	}

	/* Carve out defined PAS ranges. */
	if (gpt_init_pas_l1_tables(GPCCR_PGS_4K,
				   PLAT_QEMU_L1_GPT_BASE,
				   PLAT_QEMU_L1_GPT_SIZE,
				   pas_regions,
				   /* ARRAY_SIZE from <lib/utils_def.h> */
				   (unsigned int)ARRAY_SIZE(pas_regions)) < 0) {
		ERROR("gpt_init_pas_l1_tables() failed!\n");
		panic();
	}

	INFO("Enabling Granule Protection Checks\n");
	if (gpt_enable() < 0) {
		ERROR("gpt_enable() failed!\n");
		panic();
	}
}
#endif
200 
/*
 * Build BL2's translation tables and enable the MMU. With RME, BL2 runs
 * at EL3 and additionally initialises granule protection; otherwise the
 * MMU is enabled at EL1 (AArch64) or in Secure Monitor mode (AArch32).
 */
void bl2_plat_arch_setup(void)
{
	/* Regions BL2 itself needs mapped; {0} terminates the array. */
	const mmap_region_t bl_regions[] = {
		MAP_BL2_TOTAL,
		MAP_BL2_RO,
#if USE_COHERENT_MEM
		MAP_BL_COHERENT_RAM,
#endif
#if ENABLE_RME
		/* RMM DRAM and GPT tables are touched by bl2_plat_gpt_setup(). */
		MAP_RMM_DRAM,
		MAP_GPT_L0_REGION,
		MAP_GPT_L1_REGION,
#endif
		{0}
	};

	setup_page_tables(bl_regions, plat_qemu_get_mmap());

#if ENABLE_RME
	/* BL2 runs in EL3 when RME enabled. */
	assert(is_feat_rme_present());
	enable_mmu_el3(0);

	/* Initialise and enable granule protection after MMU. */
	bl2_plat_gpt_setup();
#else /* ENABLE_RME */

#ifdef __aarch64__
	enable_mmu_el1(0);
#else
	enable_mmu_svc_mon(0);
#endif
#endif /* ENABLE_RME */
}
235 
/*******************************************************************************
 * Gets SPSR for BL32 entry
 ******************************************************************************/
static uint32_t qemu_get_spsr_for_bl32_entry(void)
{
#ifdef __aarch64__
	/*
	 * The Secure Payload Dispatcher service is responsible for
	 * setting the SPSR prior to entry into the BL3-2 image.
	 */
	return 0;
#else
	/* AArch32: enter BL32 in SVC mode, ARM state, little-endian,
	 * with all exceptions masked. */
	return SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE,
			   DISABLE_ALL_EXCEPTIONS);
#endif
}
252 
/*******************************************************************************
 * Gets SPSR for BL33 entry
 ******************************************************************************/
static uint32_t qemu_get_spsr_for_bl33_entry(void)
{
	uint32_t spsr;
#ifdef __aarch64__
	unsigned int mode;

	/* Figure out what mode we enter the non-secure world in */
	mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
#else
	/*
	 * AArch32: SVC mode; bit 0 of the entry point selects the
	 * Thumb (1) or ARM (0) instruction state.
	 */
	spsr = SPSR_MODE32(MODE32_svc,
		    plat_get_ns_image_entrypoint() & 0x1,
		    SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
#endif
	return spsr;
}
278 
#if defined(SPD_spmd) && SPMD_SPM_AT_SEL2
/*
 * Scan the TB_FW_CONFIG device tree for "arm,sp" secure-partition nodes
 * and register each SP package (node name, UUID, load address) with the
 * QEMU IO layer so the packages can be loaded later.
 *
 * image_info: describes the already-loaded TB_FW_CONFIG blob.
 * Returns 0 on success, -1 on any parse or registration failure.
 *
 * Fix: the three ERROR() messages below were missing their trailing
 * newline, garbling subsequent console output.
 */
static int load_sps_from_tb_fw_config(struct image_info *image_info)
{
	void *dtb = (void *)image_info->image_base;
	const char *compat_str = "arm,sp";
	const struct fdt_property *uuid;
	uint32_t load_addr;
	const char *name;
	int sp_node;
	int node;

	node = fdt_node_offset_by_compatible(dtb, -1, compat_str);
	if (node < 0) {
		ERROR("Can't find %s in TB_FW_CONFIG\n", compat_str);
		return -1;
	}

	fdt_for_each_subnode(sp_node, dtb, node) {
		name = fdt_get_name(dtb, sp_node, NULL);
		if (name == NULL) {
			ERROR("Can't get name of node in dtb\n");
			return -1;
		}
		uuid = fdt_get_property(dtb, sp_node, "uuid", NULL);
		if (uuid == NULL) {
			ERROR("Can't find property uuid in node %s\n", name);
			return -1;
		}
		if (fdt_read_uint32(dtb, sp_node, "load-address",
				    &load_addr) < 0) {
			ERROR("Can't read load-address in node %s\n", name);
			return -1;
		}
		if (qemu_io_register_sp_pkg(name, uuid->data, load_addr) < 0) {
			return -1;
		}
	}

	return 0;
}
#endif /*defined(SPD_spmd) && SPMD_SPM_AT_SEL2*/
320 
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
/*
 * Store the OP-TEE pageable-part value in the Transfer List so a later
 * stage can retrieve it. Returns 0 on success (including when
 * TRANSFER_LIST is disabled and there is nothing to do), -1 when the
 * Transfer List entry could not be added.
 */
static int handoff_pageable_part(uint64_t pagable_part)
{
#if TRANSFER_LIST
	struct transfer_list_entry *te =
		transfer_list_add(bl2_tl, TL_TAG_OPTEE_PAGABLE_PART,
				  sizeof(pagable_part), &pagable_part);

	if (te == NULL) {
		INFO("Cannot add TE for pageable part\n");
		return -1;
	}
#endif
	return 0;
}
#endif
337 
/*
 * QEMU-specific post-load handling for each image: fills in the
 * entry-point arguments and SPSR handed to BL31/BL32/BL33, registers
 * secure partitions from TB_FW_CONFIG and wires TOS_FW_CONFIG into the
 * BL32 entry args, depending on the build configuration.
 * Returns 0 on success, or -1 / a helper's error code on failure.
 */
static int qemu_bl2_handle_post_image_load(unsigned int image_id)
{
	int err = 0;
	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
	bl_mem_params_node_t *pager_mem_params = NULL;
	bl_mem_params_node_t *paged_mem_params = NULL;
#endif
#if defined(SPD_spmd)
	bl_mem_params_node_t *bl32_mem_params = NULL;
#endif
#if TRANSFER_LIST
	struct transfer_list_header *ns_tl = NULL;
#endif

	assert(bl_mem_params);

	switch (image_id) {
#if TRANSFER_LIST
	case BL31_IMAGE_ID:
		/*
		 * arg0 is a bl_params_t reserved for bl31_early_platform_setup2
		 * we just need arg1 and arg3 for BL31 to update the TL from S
		 * to NS memory before it exits
		 */
#ifdef __aarch64__
		if (GET_RW(bl_mem_params->ep_info.spsr) == MODE_RW_64) {
			bl_mem_params->ep_info.args.arg1 =
				TRANSFER_LIST_HANDOFF_X1_VALUE(REGISTER_CONVENTION_VERSION);
		} else
#endif
		{
			/* AArch32 register convention signature in r1. */
			bl_mem_params->ep_info.args.arg1 =
				TRANSFER_LIST_HANDOFF_R1_VALUE(REGISTER_CONVENTION_VERSION);
		}

		bl_mem_params->ep_info.args.arg3 = (uintptr_t)bl2_tl;
		break;
#endif
	case BL32_IMAGE_ID:
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
		/* OP-TEE splits into pager (EXTRA1) and paged (EXTRA2) parts. */
		pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
		assert(pager_mem_params);

		paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
		assert(paged_mem_params);

		err = parse_optee_header(&bl_mem_params->ep_info,
					 &pager_mem_params->image_info,
					 &paged_mem_params->image_info);
		if (err != 0) {
			WARN("OPTEE header parse error.\n");
		}

		/* add TL_TAG_OPTEE_PAGABLE_PART entry to the TL */
		if (handoff_pageable_part(bl_mem_params->ep_info.args.arg1)) {
			return -1;
		}
#endif

		INFO("Handoff to BL32\n");
		bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl32_entry();
		/* Prefer the Transfer List handoff when it is usable. */
		if (TRANSFER_LIST &&
			transfer_list_set_handoff_args(bl2_tl,
				&bl_mem_params->ep_info))
			break;

		INFO("Using default arguments\n");
#if defined(SPMC_OPTEE)
		/*
		 * Explicit zeroes to unused registers since they may have
		 * been populated by parse_optee_header() above.
		 *
		 * OP-TEE expects system DTB in x2 and TOS_FW_CONFIG in x0,
		 * the latter is filled in below for TOS_FW_CONFIG_ID and
		 * applies to any other SPMC too.
		 */
		bl_mem_params->ep_info.args.arg2 = ARM_PRELOADED_DTB_BASE;
#elif defined(SPD_opteed)
		/*
		 * OP-TEE expect to receive DTB address in x2.
		 * This will be copied into x2 by dispatcher.
		 */
		bl_mem_params->ep_info.args.arg3 = ARM_PRELOADED_DTB_BASE;
#elif defined(AARCH32_SP_OPTEE)
		/* Shift the OP-TEE argument into r0 and set up r1-r3. */
		bl_mem_params->ep_info.args.arg0 =
					bl_mem_params->ep_info.args.arg1;
		bl_mem_params->ep_info.args.arg1 = 0;
		bl_mem_params->ep_info.args.arg2 = ARM_PRELOADED_DTB_BASE;
		bl_mem_params->ep_info.args.arg3 = 0;
#endif
		break;

	case BL33_IMAGE_ID:
#ifdef AARCH32_SP_OPTEE
		/* AArch32 only core: OP-TEE expects NSec EP in register LR */
		pager_mem_params = get_bl_mem_params_node(BL32_IMAGE_ID);
		assert(pager_mem_params);
		pager_mem_params->ep_info.lr_svc = bl_mem_params->ep_info.pc;
#endif

		bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl33_entry();

#if ARM_LINUX_KERNEL_AS_BL33
		/*
		 * According to the file ``Documentation/arm64/booting.txt`` of
		 * the Linux kernel tree, Linux expects the physical address of
		 * the device tree blob (DTB) in x0, while x1-x3 are reserved
		 * for future use and must be 0.
		 */
		bl_mem_params->ep_info.args.arg0 =
			(u_register_t)ARM_PRELOADED_DTB_BASE;
		bl_mem_params->ep_info.args.arg1 = 0U;
		bl_mem_params->ep_info.args.arg2 = 0U;
		bl_mem_params->ep_info.args.arg3 = 0U;
#elif TRANSFER_LIST
		if (bl2_tl) {
			/* relocate the tl to pre-allocate NS memory */
			ns_tl = transfer_list_relocate(bl2_tl,
					(void *)(uintptr_t)FW_NS_HANDOFF_BASE,
					bl2_tl->max_size);
			if (!ns_tl) {
				ERROR("Relocate TL to 0x%lx failed\n",
					(unsigned long)FW_NS_HANDOFF_BASE);
				return -1;
			}
		}

		INFO("Handoff to BL33\n");
		if (!transfer_list_set_handoff_args(ns_tl,
						    &bl_mem_params->ep_info)) {
			INFO("Invalid TL, fallback to default arguments\n");
			bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
		}
#else
		/* BL33 expects to receive the primary CPU MPID (through r0) */
		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
#endif /* ARM_LINUX_KERNEL_AS_BL33 */

		break;
#ifdef SPD_spmd
#if SPMD_SPM_AT_SEL2
	case TB_FW_CONFIG_ID:
		/* Register every secure partition described in TB_FW_CONFIG. */
		err = load_sps_from_tb_fw_config(&bl_mem_params->image_info);
		break;
#endif
	case TOS_FW_CONFIG_ID:
		/* An SPMC expects TOS_FW_CONFIG in x0/r0 */
		bl32_mem_params = get_bl_mem_params_node(BL32_IMAGE_ID);
		bl32_mem_params->ep_info.args.arg0 =
					bl_mem_params->image_info.image_base;
		break;
#endif
	default:
		/* Do nothing in default case */
		break;
	}

	return err;
}
498 
/*******************************************************************************
 * Platform hook invoked by the generic image-loading framework after an
 * image identified by `image_id` has been loaded; delegates all work to
 * the QEMU-specific handler and propagates its result.
 ******************************************************************************/
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
	int rc;

	rc = qemu_bl2_handle_post_image_load(image_id);

	return rc;
}
507 
508 uintptr_t plat_get_ns_image_entrypoint(void)
509 {
510 	return NS_IMAGE_OFFSET;
511 }
512