xref: /rk3399_ARM-atf/plat/qemu/common/qemu_bl2_setup.c (revision bb31fbcef16a66aa49a06f45364b65b24f182beb)
1 /*
2  * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <string.h>
9 
10 #include <libfdt.h>
11 
12 #include <platform_def.h>
13 
14 #include <arch_features.h>
15 #include <arch_helpers.h>
16 #include <common/bl_common.h>
17 #include <common/debug.h>
18 #include <common/desc_image_load.h>
19 #include <common/fdt_fixup.h>
20 #include <common/fdt_wrappers.h>
21 #include <lib/optee_utils.h>
22 #if TRANSFER_LIST
23 #include <lib/transfer_list.h>
24 #endif
25 #include <lib/utils.h>
26 #include <plat/common/platform.h>
27 #if ENABLE_RME
28 #include <qemu_pas_def.h>
29 #endif
30 
31 #include "qemu_private.h"
32 
/*
 * Mapping covering the whole of BL2's trusted SRAM, read/write: the
 * image's data, heap and stack all live inside this region.
 */
#define MAP_BL2_TOTAL		MAP_REGION_FLAT(			\
					bl2_tzram_layout.total_base,	\
					bl2_tzram_layout.total_size,	\
					MT_MEMORY | MT_RW | EL3_PAS)

/*
 * Read-only mappings for the BL2 image itself: code is mapped
 * executable/read-only and the read-only data separately, so neither
 * is writable at runtime.
 */
#define MAP_BL2_RO		MAP_REGION_FLAT(			\
					BL_CODE_BASE,			\
					BL_CODE_END - BL_CODE_BASE,	\
					MT_CODE | EL3_PAS),		\
				MAP_REGION_FLAT(			\
					BL_RO_DATA_BASE,		\
					BL_RO_DATA_END			\
						- BL_RO_DATA_BASE,	\
					MT_RO_DATA | EL3_PAS)

#if USE_COHERENT_MEM
/* Coherent RAM region, mapped as device memory (uncached, read/write). */
#define MAP_BL_COHERENT_RAM	MAP_REGION_FLAT(			\
					BL_COHERENT_RAM_BASE,		\
					BL_COHERENT_RAM_END		\
						- BL_COHERENT_RAM_BASE,	\
					MT_DEVICE | MT_RW | EL3_PAS)
#endif

/* Data structure which holds the extents of the trusted SRAM for BL2 */
static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);
#if TRANSFER_LIST
/*
 * Transfer list built by BL2; populated in update_dt() and relocated to
 * non-secure memory before the BL33 handoff.
 */
static struct transfer_list_header *bl2_tl;
#endif
61 
62 void bl2_early_platform_setup2(u_register_t arg0, u_register_t arg1,
63 			       u_register_t arg2, u_register_t arg3)
64 {
65 	meminfo_t *mem_layout = (void *)arg1;
66 
67 	/* Initialize the console to provide early debug support */
68 	qemu_console_init();
69 
70 	/* Setup the BL2 memory layout */
71 	bl2_tzram_layout = *mem_layout;
72 
73 	plat_qemu_io_setup();
74 }
75 
static void security_setup(void)
{
	/*
	 * Intentionally empty: on QEMU there is no TrustZone address
	 * space controller or other security peripheral to program.
	 * Real hardware platforms would configure those here.
	 */
}
83 
/*
 * Patch the preloaded device tree in place: add PSCI nodes and CPU
 * enable-methods, reserve the RMM carve-out when RME is enabled, then
 * repack the blob. With TRANSFER_LIST, the updated DTB is also
 * published as an entry in the transfer list. Errors are reported but
 * not fatal — the function simply returns early.
 */
static void update_dt(void)
{
#if TRANSFER_LIST
	struct transfer_list_entry *te;
#endif
	int ret;
	void *fdt = (void *)(uintptr_t)ARM_PRELOADED_DTB_BASE;

	/* Re-open the blob in place with room to grow for the new nodes. */
	ret = fdt_open_into(fdt, fdt, PLAT_QEMU_DT_MAX_SIZE);
	if (ret < 0) {
		ERROR("Invalid Device Tree at %p: error %d\n", fdt, ret);
		return;
	}

	if (dt_add_psci_node(fdt)) {
		ERROR("Failed to add PSCI Device Tree node\n");
		return;
	}

	if (dt_add_psci_cpu_enable_methods(fdt)) {
		ERROR("Failed to add PSCI cpu enable methods in Device Tree\n");
		return;
	}

#if ENABLE_RME
	/* Mark the Realm DRAM as reserved so the OS keeps off it. */
	if (fdt_add_reserved_memory(fdt, "rmm", REALM_DRAM_BASE,
				    REALM_DRAM_SIZE)) {
		ERROR("Failed to reserve RMM memory in Device Tree\n");
		return;
	}

	INFO("Reserved RMM memory [0x%lx, 0x%lx] in Device tree\n",
	     (uintptr_t)REALM_DRAM_BASE,
	     (uintptr_t)REALM_DRAM_BASE + REALM_DRAM_SIZE - 1);
#endif

	/* Shrink the blob back down to its minimal packed size. */
	ret = fdt_pack(fdt);
	if (ret < 0)
		ERROR("Failed to pack Device Tree at %p: error %d\n", fdt, ret);

#if TRANSFER_LIST
	/* Record the updated DTB in the transfer list for later stages. */
	te = transfer_list_add(bl2_tl, TL_TAG_FDT, fdt_totalsize(fdt), fdt);
	if (!te) {
		ERROR("Failed to add FDT entry to Transfer List\n");
		return;
	}
#endif
}
133 
/*
 * Main BL2 platform setup: initialise the transfer list (when enabled),
 * run the (currently empty) security setup and patch the device tree.
 */
void bl2_platform_setup(void)
{
#if TRANSFER_LIST
	/* Carve the firmware handoff area out as a fresh transfer list. */
	bl2_tl = transfer_list_init((void *)(uintptr_t)FW_HANDOFF_BASE,
				    FW_HANDOFF_SIZE);
	if (bl2_tl == NULL) {
		ERROR("Failed to initialize Transfer List at 0x%lx\n",
		      (unsigned long)FW_HANDOFF_BASE);
	}
#endif
	security_setup();
	update_dt();

	/* TODO Initialize timer */
}
149 
/*
 * Recompute the transfer list checksum after its contents have been
 * modified outside this file. No-op when TRANSFER_LIST is disabled.
 */
void qemu_bl2_sync_transfer_list(void)
{
#if TRANSFER_LIST
	transfer_list_update_checksum(bl2_tl);
#endif
}
156 
#if ENABLE_RME
/*
 * Set up and switch on Granule Protection Tables for RME: initialise
 * the L0 tables over the whole protected space, carve the platform PAS
 * regions into L1 tables, then enable granule protection checks.
 * Any GPT library failure is fatal (panic).
 */
static void bl2_plat_gpt_setup(void)
{
	/*
	 * The GPT library might modify the gpt regions structure to optimize
	 * the layout, so the array cannot be constant.
	 */
	pas_region_t pas_regions[] = {
		QEMU_PAS_ROOT,
		QEMU_PAS_SECURE,
		QEMU_PAS_GPTS,
		QEMU_PAS_NS0,
		QEMU_PAS_REALM,
		QEMU_PAS_NS1,
	};

	/*
	 * Initialize entire protected space to GPT_GPI_ANY. With each L0 entry
	 * covering 1GB (currently the only supported option), then covering
	 * 256TB of RAM (48-bit PA) would require a 2MB L0 region. At the
	 * moment we use a 8KB table, which covers 1TB of RAM (40-bit PA).
	 */
	if (gpt_init_l0_tables(GPCCR_PPS_1TB, PLAT_QEMU_L0_GPT_BASE,
			       PLAT_QEMU_L0_GPT_SIZE) < 0) {
		ERROR("gpt_init_l0_tables() failed!\n");
		panic();
	}

	/* Carve out defined PAS ranges. */
	if (gpt_init_pas_l1_tables(GPCCR_PGS_4K,
				   PLAT_QEMU_L1_GPT_BASE,
				   PLAT_QEMU_L1_GPT_SIZE,
				   pas_regions,
				   (unsigned int)(sizeof(pas_regions) /
						  sizeof(pas_region_t))) < 0) {
		ERROR("gpt_init_pas_l1_tables() failed!\n");
		panic();
	}

	INFO("Enabling Granule Protection Checks\n");
	if (gpt_enable() < 0) {
		ERROR("gpt_enable() failed!\n");
		panic();
	}
}
#endif
203 
/*
 * Architecture setup for BL2: build the page tables covering the BL2
 * image regions plus the platform memory map, then enable the MMU at
 * the exception level BL2 runs in. With RME, granule protection is
 * initialised and enabled once the MMU is on.
 */
void bl2_plat_arch_setup(void)
{
	/* Image-specific regions; the platform map is appended below. */
	const mmap_region_t bl_regions[] = {
		MAP_BL2_TOTAL,
		MAP_BL2_RO,
#if USE_COHERENT_MEM
		MAP_BL_COHERENT_RAM,
#endif
#if ENABLE_RME
		MAP_RMM_DRAM,
		MAP_GPT_L0_REGION,
		MAP_GPT_L1_REGION,
#endif
		{0}
	};

	setup_page_tables(bl_regions, plat_qemu_get_mmap());

#if ENABLE_RME
	/* BL2 runs in EL3 when RME enabled. */
	assert(get_armv9_2_feat_rme_support() != 0U);
	enable_mmu_el3(0);

	/* Initialise and enable granule protection after MMU. */
	bl2_plat_gpt_setup();
#else /* ENABLE_RME */

#ifdef __aarch64__
	/* Without RME, AArch64 BL2 runs at EL1. */
	enable_mmu_el1(0);
#else
	/* AArch32: enable the MMU from Secure SVC/Monitor world. */
	enable_mmu_svc_mon(0);
#endif
#endif /* ENABLE_RME */
}
238 
/*******************************************************************************
 * Gets SPSR for BL32 entry
 ******************************************************************************/
static uint32_t qemu_get_spsr_for_bl32_entry(void)
{
#ifdef __aarch64__
	/*
	 * The Secure Payload Dispatcher service is responsible for
	 * setting the SPSR prior to entry into the BL3-2 image.
	 */
	return 0;
#else
	/* AArch32: enter BL32 in SVC mode, ARM state, exceptions masked. */
	return SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE,
			   DISABLE_ALL_EXCEPTIONS);
#endif
}
255 
256 /*******************************************************************************
257  * Gets SPSR for BL33 entry
258  ******************************************************************************/
259 static uint32_t qemu_get_spsr_for_bl33_entry(void)
260 {
261 	uint32_t spsr;
262 #ifdef __aarch64__
263 	unsigned int mode;
264 
265 	/* Figure out what mode we enter the non-secure world in */
266 	mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;
267 
268 	/*
269 	 * TODO: Consider the possibility of specifying the SPSR in
270 	 * the FIP ToC and allowing the platform to have a say as
271 	 * well.
272 	 */
273 	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
274 #else
275 	spsr = SPSR_MODE32(MODE32_svc,
276 		    plat_get_ns_image_entrypoint() & 0x1,
277 		    SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
278 #endif
279 	return spsr;
280 }
281 
#if defined(SPD_spmd) && SPMD_SPM_AT_SEL2
/*
 * Walk TB_FW_CONFIG and register every secure partition package: for
 * each subnode of the "arm,sp" compatible node, read its name, "uuid"
 * property and "load-address", and hand them to the IO layer.
 *
 * Returns 0 on success, -1 on any parse or registration failure.
 *
 * Fix: the three ERROR() messages below were missing their trailing
 * '\n', unlike every other ERROR() call in this file, causing log
 * lines to run together.
 */
static int load_sps_from_tb_fw_config(struct image_info *image_info)
{
	void *dtb = (void *)image_info->image_base;
	const char *compat_str = "arm,sp";
	const struct fdt_property *uuid;
	uint32_t load_addr;
	const char *name;
	int sp_node;
	int node;

	node = fdt_node_offset_by_compatible(dtb, -1, compat_str);
	if (node < 0) {
		ERROR("Can't find %s in TB_FW_CONFIG\n", compat_str);
		return -1;
	}

	fdt_for_each_subnode(sp_node, dtb, node) {
		name = fdt_get_name(dtb, sp_node, NULL);
		if (name == NULL) {
			ERROR("Can't get name of node in dtb\n");
			return -1;
		}
		uuid = fdt_get_property(dtb, sp_node, "uuid", NULL);
		if (uuid == NULL) {
			ERROR("Can't find property uuid in node %s\n", name);
			return -1;
		}
		if (fdt_read_uint32(dtb, sp_node, "load-address",
				    &load_addr) < 0) {
			ERROR("Can't read load-address in node %s\n", name);
			return -1;
		}
		if (qemu_io_register_sp_pkg(name, uuid->data, load_addr) < 0) {
			return -1;
		}
	}

	return 0;
}
#endif /*defined(SPD_spmd) && SPMD_SPM_AT_SEL2*/
323 
/*
 * Per-image post-load hook: fills in entry point arguments and SPSR for
 * BL32 and BL33, and handles the firmware configuration images when a
 * secure world configuration (OP-TEE / SPMC / SPMD) is built in.
 *
 * Returns 0 on success; non-zero error codes are propagated from
 * parse_optee_header() / load_sps_from_tb_fw_config().
 */
static int qemu_bl2_handle_post_image_load(unsigned int image_id)
{
	int err = 0;
	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
	bl_mem_params_node_t *pager_mem_params = NULL;
	bl_mem_params_node_t *paged_mem_params = NULL;
#endif
#if defined(SPD_spmd)
	bl_mem_params_node_t *bl32_mem_params = NULL;
#endif
#if TRANSFER_LIST
	struct transfer_list_header *ns_tl = NULL;
	struct transfer_list_entry *te = NULL;
#endif

	assert(bl_mem_params);

	switch (image_id) {
	case BL32_IMAGE_ID:
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
		pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
		assert(pager_mem_params);

		paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
		assert(paged_mem_params);

		/* Parse the OP-TEE header covering the pager/paged images. */
		err = parse_optee_header(&bl_mem_params->ep_info,
					 &pager_mem_params->image_info,
					 &paged_mem_params->image_info);
		if (err != 0) {
			WARN("OPTEE header parse error.\n");
		}
#endif

#if defined(SPMC_OPTEE)
		/*
		 * Explicit zeroes to unused registers since they may have
		 * been populated by parse_optee_header() above.
		 *
		 * OP-TEE expects system DTB in x2 and TOS_FW_CONFIG in x0,
		 * the latter is filled in below for TOS_FW_CONFIG_ID and
		 * applies to any other SPMC too.
		 */
		bl_mem_params->ep_info.args.arg2 = ARM_PRELOADED_DTB_BASE;
#elif defined(SPD_opteed)
		/*
		 * OP-TEE expect to receive DTB address in x2.
		 * This will be copied into x2 by dispatcher.
		 */
		bl_mem_params->ep_info.args.arg3 = ARM_PRELOADED_DTB_BASE;
#elif defined(AARCH32_SP_OPTEE)
		/* Shift the pageable-part address into arg0; DTB in arg2. */
		bl_mem_params->ep_info.args.arg0 =
					bl_mem_params->ep_info.args.arg1;
		bl_mem_params->ep_info.args.arg1 = 0;
		bl_mem_params->ep_info.args.arg2 = ARM_PRELOADED_DTB_BASE;
		bl_mem_params->ep_info.args.arg3 = 0;
#endif
		bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl32_entry();
		break;

	case BL33_IMAGE_ID:
#ifdef AARCH32_SP_OPTEE
		/* AArch32 only core: OP-TEE expects NSec EP in register LR */
		pager_mem_params = get_bl_mem_params_node(BL32_IMAGE_ID);
		assert(pager_mem_params);
		pager_mem_params->ep_info.lr_svc = bl_mem_params->ep_info.pc;
#endif

		bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl33_entry();

#if ARM_LINUX_KERNEL_AS_BL33
		/*
		 * According to the file ``Documentation/arm64/booting.txt`` of
		 * the Linux kernel tree, Linux expects the physical address of
		 * the device tree blob (DTB) in x0, while x1-x3 are reserved
		 * for future use and must be 0.
		 */
		bl_mem_params->ep_info.args.arg0 =
			(u_register_t)ARM_PRELOADED_DTB_BASE;
		bl_mem_params->ep_info.args.arg1 = 0U;
		bl_mem_params->ep_info.args.arg2 = 0U;
		bl_mem_params->ep_info.args.arg3 = 0U;
#elif TRANSFER_LIST
		if (bl2_tl) {
			/* Relocate the TL into pre-allocated NS memory. */
			ns_tl = transfer_list_relocate(bl2_tl,
					(void *)(uintptr_t)FW_NS_HANDOFF_BASE,
					bl2_tl->max_size);
			if (!ns_tl) {
				ERROR("Relocate TL to 0x%lx failed\n",
					(unsigned long)FW_NS_HANDOFF_BASE);
				return -1;
			}
			NOTICE("Transfer list handoff to BL33\n");
			transfer_list_dump(ns_tl);

			te = transfer_list_find(ns_tl, TL_TAG_FDT);

			/*
			 * Register convention: signature/version in arg1, TL
			 * address in arg3, FDT address in arg0 (AArch64) or
			 * arg2 (AArch32) with the other zeroed.
			 */
			bl_mem_params->ep_info.args.arg1 =
				TRANSFER_LIST_SIGNATURE |
				REGISTER_CONVENTION_VERSION_MASK;
			bl_mem_params->ep_info.args.arg3 = (uintptr_t)ns_tl;

			if (GET_RW(bl_mem_params->ep_info.spsr) == MODE_RW_32) {
				/* AArch32 */
				bl_mem_params->ep_info.args.arg0 = 0;
				bl_mem_params->ep_info.args.arg2 = te ?
					(uintptr_t)transfer_list_entry_data(te)
					: 0;
			} else {
				/* AArch64 */
				bl_mem_params->ep_info.args.arg0 = te ?
					(uintptr_t)transfer_list_entry_data(te)
					: 0;
				bl_mem_params->ep_info.args.arg2 = 0;
			}
		} else {
			/* Legacy handoff: pass the primary CPU MPID. */
			bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
		}
#else
		/* BL33 expects to receive the primary CPU MPID (through r0) */
		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
#endif /* ARM_LINUX_KERNEL_AS_BL33 */

		break;
#ifdef SPD_spmd
#if SPMD_SPM_AT_SEL2
	case TB_FW_CONFIG_ID:
		err = load_sps_from_tb_fw_config(&bl_mem_params->image_info);
		break;
#endif
	case TOS_FW_CONFIG_ID:
		/* An SPMC expects TOS_FW_CONFIG in x0/r0 */
		bl32_mem_params = get_bl_mem_params_node(BL32_IMAGE_ID);
		bl32_mem_params->ep_info.args.arg0 =
					bl_mem_params->image_info.image_base;
		break;
#endif
	default:
		/* Do nothing in default case */
		break;
	}

	return err;
}
471 
/*******************************************************************************
 * This function can be used by the platforms to update/use image
 * information for given `image_id`.
 ******************************************************************************/
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
	/* All per-image handling is delegated to the QEMU-specific helper. */
	return qemu_bl2_handle_post_image_load(image_id);
}
480 
/* Return the fixed entry point address of the non-secure image (BL33). */
uintptr_t plat_get_ns_image_entrypoint(void)
{
	return NS_IMAGE_OFFSET;
}
485