/*
 * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <libfdt.h>

#include <platform_def.h>

#include <arch_features.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/desc_image_load.h>
#include <common/fdt_fixup.h>
#include <common/fdt_wrappers.h>
#include <lib/optee_utils.h>
#include <lib/transfer_list.h>
#include <lib/utils.h>
#include <plat/common/platform.h>

#include "qemu_private.h"

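/*
 * Memory regions mapped by BL2: the trusted SRAM described by
 * bl2_tzram_layout (read-write), BL2's own code and read-only data and,
 * when USE_COHERENT_MEM is set, the coherent RAM region.
 */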
#define MAP_BL2_TOTAL		MAP_REGION_FLAT(			\
					bl2_tzram_layout.total_base,	\
					bl2_tzram_layout.total_size,	\
					MT_MEMORY | MT_RW | EL3_PAS)

#define MAP_BL2_RO		MAP_REGION_FLAT(			\
					BL_CODE_BASE,			\
					BL_CODE_END - BL_CODE_BASE,	\
					MT_CODE | EL3_PAS),		\
				MAP_REGION_FLAT(			\
					BL_RO_DATA_BASE,		\
					BL_RO_DATA_END			\
						- BL_RO_DATA_BASE,	\
					MT_RO_DATA | EL3_PAS)

#if USE_COHERENT_MEM
#define MAP_BL_COHERENT_RAM	MAP_REGION_FLAT(			\
					BL_COHERENT_RAM_BASE,		\
					BL_COHERENT_RAM_END		\
						- BL_COHERENT_RAM_BASE,	\
					MT_DEVICE | MT_RW | EL3_PAS)
#endif

/* Data structure which holds the extents of the trusted SRAM for BL2 */
static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);
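
/*
 * Transfer list used to hand off boot information to BL31/BL33; only
 * populated when TRANSFER_LIST is enabled.
 */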
static struct transfer_list_header *bl2_tl;

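/*
 * arg1 carries a meminfo_t describing the trusted SRAM available to BL2.
 * Keep a copy of that layout, bring up the console for early debug output
 * and set up the IO layer used to load the remaining images.
 */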
void bl2_early_platform_setup2(u_register_t arg0, u_register_t arg1,
			       u_register_t arg2, u_register_t arg3)
{
	meminfo_t *mem_layout = (void *)arg1;

	/* Initialize the console to provide early debug support */
	qemu_console_init();

	/* Setup the BL2 memory layout */
	bl2_tzram_layout = *mem_layout;

	plat_qemu_io_setup();
}

static void security_setup(void)
{
	/*
	 * This is where a TrustZone address space controller and other
	 * security-related peripherals would be configured.
	 */
}

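/*
 * Open the preloaded DTB into its runtime location, add the PSCI node and
 * CPU enable methods (and, with ENABLE_RME, a reserved-memory region for
 * the RMM), then repack it. With TRANSFER_LIST the FDT is also recorded in
 * the transfer list.
 */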
static void update_dt(void)
{
#if TRANSFER_LIST
	struct transfer_list_entry *te;
#endif
	int ret;
	void *fdt = (void *)(uintptr_t)ARM_PRELOADED_DTB_BASE;
	void *dst = plat_qemu_dt_runtime_address();

	ret = fdt_open_into(fdt, dst, PLAT_QEMU_DT_MAX_SIZE);
	if (ret < 0) {
		ERROR("Invalid Device Tree at %p: error %d\n", fdt, ret);
		return;
	}

	if (dt_add_psci_node(fdt)) {
		ERROR("Failed to add PSCI Device Tree node\n");
		return;
	}

	if (dt_add_psci_cpu_enable_methods(fdt)) {
		ERROR("Failed to add PSCI cpu enable methods in Device Tree\n");
		return;
	}

#if ENABLE_RME
	if (fdt_add_reserved_memory(fdt, "rmm", REALM_DRAM_BASE,
				    REALM_DRAM_SIZE)) {
		ERROR("Failed to reserve RMM memory in Device Tree\n");
		return;
	}

	INFO("Reserved RMM memory [0x%lx, 0x%lx] in Device Tree\n",
	     (uintptr_t)REALM_DRAM_BASE,
	     (uintptr_t)REALM_DRAM_BASE + REALM_DRAM_SIZE - 1);
#endif

	ret = fdt_pack(fdt);
	if (ret < 0)
		ERROR("Failed to pack Device Tree at %p: error %d\n", fdt, ret);

#if TRANSFER_LIST
	/* Add the packed FDT as an entry in the transfer list. */
	te = transfer_list_add(bl2_tl, TL_TAG_FDT, fdt_totalsize(fdt), fdt);
	if (!te) {
		ERROR("Failed to add FDT entry to Transfer List\n");
		return;
	}
#endif
}

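/*
 * Platform-specific setup for BL2: initialise the transfer list (when
 * TRANSFER_LIST is enabled), run the security setup (currently a no-op on
 * QEMU) and update the device tree.
 */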
void bl2_platform_setup(void)
{
#if TRANSFER_LIST
	bl2_tl = transfer_list_init((void *)(uintptr_t)FW_HANDOFF_BASE,
				    FW_HANDOFF_SIZE);
	if (!bl2_tl) {
		ERROR("Failed to initialize Transfer List at 0x%lx\n",
		      (unsigned long)FW_HANDOFF_BASE);
	}
#endif
	security_setup();
	update_dt();

	/* TODO Initialize timer */
}

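/*
 * Recompute the transfer list checksum after any late modifications so
 * consumers see a consistent list.
 */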
void qemu_bl2_sync_transfer_list(void)
{
#if TRANSFER_LIST
	transfer_list_update_checksum(bl2_tl);
#endif
}

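/*
 * Build the page tables for the regions BL2 uses and enable the MMU at the
 * level BL2 runs at: EL3 with RME, otherwise EL1 on AArch64 or SVC/MON mode
 * on AArch32.
 */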
void bl2_plat_arch_setup(void)
{
	const mmap_region_t bl_regions[] = {
		MAP_BL2_TOTAL,
		MAP_BL2_RO,
#if USE_COHERENT_MEM
		MAP_BL_COHERENT_RAM,
#endif
#if ENABLE_RME
		MAP_RMM_DRAM,
		MAP_GPT_L0_REGION,
		MAP_GPT_L1_REGION,
#endif
		{0}
	};

	setup_page_tables(bl_regions, plat_qemu_get_mmap());

#if ENABLE_RME
	/* BL2 runs in EL3 when RME is enabled. */
	assert(is_feat_rme_present());
	enable_mmu_el3(0);
#else /* ENABLE_RME */

#ifdef __aarch64__
	enable_mmu_el1(0);
#else
	enable_mmu_svc_mon(0);
#endif
#endif /* ENABLE_RME */
}

/*******************************************************************************
 * Gets SPSR for BL32 entry
 ******************************************************************************/
static uint32_t qemu_get_spsr_for_bl32_entry(void)
{
#ifdef __aarch64__
	/*
	 * The Secure Payload Dispatcher service is responsible for
	 * setting the SPSR prior to entry into the BL3-2 image.
	 */
	return 0;
#else
	return SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE,
			   DISABLE_ALL_EXCEPTIONS);
#endif
}

/*******************************************************************************
 * Gets SPSR for BL33 entry
 ******************************************************************************/
static uint32_t qemu_get_spsr_for_bl33_entry(void)
{
	uint32_t spsr;
#ifdef __aarch64__
	unsigned int mode;

	/* Figure out what mode we enter the non-secure world in */
	mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
#else
	spsr = SPSR_MODE32(MODE32_svc,
		    plat_get_ns_image_entrypoint() & 0x1,
		    SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
#endif
	return spsr;
}

#if defined(SPD_spmd) && SPMD_SPM_AT_SEL2
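/*
 * Walk the "arm,sp" node of TB_FW_CONFIG and register each described secure
 * partition package (name, UUID and load address) with the IO layer.
 */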
static int load_sps_from_tb_fw_config(struct image_info *image_info)
{
	void *dtb = (void *)image_info->image_base;
	const char *compat_str = "arm,sp";
	const struct fdt_property *uuid;
	uint32_t load_addr;
	const char *name;
	int sp_node;
	int node;

	node = fdt_node_offset_by_compatible(dtb, -1, compat_str);
	if (node < 0) {
		ERROR("Can't find %s in TB_FW_CONFIG\n", compat_str);
		return -1;
	}

	fdt_for_each_subnode(sp_node, dtb, node) {
		name = fdt_get_name(dtb, sp_node, NULL);
		if (name == NULL) {
			ERROR("Can't get name of node in dtb\n");
			return -1;
		}
		uuid = fdt_get_property(dtb, sp_node, "uuid", NULL);
		if (uuid == NULL) {
			ERROR("Can't find property uuid in node %s\n", name);
			return -1;
		}
		if (fdt_read_uint32(dtb, sp_node, "load-address",
				    &load_addr) < 0) {
			ERROR("Can't read load-address in node %s\n", name);
			return -1;
		}
		if (qemu_io_register_sp_pkg(name, uuid->data, load_addr) < 0) {
			return -1;
		}
	}

	return 0;
}
#endif /*defined(SPD_spmd) && SPMD_SPM_AT_SEL2*/

#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
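/*
 * Record the address of the OP-TEE pageable part in the transfer list;
 * a no-op when TRANSFER_LIST is disabled.
 */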
static int handoff_pageable_part(uint64_t pagable_part)
{
#if TRANSFER_LIST
	struct transfer_list_entry *te;

	te = transfer_list_add(bl2_tl, TL_TAG_OPTEE_PAGABLE_PART,
			       sizeof(pagable_part), &pagable_part);
	if (!te) {
		INFO("Cannot add TE for pageable part\n");
		return -1;
	}
#endif
	return 0;
}
#endif

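/*
 * Per-image fix-ups applied after each image is loaded: fill in entry point
 * arguments, SPSR values and transfer list handoff information for BL31,
 * BL32 and BL33, plus the SPMD-related configuration images.
 */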
static int qemu_bl2_handle_post_image_load(unsigned int image_id)
{
	int err = 0;
	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
	bl_mem_params_node_t *pager_mem_params = NULL;
	bl_mem_params_node_t *paged_mem_params = NULL;
#endif
#if defined(SPD_spmd)
	bl_mem_params_node_t *bl32_mem_params = NULL;
#endif
#if TRANSFER_LIST
	struct transfer_list_header *ns_tl = NULL;
#endif

	assert(bl_mem_params);

	switch (image_id) {
#if TRANSFER_LIST
	case BL31_IMAGE_ID:
		/*
		 * arg0 is a bl_params_t reserved for bl31_early_platform_setup2.
		 * Only arg1 and arg3 are needed here, so that BL31 can update
		 * the TL from secure to non-secure memory before it exits.
		 */
#ifdef __aarch64__
		if (GET_RW(bl_mem_params->ep_info.spsr) == MODE_RW_64) {
			bl_mem_params->ep_info.args.arg1 =
				TRANSFER_LIST_HANDOFF_X1_VALUE(REGISTER_CONVENTION_VERSION);
		} else
#endif
		{
			bl_mem_params->ep_info.args.arg1 =
				TRANSFER_LIST_HANDOFF_R1_VALUE(REGISTER_CONVENTION_VERSION);
		}

		bl_mem_params->ep_info.args.arg3 = (uintptr_t)bl2_tl;
		break;
#endif
	case BL32_IMAGE_ID:
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
		pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
		assert(pager_mem_params);

		paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
		assert(paged_mem_params);

		err = parse_optee_header(&bl_mem_params->ep_info,
					 &pager_mem_params->image_info,
					 &paged_mem_params->image_info);
		if (err != 0) {
			WARN("OP-TEE header parse error.\n");
		}

		/* add TL_TAG_OPTEE_PAGABLE_PART entry to the TL */
		if (handoff_pageable_part(bl_mem_params->ep_info.args.arg1)) {
			return -1;
		}
#endif

		INFO("Handoff to BL32\n");
		bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl32_entry();
		if (TRANSFER_LIST &&
			transfer_list_set_handoff_args(bl2_tl,
				&bl_mem_params->ep_info))
			break;

		INFO("Using default arguments\n");
#if defined(SPMC_OPTEE)
		/*
		 * Explicit zeroes to unused registers since they may have
		 * been populated by parse_optee_header() above.
		 *
		 * OP-TEE expects system DTB in x2 and TOS_FW_CONFIG in x0,
		 * the latter is filled in below for TOS_FW_CONFIG_ID and
		 * applies to any other SPMC too.
		 */
		bl_mem_params->ep_info.args.arg2 = ARM_PRELOADED_DTB_BASE;
#elif defined(SPD_opteed)
		/*
		 * OP-TEE expects to receive the DTB address in x2.
		 * This will be copied into x2 by the dispatcher.
		 */
		bl_mem_params->ep_info.args.arg3 = ARM_PRELOADED_DTB_BASE;
#elif defined(AARCH32_SP_OPTEE)
		bl_mem_params->ep_info.args.arg0 =
					bl_mem_params->ep_info.args.arg1;
		bl_mem_params->ep_info.args.arg1 = 0;
		bl_mem_params->ep_info.args.arg2 = ARM_PRELOADED_DTB_BASE;
		bl_mem_params->ep_info.args.arg3 = 0;
#endif
		break;

	case BL33_IMAGE_ID:
#ifdef AARCH32_SP_OPTEE
		/* AArch32-only core: OP-TEE expects the NS entry point in LR */
		pager_mem_params = get_bl_mem_params_node(BL32_IMAGE_ID);
		assert(pager_mem_params);
		pager_mem_params->ep_info.lr_svc = bl_mem_params->ep_info.pc;
#endif

		bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl33_entry();

#if ARM_LINUX_KERNEL_AS_BL33
		/*
		 * According to the file ``Documentation/arm64/booting.txt`` of
		 * the Linux kernel tree, Linux expects the physical address of
		 * the device tree blob (DTB) in x0, while x1-x3 are reserved
		 * for future use and must be 0.
		 */
		bl_mem_params->ep_info.args.arg0 =
			(u_register_t)ARM_PRELOADED_DTB_BASE;
		bl_mem_params->ep_info.args.arg1 = 0U;
		bl_mem_params->ep_info.args.arg2 = 0U;
		bl_mem_params->ep_info.args.arg3 = 0U;
#elif TRANSFER_LIST
		if (bl2_tl) {
			/* Relocate the TL to the pre-allocated NS memory. */
			ns_tl = transfer_list_relocate(bl2_tl,
					(void *)(uintptr_t)FW_NS_HANDOFF_BASE,
					bl2_tl->max_size);
			if (!ns_tl) {
				ERROR("Relocate TL to 0x%lx failed\n",
					(unsigned long)FW_NS_HANDOFF_BASE);
				return -1;
			}
		}

		INFO("Handoff to BL33\n");
		if (!transfer_list_set_handoff_args(ns_tl,
						    &bl_mem_params->ep_info)) {
			INFO("Invalid TL, fallback to default arguments\n");
			bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
		}
#else
		/* BL33 expects to receive the primary CPU MPID (through r0) */
		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
#endif /* ARM_LINUX_KERNEL_AS_BL33 */

		break;
#ifdef SPD_spmd
#if SPMD_SPM_AT_SEL2
	case TB_FW_CONFIG_ID:
		err = load_sps_from_tb_fw_config(&bl_mem_params->image_info);
		break;
#endif
	case TOS_FW_CONFIG_ID:
		/* An SPMC expects TOS_FW_CONFIG in x0/r0 */
		bl32_mem_params = get_bl_mem_params_node(BL32_IMAGE_ID);
		bl32_mem_params->ep_info.args.arg0 =
					bl_mem_params->image_info.image_base;
		break;
#endif
	default:
		/* Do nothing in default case */
		break;
	}

	return err;
}

/*******************************************************************************
 * This function can be used by the platforms to update/use image
 * information for given `image_id`.
 ******************************************************************************/
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
	return qemu_bl2_handle_post_image_load(image_id);
}

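/* BL33 is expected at the fixed non-secure image offset. */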
uintptr_t plat_get_ns_image_entrypoint(void)
{
	return NS_IMAGE_OFFSET;
}
459