xref: /optee_os/core/kernel/dt.c (revision 45fecab081173ef58b1cb14b6ddf6892b0b9d3f6)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  */
5 
6 #include <assert.h>
7 #include <config.h>
8 #include <initcall.h>
9 #include <kernel/dt.h>
10 #include <kernel/dt_driver.h>
11 #include <kernel/interrupt.h>
12 #include <libfdt.h>
13 #include <mm/core_memprot.h>
14 #include <mm/core_mmu.h>
15 #include <stdio.h>
16 #include <string.h>
17 #include <trace.h>
18 
19 static struct dt_descriptor external_dt __nex_bss;
20 
21 #if defined(CFG_CORE_FFA)
22 static void *manifest_dt __nex_bss;
23 #endif
24 
25 const struct dt_driver *dt_find_compatible_driver(const void *fdt, int offs)
26 {
27 	const struct dt_device_match *dm;
28 	const struct dt_driver *drv;
29 
30 	for_each_dt_driver(drv) {
31 		for (dm = drv->match_table; dm; dm++) {
32 			if (!dm->compatible) {
33 				break;
34 			}
35 			if (!fdt_node_check_compatible(fdt, offs,
36 						       dm->compatible)) {
37 				return drv;
38 			}
39 		}
40 	}
41 
42 	return NULL;
43 }
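
/*
 * Illustrative sketch, not part of this file: a match table as consumed
 * by dt_find_compatible_driver(). The lookup loop above stops at the
 * first entry whose compatible pointer is NULL, so the table must end
 * with a zeroed sentinel entry. The compatible string and the table
 * name are hypothetical.
 *
 *	static const struct dt_device_match my_dt_match[] = {
 *		{ .compatible = "vendor,example-uart" },
 *		{ }
 *	};
 *
 * A driver registered with such a table is returned when the node at
 * "offs" carries a matching compatible string.
 */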
44 
45 bool dt_have_prop(const void *fdt, int offs, const char *propname)
46 {
47 	const void *prop;
48 
49 	prop = fdt_getprop(fdt, offs, propname, NULL);
50 
51 	return prop;
52 }
53 
54 int dt_disable_status(void *fdt, int node)
55 {
56 	const char *prop = NULL;
57 	int len = 0;
58 
59 	prop = fdt_getprop(fdt, node, "status", &len);
60 	if (!prop) {
61 		if (fdt_setprop_string(fdt, node, "status", "disabled"))
62 			return -1;
63 	} else {
64 		/*
65 		 * The status property already exists, so modify it in place.
66 		 * Write the value "disabled" into the property. The value is
67 		 * automatically truncated to the existing property length
68 		 * "len" by fdt_setprop_inplace().
69 		 * Any value different from "ok" or "okay" disables the node,
70 		 * so a truncated "disabled" is sufficient.
71 		 * Keeping the original property length is preferred because
72 		 * it neither grows the DT nor forces the overall DT offsets
73 		 * to be recomputed.
74 		 * If the original status property is longer than "disabled",
75 		 * the property starts with "disabled" and keeps the rest of
76 		 * the original value after it.
77 		 */
78 		if (fdt_setprop_inplace(fdt, node, "status", "disabled", len))
79 			return -1;
80 	}
81 
82 	return 0;
83 }
84 
85 int dt_enable_secure_status(void *fdt, int node)
86 {
87 	if (dt_disable_status(fdt, node)) {
88 		EMSG("Unable to disable Normal Status");
89 		return -1;
90 	}
91 
92 	if (fdt_setprop_string(fdt, node, "secure-status", "okay"))
93 		return -1;
94 
95 	return 0;
96 }
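
/*
 * Illustrative example, not from this file: the properties a node ends
 * up with after dt_enable_secure_status(), shown as device-tree source.
 * The node name is hypothetical and "disabled" may appear truncated if
 * an existing, shorter status value was patched in place.
 *
 *	serial@40001000 {
 *		status = "disabled";
 *		secure-status = "okay";
 *	};
 *
 * The normal world then ignores the node while it stays usable from the
 * secure world.
 */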
97 
98 int dt_map_dev(const void *fdt, int offs, vaddr_t *base, size_t *size,
99 	       enum dt_map_dev_directive mapping)
100 {
101 	enum teecore_memtypes mtype;
102 	paddr_t pbase;
103 	vaddr_t vbase;
104 	size_t sz;
105 	int st;
106 
107 	assert(cpu_mmu_enabled());
108 
109 	st = fdt_get_status(fdt, offs);
110 	if (st == DT_STATUS_DISABLED)
111 		return -1;
112 
113 	if (fdt_reg_info(fdt, offs, &pbase, &sz))
114 		return -1;
115 
116 	switch (mapping) {
117 	case DT_MAP_AUTO:
118 		if ((st & DT_STATUS_OK_SEC) && !(st & DT_STATUS_OK_NSEC))
119 			mtype = MEM_AREA_IO_SEC;
120 		else
121 			mtype = MEM_AREA_IO_NSEC;
122 		break;
123 	case DT_MAP_SECURE:
124 		mtype = MEM_AREA_IO_SEC;
125 		break;
126 	case DT_MAP_NON_SECURE:
127 		mtype = MEM_AREA_IO_NSEC;
128 		break;
129 	default:
130 		panic("Invalid mapping specified");
131 		break;
132 	}
133 
134 	/* Check if we have a mapping, create one if needed */
135 	vbase = (vaddr_t)core_mmu_add_mapping(mtype, pbase, sz);
136 	if (!vbase) {
137 		EMSG("Failed to map %zu bytes at PA 0x%"PRIxPA,
138 		     (size_t)sz, pbase);
139 		return -1;
140 	}
141 
142 	*base = vbase;
143 	*size = sz;
144 	return 0;
145 }
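
/*
 * Illustrative sketch, not part of this file: a driver probe mapping
 * its controller registers through dt_map_dev(). Variable names are
 * hypothetical and error handling is reduced to a bare return.
 *
 *	vaddr_t base = 0;
 *	size_t size = 0;
 *
 *	if (dt_map_dev(fdt, offs, &base, &size, DT_MAP_AUTO) < 0)
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 *
 * With DT_MAP_AUTO the area is mapped secure only when the node status
 * is "okay" for the secure world and disabled for the normal world,
 * otherwise it is mapped non-secure.
 */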
146 
147 /* Read a physical address (n=1 or 2 cells) */
148 static paddr_t fdt_read_paddr(const uint32_t *cell, int n)
149 {
150 	paddr_t addr;
151 
152 	if (n < 1 || n > 2)
153 		goto bad;
154 
155 	addr = fdt32_to_cpu(*cell);
156 	cell++;
157 	if (n == 2) {
158 #ifdef ARM32
159 		if (addr) {
160 			/* High order 32 bits can't be nonzero */
161 			goto bad;
162 		}
163 		addr = fdt32_to_cpu(*cell);
164 #else
165 		addr = (addr << 32) | fdt32_to_cpu(*cell);
166 #endif
167 	}
168 
169 	return addr;
170 bad:
171 	return DT_INFO_INVALID_REG;
172 
173 }
174 
175 static size_t fdt_read_size(const uint32_t *cell, int n)
176 {
177 	uint32_t sz = 0;
178 
179 	sz = fdt32_to_cpu(*cell);
180 	if (n == 2) {
181 		if (sz)
182 			return DT_INFO_INVALID_REG_SIZE;
183 
184 		cell++;
185 		sz = fdt32_to_cpu(*cell);
186 	}
187 
188 	return sz;
189 }
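
/*
 * Illustrative example, not from this file: how the two helpers above
 * decode a 2-cell address / 2-cell size "reg" entry such as
 *
 *	reg = <0x0 0x40000000 0x0 0x1000>;
 *
 * fdt_read_paddr() on the first two cells yields 0x40000000 and
 * fdt_read_size() on the last two cells yields 0x1000. A nonzero high
 * address cell yields DT_INFO_INVALID_REG on 32-bit builds, and a
 * nonzero high size cell always yields DT_INFO_INVALID_REG_SIZE.
 */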
190 
191 int fdt_get_reg_props_by_index(const void *fdt, int offs, int index,
192 			       paddr_t *base, size_t *size)
193 {
194 	const fdt32_t *reg = NULL;
195 	int addr_ncells = 0;
196 	int size_ncells = 0;
197 	int cell_offset = 0;
198 	int parent = 0;
199 	int len = 0;
200 
201 	if (index < 0)
202 		return -FDT_ERR_BADOFFSET;
203 
204 	reg = (const fdt32_t *)fdt_getprop(fdt, offs, "reg", &len);
205 	if (!reg)
206 		return -FDT_ERR_NOTFOUND;
207 
208 	if (fdt_find_cached_parent_reg_cells(fdt, offs, &addr_ncells,
209 					     &size_ncells) != 0) {
210 		parent = fdt_parent_offset(fdt, offs);
211 		if (parent < 0)
212 			return -FDT_ERR_NOTFOUND;
213 
214 		addr_ncells = fdt_address_cells(fdt, parent);
215 		if (addr_ncells < 0)
216 			return -FDT_ERR_NOTFOUND;
217 
218 		size_ncells = fdt_size_cells(fdt, parent);
219 		if (size_ncells < 0)
220 			return -FDT_ERR_NOTFOUND;
221 	}
222 
223 	cell_offset = index * (addr_ncells + size_ncells);
224 
225 	if ((size_t)len < (cell_offset + addr_ncells) * sizeof(*reg))
226 		return -FDT_ERR_BADSTRUCTURE;
227 
228 	if (base) {
229 		*base = fdt_read_paddr(reg + cell_offset, addr_ncells);
230 		if (*base == DT_INFO_INVALID_REG)
231 			return -FDT_ERR_NOTFOUND;
232 	}
233 
234 	if (size) {
235 		if ((size_t)len <
236 		    (cell_offset + addr_ncells + size_ncells) * sizeof(*reg))
237 			return -FDT_ERR_BADSTRUCTURE;
238 
239 		*size = fdt_read_size(reg + cell_offset + addr_ncells,
240 				      size_ncells);
241 		if (*size == DT_INFO_INVALID_REG_SIZE)
242 			return -FDT_ERR_NOTFOUND;
243 	}
244 
245 	return 0;
246 }
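
/*
 * Illustrative sketch, not part of this file: reading the second "reg"
 * range of a node, for instance a controller that lists a register bank
 * after its memory window. Variable names are hypothetical.
 *
 *	paddr_t cfg_base = 0;
 *	size_t cfg_size = 0;
 *
 *	if (fdt_get_reg_props_by_index(fdt, offs, 1, &cfg_base, &cfg_size))
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 */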
247 
248 int fdt_reg_info(const void *fdt, int offs, paddr_t *base, size_t *size)
249 {
250 	return fdt_get_reg_props_by_index(fdt, offs, 0, base, size);
251 }
252 
253 paddr_t fdt_reg_base_address(const void *fdt, int offs)
254 {
255 	paddr_t base = 0;
256 
257 	if (fdt_reg_info(fdt, offs, &base, NULL))
258 		return DT_INFO_INVALID_REG;
259 
260 	return base;
261 }
262 
263 size_t fdt_reg_size(const void *fdt, int offs)
264 {
265 	size_t size = 0;
266 
267 	if (fdt_reg_info(fdt, offs, NULL, &size))
268 		return DT_INFO_INVALID_REG_SIZE;
269 
270 	return size;
271 }
272 
273 static bool is_okay(const char *st, int len)
274 {
275 	return !strncmp(st, "ok", len) || !strncmp(st, "okay", len);
276 }
277 
278 int fdt_get_status(const void *fdt, int offs)
279 {
280 	const char *prop;
281 	int st = 0;
282 	int len;
283 
284 	prop = fdt_getprop(fdt, offs, "status", &len);
285 	if (!prop || is_okay(prop, len)) {
286 		/* If status is not specified, it defaults to "okay" */
287 		st |= DT_STATUS_OK_NSEC;
288 	}
289 
290 	prop = fdt_getprop(fdt, offs, "secure-status", &len);
291 	if (!prop) {
292 		/*
293 		 * When secure-status is not specified it defaults to the same
294 		 * value as status
295 		 */
296 		if (st & DT_STATUS_OK_NSEC)
297 			st |= DT_STATUS_OK_SEC;
298 	} else {
299 		if (is_okay(prop, len))
300 			st |= DT_STATUS_OK_SEC;
301 	}
302 
303 	return st;
304 }
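
/*
 * Illustrative examples, not from this file, of the bitmask returned by
 * fdt_get_status() for a few property combinations:
 *
 *	no status, no secure-status     DT_STATUS_OK_NSEC | DT_STATUS_OK_SEC
 *	status = "okay" only            DT_STATUS_OK_NSEC | DT_STATUS_OK_SEC
 *	status = "disabled" only        DT_STATUS_DISABLED (no bit set)
 *	status = "okay",
 *	secure-status = "disabled"      DT_STATUS_OK_NSEC
 *	status = "disabled",
 *	secure-status = "okay"          DT_STATUS_OK_SEC
 */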
305 
306 void fdt_fill_device_info(const void *fdt, struct dt_node_info *info, int offs)
307 {
308 	struct dt_node_info dinfo = {
309 		.reg = DT_INFO_INVALID_REG,
310 		.reg_size = DT_INFO_INVALID_REG_SIZE,
311 		.clock = DT_INFO_INVALID_CLOCK,
312 		.reset = DT_INFO_INVALID_RESET,
313 		.interrupt = DT_INFO_INVALID_INTERRUPT,
314 	};
315 	const fdt32_t *cuint = NULL;
316 
317 	/* Intentionally discard fdt_reg_info() return value */
318 	fdt_reg_info(fdt, offs, &dinfo.reg, &dinfo.reg_size);
319 
320 	cuint = fdt_getprop(fdt, offs, "clocks", NULL);
321 	if (cuint) {
322 		cuint++;
323 		dinfo.clock = (int)fdt32_to_cpu(*cuint);
324 	}
325 
326 	cuint = fdt_getprop(fdt, offs, "resets", NULL);
327 	if (cuint) {
328 		cuint++;
329 		dinfo.reset = (int)fdt32_to_cpu(*cuint);
330 	}
331 
332 	dinfo.interrupt = dt_get_irq_type_prio(fdt, offs, &dinfo.type,
333 					       &dinfo.prio);
334 
335 	dinfo.status = fdt_get_status(fdt, offs);
336 
337 	*info = dinfo;
338 }
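
/*
 * Illustrative sketch, not part of this file: gathering the basic node
 * properties in a driver probe. The variable names are hypothetical.
 *
 *	struct dt_node_info info = { };
 *
 *	fdt_fill_device_info(fdt, &info, offs);
 *	if (info.status == DT_STATUS_DISABLED ||
 *	    info.reg == DT_INFO_INVALID_REG)
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 */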
339 
340 int fdt_read_uint32_array(const void *fdt, int node, const char *prop_name,
341 			  uint32_t *array, size_t count)
342 {
343 	const fdt32_t *cuint = NULL;
344 	int len = 0;
345 	uint32_t i = 0;
346 
347 	cuint = fdt_getprop(fdt, node, prop_name, &len);
348 	if (!cuint)
349 		return len;
350 
351 	if ((uint32_t)len != (count * sizeof(uint32_t)))
352 		return -FDT_ERR_BADLAYOUT;
353 
354 	for (i = 0; i < ((uint32_t)len / sizeof(uint32_t)); i++) {
355 		*array = fdt32_to_cpu(*cuint);
356 		array++;
357 		cuint++;
358 	}
359 
360 	return 0;
361 }
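
/*
 * Illustrative sketch, not part of this file: reading a fixed-size cell
 * array. The property name and the element count are hypothetical; note
 * that the property length must match the requested count exactly,
 * otherwise -FDT_ERR_BADLAYOUT is returned.
 *
 *	uint32_t thresholds[3] = { };
 *
 *	if (fdt_read_uint32_array(fdt, node, "vendor,thresholds",
 *				  thresholds, ARRAY_SIZE(thresholds)))
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 */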
362 
363 int fdt_read_uint32_index(const void *fdt, int node, const char *prop_name,
364 			  int index, uint32_t *value)
365 {
366 	const fdt32_t *cuint = NULL;
367 	int len = 0;
368 
369 	cuint = fdt_getprop(fdt, node, prop_name, &len);
370 	if (!cuint)
371 		return len;
372 
373 	if ((uint32_t)len < (sizeof(uint32_t) * (index + 1)))
374 		return -FDT_ERR_BADLAYOUT;
375 
376 	*value = fdt32_to_cpu(cuint[index]);
377 
378 	return 0;
379 }
380 
381 int fdt_read_uint32(const void *fdt, int node, const char *prop_name,
382 		    uint32_t *value)
383 {
384 	return fdt_read_uint32_array(fdt, node, prop_name, value, 1);
385 }
386 
387 uint32_t fdt_read_uint32_default(const void *fdt, int node,
388 				 const char *prop_name, uint32_t dflt_value)
389 {
390 	uint32_t ret = dflt_value;
391 
392 	fdt_read_uint32_index(fdt, node, prop_name, 0, &ret);
393 
394 	return ret;
395 }
396 
397 int fdt_get_reg_props_by_name(const void *fdt, int node, const char *name,
398 			      paddr_t *base, size_t *size)
399 {
400 	int index = 0;
401 
402 	index = fdt_stringlist_search(fdt, node, "reg-names", name);
403 	if (index < 0)
404 		return index;
405 
406 	return fdt_get_reg_props_by_index(fdt, node, index, base, size);
407 }
408 
409 int dt_getprop_as_number(const void *fdt, int nodeoffset, const char *name,
410 			 uint64_t *num)
411 {
412 	const void *prop = NULL;
413 	int len = 0;
414 
415 	prop = fdt_getprop(fdt, nodeoffset, name, &len);
416 	if (!prop)
417 		return len;
418 
419 	switch (len) {
420 	case sizeof(uint32_t):
421 		*num = fdt32_ld(prop);
422 		return 0;
423 	case sizeof(uint64_t):
424 		*num = fdt64_ld(prop);
425 		return 0;
426 	default:
427 		return -FDT_ERR_BADVALUE;
428 	}
429 }
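
/*
 * Illustrative sketch, not part of this file: dt_getprop_as_number()
 * accepts either a single 32-bit cell or a 64-bit value, which is
 * convenient when a property width differs between platforms. The
 * property name is hypothetical.
 *
 *	uint64_t load_addr = 0;
 *
 *	if (dt_getprop_as_number(fdt, node, "vendor,load-address",
 *				 &load_addr))
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 */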
430 
431 void *get_dt(void)
432 {
433 	void *fdt = get_embedded_dt();
434 
435 	if (!fdt)
436 		fdt = get_external_dt();
437 
438 	if (!fdt)
439 		fdt = get_manifest_dt();
440 
441 	return fdt;
442 }
443 
444 void *get_secure_dt(void)
445 {
446 	void *fdt = get_embedded_dt();
447 
448 	if (!fdt && IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
449 		fdt = get_external_dt();
450 
451 	if (!fdt)
452 		fdt = get_manifest_dt();
453 
454 	return fdt;
455 }
456 
457 #if defined(CFG_EMBED_DTB)
458 #ifdef CFG_DT_CACHED_NODE_INFO
459 /*
460  * struct cached_node - Cached information of a DT node
461  *
462  * @node_offset: Offset of the node in the cached FDT (see dt_node_cache::fdt)
463  * @parent_offset: Offset of the parent node of @node_offset
464  * @address_cells: #address-cells value of the parent node, or -1 if absent
465  * @size_cells: #size-cells value of the parent node, or -1 if absent
466  * @phandle: Phandle associated with the node, or 0 if none
467  */
468 struct cached_node {
469 	int node_offset;
470 	int parent_offset;
471 	int8_t address_cells;
472 	int8_t size_cells;
473 	uint32_t phandle;
474 };
475 
476 /*
477  * struct dt_node_cache - Cached information of the DT nodes of an FDT
478  *
479  * @array: Array of cached node entries
480  * @count: Number of initialized entries in @array
481  * @alloced_count: Number of allocated entries in @array
482  * @fdt: Reference to the FDT for which node information is cached
483  */
484 struct dt_node_cache {
485 	struct cached_node *array;
486 	size_t count;
487 	size_t alloced_count;
488 	const void *fdt;
489 };
490 
491 static struct dt_node_cache *dt_node_cache;
492 
493 static bool fdt_node_info_are_cached(const void *fdt)
494 {
495 	return dt_node_cache && dt_node_cache->fdt == fdt;
496 }
497 
498 static struct cached_node *find_cached_parent_node(const void *fdt,
499 						   int node_offset)
500 {
501 	struct cached_node *cell = NULL;
502 	size_t n = 0;
503 
504 	if (!fdt_node_info_are_cached(fdt))
505 		return NULL;
506 
507 	for (n = 0; n < dt_node_cache->count; n++)
508 		if (dt_node_cache->array[n].node_offset == node_offset)
509 			cell = dt_node_cache->array + n;
510 
511 	return cell;
512 }
513 
514 int fdt_find_cached_parent_node(const void *fdt, int node_offset,
515 				int *parent_offset)
516 {
517 	struct cached_node *cell = NULL;
518 
519 	cell = find_cached_parent_node(fdt, node_offset);
520 	if (!cell)
521 		return -FDT_ERR_NOTFOUND;
522 
523 	*parent_offset = cell->parent_offset;
524 
525 	return 0;
526 }
527 
528 int fdt_find_cached_parent_reg_cells(const void *fdt, int node_offset,
529 				     int *address_cells, int *size_cells)
530 {
531 	struct cached_node *cell = NULL;
532 	int rc = 0;
533 
534 	cell = find_cached_parent_node(fdt, node_offset);
535 	if (!cell)
536 		return -FDT_ERR_NOTFOUND;
537 
538 	if (address_cells) {
539 		if (cell->address_cells >= 0)
540 			*address_cells = cell->address_cells;
541 		else
542 			rc = -FDT_ERR_NOTFOUND;
543 	}
544 
545 	if (size_cells) {
546 		if (cell->size_cells >= 0)
547 			*size_cells = cell->size_cells;
548 		else
549 			rc = -FDT_ERR_NOTFOUND;
550 	}
551 
552 	return rc;
553 }
554 
555 int fdt_find_cached_node_phandle(const void *fdt, uint32_t phandle,
556 				 int *node_offset)
557 {
558 	struct cached_node *cell = NULL;
559 	size_t n = 0;
560 
561 	if (!fdt_node_info_are_cached(fdt))
562 		return -FDT_ERR_NOTFOUND;
563 
564 	for (n = 0; n < dt_node_cache->count; n++)
565 		if (dt_node_cache->array[n].phandle == phandle)
566 			cell = dt_node_cache->array + n;
567 
568 	if (!cell)
569 		return -FDT_ERR_NOTFOUND;
570 
571 	*node_offset = cell->node_offset;
572 
573 	return 0;
574 }
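
/*
 * Illustrative sketch, not part of this file: resolving a phandle
 * through the cache first and falling back to a full libfdt scan when
 * the cache does not cover this FDT. The surrounding caller code is
 * hypothetical.
 *
 *	int node = -1;
 *
 *	if (fdt_find_cached_node_phandle(fdt, phandle, &node))
 *		node = fdt_node_offset_by_phandle(fdt, phandle);
 *	if (node < 0)
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 */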
575 
576 static TEE_Result realloc_cached_node_array(void)
577 {
578 	assert(dt_node_cache);
579 
580 	if (dt_node_cache->count + 1 > dt_node_cache->alloced_count) {
581 		size_t new_count = dt_node_cache->alloced_count * 2;
582 		struct cached_node *new = NULL;
583 
584 		if (!new_count)
585 			new_count = 4;
586 
587 		new = realloc(dt_node_cache->array,
588 			      sizeof(*dt_node_cache->array) * new_count);
589 		if (!new)
590 			return TEE_ERROR_OUT_OF_MEMORY;
591 
592 		dt_node_cache->array = new;
593 		dt_node_cache->alloced_count = new_count;
594 	}
595 
596 	return TEE_SUCCESS;
597 }
598 
599 static TEE_Result add_cached_node(int parent_offset,
600 				  int node_offset, int address_cells,
601 				  int size_cells)
602 {
603 	TEE_Result res = TEE_ERROR_GENERIC;
604 
605 	res = realloc_cached_node_array();
606 	if (res)
607 		return res;
608 
609 	dt_node_cache->array[dt_node_cache->count] = (struct cached_node){
610 		.node_offset = node_offset,
611 		.parent_offset = parent_offset,
612 		.address_cells = address_cells,
613 		.size_cells = size_cells,
614 		.phandle = fdt_get_phandle(dt_node_cache->fdt, node_offset),
615 	};
616 
617 	dt_node_cache->count++;
618 
619 	return TEE_SUCCESS;
620 }
621 
622 static TEE_Result add_cached_node_subtree(int node_offset)
623 {
624 	TEE_Result res = TEE_ERROR_GENERIC;
625 	const fdt32_t *cuint = NULL;
626 	int subnode_offset = 0;
627 	int8_t addr_cells = -1;
628 	int8_t size_cells = -1;
629 
630 	cuint = fdt_getprop(dt_node_cache->fdt, node_offset, "#address-cells",
631 			    NULL);
632 	if (cuint)
633 		addr_cells = (int)fdt32_to_cpu(*cuint);
634 
635 	cuint = fdt_getprop(dt_node_cache->fdt, node_offset, "#size-cells",
636 			    NULL);
637 	if (cuint)
638 		size_cells = (int)fdt32_to_cpu(*cuint);
639 
640 	fdt_for_each_subnode(subnode_offset, dt_node_cache->fdt, node_offset) {
641 		res = add_cached_node(node_offset, subnode_offset, addr_cells,
642 				      size_cells);
643 		if (res)
644 			return res;
645 
646 		res = add_cached_node_subtree(subnode_offset);
647 		if (res)
648 			return res;
649 	}
650 
651 	return TEE_SUCCESS;
652 }
653 
654 static TEE_Result release_node_cache_info(void)
655 {
656 	if (dt_node_cache) {
657 		free(dt_node_cache->array);
658 		free(dt_node_cache);
659 		dt_node_cache = NULL;
660 	}
661 
662 	return TEE_SUCCESS;
663 }
664 
665 release_init_resource(release_node_cache_info);
666 
667 static void init_node_cache_info(const void *fdt)
668 {
669 	TEE_Result res = TEE_ERROR_GENERIC;
670 
671 	assert(!dt_node_cache);
672 
673 	dt_node_cache = calloc(1, sizeof(*dt_node_cache));
674 	if (dt_node_cache) {
675 		dt_node_cache->fdt = fdt;
676 		res = add_cached_node_subtree(0);
677 	} else {
678 		res = TEE_ERROR_OUT_OF_MEMORY;
679 	}
680 
681 	if (res) {
682 		EMSG("Error %#"PRIx32", disable DT cached info", res);
683 		release_node_cache_info();
684 	}
685 }
686 #else
687 static void init_node_cache_info(const void *fdt __unused)
688 {
689 }
690 #endif /* CFG_DT_CACHED_NODE_INFO */
691 
692 void *get_embedded_dt(void)
693 {
694 	static bool checked;
695 
696 	assert(cpu_mmu_enabled());
697 
698 	if (!checked) {
699 		IMSG("Embedded DTB found");
700 
701 		if (fdt_check_header(embedded_secure_dtb))
702 			panic("Invalid embedded DTB");
703 
704 		checked = true;
705 
706 		init_node_cache_info(embedded_secure_dtb);
707 	}
708 
709 	return embedded_secure_dtb;
710 }
711 #else
712 void *get_embedded_dt(void)
713 {
714 	return NULL;
715 }
716 #endif /*CFG_EMBED_DTB*/
717 
718 #ifdef _CFG_USE_DTB_OVERLAY
719 static int add_dt_overlay_fragment(struct dt_descriptor *dt, int ioffs)
720 {
721 	char frag[32] = { };
722 	int offs = 0;
723 	int ret = 0;
724 
725 	ret = snprintf(frag, sizeof(frag), "fragment@%d", dt->frag_id);
726 	if (ret < 0 || (size_t)ret >= sizeof(frag))
727 		return -1;
728 
729 	offs = fdt_add_subnode(dt->blob, ioffs, frag);
730 	if (offs < 0)
731 		return offs;
732 
733 	dt->frag_id += 1;
734 
735 	ret = fdt_setprop_string(dt->blob, offs, "target-path", "/");
736 	if (ret < 0)
737 		return ret;
738 
739 	return fdt_add_subnode(dt->blob, offs, "__overlay__");
740 }
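
/*
 * Illustrative example, not from this file: the overlay skeleton
 * produced by add_dt_overlay_fragment(), shown as device-tree source.
 * The function returns the offset of the __overlay__ node so that the
 * caller can add its own nodes underneath it.
 *
 *	fragment@0 {
 *		target-path = "/";
 *		__overlay__ {
 *		};
 *	};
 */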
741 
742 static int init_dt_overlay(struct dt_descriptor *dt, int __maybe_unused dt_size)
743 {
744 	int fragment = 0;
745 
746 	if (IS_ENABLED(CFG_EXTERNAL_DTB_OVERLAY)) {
747 		if (!fdt_check_header(dt->blob)) {
748 			fdt_for_each_subnode(fragment, dt->blob, 0)
749 				dt->frag_id += 1;
750 			return 0;
751 		}
752 	}
753 
754 	return fdt_create_empty_tree(dt->blob, dt_size);
755 }
756 #else
757 static int add_dt_overlay_fragment(struct dt_descriptor *dt __unused, int offs)
758 {
759 	return offs;
760 }
761 
762 static int init_dt_overlay(struct dt_descriptor *dt __unused,
763 			   int dt_size __unused)
764 {
765 	return 0;
766 }
767 #endif /* _CFG_USE_DTB_OVERLAY */
768 
769 struct dt_descriptor *get_external_dt_desc(void)
770 {
771 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
772 		return NULL;
773 
774 	return &external_dt;
775 }
776 
777 void init_external_dt(unsigned long phys_dt, size_t dt_sz)
778 {
779 	struct dt_descriptor *dt = &external_dt;
780 	int ret = 0;
781 	enum teecore_memtypes mtype = MEM_AREA_MAXTYPE;
782 
783 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
784 		return;
785 
786 	if (!phys_dt || !dt_sz) {
787 		/*
788 		 * No need to panic as we're not using the DT in OP-TEE
789 		 * yet, we're only adding some nodes for normal world use.
790 		 * This makes the switch to using DT easier as we can boot
791 		 * a newer OP-TEE with older boot loaders. Once we start to
792 		 * initialize devices based on DT we'll likely panic
793 		 * instead of returning here.
794 		 */
795 		IMSG("No non-secure external DT");
796 		return;
797 	}
798 
799 	mtype = core_mmu_get_type_by_pa(phys_dt);
800 	if (mtype == MEM_AREA_MAXTYPE) {
801 		/* Map the DTB if it is not yet mapped */
802 		dt->blob = core_mmu_add_mapping(MEM_AREA_EXT_DT, phys_dt,
803 						dt_sz);
804 		if (!dt->blob)
805 			panic("Failed to map external DTB");
806 	} else {
807 		/* Get the DTB address if already mapped in a memory area */
808 		dt->blob = phys_to_virt(phys_dt, mtype, dt_sz);
809 		if (!dt->blob) {
810 			EMSG("Failed to get a mapped external DTB for PA %#lx",
811 			     phys_dt);
812 			panic();
813 		}
814 	}
815 
816 	ret = init_dt_overlay(dt, dt_sz);
817 	if (ret < 0) {
818 		EMSG("Device Tree Overlay init fail @ %#lx: error %d", phys_dt,
819 		     ret);
820 		panic();
821 	}
822 
823 	ret = fdt_open_into(dt->blob, dt->blob, dt_sz);
824 	if (ret < 0) {
825 		EMSG("Invalid Device Tree at %#lx: error %d", phys_dt, ret);
826 		panic();
827 	}
828 
829 	IMSG("Non-secure external DT found");
830 }
831 
832 void *get_external_dt(void)
833 {
834 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
835 		return NULL;
836 
837 	assert(cpu_mmu_enabled());
838 	return external_dt.blob;
839 }
840 
841 static TEE_Result release_external_dt(void)
842 {
843 	int ret = 0;
844 	paddr_t pa_dt = 0;
845 
846 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
847 		return TEE_SUCCESS;
848 
849 	if (!external_dt.blob)
850 		return TEE_SUCCESS;
851 
852 	pa_dt = virt_to_phys(external_dt.blob);
853 	/*
854 	 * Skip packing and un-mapping operations if the external DTB is mapped
855 	 * in a different memory area
856 	 */
857 	if (core_mmu_get_type_by_pa(pa_dt) != MEM_AREA_EXT_DT)
858 		return TEE_SUCCESS;
859 
860 	ret = fdt_pack(external_dt.blob);
861 	if (ret < 0) {
862 		EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
863 		     virt_to_phys(external_dt.blob), ret);
864 		panic();
865 	}
866 
867 	if (core_mmu_remove_mapping(MEM_AREA_EXT_DT, external_dt.blob,
868 				    CFG_DTB_MAX_SIZE))
869 		panic("Failed to remove temporary Device Tree mapping");
870 
871 	/* The external DTB can no longer be accessed, reset the pointer */
872 	external_dt.blob = NULL;
873 
874 	return TEE_SUCCESS;
875 }
876 
877 boot_final(release_external_dt);
878 
879 int add_dt_path_subnode(struct dt_descriptor *dt, const char *path,
880 			const char *subnode)
881 {
882 	int offs = 0;
883 
884 	offs = fdt_path_offset(dt->blob, path);
885 	if (offs < 0)
886 		return offs;
887 	offs = add_dt_overlay_fragment(dt, offs);
888 	if (offs < 0)
889 		return offs;
890 	return fdt_add_subnode(dt->blob, offs, subnode);
891 }
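
/*
 * Illustrative sketch, not part of this file: creating a subnode by
 * path. The path and node name are chosen only for illustration. With
 * _CFG_USE_DTB_OVERLAY the subnode ends up under a new
 * fragment@N/__overlay__ node with target-path "/" rather than directly
 * under the resolved node.
 *
 *	offs = add_dt_path_subnode(dt, "/firmware", "optee");
 *	if (offs < 0)
 *		return offs;
 */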
892 
893 static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
894 {
895 	if (cell_size == 1) {
896 		fdt32_t v = cpu_to_fdt32((uint32_t)val);
897 
898 		memcpy(data, &v, sizeof(v));
899 	} else {
900 		fdt64_t v = cpu_to_fdt64(val);
901 
902 		memcpy(data, &v, sizeof(v));
903 	}
904 }
905 
906 int add_res_mem_dt_node(struct dt_descriptor *dt, const char *name,
907 			paddr_t pa, size_t size)
908 {
909 	int offs = 0;
910 	int ret = 0;
911 	int addr_size = -1;
912 	int len_size = -1;
913 	bool found = true;
914 	char subnode_name[80] = { };
915 
916 	offs = fdt_path_offset(dt->blob, "/reserved-memory");
917 
918 	if (offs < 0) {
919 		found = false;
920 		offs = 0;
921 	}
922 
923 	if (IS_ENABLED2(_CFG_USE_DTB_OVERLAY)) {
924 		len_size = sizeof(paddr_t) / sizeof(uint32_t);
925 		addr_size = sizeof(paddr_t) / sizeof(uint32_t);
926 	} else {
927 		len_size = fdt_size_cells(dt->blob, offs);
928 		if (len_size < 0)
929 			return len_size;
930 		addr_size = fdt_address_cells(dt->blob, offs);
931 		if (addr_size < 0)
932 			return addr_size;
933 	}
934 
935 	if (!found) {
936 		offs = add_dt_path_subnode(dt, "/", "reserved-memory");
937 		if (offs < 0)
938 			return offs;
939 		ret = fdt_setprop_cell(dt->blob, offs, "#address-cells",
940 				       addr_size);
941 		if (ret < 0)
942 			return ret;
943 		ret = fdt_setprop_cell(dt->blob, offs, "#size-cells", len_size);
944 		if (ret < 0)
945 			return ret;
946 		ret = fdt_setprop(dt->blob, offs, "ranges", NULL, 0);
947 		if (ret < 0)
948 			return ret;
949 	}
950 
951 	ret = snprintf(subnode_name, sizeof(subnode_name),
952 		       "%s@%" PRIxPA, name, pa);
953 	if (ret < 0 || ret >= (int)sizeof(subnode_name))
954 		DMSG("truncated node \"%s@%" PRIxPA"\"", name, pa);
955 	offs = fdt_add_subnode(dt->blob, offs, subnode_name);
956 	if (offs >= 0) {
957 		uint32_t data[FDT_MAX_NCELLS * 2] = { };
958 
959 		set_dt_val(data, addr_size, pa);
960 		set_dt_val(data + addr_size, len_size, size);
961 		ret = fdt_setprop(dt->blob, offs, "reg", data,
962 				  sizeof(uint32_t) * (addr_size + len_size));
963 		if (ret < 0)
964 			return ret;
965 		ret = fdt_setprop(dt->blob, offs, "no-map", NULL, 0);
966 		if (ret < 0)
967 			return ret;
968 	} else {
969 		return offs;
970 	}
971 	return 0;
972 }
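
/*
 * Illustrative example, not from this file: the nodes produced by
 * add_res_mem_dt_node(dt, "optee_shm", 0x42000000, 0x200000), assuming
 * the /reserved-memory node did not exist yet and that 2-cell addresses
 * and sizes are used. The name and addresses are hypothetical.
 *
 *	reserved-memory {
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *		ranges;
 *
 *		optee_shm@42000000 {
 *			reg = <0x0 0x42000000 0x0 0x200000>;
 *			no-map;
 *		};
 *	};
 */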
973 
974 #if defined(CFG_CORE_FFA)
975 void init_manifest_dt(void *fdt)
976 {
977 	manifest_dt = fdt;
978 }
979 
980 void reinit_manifest_dt(void)
981 {
982 	paddr_t pa = (unsigned long)manifest_dt;
983 	void *fdt = NULL;
984 	int ret = 0;
985 
986 	if (!pa) {
987 		EMSG("No manifest DT found");
988 		return;
989 	}
990 
991 	fdt = core_mmu_add_mapping(MEM_AREA_MANIFEST_DT, pa, CFG_DTB_MAX_SIZE);
992 	if (!fdt)
993 		panic("Failed to map manifest DT");
994 
995 	manifest_dt = fdt;
996 
997 	ret = fdt_check_full(fdt, CFG_DTB_MAX_SIZE);
998 	if (ret < 0) {
999 		EMSG("Invalid manifest Device Tree at %#lx: error %d", pa, ret);
1000 		panic();
1001 	}
1002 
1003 	IMSG("manifest DT found");
1004 }
1005 
1006 void *get_manifest_dt(void)
1007 {
1008 	return manifest_dt;
1009 }
1010 
1011 static TEE_Result release_manifest_dt(void)
1012 {
1013 	if (!manifest_dt)
1014 		return TEE_SUCCESS;
1015 
1016 	if (core_mmu_remove_mapping(MEM_AREA_MANIFEST_DT, manifest_dt,
1017 				    CFG_DTB_MAX_SIZE))
1018 		panic("Failed to remove temporary manifest DT mapping");
1019 	manifest_dt = NULL;
1020 
1021 	return TEE_SUCCESS;
1022 }
1023 
1024 boot_final(release_manifest_dt);
1025 #else
1026 void init_manifest_dt(void *fdt __unused)
1027 {
1028 }
1029 
1030 void reinit_manifest_dt(void)
1031 {
1032 }
1033 
1034 void *get_manifest_dt(void)
1035 {
1036 	return NULL;
1037 }
1038 #endif /*CFG_CORE_FFA*/
1039