xref: /optee_os/core/kernel/dt.c (revision 578bc4fe77ccbb3145211713eaf2b6129245b93e)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  */
5 
6 #include <assert.h>
7 #include <config.h>
8 #include <initcall.h>
9 #include <kernel/dt.h>
10 #include <kernel/dt_driver.h>
11 #include <kernel/interrupt.h>
12 #include <libfdt.h>
13 #include <mm/core_memprot.h>
14 #include <mm/core_mmu.h>
15 #include <stdio.h>
16 #include <string.h>
17 #include <trace.h>
18 
19 static struct dt_descriptor external_dt __nex_bss;
20 
21 #if defined(CFG_CORE_FFA)
22 static void *manifest_dt __nex_bss;
23 #endif
24 
25 const struct dt_driver *dt_find_compatible_driver(const void *fdt, int offs)
26 {
27 	const struct dt_device_match *dm;
28 	const struct dt_driver *drv;
29 
30 	for_each_dt_driver(drv) {
31 		for (dm = drv->match_table; dm; dm++) {
32 			if (!dm->compatible) {
33 				break;
34 			}
35 			if (!fdt_node_check_compatible(fdt, offs,
36 						       dm->compatible)) {
37 				return drv;
38 			}
39 		}
40 	}
41 
42 	return NULL;
43 }
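/*
 * Usage sketch (illustrative, not part of this file): the inner loop above
 * stops at the first match-table entry whose "compatible" is NULL, so driver
 * match tables are expected to end with an empty sentinel entry. The
 * "vendor,my-device" string below is a hypothetical example.
 *
 *	static const struct dt_device_match my_match_table[] = {
 *		{ .compatible = "vendor,my-device" },
 *		{ }
 *	};
 *
 *	const struct dt_driver *drv = dt_find_compatible_driver(fdt, offs);
 *
 *	if (!drv)
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 */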
44 
45 bool dt_have_prop(const void *fdt, int offs, const char *propname)
46 {
47 	const void *prop;
48 
49 	prop = fdt_getprop(fdt, offs, propname, NULL);
50 
51 	return prop != NULL;
52 }
53 
54 int dt_disable_status(void *fdt, int node)
55 {
56 	const char *prop = NULL;
57 	int len = 0;
58 
59 	prop = fdt_getprop(fdt, node, "status", &len);
60 	if (!prop) {
61 		if (fdt_setprop_string(fdt, node, "status", "disabled"))
62 			return -1;
63 	} else {
64 		/*
65 		 * The status property exists, modify it in place.
66 		 * Write the "disabled" value into the property. The value
67 		 * is automatically truncated to "len" bytes by the
68 		 * fdt_setprop_inplace() function.
69 		 * Any value different from "ok" or "okay" marks the node
70 		 * as disabled.
71 		 * Writing a truncated "disabled" within the original
72 		 * property "len" is preferred so that the DT does not grow
73 		 * and the overall DT offsets need not be recomputed.
74 		 * If the original status property is longer than
75 		 * "disabled", the property will start with "disabled" and be
76 		 * completed with the rest of the original value.
77 		 */
78 		if (fdt_setprop_inplace(fdt, node, "status", "disabled", len))
79 			return -1;
80 	}
81 
82 	return 0;
83 }
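/*
 * Worked example (illustrative): if a node carries status = "okay", the
 * existing property is 5 bytes long ("okay" plus its NUL terminator), so
 * fdt_setprop_inplace() above rewrites it with the first 5 bytes of
 * "disabled", i.e. "disab". Since that value is neither "ok" nor "okay",
 * the node is treated as disabled, and the DT size and offsets are left
 * untouched:
 *
 *	status = "okay";	->	status = "disab";
 */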
84 
85 int dt_enable_secure_status(void *fdt, int node)
86 {
87 	if (dt_disable_status(fdt, node)) {
88 		EMSG("Unable to disable Normal Status");
89 		return -1;
90 	}
91 
92 	if (fdt_setprop_string(fdt, node, "secure-status", "okay"))
93 		return -1;
94 
95 	return 0;
96 }
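/*
 * Example outcome (illustrative DTS view, hypothetical node name): after
 * dt_enable_secure_status() a device claimed by the secure world ends up
 * hidden from the normal world but enabled for OP-TEE:
 *
 *	my-device@50000000 {
 *		status = "disabled";
 *		secure-status = "okay";
 *	};
 *
 * (When the pre-existing status string is shorter than "disabled" it holds
 * the truncated value described above, which has the same effect.)
 */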
97 
98 int dt_map_dev(const void *fdt, int offs, vaddr_t *base, size_t *size,
99 	       enum dt_map_dev_directive mapping)
100 {
101 	enum teecore_memtypes mtype;
102 	paddr_t pbase;
103 	vaddr_t vbase;
104 	size_t sz;
105 	int st;
106 
107 	assert(cpu_mmu_enabled());
108 
109 	st = fdt_get_status(fdt, offs);
110 	if (st == DT_STATUS_DISABLED)
111 		return -1;
112 
113 	pbase = fdt_reg_base_address(fdt, offs);
114 	if (pbase == DT_INFO_INVALID_REG)
115 		return -1;
116 	sz = fdt_reg_size(fdt, offs);
117 	if (sz == DT_INFO_INVALID_REG_SIZE)
118 		return -1;
119 
120 	switch (mapping) {
121 	case DT_MAP_AUTO:
122 		if ((st & DT_STATUS_OK_SEC) && !(st & DT_STATUS_OK_NSEC))
123 			mtype = MEM_AREA_IO_SEC;
124 		else
125 			mtype = MEM_AREA_IO_NSEC;
126 		break;
127 	case DT_MAP_SECURE:
128 		mtype = MEM_AREA_IO_SEC;
129 		break;
130 	case DT_MAP_NON_SECURE:
131 		mtype = MEM_AREA_IO_NSEC;
132 		break;
133 	default:
134 		panic("Invalid mapping specified");
135 		break;
136 	}
137 
138 	/* Check if we have a mapping, create one if needed */
139 	vbase = (vaddr_t)core_mmu_add_mapping(mtype, pbase, sz);
140 	if (!vbase) {
141 		EMSG("Failed to map %zu bytes at PA 0x%"PRIxPA,
142 		     (size_t)sz, pbase);
143 		return -1;
144 	}
145 
146 	*base = vbase;
147 	*size = sz;
148 	return 0;
149 }
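/*
 * Usage sketch (illustrative): a typical caller maps its device registers
 * from the node's "reg" property and lets the status/secure-status
 * properties decide between a secure and a non-secure mapping. "fdt" and
 * "node" are assumed to come from the caller's probe context.
 *
 *	vaddr_t base = 0;
 *	size_t size = 0;
 *
 *	if (dt_map_dev(fdt, node, &base, &size, DT_MAP_AUTO) < 0)
 *		return TEE_ERROR_GENERIC;
 *
 *	// base now holds a virtual address usable for register access
 */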
150 
151 /* Read a physical address (n=1 or 2 cells) */
152 static paddr_t fdt_read_paddr(const uint32_t *cell, int n)
153 {
154 	paddr_t addr;
155 
156 	if (n < 1 || n > 2)
157 		goto bad;
158 
159 	addr = fdt32_to_cpu(*cell);
160 	cell++;
161 	if (n == 2) {
162 #ifdef ARM32
163 		if (addr) {
164 			/* High order 32 bits can't be nonzero */
165 			goto bad;
166 		}
167 		addr = fdt32_to_cpu(*cell);
168 #else
169 		addr = (addr << 32) | fdt32_to_cpu(*cell);
170 #endif
171 	}
172 
173 	return addr;
174 bad:
175 	return DT_INFO_INVALID_REG;
176 
177 }
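/*
 * Worked example (illustrative): for a two-cell address the first cell
 * holds the high 32 bits and the second the low 32 bits. With cells
 * { 0x00000001, 0x23456000 }:
 *
 *   - on a 64-bit build the result is 0x123456000;
 *   - on ARM32 the nonzero high cell cannot be represented in a 32-bit
 *     paddr_t, so DT_INFO_INVALID_REG is returned instead.
 */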
178 
179 paddr_t fdt_reg_base_address(const void *fdt, int offs)
180 {
181 	const void *reg = NULL;
182 	int ncells = 0;
183 	int len = 0;
184 	int parent = 0;
185 
186 	reg = fdt_getprop(fdt, offs, "reg", &len);
187 	if (!reg)
188 		return DT_INFO_INVALID_REG;
189 
190 	if (fdt_find_cached_parent_reg_cells(fdt, offs, &ncells, NULL)) {
191 		parent = fdt_parent_offset(fdt, offs);
192 		if (parent < 0)
193 			return DT_INFO_INVALID_REG;
194 
195 		ncells = fdt_address_cells(fdt, parent);
196 		if (ncells < 0)
197 			return DT_INFO_INVALID_REG;
198 	}
199 
200 	return fdt_read_paddr(reg, ncells);
201 }
202 
203 static size_t fdt_read_size(const uint32_t *cell, int n)
204 {
205 	uint32_t sz = 0;
206 
207 	sz = fdt32_to_cpu(*cell);
208 	if (n == 2) {
209 		if (sz)
210 			return DT_INFO_INVALID_REG_SIZE;
211 
212 		cell++;
213 		sz = fdt32_to_cpu(*cell);
214 	}
215 
216 	return sz;
217 }
218 
219 size_t fdt_reg_size(const void *fdt, int offs)
220 {
221 	const uint32_t *reg = NULL;
222 	int n = 0;
223 	int len = 0;
224 	int parent = 0;
225 	int addr_cells = 0;
226 
227 	reg = (const uint32_t *)fdt_getprop(fdt, offs, "reg", &len);
228 	if (!reg)
229 		return DT_INFO_INVALID_REG_SIZE;
230 
231 	if (fdt_find_cached_parent_reg_cells(fdt, offs, &addr_cells, &n) == 0) {
232 		reg += addr_cells;
233 	} else {
234 		parent = fdt_parent_offset(fdt, offs);
235 		if (parent < 0)
236 			return DT_INFO_INVALID_REG_SIZE;
237 
238 		n = fdt_address_cells(fdt, parent);
239 		if (n < 1 || n > 2)
240 			return DT_INFO_INVALID_REG_SIZE;
241 
242 		reg += n;
243 
244 		n = fdt_size_cells(fdt, parent);
245 	}
246 
247 	if (n < 1 || n > 2)
248 		return DT_INFO_INVALID_REG_SIZE;
249 
250 	return fdt_read_size(reg, n);
251 }
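/*
 * Example (illustrative DTS fragment, hypothetical node): with a parent
 * bus using #address-cells = <2> and #size-cells = <2>, a node such as
 *
 *	my-device@50000000 {
 *		reg = <0x0 0x50000000 0x0 0x1000>;
 *	};
 *
 * makes fdt_reg_base_address() return 0x50000000 and fdt_reg_size() return
 * 0x1000. A nonzero high size cell would make fdt_reg_size() return
 * DT_INFO_INVALID_REG_SIZE, since fdt_read_size() only accepts sizes that
 * fit in 32 bits.
 */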
252 
253 static bool is_okay(const char *st, int len)
254 {
255 	return !strncmp(st, "ok", len) || !strncmp(st, "okay", len);
256 }
257 
258 int fdt_get_status(const void *fdt, int offs)
259 {
260 	const char *prop;
261 	int st = 0;
262 	int len;
263 
264 	prop = fdt_getprop(fdt, offs, "status", &len);
265 	if (!prop || is_okay(prop, len)) {
266 		/* If status is not specified, it defaults to "okay" */
267 		st |= DT_STATUS_OK_NSEC;
268 	}
269 
270 	prop = fdt_getprop(fdt, offs, "secure-status", &len);
271 	if (!prop) {
272 		/*
273 		 * When secure-status is not specified it defaults to the same
274 		 * value as status
275 		 */
276 		if (st & DT_STATUS_OK_NSEC)
277 			st |= DT_STATUS_OK_SEC;
278 	} else {
279 		if (is_okay(prop, len))
280 			st |= DT_STATUS_OK_SEC;
281 	}
282 
283 	return st;
284 }
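/*
 * Resulting flags for common property combinations (derived from the logic
 * above):
 *
 *	status		secure-status	fdt_get_status()
 *	---------	-------------	------------------------------------
 *	<absent>	<absent>	DT_STATUS_OK_NSEC | DT_STATUS_OK_SEC
 *	"okay"		<absent>	DT_STATUS_OK_NSEC | DT_STATUS_OK_SEC
 *	"okay"		"disabled"	DT_STATUS_OK_NSEC
 *	"disabled"	"okay"		DT_STATUS_OK_SEC
 *	"disabled"	<absent>	DT_STATUS_DISABLED (no flag set)
 */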
285 
286 void fdt_fill_device_info(const void *fdt, struct dt_node_info *info, int offs)
287 {
288 	struct dt_node_info dinfo = {
289 		.reg = DT_INFO_INVALID_REG,
290 		.reg_size = DT_INFO_INVALID_REG_SIZE,
291 		.clock = DT_INFO_INVALID_CLOCK,
292 		.reset = DT_INFO_INVALID_RESET,
293 		.interrupt = DT_INFO_INVALID_INTERRUPT,
294 	};
295 	const fdt32_t *cuint = NULL;
296 	int addr_cells = 0;
297 	int size_cells = 0;
298 
299 	if (fdt_find_cached_parent_reg_cells(fdt, offs, &addr_cells,
300 					     &size_cells) == 0) {
301 		int len = 0;
302 
303 		cuint = fdt_getprop(fdt, offs, "reg", &len);
304 		if (cuint &&
305 		    (size_t)len == (addr_cells + size_cells) * sizeof(*cuint)) {
306 			dinfo.reg = fdt_read_paddr(cuint, addr_cells);
307 			dinfo.reg_size = fdt_read_size(cuint + addr_cells,
308 						       size_cells);
309 		}
310 	} else {
311 		dinfo.reg = fdt_reg_base_address(fdt, offs);
312 		dinfo.reg_size = fdt_reg_size(fdt, offs);
313 	}
314 
315 	cuint = fdt_getprop(fdt, offs, "clocks", NULL);
316 	if (cuint) {
317 		cuint++;
318 		dinfo.clock = (int)fdt32_to_cpu(*cuint);
319 	}
320 
321 	cuint = fdt_getprop(fdt, offs, "resets", NULL);
322 	if (cuint) {
323 		cuint++;
324 		dinfo.reset = (int)fdt32_to_cpu(*cuint);
325 	}
326 
327 	dinfo.interrupt = dt_get_irq_type_prio(fdt, offs, &dinfo.type,
328 					       &dinfo.prio);
329 
330 	dinfo.status = fdt_get_status(fdt, offs);
331 
332 	*info = dinfo;
333 }
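/*
 * Usage sketch (illustrative): a driver probe can gather the usual node
 * information in one call instead of parsing "reg", "clocks", "resets" and
 * the interrupt properties separately:
 *
 *	struct dt_node_info info = { };
 *
 *	fdt_fill_device_info(fdt, &info, node);
 *	if (!(info.status & DT_STATUS_OK_SEC))
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 *	if (info.reg == DT_INFO_INVALID_REG ||
 *	    info.reg_size == DT_INFO_INVALID_REG_SIZE)
 *		return TEE_ERROR_BAD_FORMAT;
 */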
334 
335 int fdt_read_uint32_array(const void *fdt, int node, const char *prop_name,
336 			  uint32_t *array, size_t count)
337 {
338 	const fdt32_t *cuint = NULL;
339 	int len = 0;
340 	uint32_t i = 0;
341 
342 	cuint = fdt_getprop(fdt, node, prop_name, &len);
343 	if (!cuint)
344 		return len;
345 
346 	if ((uint32_t)len != (count * sizeof(uint32_t)))
347 		return -FDT_ERR_BADLAYOUT;
348 
349 	for (i = 0; i < ((uint32_t)len / sizeof(uint32_t)); i++) {
350 		*array = fdt32_to_cpu(*cuint);
351 		array++;
352 		cuint++;
353 	}
354 
355 	return 0;
356 }
357 
358 int fdt_read_uint32_index(const void *fdt, int node, const char *prop_name,
359 			  int index, uint32_t *value)
360 {
361 	const fdt32_t *cuint = NULL;
362 	int len = 0;
363 
364 	cuint = fdt_getprop(fdt, node, prop_name, &len);
365 	if (!cuint)
366 		return len;
367 
368 	if ((uint32_t)len < (sizeof(uint32_t) * (index + 1)))
369 		return -FDT_ERR_BADLAYOUT;
370 
371 	*value = fdt32_to_cpu(cuint[index]);
372 
373 	return 0;
374 }
375 
376 int fdt_read_uint32(const void *fdt, int node, const char *prop_name,
377 		    uint32_t *value)
378 {
379 	return fdt_read_uint32_array(fdt, node, prop_name, value, 1);
380 }
381 
382 uint32_t fdt_read_uint32_default(const void *fdt, int node,
383 				 const char *prop_name, uint32_t dflt_value)
384 {
385 	uint32_t ret = dflt_value;
386 
387 	fdt_read_uint32_index(fdt, node, prop_name, 0, &ret);
388 
389 	return ret;
390 }
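/*
 * Usage sketch (illustrative, hypothetical property names): the helpers
 * above read 32-bit cells and return libfdt-style error codes, so a missing
 * property can either be treated as an error or fall back to a default:
 *
 *	uint32_t pair[2] = { };
 *	uint32_t id = 0;
 *	uint32_t timeout = 0;
 *
 *	if (fdt_read_uint32_array(fdt, node, "vendor,range", pair, 2))
 *		return TEE_ERROR_BAD_FORMAT;
 *	if (fdt_read_uint32(fdt, node, "vendor,id", &id))
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 *	timeout = fdt_read_uint32_default(fdt, node, "vendor,timeout", 100);
 */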
391 
392 int fdt_get_reg_props_by_index(const void *fdt, int node, int index,
393 			       paddr_t *base, size_t *size)
394 {
395 	const fdt32_t *prop = NULL;
396 	int parent = 0;
397 	int len = 0;
398 	int address_cells = 0;
399 	int size_cells = 0;
400 	int cell = 0;
401 
402 	parent = fdt_parent_offset(fdt, node);
403 	if (parent < 0)
404 		return parent;
405 
406 	address_cells = fdt_address_cells(fdt, parent);
407 	if (address_cells < 0)
408 		return address_cells;
409 
410 	size_cells = fdt_size_cells(fdt, parent);
411 	if (size_cells < 0)
412 		return size_cells;
413 
414 	cell = index * (address_cells + size_cells);
415 
416 	prop = fdt_getprop(fdt, node, "reg", &len);
417 	if (!prop)
418 		return len;
419 
420 	if (((cell + address_cells + size_cells) * (int)sizeof(uint32_t)) > len)
421 		return -FDT_ERR_BADVALUE;
422 
423 	if (base) {
424 		*base = fdt_read_paddr(&prop[cell], address_cells);
425 		if (*base == DT_INFO_INVALID_REG)
426 			return -FDT_ERR_BADVALUE;
427 	}
428 
429 	if (size) {
430 		*size = fdt_read_size(&prop[cell + address_cells], size_cells);
431 		if (*size == DT_INFO_INVALID_REG_SIZE)
432 			return -FDT_ERR_BADVALUE;
433 	}
434 
435 	return 0;
436 }
437 
438 int fdt_get_reg_props_by_name(const void *fdt, int node, const char *name,
439 			      paddr_t *base, size_t *size)
440 {
441 	int index = 0;
442 
443 	index = fdt_stringlist_search(fdt, node, "reg-names", name);
444 	if (index < 0)
445 		return index;
446 
447 	return fdt_get_reg_props_by_index(fdt, node, index, base, size);
448 }
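/*
 * Example (illustrative, hypothetical names, assuming the parent bus uses
 * one address cell and one size cell): with a node describing two register
 * banks,
 *
 *	my-device@50000000 {
 *		reg = <0x50000000 0x1000>, <0x50010000 0x100>;
 *		reg-names = "core", "ctrl";
 *	};
 *
 * the "ctrl" bank can be fetched by name:
 *
 *	paddr_t base = 0;
 *	size_t size = 0;
 *
 *	if (fdt_get_reg_props_by_name(fdt, node, "ctrl", &base, &size))
 *		return TEE_ERROR_BAD_FORMAT;
 */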
449 
450 int dt_getprop_as_number(const void *fdt, int nodeoffset, const char *name,
451 			 uint64_t *num)
452 {
453 	const void *prop = NULL;
454 	int len = 0;
455 
456 	prop = fdt_getprop(fdt, nodeoffset, name, &len);
457 	if (!prop)
458 		return len;
459 
460 	switch (len) {
461 	case sizeof(uint32_t):
462 		*num = fdt32_ld(prop);
463 		return 0;
464 	case sizeof(uint64_t):
465 		*num = fdt64_ld(prop);
466 		return 0;
467 	default:
468 		return -FDT_ERR_BADVALUE;
469 	}
470 }
471 
472 void *get_dt(void)
473 {
474 	void *fdt = get_embedded_dt();
475 
476 	if (!fdt)
477 		fdt = get_external_dt();
478 
479 	if (!fdt)
480 		fdt = get_manifest_dt();
481 
482 	return fdt;
483 }
484 
485 void *get_secure_dt(void)
486 {
487 	void *fdt = get_embedded_dt();
488 
489 	if (!fdt && IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
490 		fdt = get_external_dt();
491 
492 	if (!fdt)
493 		fdt = get_manifest_dt();
494 
495 	return fdt;
496 }
497 
498 #if defined(CFG_EMBED_DTB)
499 #ifdef CFG_DT_CACHED_NODE_INFO
500 /*
501  * struct cached_node - Cached information of a DT node
502  *
503  * @node_offset: Offset of the node in the cached FDT (@dt_node_cache->fdt)
504  * @parent_offset: Offset of the parent node of @node_offset
505  * @address_cells: #address-cells value of the parent node, or -1 if not set
506  * @size_cells: #size-cells value of the parent node, or -1 if not set
507  * @phandle: Phandle associated with the node, or 0 if none
508  */
509 struct cached_node {
510 	int node_offset;
511 	int parent_offset;
512 	int8_t address_cells;
513 	int8_t size_cells;
514 	uint32_t phandle;
515 };
516 
517 /*
518  * struct dt_node_cache - Reference to cached information of DT nodes
519  *
520  * @array: Array of cached nodes
521  * @count: Number of initialized cells in @array
522  * @alloced_count: Number of allocated cells in @array
523  * @fdt: Reference to the FDT for which node information is cached
524  */
525 struct dt_node_cache {
526 	struct cached_node *array;
527 	size_t count;
528 	size_t alloced_count;
529 	const void *fdt;
530 };
531 
532 static struct dt_node_cache *dt_node_cache;
533 
534 static bool fdt_node_info_are_cached(const void *fdt)
535 {
536 	return dt_node_cache && dt_node_cache->fdt == fdt;
537 }
538 
539 static struct cached_node *find_cached_parent_node(const void *fdt,
540 						   int node_offset)
541 {
542 	struct cached_node *cell = NULL;
543 	size_t n = 0;
544 
545 	if (!fdt_node_info_are_cached(fdt))
546 		return NULL;
547 
548 	for (n = 0; n < dt_node_cache->count; n++)
549 		if (dt_node_cache->array[n].node_offset == node_offset)
550 			cell = dt_node_cache->array + n;
551 
552 	return cell;
553 }
554 
555 int fdt_find_cached_parent_node(const void *fdt, int node_offset,
556 				int *parent_offset)
557 {
558 	struct cached_node *cell = NULL;
559 
560 	cell = find_cached_parent_node(fdt, node_offset);
561 	if (!cell)
562 		return -FDT_ERR_NOTFOUND;
563 
564 	*parent_offset = cell->parent_offset;
565 
566 	return 0;
567 }
568 
569 int fdt_find_cached_parent_reg_cells(const void *fdt, int node_offset,
570 				     int *address_cells, int *size_cells)
571 {
572 	struct cached_node *cell = NULL;
573 	int rc = 0;
574 
575 	cell = find_cached_parent_node(fdt, node_offset);
576 	if (!cell)
577 		return -FDT_ERR_NOTFOUND;
578 
579 	if (address_cells) {
580 		if (cell->address_cells >= 0)
581 			*address_cells = cell->address_cells;
582 		else
583 			rc = -FDT_ERR_NOTFOUND;
584 	}
585 
586 	if (size_cells) {
587 		if (cell->size_cells >= 0)
588 			*size_cells = cell->size_cells;
589 		else
590 			rc = -FDT_ERR_NOTFOUND;
591 	}
592 
593 	return rc;
594 }
595 
596 int fdt_find_cached_node_phandle(const void *fdt, uint32_t phandle,
597 				 int *node_offset)
598 {
599 	struct cached_node *cell = NULL;
600 	size_t n = 0;
601 
602 	if (!fdt_node_info_are_cached(fdt))
603 		return -FDT_ERR_NOTFOUND;
604 
605 	for (n = 0; n < dt_node_cache->count; n++)
606 		if (dt_node_cache->array[n].phandle == phandle)
607 			cell = dt_node_cache->array + n;
608 
609 	if (!cell)
610 		return -FDT_ERR_NOTFOUND;
611 
612 	*node_offset = cell->node_offset;
613 
614 	return 0;
615 }
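/*
 * Usage sketch (illustrative): the phandle cache lets a consumer resolve a
 * phandle reference without scanning the whole tree. Falling back to libfdt
 * keeps the lookup working when the cache does not cover this FDT:
 *
 *	int offs = -1;
 *
 *	if (fdt_find_cached_node_phandle(fdt, phandle, &offs))
 *		offs = fdt_node_offset_by_phandle(fdt, phandle);
 *	if (offs < 0)
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 */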
616 
617 static TEE_Result realloc_cached_node_array(void)
618 {
619 	assert(dt_node_cache);
620 
621 	if (dt_node_cache->count + 1 > dt_node_cache->alloced_count) {
622 		size_t new_count = dt_node_cache->alloced_count * 2;
623 		struct cached_node *new = NULL;
624 
625 		if (!new_count)
626 			new_count = 4;
627 
628 		new = realloc(dt_node_cache->array,
629 			      sizeof(*dt_node_cache->array) * new_count);
630 		if (!new)
631 			return TEE_ERROR_OUT_OF_MEMORY;
632 
633 		dt_node_cache->array = new;
634 		dt_node_cache->alloced_count = new_count;
635 	}
636 
637 	return TEE_SUCCESS;
638 }
639 
640 static TEE_Result add_cached_node(int parent_offset,
641 				  int node_offset, int address_cells,
642 				  int size_cells)
643 {
644 	TEE_Result res = TEE_ERROR_GENERIC;
645 
646 	res = realloc_cached_node_array();
647 	if (res)
648 		return res;
649 
650 	dt_node_cache->array[dt_node_cache->count] = (struct cached_node){
651 		.node_offset = node_offset,
652 		.parent_offset = parent_offset,
653 		.address_cells = address_cells,
654 		.size_cells = size_cells,
655 		.phandle = fdt_get_phandle(dt_node_cache->fdt, node_offset),
656 	};
657 
658 	dt_node_cache->count++;
659 
660 	return TEE_SUCCESS;
661 }
662 
663 static TEE_Result add_cached_node_subtree(int node_offset)
664 {
665 	TEE_Result res = TEE_ERROR_GENERIC;
666 	const fdt32_t *cuint = NULL;
667 	int subnode_offset = 0;
668 	int8_t addr_cells = -1;
669 	int8_t size_cells = -1;
670 
671 	cuint = fdt_getprop(dt_node_cache->fdt, node_offset, "#address-cells",
672 			    NULL);
673 	if (cuint)
674 		addr_cells = (int)fdt32_to_cpu(*cuint);
675 
676 	cuint = fdt_getprop(dt_node_cache->fdt, node_offset, "#size-cells",
677 			    NULL);
678 	if (cuint)
679 		size_cells = (int)fdt32_to_cpu(*cuint);
680 
681 	fdt_for_each_subnode(subnode_offset, dt_node_cache->fdt, node_offset) {
682 		res = add_cached_node(node_offset, subnode_offset, addr_cells,
683 				      size_cells);
684 		if (res)
685 			return res;
686 
687 		res = add_cached_node_subtree(subnode_offset);
688 		if (res)
689 			return res;
690 	}
691 
692 	return TEE_SUCCESS;
693 }
694 
695 static TEE_Result release_node_cache_info(void)
696 {
697 	if (dt_node_cache) {
698 		free(dt_node_cache->array);
699 		free(dt_node_cache);
700 		dt_node_cache = NULL;
701 	}
702 
703 	return TEE_SUCCESS;
704 }
705 
706 release_init_resource(release_node_cache_info);
707 
708 static void init_node_cache_info(const void *fdt)
709 {
710 	TEE_Result res = TEE_ERROR_GENERIC;
711 
712 	assert(!dt_node_cache);
713 
714 	dt_node_cache = calloc(1, sizeof(*dt_node_cache));
715 	if (dt_node_cache) {
716 		dt_node_cache->fdt = fdt;
717 		res = add_cached_node_subtree(0);
718 	} else {
719 		res = TEE_ERROR_OUT_OF_MEMORY;
720 	}
721 
722 	if (res) {
723 		EMSG("Error %#"PRIx32", disabling DT cached node info", res);
724 		release_node_cache_info();
725 	}
726 }
727 #else
728 static void init_node_cache_info(const void *fdt __unused)
729 {
730 }
731 #endif /* CFG_DT_CACHED_NODE_INFO */
732 
733 void *get_embedded_dt(void)
734 {
735 	static bool checked;
736 
737 	assert(cpu_mmu_enabled());
738 
739 	if (!checked) {
740 		IMSG("Embedded DTB found");
741 
742 		if (fdt_check_header(embedded_secure_dtb))
743 			panic("Invalid embedded DTB");
744 
745 		checked = true;
746 
747 		init_node_cache_info(embedded_secure_dtb);
748 	}
749 
750 	return embedded_secure_dtb;
751 }
752 #else
753 void *get_embedded_dt(void)
754 {
755 	return NULL;
756 }
757 #endif /*CFG_EMBED_DTB*/
758 
759 #ifdef _CFG_USE_DTB_OVERLAY
760 static int add_dt_overlay_fragment(struct dt_descriptor *dt, int ioffs)
761 {
762 	char frag[32] = { };
763 	int offs = 0;
764 	int ret = 0;
765 
766 	ret = snprintf(frag, sizeof(frag), "fragment@%d", dt->frag_id);
767 	if (ret < 0 || (size_t)ret >= sizeof(frag))
768 		return -1;
769 
770 	offs = fdt_add_subnode(dt->blob, ioffs, frag);
771 	if (offs < 0)
772 		return offs;
773 
774 	dt->frag_id += 1;
775 
776 	ret = fdt_setprop_string(dt->blob, offs, "target-path", "/");
777 	if (ret < 0)
778 		return ret;
779 
780 	return fdt_add_subnode(dt->blob, offs, "__overlay__");
781 }
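/*
 * Resulting overlay shape (illustrative DTS view): each call appends one
 * fragment targeting the root node, under which the caller's new nodes are
 * then added:
 *
 *	fragment@0 {
 *		target-path = "/";
 *		__overlay__ {
 *			// nodes added by the caller go here
 *		};
 *	};
 */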
782 
783 static int init_dt_overlay(struct dt_descriptor *dt, int __maybe_unused dt_size)
784 {
785 	int fragment = 0;
786 
787 	if (IS_ENABLED(CFG_EXTERNAL_DTB_OVERLAY)) {
788 		if (!fdt_check_header(dt->blob)) {
789 			fdt_for_each_subnode(fragment, dt->blob, 0)
790 				dt->frag_id += 1;
791 			return 0;
792 		}
793 	}
794 
795 	return fdt_create_empty_tree(dt->blob, dt_size);
796 }
797 #else
798 static int add_dt_overlay_fragment(struct dt_descriptor *dt __unused, int offs)
799 {
800 	return offs;
801 }
802 
803 static int init_dt_overlay(struct dt_descriptor *dt __unused,
804 			   int dt_size __unused)
805 {
806 	return 0;
807 }
808 #endif /* _CFG_USE_DTB_OVERLAY */
809 
810 struct dt_descriptor *get_external_dt_desc(void)
811 {
812 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
813 		return NULL;
814 
815 	return &external_dt;
816 }
817 
818 void init_external_dt(unsigned long phys_dt, size_t dt_sz)
819 {
820 	struct dt_descriptor *dt = &external_dt;
821 	int ret = 0;
822 	enum teecore_memtypes mtype = MEM_AREA_MAXTYPE;
823 
824 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
825 		return;
826 
827 	if (!phys_dt || !dt_sz) {
828 		/*
829 		 * No need to panic as we're not using the DT in OP-TEE
830 		 * yet, we're only adding some nodes for normal world use.
831 		 * This makes the switch to using DT easier as we can boot
832 		 * a newer OP-TEE with older boot loaders. Once we start to
833 		 * initialize devices based on DT we'll likely panic
834 		 * instead of returning here.
835 		 */
836 		IMSG("No non-secure external DT");
837 		return;
838 	}
839 
840 	mtype = core_mmu_get_type_by_pa(phys_dt);
841 	if (mtype == MEM_AREA_MAXTYPE) {
842 		/* Map the DTB if it is not yet mapped */
843 		dt->blob = core_mmu_add_mapping(MEM_AREA_EXT_DT, phys_dt,
844 						dt_sz);
845 		if (!dt->blob)
846 			panic("Failed to map external DTB");
847 	} else {
848 		/* Get the DTB address if already mapped in a memory area */
849 		dt->blob = phys_to_virt(phys_dt, mtype, dt_sz);
850 		if (!dt->blob) {
851 			EMSG("Failed to get a mapped external DTB for PA %#lx",
852 			     phys_dt);
853 			panic();
854 		}
855 	}
856 
857 	ret = init_dt_overlay(dt, dt_sz);
858 	if (ret < 0) {
859 		EMSG("Device Tree Overlay init fail @ %#lx: error %d", phys_dt,
860 		     ret);
861 		panic();
862 	}
863 
864 	ret = fdt_open_into(dt->blob, dt->blob, dt_sz);
865 	if (ret < 0) {
866 		EMSG("Invalid Device Tree at %#lx: error %d", phys_dt, ret);
867 		panic();
868 	}
869 
870 	IMSG("Non-secure external DT found");
871 }
872 
873 void *get_external_dt(void)
874 {
875 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
876 		return NULL;
877 
878 	assert(cpu_mmu_enabled());
879 	return external_dt.blob;
880 }
881 
882 static TEE_Result release_external_dt(void)
883 {
884 	int ret = 0;
885 	paddr_t pa_dt = 0;
886 
887 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
888 		return TEE_SUCCESS;
889 
890 	if (!external_dt.blob)
891 		return TEE_SUCCESS;
892 
893 	pa_dt = virt_to_phys(external_dt.blob);
894 	/*
895 	 * Skip packing and un-mapping operations if the external DTB is mapped
896 	 * in a different memory area
897 	 */
898 	if (core_mmu_get_type_by_pa(pa_dt) != MEM_AREA_EXT_DT)
899 		return TEE_SUCCESS;
900 
901 	ret = fdt_pack(external_dt.blob);
902 	if (ret < 0) {
903 		EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
904 		     virt_to_phys(external_dt.blob), ret);
905 		panic();
906 	}
907 
908 	if (core_mmu_remove_mapping(MEM_AREA_EXT_DT, external_dt.blob,
909 				    CFG_DTB_MAX_SIZE))
910 		panic("Failed to remove temporary Device Tree mapping");
911 
912 	/* External DTB is no longer reachable, reset the pointer */
913 	external_dt.blob = NULL;
914 
915 	return TEE_SUCCESS;
916 }
917 
918 boot_final(release_external_dt);
919 
920 int add_dt_path_subnode(struct dt_descriptor *dt, const char *path,
921 			const char *subnode)
922 {
923 	int offs = 0;
924 
925 	offs = fdt_path_offset(dt->blob, path);
926 	if (offs < 0)
927 		return offs;
928 	offs = add_dt_overlay_fragment(dt, offs);
929 	if (offs < 0)
930 		return offs;
931 	return fdt_add_subnode(dt->blob, offs, subnode);
932 }
933 
934 static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
935 {
936 	if (cell_size == 1) {
937 		fdt32_t v = cpu_to_fdt32((uint32_t)val);
938 
939 		memcpy(data, &v, sizeof(v));
940 	} else {
941 		fdt64_t v = cpu_to_fdt64(val);
942 
943 		memcpy(data, &v, sizeof(v));
944 	}
945 }
946 
947 int add_res_mem_dt_node(struct dt_descriptor *dt, const char *name,
948 			paddr_t pa, size_t size)
949 {
950 	int offs = 0;
951 	int ret = 0;
952 	int addr_size = -1;
953 	int len_size = -1;
954 	bool found = true;
955 	char subnode_name[80] = { };
956 
957 	offs = fdt_path_offset(dt->blob, "/reserved-memory");
958 
959 	if (offs < 0) {
960 		found = false;
961 		offs = 0;
962 	}
963 
964 	if (IS_ENABLED2(_CFG_USE_DTB_OVERLAY)) {
965 		len_size = sizeof(paddr_t) / sizeof(uint32_t);
966 		addr_size = sizeof(paddr_t) / sizeof(uint32_t);
967 	} else {
968 		len_size = fdt_size_cells(dt->blob, offs);
969 		if (len_size < 0)
970 			return len_size;
971 		addr_size = fdt_address_cells(dt->blob, offs);
972 		if (addr_size < 0)
973 			return addr_size;
974 	}
975 
976 	if (!found) {
977 		offs = add_dt_path_subnode(dt, "/", "reserved-memory");
978 		if (offs < 0)
979 			return offs;
980 		ret = fdt_setprop_cell(dt->blob, offs, "#address-cells",
981 				       addr_size);
982 		if (ret < 0)
983 			return ret;
984 		ret = fdt_setprop_cell(dt->blob, offs, "#size-cells", len_size);
985 		if (ret < 0)
986 			return ret;
987 		ret = fdt_setprop(dt->blob, offs, "ranges", NULL, 0);
988 		if (ret < 0)
989 			return ret;
990 	}
991 
992 	ret = snprintf(subnode_name, sizeof(subnode_name),
993 		       "%s@%" PRIxPA, name, pa);
994 	if (ret < 0 || ret >= (int)sizeof(subnode_name))
995 		DMSG("truncated node \"%s@%" PRIxPA"\"", name, pa);
996 	offs = fdt_add_subnode(dt->blob, offs, subnode_name);
997 	if (offs >= 0) {
998 		uint32_t data[FDT_MAX_NCELLS * 2] = { };
999 
1000 		set_dt_val(data, addr_size, pa);
1001 		set_dt_val(data + addr_size, len_size, size);
1002 		ret = fdt_setprop(dt->blob, offs, "reg", data,
1003 				  sizeof(uint32_t) * (addr_size + len_size));
1004 		if (ret < 0)
1005 			return ret;
1006 		ret = fdt_setprop(dt->blob, offs, "no-map", NULL, 0);
1007 		if (ret < 0)
1008 			return ret;
1009 	} else {
1010 		return offs;
1011 	}
1012 	return 0;
1013 }
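/*
 * Resulting node (illustrative DTS view, hypothetical name and addresses):
 * a call such as add_res_mem_dt_node(dt, "optee_shm", 0x42000000, 0x200000)
 * produces a carve-out the normal world must not touch:
 *
 *	reserved-memory {
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *		ranges;
 *
 *		optee_shm@42000000 {
 *			reg = <0x0 0x42000000 0x0 0x200000>;
 *			no-map;
 *		};
 *	};
 *
 * The cell counts shown assume a 64-bit paddr_t with the overlay path;
 * otherwise they follow the existing /reserved-memory (or root) node.
 */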
1014 
1015 #if defined(CFG_CORE_FFA)
1016 void init_manifest_dt(void *fdt)
1017 {
1018 	manifest_dt = fdt;
1019 }
1020 
1021 void reinit_manifest_dt(void)
1022 {
1023 	paddr_t pa = (unsigned long)manifest_dt;
1024 	void *fdt = NULL;
1025 	int ret = 0;
1026 
1027 	if (!pa) {
1028 		EMSG("No manifest DT found");
1029 		return;
1030 	}
1031 
1032 	fdt = core_mmu_add_mapping(MEM_AREA_MANIFEST_DT, pa, CFG_DTB_MAX_SIZE);
1033 	if (!fdt)
1034 		panic("Failed to map manifest DT");
1035 
1036 	manifest_dt = fdt;
1037 
1038 	ret = fdt_check_full(fdt, CFG_DTB_MAX_SIZE);
1039 	if (ret < 0) {
1040 		EMSG("Invalid manifest Device Tree at %#lx: error %d", pa, ret);
1041 		panic();
1042 	}
1043 
1044 	IMSG("manifest DT found");
1045 }
1046 
1047 void *get_manifest_dt(void)
1048 {
1049 	return manifest_dt;
1050 }
1051 
1052 static TEE_Result release_manifest_dt(void)
1053 {
1054 	if (!manifest_dt)
1055 		return TEE_SUCCESS;
1056 
1057 	if (core_mmu_remove_mapping(MEM_AREA_MANIFEST_DT, manifest_dt,
1058 				    CFG_DTB_MAX_SIZE))
1059 		panic("Failed to remove temporary manifest DT mapping");
1060 	manifest_dt = NULL;
1061 
1062 	return TEE_SUCCESS;
1063 }
1064 
1065 boot_final(release_manifest_dt);
1066 #else
1067 void init_manifest_dt(void *fdt __unused)
1068 {
1069 }
1070 
1071 void reinit_manifest_dt(void)
1072 {
1073 }
1074 
1075 void *get_manifest_dt(void)
1076 {
1077 	return NULL;
1078 }
1079 #endif /*CFG_CORE_FFA*/
1080