xref: /optee_os/core/kernel/dt.c (revision 967e7c6292fd4e7765def46307224535bbd31986)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016, Linaro Limited
4  */
5 
6 #include <assert.h>
7 #include <config.h>
8 #include <initcall.h>
9 #include <kernel/dt.h>
10 #include <kernel/dt_driver.h>
11 #include <kernel/interrupt.h>
12 #include <libfdt.h>
13 #include <mm/core_memprot.h>
14 #include <mm/core_mmu.h>
15 #include <mm/phys_mem.h>
16 #include <stdio.h>
17 #include <string.h>
18 #include <trace.h>
19 
20 static struct dt_descriptor external_dt __nex_bss;
21 
22 #if defined(CFG_CORE_FFA)
23 static void *manifest_dt __nex_bss;
24 static size_t manifest_max_size __nex_bss;
25 #endif
26 
27 const struct dt_driver *dt_find_compatible_driver(const void *fdt, int offs)
28 {
29 	const struct dt_device_match *dm;
30 	const struct dt_driver *drv;
31 
32 	for_each_dt_driver(drv) {
33 		for (dm = drv->match_table; dm; dm++) {
34 			if (!dm->compatible) {
35 				break;
36 			}
37 			if (!fdt_node_check_compatible(fdt, offs,
38 						       dm->compatible)) {
39 				return drv;
40 			}
41 		}
42 	}
43 
44 	return NULL;
45 }
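/*
 * Illustrative sketch (not part of this file; the driver and compatible
 * names are hypothetical): a match table is an array of struct
 * dt_device_match entries terminated by an empty entry, registered through
 * the driver's struct dt_driver declaration. dt_find_compatible_driver()
 * walks every registered driver until one of its compatibles matches the
 * node:
 *
 *	static const struct dt_device_match my_uart_match_table[] = {
 *		{ .compatible = "vendor,my-uart" },
 *		{ }
 *	};
 *
 *	// With "node" a valid offset in "fdt":
 *	const struct dt_driver *drv = dt_find_compatible_driver(fdt, node);
 *
 *	if (!drv)
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 */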
46 
47 bool dt_have_prop(const void *fdt, int offs, const char *propname)
48 {
49 	const void *prop;
50 
51 	prop = fdt_getprop(fdt, offs, propname, NULL);
52 
53 	return prop;
54 }
55 
56 int dt_disable_status(void *fdt, int node)
57 {
58 	const char *prop = NULL;
59 	int len = 0;
60 
61 	prop = fdt_getprop(fdt, node, "status", &len);
62 	if (!prop) {
63 		if (fdt_setprop_string(fdt, node, "status", "disabled"))
64 			return -1;
65 	} else {
66 		/*
67 		 * The status property exists, so modify it in place.
68 		 * Write "disabled" into the property; the value is
69 		 * automatically truncated to the original "len" bytes by
70 		 * fdt_setprop_inplace().
71 		 * Any value other than "ok" or "okay" disables the node.
72 		 * Writing a possibly truncated "disabled" with the original
73 		 * property length is preferred: the DT size does not grow
74 		 * and there is no need to spend time recalculating the
75 		 * overall DT offsets.
76 		 * If the original status property is longer than "disabled",
77 		 * the property starts with "disabled" and is followed by
78 		 * the remainder of the original value.
79 		 */
80 		if (fdt_setprop_inplace(fdt, node, "status", "disabled", len))
81 			return -1;
82 	}
83 
84 	return 0;
85 }
86 
87 static int dt_enable_secure_status_frag(void *fdt, int node)
88 {
89 	int overlay = 0;
90 
91 	overlay = add_dt_node_overlay_fragment(node);
92 	if (overlay < 0)
93 		return overlay;
94 
95 	if (fdt_setprop_string(fdt, overlay, "status", "disabled")) {
96 		EMSG("Unable to disable Normal Status via fragment");
97 		return -1;
98 	}
99 
100 	if (fdt_setprop_string(fdt, overlay, "secure-status", "okay")) {
101 		EMSG("Unable to enable Secure Status via fragment");
102 		return -1;
103 	}
104 
105 	return 0;
106 }
107 
108 int dt_enable_secure_status(void *fdt, int node)
109 {
110 	if (dt_disable_status(fdt, node)) {
111 		EMSG("Unable to disable Normal Status");
112 		return -1;
113 	}
114 
115 	if (fdt_setprop_string(fdt, node, "secure-status", "okay"))
116 		return -1;
117 
118 	if (IS_ENABLED2(_CFG_USE_DTB_OVERLAY))
119 		return dt_enable_secure_status_frag(fdt, node);
120 
121 	return 0;
122 }
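/*
 * Usage sketch (illustrative only; the compatible string is hypothetical):
 * when OP-TEE claims a device for the secure world, the node in the external
 * DT is typically disabled for the normal world and marked secure:
 *
 *	int node = fdt_node_offset_by_compatible(fdt, -1, "vendor,my-timer");
 *
 *	if (node >= 0 && dt_enable_secure_status(fdt, node))
 *		panic();
 */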
123 
124 int dt_map_dev(const void *fdt, int offs, vaddr_t *base, size_t *size,
125 	       enum dt_map_dev_directive mapping)
126 {
127 	enum teecore_memtypes mtype;
128 	paddr_t pbase;
129 	vaddr_t vbase;
130 	size_t sz;
131 	int st;
132 
133 	assert(cpu_mmu_enabled());
134 
135 	st = fdt_get_status(fdt, offs);
136 	if (st == DT_STATUS_DISABLED)
137 		return -1;
138 
139 	if (fdt_reg_info(fdt, offs, &pbase, &sz))
140 		return -1;
141 
142 	switch (mapping) {
143 	case DT_MAP_AUTO:
144 		if ((st & DT_STATUS_OK_SEC) && !(st & DT_STATUS_OK_NSEC))
145 			mtype = MEM_AREA_IO_SEC;
146 		else
147 			mtype = MEM_AREA_IO_NSEC;
148 		break;
149 	case DT_MAP_SECURE:
150 		mtype = MEM_AREA_IO_SEC;
151 		break;
152 	case DT_MAP_NON_SECURE:
153 		mtype = MEM_AREA_IO_NSEC;
154 		break;
155 	default:
156 		panic("Invalid mapping specified");
157 		break;
158 	}
159 
160 	/* Check if we have a mapping, create one if needed */
161 	vbase = (vaddr_t)core_mmu_add_mapping(mtype, pbase, sz);
162 	if (!vbase) {
163 		EMSG("Failed to map %zu bytes at PA 0x%"PRIxPA,
164 		     (size_t)sz, pbase);
165 		return -1;
166 	}
167 
168 	*base = vbase;
169 	*size = sz;
170 	return 0;
171 }
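/*
 * Usage sketch (illustrative only; the compatible string is hypothetical):
 * map a device node once the MMU is enabled and obtain a virtual base
 * address suitable for io_read32()/io_write32():
 *
 *	vaddr_t base = 0;
 *	size_t size = 0;
 *	int node = fdt_node_offset_by_compatible(fdt, -1, "vendor,my-uart");
 *
 *	if (node < 0 || dt_map_dev(fdt, node, &base, &size, DT_MAP_AUTO))
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 *
 * With DT_MAP_AUTO the mapping is secure only when the node is enabled for
 * the secure world and disabled for the normal world, as computed above.
 */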
172 
173 /* Read a physical address (n=1 or 2 cells) */
174 static paddr_t fdt_read_paddr(const uint32_t *cell, int n)
175 {
176 	paddr_t addr;
177 
178 	if (n < 1 || n > 2)
179 		goto bad;
180 
181 	addr = fdt32_to_cpu(*cell);
182 	cell++;
183 	if (n == 2) {
184 #ifdef ARM32
185 		if (addr) {
186 			/* High order 32 bits can't be nonzero */
187 			goto bad;
188 		}
189 		addr = fdt32_to_cpu(*cell);
190 #else
191 		addr = (addr << 32) | fdt32_to_cpu(*cell);
192 #endif
193 	}
194 
195 	return addr;
196 bad:
197 	return DT_INFO_INVALID_REG;
198 
199 }
200 
201 static size_t fdt_read_size(const uint32_t *cell, int n)
202 {
203 	uint32_t sz = 0;
204 
205 	sz = fdt32_to_cpu(*cell);
206 	if (n == 2) {
207 		if (sz)
208 			return DT_INFO_INVALID_REG_SIZE;
209 
210 		cell++;
211 		sz = fdt32_to_cpu(*cell);
212 	}
213 
214 	return sz;
215 }
216 
217 int fdt_get_reg_props_by_index(const void *fdt, int offs, int index,
218 			       paddr_t *base, size_t *size)
219 {
220 	const fdt32_t *reg = NULL;
221 	int addr_ncells = 0;
222 	int size_ncells = 0;
223 	int cell_offset = 0;
224 	int parent = 0;
225 	int len = 0;
226 
227 	if (index < 0)
228 		return -FDT_ERR_BADOFFSET;
229 
230 	reg = (const uint32_t *)fdt_getprop(fdt, offs, "reg", &len);
231 	if (!reg)
232 		return -FDT_ERR_NOTFOUND;
233 
234 	if (fdt_find_cached_parent_reg_cells(fdt, offs, &addr_ncells,
235 					     &size_ncells) != 0) {
236 		parent = fdt_parent_offset(fdt, offs);
237 		if (parent < 0)
238 			return -FDT_ERR_NOTFOUND;
239 
240 		addr_ncells = fdt_address_cells(fdt, parent);
241 		if (addr_ncells < 0)
242 			return -FDT_ERR_NOTFOUND;
243 
244 		size_ncells = fdt_size_cells(fdt, parent);
245 		if (size_ncells < 0)
246 			return -FDT_ERR_NOTFOUND;
247 	}
248 
249 	cell_offset = index * (addr_ncells + size_ncells);
250 
251 	if ((size_t)len < (cell_offset + addr_ncells) * sizeof(*reg))
252 		return -FDT_ERR_BADSTRUCTURE;
253 
254 	if (base) {
255 		*base = fdt_read_paddr(reg + cell_offset, addr_ncells);
256 		if (*base == DT_INFO_INVALID_REG)
257 			return -FDT_ERR_NOTFOUND;
258 	}
259 
260 	if (size) {
261 		if ((size_t)len <
262 		    (cell_offset + addr_ncells + size_ncells) * sizeof(*reg))
263 			return -FDT_ERR_BADSTRUCTURE;
264 
265 		*size = fdt_read_size(reg + cell_offset + addr_ncells,
266 				      size_ncells);
267 		if (*size == DT_INFO_INVALID_REG_SIZE)
268 			return -FDT_ERR_NOTFOUND;
269 	}
270 
271 	return 0;
272 }
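/*
 * Usage sketch (illustrative, assuming #address-cells = <1> and
 * #size-cells = <1> in the parent node): for a node carrying
 *	reg = <0x50000000 0x1000>, <0x50010000 0x100>;
 * the second range is fetched with index 1:
 *
 *	paddr_t pa = 0;
 *	size_t sz = 0;
 *
 *	if (fdt_get_reg_props_by_index(fdt, node, 1, &pa, &sz))
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 *	// pa == 0x50010000 and sz == 0x100 on success
 */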
273 
274 int fdt_reg_info(const void *fdt, int offs, paddr_t *base, size_t *size)
275 {
276 	return fdt_get_reg_props_by_index(fdt, offs, 0, base, size);
277 }
278 
279 paddr_t fdt_reg_base_address(const void *fdt, int offs)
280 {
281 	paddr_t base = 0;
282 
283 	if (fdt_reg_info(fdt, offs, &base, NULL))
284 		return DT_INFO_INVALID_REG;
285 
286 	return base;
287 }
288 
289 size_t fdt_reg_size(const void *fdt, int offs)
290 {
291 	size_t size = 0;
292 
293 	if (fdt_reg_info(fdt, offs, NULL, &size))
294 		return DT_INFO_INVALID_REG_SIZE;
295 
296 	return size;
297 }
298 
299 static bool is_okay(const char *st, int len)
300 {
301 	return !strncmp(st, "ok", len) || !strncmp(st, "okay", len);
302 }
303 
304 int fdt_get_status(const void *fdt, int offs)
305 {
306 	const char *prop;
307 	int st = 0;
308 	int len;
309 
310 	prop = fdt_getprop(fdt, offs, "status", &len);
311 	if (!prop || is_okay(prop, len)) {
312 		/* If status is not specified, it defaults to "okay" */
313 		st |= DT_STATUS_OK_NSEC;
314 	}
315 
316 	prop = fdt_getprop(fdt, offs, "secure-status", &len);
317 	if (!prop) {
318 		/*
319 		 * When secure-status is not specified it defaults to the same
320 		 * value as status
321 		 */
322 		if (st & DT_STATUS_OK_NSEC)
323 			st |= DT_STATUS_OK_SEC;
324 	} else {
325 		if (is_okay(prop, len))
326 			st |= DT_STATUS_OK_SEC;
327 	}
328 
329 	return st;
330 }
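/*
 * Illustrative mapping (assuming the usual DT_STATUS_* definitions from
 * <kernel/dt.h>, where DT_STATUS_DISABLED is 0):
 *
 *	(no status property)            -> DT_STATUS_OK_NSEC | DT_STATUS_OK_SEC
 *	status = "okay";                -> DT_STATUS_OK_NSEC | DT_STATUS_OK_SEC
 *	status = "okay";
 *	secure-status = "disabled";     -> DT_STATUS_OK_NSEC
 *	status = "disabled";
 *	secure-status = "okay";         -> DT_STATUS_OK_SEC
 *	status = "disabled";            -> DT_STATUS_DISABLED
 */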
331 
332 void fdt_fill_device_info(const void *fdt, struct dt_node_info *info, int offs)
333 {
334 	struct dt_node_info dinfo = {
335 		.reg = DT_INFO_INVALID_REG,
336 		.reg_size = DT_INFO_INVALID_REG_SIZE,
337 		.clock = DT_INFO_INVALID_CLOCK,
338 		.reset = DT_INFO_INVALID_RESET,
339 		.interrupt = DT_INFO_INVALID_INTERRUPT,
340 	};
341 	const fdt32_t *cuint = NULL;
342 
343 	/* Intentionally discard fdt_reg_info() return value */
344 	fdt_reg_info(fdt, offs, &dinfo.reg, &dinfo.reg_size);
345 
346 	cuint = fdt_getprop(fdt, offs, "clocks", NULL);
347 	if (cuint) {
348 		cuint++;
349 		dinfo.clock = (int)fdt32_to_cpu(*cuint);
350 	}
351 
352 	cuint = fdt_getprop(fdt, offs, "resets", NULL);
353 	if (cuint) {
354 		cuint++;
355 		dinfo.reset = (int)fdt32_to_cpu(*cuint);
356 	}
357 
358 	dinfo.interrupt = dt_get_irq_type_prio(fdt, offs, &dinfo.type,
359 					       &dinfo.prio);
360 
361 	dinfo.status = fdt_get_status(fdt, offs);
362 
363 	*info = dinfo;
364 }
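/*
 * Usage sketch (illustrative): collect the common properties of a node in a
 * single call and reject nodes that are not usable by the secure world:
 *
 *	struct dt_node_info info = { };
 *
 *	fdt_fill_device_info(fdt, &info, node);
 *	if (!(info.status & DT_STATUS_OK_SEC) ||
 *	    info.reg == DT_INFO_INVALID_REG)
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 */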
365 
366 int fdt_read_uint32_array(const void *fdt, int node, const char *prop_name,
367 			  uint32_t *array, size_t count)
368 {
369 	const fdt32_t *cuint = NULL;
370 	int len = 0;
371 	uint32_t i = 0;
372 
373 	cuint = fdt_getprop(fdt, node, prop_name, &len);
374 	if (!cuint)
375 		return len;
376 
377 	if ((uint32_t)len != (count * sizeof(uint32_t)))
378 		return -FDT_ERR_BADLAYOUT;
379 
380 	for (i = 0; i < ((uint32_t)len / sizeof(uint32_t)); i++) {
381 		*array = fdt32_to_cpu(*cuint);
382 		array++;
383 		cuint++;
384 	}
385 
386 	return 0;
387 }
388 
389 int fdt_read_uint32_index(const void *fdt, int node, const char *prop_name,
390 			  int index, uint32_t *value)
391 {
392 	const fdt32_t *cuint = NULL;
393 	int len = 0;
394 
395 	cuint = fdt_getprop(fdt, node, prop_name, &len);
396 	if (!cuint)
397 		return len;
398 
399 	if ((uint32_t)len < (sizeof(uint32_t) * (index + 1)))
400 		return -FDT_ERR_BADLAYOUT;
401 
402 	*value = fdt32_to_cpu(cuint[index]);
403 
404 	return 0;
405 }
406 
407 int fdt_read_uint32(const void *fdt, int node, const char *prop_name,
408 		    uint32_t *value)
409 {
410 	return fdt_read_uint32_array(fdt, node, prop_name, value, 1);
411 }
412 
413 uint32_t fdt_read_uint32_default(const void *fdt, int node,
414 				 const char *prop_name, uint32_t dflt_value)
415 {
416 	uint32_t ret = dflt_value;
417 
418 	fdt_read_uint32_index(fdt, node, prop_name, 0, &ret);
419 
420 	return ret;
421 }
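/*
 * Usage sketch (illustrative; the property names are hypothetical): read a
 * mandatory 32-bit property and an optional one with a fallback value:
 *
 *	uint32_t freq = 0;
 *	uint32_t prio = 0;
 *
 *	if (fdt_read_uint32(fdt, node, "clock-frequency", &freq))
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 *
 *	prio = fdt_read_uint32_default(fdt, node, "vendor,priority", 7);
 */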
422 
423 int fdt_get_reg_props_by_name(const void *fdt, int node, const char *name,
424 			      paddr_t *base, size_t *size)
425 {
426 	int index = 0;
427 
428 	index = fdt_stringlist_search(fdt, node, "reg-names", name);
429 	if (index < 0)
430 		return index;
431 
432 	return fdt_get_reg_props_by_index(fdt, node, index, base, size);
433 }
434 
435 int dt_getprop_as_number(const void *fdt, int nodeoffset, const char *name,
436 			 uint64_t *num)
437 {
438 	const void *prop = NULL;
439 	int len = 0;
440 
441 	prop = fdt_getprop(fdt, nodeoffset, name, &len);
442 	if (!prop)
443 		return len;
444 
445 	switch (len) {
446 	case sizeof(uint32_t):
447 		*num = fdt32_ld(prop);
448 		return 0;
449 	case sizeof(uint64_t):
450 		*num = fdt64_ld(prop);
451 		return 0;
452 	default:
453 		return -FDT_ERR_BADVALUE;
454 	}
455 }
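/*
 * Usage sketch (illustrative; the property name is hypothetical): accept a
 * property encoded either as a single cell or as a 64-bit value:
 *
 *	uint64_t base = 0;
 *
 *	if (dt_getprop_as_number(fdt, node, "vendor,base-address", &base))
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 */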
456 
457 void *get_dt(void)
458 {
459 	void *fdt = get_embedded_dt();
460 
461 	if (!fdt)
462 		fdt = get_external_dt();
463 
464 	if (!fdt)
465 		fdt = get_manifest_dt();
466 
467 	return fdt;
468 }
469 
470 void *get_secure_dt(void)
471 {
472 	void *fdt = get_embedded_dt();
473 
474 	if (!fdt && IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
475 		fdt = get_external_dt();
476 
477 	if (!fdt)
478 		fdt = get_manifest_dt();
479 
480 	return fdt;
481 }
482 
483 #if defined(CFG_EMBED_DTB)
484 #ifdef CFG_DT_CACHED_NODE_INFO
485 /*
486  * struct cached_node - Cached information of a DT node
487  *
488  * @node_offset: Offset of the node in @cached_node_info_fdt
489  * @parent_offset: Offset of the parent node of @node_offset
490  * @address_cells: #address-cells property value of the parent node, or -1
491  * @size_cells: #size-cells property value of the parent node, or -1
492  * @phandle: Phandle associated with the node, or 0 if none
493  */
494 struct cached_node {
495 	int node_offset;
496 	int parent_offset;
497 	int8_t address_cells;
498 	int8_t size_cells;
499 	uint32_t phandle;
500 };
501 
502 /*
503  * struct dt_node_cache - Reference to cached information of DT nodes
504  *
505  * @array: Array of cached nodes
506  * @count: Number of initialized cells in @array
507  * @alloced_count: Number of allocated cells in @array
508  * @fdt: Reference to the FDT for which node information is cached
509  */
510 struct dt_node_cache {
511 	struct cached_node *array;
512 	size_t count;
513 	size_t alloced_count;
514 	const void *fdt;
515 };
516 
517 static struct dt_node_cache *dt_node_cache;
518 
519 static bool fdt_node_info_are_cached(const void *fdt)
520 {
521 	return dt_node_cache && dt_node_cache->fdt == fdt;
522 }
523 
524 static struct cached_node *find_cached_parent_node(const void *fdt,
525 						   int node_offset)
526 {
527 	struct cached_node *cell = NULL;
528 	size_t n = 0;
529 
530 	if (!fdt_node_info_are_cached(fdt))
531 		return NULL;
532 
533 	for (n = 0; n < dt_node_cache->count; n++)
534 		if (dt_node_cache->array[n].node_offset == node_offset)
535 			cell = dt_node_cache->array + n;
536 
537 	return cell;
538 }
539 
540 int fdt_find_cached_parent_node(const void *fdt, int node_offset,
541 				int *parent_offset)
542 {
543 	struct cached_node *cell = NULL;
544 
545 	cell = find_cached_parent_node(fdt, node_offset);
546 	if (!cell)
547 		return -FDT_ERR_NOTFOUND;
548 
549 	*parent_offset = cell->parent_offset;
550 
551 	return 0;
552 }
553 
554 int fdt_find_cached_parent_reg_cells(const void *fdt, int node_offset,
555 				     int *address_cells, int *size_cells)
556 {
557 	struct cached_node *cell = NULL;
558 	int rc = 0;
559 
560 	cell = find_cached_parent_node(fdt, node_offset);
561 	if (!cell)
562 		return -FDT_ERR_NOTFOUND;
563 
564 	if (address_cells) {
565 		if (cell->address_cells >= 0)
566 			*address_cells = cell->address_cells;
567 		else
568 			rc = -FDT_ERR_NOTFOUND;
569 	}
570 
571 	if (size_cells) {
572 		if (cell->size_cells >= 0)
573 			*size_cells = cell->size_cells;
574 		else
575 			rc = -FDT_ERR_NOTFOUND;
576 	}
577 
578 	return rc;
579 }
580 
581 int fdt_find_cached_node_phandle(const void *fdt, uint32_t phandle,
582 				 int *node_offset)
583 {
584 	struct cached_node *cell = NULL;
585 	size_t n = 0;
586 
587 	if (!fdt_node_info_are_cached(fdt))
588 		return -FDT_ERR_NOTFOUND;
589 
590 	for (n = 0; n < dt_node_cache->count; n++)
591 		if (dt_node_cache->array[n].phandle == phandle)
592 			cell = dt_node_cache->array + n;
593 
594 	if (!cell)
595 		return -FDT_ERR_NOTFOUND;
596 
597 	*node_offset = cell->node_offset;
598 
599 	return 0;
600 }
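/*
 * Usage sketch (illustrative): resolve a phandle through the cache when it
 * covers this FDT, falling back to a full libfdt scan otherwise:
 *
 *	int node = 0;
 *
 *	if (fdt_find_cached_node_phandle(fdt, phandle, &node))
 *		node = fdt_node_offset_by_phandle(fdt, phandle);
 *	if (node < 0)
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 */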
601 
602 static TEE_Result realloc_cached_node_array(void)
603 {
604 	assert(dt_node_cache);
605 
606 	if (dt_node_cache->count + 1 > dt_node_cache->alloced_count) {
607 		size_t new_count = dt_node_cache->alloced_count * 2;
608 		struct cached_node *new = NULL;
609 
610 		if (!new_count)
611 			new_count = 4;
612 
613 		new = realloc(dt_node_cache->array,
614 			      sizeof(*dt_node_cache->array) * new_count);
615 		if (!new)
616 			return TEE_ERROR_OUT_OF_MEMORY;
617 
618 		dt_node_cache->array = new;
619 		dt_node_cache->alloced_count = new_count;
620 	}
621 
622 	return TEE_SUCCESS;
623 }
624 
625 static TEE_Result add_cached_node(int parent_offset,
626 				  int node_offset, int address_cells,
627 				  int size_cells)
628 {
629 	TEE_Result res = TEE_ERROR_GENERIC;
630 
631 	res = realloc_cached_node_array();
632 	if (res)
633 		return res;
634 
635 	dt_node_cache->array[dt_node_cache->count] = (struct cached_node){
636 		.node_offset = node_offset,
637 		.parent_offset = parent_offset,
638 		.address_cells = address_cells,
639 		.size_cells = size_cells,
640 		.phandle = fdt_get_phandle(dt_node_cache->fdt, node_offset),
641 	};
642 
643 	dt_node_cache->count++;
644 
645 	return TEE_SUCCESS;
646 }
647 
648 static TEE_Result add_cached_node_subtree(int node_offset)
649 {
650 	TEE_Result res = TEE_ERROR_GENERIC;
651 	const fdt32_t *cuint = NULL;
652 	int subnode_offset = 0;
653 	int8_t addr_cells = -1;
654 	int8_t size_cells = -1;
655 
656 	cuint = fdt_getprop(dt_node_cache->fdt, node_offset, "#address-cells",
657 			    NULL);
658 	if (cuint)
659 		addr_cells = (int)fdt32_to_cpu(*cuint);
660 
661 	cuint = fdt_getprop(dt_node_cache->fdt, node_offset, "#size-cells",
662 			    NULL);
663 	if (cuint)
664 		size_cells = (int)fdt32_to_cpu(*cuint);
665 
666 	fdt_for_each_subnode(subnode_offset, dt_node_cache->fdt, node_offset) {
667 		res = add_cached_node(node_offset, subnode_offset, addr_cells,
668 				      size_cells);
669 		if (res)
670 			return res;
671 
672 		res = add_cached_node_subtree(subnode_offset);
673 		if (res)
674 			return res;
675 	}
676 
677 	return TEE_SUCCESS;
678 }
679 
680 static TEE_Result release_node_cache_info(void)
681 {
682 	if (dt_node_cache) {
683 		free(dt_node_cache->array);
684 		free(dt_node_cache);
685 		dt_node_cache = NULL;
686 	}
687 
688 	return TEE_SUCCESS;
689 }
690 
691 release_init_resource(release_node_cache_info);
692 
693 static void init_node_cache_info(const void *fdt)
694 {
695 	TEE_Result res = TEE_ERROR_GENERIC;
696 
697 	assert(!dt_node_cache);
698 
699 	dt_node_cache = calloc(1, sizeof(*dt_node_cache));
700 	if (dt_node_cache) {
701 		dt_node_cache->fdt = fdt;
702 		res = add_cached_node_subtree(0);
703 	} else {
704 		res = TEE_ERROR_OUT_OF_MEMORY;
705 	}
706 
707 	if (res) {
708 		EMSG("Error %#"PRIx32", disable DT cached info", res);
709 		release_node_cache_info();
710 	}
711 }
712 #else
713 static void init_node_cache_info(const void *fdt __unused)
714 {
715 }
716 #endif /* CFG_DT_CACHED_NODE_INFO */
717 
718 void *get_embedded_dt(void)
719 {
720 	static bool checked;
721 
722 	assert(cpu_mmu_enabled());
723 
724 	if (!checked) {
725 		IMSG("Embedded DTB found");
726 
727 		if (fdt_check_header(embedded_secure_dtb))
728 			panic("Invalid embedded DTB");
729 
730 		checked = true;
731 
732 		init_node_cache_info(embedded_secure_dtb);
733 	}
734 
735 	return embedded_secure_dtb;
736 }
737 #else
738 void *get_embedded_dt(void)
739 {
740 	return NULL;
741 }
742 #endif /*CFG_EMBED_DTB*/
743 
744 #ifdef _CFG_USE_DTB_OVERLAY
745 static int add_dt_overlay_fragment(struct dt_descriptor *dt, int ioffs,
746 				   const char *target_path)
747 {
748 	char frag[32] = { };
749 	int offs = 0;
750 	int ret = 0;
751 
752 	ret = snprintf(frag, sizeof(frag), "fragment@%d", dt->frag_id);
753 	if (ret < 0 || (size_t)ret >= sizeof(frag))
754 		return -1;
755 
756 	offs = fdt_add_subnode(dt->blob, ioffs, frag);
757 	if (offs < 0)
758 		return offs;
759 
760 	dt->frag_id += 1;
761 
762 	ret = fdt_setprop_string(dt->blob, offs, "target-path", target_path);
763 	if (ret < 0)
764 		return ret;
765 
766 	return fdt_add_subnode(dt->blob, offs, "__overlay__");
767 }
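/*
 * Illustrative result (sketch; the fragment index and target path are
 * examples): with _CFG_USE_DTB_OVERLAY enabled, a call such as
 * add_dt_overlay_fragment(dt, root_offs, "/foo") extends the overlay blob
 * with roughly:
 *
 *	fragment@0 {
 *		target-path = "/foo";
 *		__overlay__ {
 *			// the caller adds nodes/properties below this point,
 *			// at the node offset returned by this function
 *		};
 *	};
 */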
768 
769 static int init_dt_overlay(struct dt_descriptor *dt, int __maybe_unused dt_size)
770 {
771 	int fragment = 0;
772 
773 	if (IS_ENABLED(CFG_EXTERNAL_DTB_OVERLAY)) {
774 		if (!fdt_check_header(dt->blob)) {
775 			fdt_for_each_subnode(fragment, dt->blob, 0)
776 				dt->frag_id += 1;
777 			return 0;
778 		}
779 	}
780 
781 	return fdt_create_empty_tree(dt->blob, dt_size);
782 }
783 #else
784 static int add_dt_overlay_fragment(struct dt_descriptor *dt __unused, int offs,
785 				   const char *target_path __unused)
786 {
787 	return offs;
788 }
789 
790 static int init_dt_overlay(struct dt_descriptor *dt __unused,
791 			   int dt_size __unused)
792 {
793 	return 0;
794 }
795 #endif /* _CFG_USE_DTB_OVERLAY */
796 
797 struct dt_descriptor *get_external_dt_desc(void)
798 {
799 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
800 		return NULL;
801 
802 	return &external_dt;
803 }
804 
805 void init_external_dt(unsigned long phys_dt, size_t dt_sz)
806 {
807 	struct dt_descriptor *dt = &external_dt;
808 	int ret = 0;
809 	enum teecore_memtypes mtype = MEM_AREA_MAXTYPE;
810 
811 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
812 		return;
813 
814 	if (!phys_dt || !dt_sz) {
815 		/*
816 		 * No need to panic as we're not using the DT in OP-TEE
817 		 * yet, we're only adding some nodes for normal world use.
818 		 * This makes the switch to using DT easier as we can boot
819 		 * a newer OP-TEE with older boot loaders. Once we start to
820 		 * initialize devices based on DT we'll likely panic
821 		 * instead of returning here.
822 		 */
823 		IMSG("No non-secure external DT");
824 		return;
825 	}
826 
827 	mtype = core_mmu_get_type_by_pa(phys_dt);
828 	if (mtype == MEM_AREA_MAXTYPE) {
829 		/* Map the DTB if it is not yet mapped */
830 		dt->blob = core_mmu_add_mapping(MEM_AREA_EXT_DT, phys_dt,
831 						dt_sz);
832 		if (!dt->blob)
833 			panic("Failed to map external DTB");
834 	} else {
835 		/* Get the DTB address if already mapped in a memory area */
836 		dt->blob = phys_to_virt(phys_dt, mtype, dt_sz);
837 		if (!dt->blob) {
838 			EMSG("Failed to get a mapped external DTB for PA %#lx",
839 			     phys_dt);
840 			panic();
841 		}
842 	}
843 
844 	ret = init_dt_overlay(dt, dt_sz);
845 	if (ret < 0) {
846 		EMSG("Device Tree Overlay init fail @ %#lx: error %d", phys_dt,
847 		     ret);
848 		panic();
849 	}
850 
851 	ret = fdt_open_into(dt->blob, dt->blob, dt_sz);
852 	if (ret < 0) {
853 		EMSG("Invalid Device Tree at %#lx: error %d", phys_dt, ret);
854 		panic();
855 	}
856 
857 	IMSG("Non-secure external DT found");
858 }
859 
860 void *get_external_dt(void)
861 {
862 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
863 		return NULL;
864 
865 	assert(cpu_mmu_enabled());
866 	return external_dt.blob;
867 }
868 
869 static TEE_Result release_external_dt(void)
870 {
871 	int ret = 0;
872 	paddr_t pa_dt = 0;
873 
874 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
875 		return TEE_SUCCESS;
876 
877 	if (!external_dt.blob)
878 		return TEE_SUCCESS;
879 
880 	pa_dt = virt_to_phys(external_dt.blob);
881 	/*
882 	 * Skip packing and un-mapping operations if the external DTB is mapped
883 	 * in a different memory area
884 	 */
885 	if (core_mmu_get_type_by_pa(pa_dt) != MEM_AREA_EXT_DT)
886 		return TEE_SUCCESS;
887 
888 	ret = fdt_pack(external_dt.blob);
889 	if (ret < 0) {
890 		EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
891 		     virt_to_phys(external_dt.blob), ret);
892 		panic();
893 	}
894 
895 	if (core_mmu_remove_mapping(MEM_AREA_EXT_DT, external_dt.blob,
896 				    CFG_DTB_MAX_SIZE))
897 		panic("Failed to remove temporary Device Tree mapping");
898 
899 	/* External DTB is no longer reachable, reset the pointer */
900 	external_dt.blob = NULL;
901 
902 	return TEE_SUCCESS;
903 }
904 
905 boot_final(release_external_dt);
906 
907 int add_dt_path_subnode(struct dt_descriptor *dt, const char *path,
908 			const char *subnode)
909 {
910 	int offs = 0;
911 
912 	offs = fdt_path_offset(dt->blob, path);
913 	if (offs < 0)
914 		return offs;
915 	offs = add_dt_overlay_fragment(dt, offs, "/");
916 	if (offs < 0)
917 		return offs;
918 	return fdt_add_subnode(dt->blob, offs, subnode);
919 }
920 
921 int add_dt_node_overlay_fragment(int node)
922 {
923 	struct dt_descriptor *dt = NULL;
924 	char full_node_name[256] = {};
925 	int root = 0;
926 	int ret = 0;
927 
928 	/* Fragments only make sense with an external DT */
929 	dt = get_external_dt_desc();
930 	if (!dt)
931 		return 0;
932 
933 	ret = fdt_get_path(dt->blob, node, full_node_name,
934 			   sizeof(full_node_name));
935 	if (ret)
936 		return ret;
937 
938 	/* Overlay fragments are always added to the root node */
939 	root = fdt_path_offset(dt->blob, "/");
940 	if (root < 0)
941 		return root;
942 
943 	return add_dt_overlay_fragment(dt, root, full_node_name);
944 }
945 
946 static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
947 {
948 	if (cell_size == 1) {
949 		fdt32_t v = cpu_to_fdt32((uint32_t)val);
950 
951 		memcpy(data, &v, sizeof(v));
952 	} else {
953 		fdt64_t v = cpu_to_fdt64(val);
954 
955 		memcpy(data, &v, sizeof(v));
956 	}
957 }
958 
959 int add_res_mem_dt_node(struct dt_descriptor *dt, const char *name,
960 			paddr_t pa, size_t size)
961 {
962 	int offs = 0;
963 	int ret = 0;
964 	int addr_size = -1;
965 	int len_size = -1;
966 	bool found = true;
967 	char subnode_name[80] = { };
968 
969 	offs = fdt_path_offset(dt->blob, "/reserved-memory");
970 
971 	if (offs < 0) {
972 		found = false;
973 		offs = 0;
974 	}
975 
976 	if (IS_ENABLED2(_CFG_USE_DTB_OVERLAY)) {
977 		len_size = sizeof(paddr_t) / sizeof(uint32_t);
978 		addr_size = sizeof(paddr_t) / sizeof(uint32_t);
979 		/* Enforce adding a reserved-memory node */
980 		found = false;
981 	} else {
982 		len_size = fdt_size_cells(dt->blob, offs);
983 		if (len_size < 0)
984 			return len_size;
985 		addr_size = fdt_address_cells(dt->blob, offs);
986 		if (addr_size < 0)
987 			return addr_size;
988 	}
989 
990 	if (!found) {
991 		offs = add_dt_path_subnode(dt, "/", "reserved-memory");
992 		if (offs < 0)
993 			return offs;
994 		ret = fdt_setprop_cell(dt->blob, offs, "#address-cells",
995 				       addr_size);
996 		if (ret < 0)
997 			return ret;
998 		ret = fdt_setprop_cell(dt->blob, offs, "#size-cells", len_size);
999 		if (ret < 0)
1000 			return ret;
1001 		ret = fdt_setprop(dt->blob, offs, "ranges", NULL, 0);
1002 		if (ret < 0)
1003 			return ret;
1004 	}
1005 
1006 	ret = snprintf(subnode_name, sizeof(subnode_name),
1007 		       "%s@%" PRIxPA, name, pa);
1008 	if (ret < 0 || ret >= (int)sizeof(subnode_name))
1009 		DMSG("truncated node \"%s@%" PRIxPA"\"", name, pa);
1010 	offs = fdt_add_subnode(dt->blob, offs, subnode_name);
1011 	if (offs >= 0) {
1012 		uint32_t data[FDT_MAX_NCELLS * 2] = { };
1013 
1014 		set_dt_val(data, addr_size, pa);
1015 		set_dt_val(data + addr_size, len_size, size);
1016 		ret = fdt_setprop(dt->blob, offs, "reg", data,
1017 				  sizeof(uint32_t) * (addr_size + len_size));
1018 		if (ret < 0)
1019 			return ret;
1020 		ret = fdt_setprop(dt->blob, offs, "no-map", NULL, 0);
1021 		if (ret < 0)
1022 			return ret;
1023 	} else {
1024 		return offs;
1025 	}
1026 	return 0;
1027 }
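/*
 * Illustrative result (sketch; the node name, address, size and cell counts
 * are hypothetical): add_res_mem_dt_node(dt, "optee_shm", pa, size) yields a
 * carve-out in the normal world DT roughly like:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		optee_shm@42000000 {
 *			reg = <0x42000000 0x200000>;
 *			no-map;
 *		};
 *	};
 *
 * When the overlay is used, the node is emitted inside a fragment targeting
 * the root node instead of being added to an existing /reserved-memory node.
 */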
1028 
1029 #if defined(CFG_CORE_FFA)
1030 void init_manifest_dt(void *fdt, size_t max_size)
1031 {
1032 	manifest_dt = fdt;
1033 	manifest_max_size = max_size;
1034 }
1035 
1036 void reinit_manifest_dt(void)
1037 {
1038 	paddr_t end_pa = 0;
1039 	void *fdt = NULL;
1040 	paddr_t pa = 0;
1041 	int ret = 0;
1042 
1043 	if (!manifest_dt) {
1044 		EMSG("No manifest DT found");
1045 		return;
1046 	}
1047 
1048 	if (IS_ENABLED(CFG_CORE_SEL2_SPMC)) {
1049 		pa = (unsigned long)manifest_dt;
1050 		end_pa = pa + manifest_max_size;
1051 		pa = ROUNDDOWN(pa, SMALL_PAGE_SIZE);
1052 		end_pa = ROUNDUP(end_pa, SMALL_PAGE_SIZE);
1053 		if (!nex_phys_mem_alloc2(pa, end_pa - pa)) {
1054 			EMSG("Failed to reserve manifest DT physical memory %#"PRIxPA"..%#"PRIxPA" len %#zx",
1055 			     pa, end_pa - 1, end_pa - pa);
1056 			panic();
1057 		}
1058 	}
1059 
1060 	pa = (unsigned long)manifest_dt;
1061 	fdt = core_mmu_add_mapping(MEM_AREA_MANIFEST_DT, pa, manifest_max_size);
1062 	if (!fdt)
1063 		panic("Failed to map manifest DT");
1064 
1065 	manifest_dt = fdt;
1066 
1067 	ret = fdt_check_full(fdt, manifest_max_size);
1068 	if (ret < 0) {
1069 		EMSG("Invalid manifest Device Tree at %#lx: error %d", pa, ret);
1070 		panic();
1071 	}
1072 
1073 	IMSG("manifest DT found");
1074 }
1075 
1076 void *get_manifest_dt(void)
1077 {
1078 	return manifest_dt;
1079 }
1080 
1081 static TEE_Result release_manifest_dt(void)
1082 {
1083 	paddr_t pa = 0;
1084 
1085 	if (!manifest_dt)
1086 		return TEE_SUCCESS;
1087 
1088 	if (IS_ENABLED(CFG_CORE_SEL2_SPMC))
1089 		pa = virt_to_phys(manifest_dt);
1090 
1091 	if (core_mmu_remove_mapping(MEM_AREA_MANIFEST_DT, manifest_dt,
1092 				    manifest_max_size))
1093 		panic("Failed to remove temporary manifest DT mapping");
1094 	manifest_dt = NULL;
1095 
1096 	if (IS_ENABLED(CFG_CORE_SEL2_SPMC))
1097 		tee_mm_free(nex_phys_mem_mm_find(pa));
1098 
1099 	return TEE_SUCCESS;
1100 }
1101 
1102 boot_final(release_manifest_dt);
1103 #else
1104 void init_manifest_dt(void *fdt __unused, size_t max_size __unused)
1105 {
1106 }
1107 
1108 void reinit_manifest_dt(void)
1109 {
1110 }
1111 
1112 void *get_manifest_dt(void)
1113 {
1114 	return NULL;
1115 }
1116 #endif /*CFG_CORE_FFA*/
1117