// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016, Linaro Limited
 */

#include <assert.h>
#include <config.h>
#include <initcall.h>
#include <kernel/dt.h>
#include <kernel/dt_driver.h>
#include <kernel/interrupt.h>
#include <libfdt.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/phys_mem.h>
#include <stdio.h>
#include <string.h>
#include <trace.h>

static struct dt_descriptor external_dt __nex_bss;

#if defined(CFG_CORE_FFA)
static void *manifest_dt __nex_bss;
static size_t manifest_max_size __nex_bss;
#endif

const struct dt_driver *dt_find_compatible_driver(const void *fdt, int offs)
{
	const struct dt_device_match *dm;
	const struct dt_driver *drv;

	for_each_dt_driver(drv) {
		for (dm = drv->match_table; dm; dm++) {
			if (!dm->compatible) {
				break;
			}
			if (!fdt_node_check_compatible(fdt, offs,
						       dm->compatible)) {
				return drv;
			}
		}
	}

	return NULL;
}

bool dt_have_prop(const void *fdt, int offs, const char *propname)
{
	const void *prop;

	prop = fdt_getprop(fdt, offs, propname, NULL);

	return prop;
}

int dt_disable_status(void *fdt, int node)
{
	const char *prop = NULL;
	int len = 0;

	prop = fdt_getprop(fdt, node, "status", &len);
	if (!prop) {
		if (fdt_setprop_string(fdt, node, "status", "disabled"))
			return -1;
	} else {
		/*
		 * The status property exists, so modify it in place.
		 * Request writing the "disabled" value to the property; the
		 * value is automatically truncated to "len" bytes by
		 * fdt_setprop_inplace().
		 * Setting any value different from "ok" or "okay" disables
		 * the node.
		 * Writing a truncated "disabled" within the original property
		 * length "len" is preferred so that the DT size does not grow
		 * and no time is spent recalculating the overall DT offsets.
		 * If the original length of the status property is larger
		 * than "disabled", the property starts with "disabled" and is
		 * completed with the rest of the original value.
		 */
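		/*
		 * For example, an original status = "okay" (len = 5) ends up
		 * holding the first five bytes of "disabled", i.e. "disab",
		 * which still counts as disabled since it differs from both
		 * "ok" and "okay".
		 */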
		if (fdt_setprop_inplace(fdt, node, "status", "disabled", len))
			return -1;
	}

	return 0;
}

int dt_enable_secure_status(void *fdt, int node)
{
	if (dt_disable_status(fdt, node)) {
		EMSG("Unable to disable Normal Status");
		return -1;
	}

	if (fdt_setprop_string(fdt, node, "secure-status", "okay"))
		return -1;

	return 0;
}

int dt_map_dev(const void *fdt, int offs, vaddr_t *base, size_t *size,
	       enum dt_map_dev_directive mapping)
{
	enum teecore_memtypes mtype;
	paddr_t pbase;
	vaddr_t vbase;
	size_t sz;
	int st;

	assert(cpu_mmu_enabled());

	st = fdt_get_status(fdt, offs);
	if (st == DT_STATUS_DISABLED)
		return -1;

	if (fdt_reg_info(fdt, offs, &pbase, &sz))
		return -1;

	switch (mapping) {
	case DT_MAP_AUTO:
		if ((st & DT_STATUS_OK_SEC) && !(st & DT_STATUS_OK_NSEC))
			mtype = MEM_AREA_IO_SEC;
		else
			mtype = MEM_AREA_IO_NSEC;
		break;
	case DT_MAP_SECURE:
		mtype = MEM_AREA_IO_SEC;
		break;
	case DT_MAP_NON_SECURE:
		mtype = MEM_AREA_IO_NSEC;
		break;
	default:
		panic("Invalid mapping specified");
		break;
	}

	/* Check if we have a mapping, create one if needed */
	vbase = (vaddr_t)core_mmu_add_mapping(mtype, pbase, sz);
	if (!vbase) {
		EMSG("Failed to map %zu bytes at PA 0x%"PRIxPA,
		     (size_t)sz, pbase);
		return -1;
	}

	*base = vbase;
	*size = sz;
	return 0;
}
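
/*
 * Typical use from a driver probe path, for illustration only: the node
 * offset would normally come from the DT driver framework, and the register
 * offset and value below are placeholders, not defined in this file.
 *
 *	vaddr_t base = 0;
 *	size_t size = 0;
 *
 *	if (dt_map_dev(fdt, offs, &base, &size, DT_MAP_AUTO))
 *		return TEE_ERROR_GENERIC;
 *	io_write32(base + CTRL_REG_OFFSET, enable_bit);
 */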

/* Read a physical address (n=1 or 2 cells) */
static paddr_t fdt_read_paddr(const uint32_t *cell, int n)
{
	paddr_t addr;

	if (n < 1 || n > 2)
		goto bad;

	addr = fdt32_to_cpu(*cell);
	cell++;
	if (n == 2) {
#ifdef ARM32
		if (addr) {
			/* High order 32 bits can't be nonzero */
			goto bad;
		}
		addr = fdt32_to_cpu(*cell);
#else
		addr = (addr << 32) | fdt32_to_cpu(*cell);
#endif
	}

	return addr;
bad:
	return DT_INFO_INVALID_REG;

}

static size_t fdt_read_size(const uint32_t *cell, int n)
{
	uint32_t sz = 0;

	sz = fdt32_to_cpu(*cell);
	if (n == 2) {
		if (sz)
			return DT_INFO_INVALID_REG_SIZE;

		cell++;
		sz = fdt32_to_cpu(*cell);
	}

	return sz;
}

int fdt_get_reg_props_by_index(const void *fdt, int offs, int index,
			       paddr_t *base, size_t *size)
{
	const fdt32_t *reg = NULL;
	int addr_ncells = 0;
	int size_ncells = 0;
	int cell_offset = 0;
	int parent = 0;
	int len = 0;

	if (index < 0)
		return -FDT_ERR_BADOFFSET;

	reg = (const uint32_t *)fdt_getprop(fdt, offs, "reg", &len);
	if (!reg)
		return -FDT_ERR_NOTFOUND;

	if (fdt_find_cached_parent_reg_cells(fdt, offs, &addr_ncells,
					     &size_ncells) != 0) {
		parent = fdt_parent_offset(fdt, offs);
		if (parent < 0)
			return -FDT_ERR_NOTFOUND;

		addr_ncells = fdt_address_cells(fdt, parent);
		if (addr_ncells < 0)
			return -FDT_ERR_NOTFOUND;

		size_ncells = fdt_size_cells(fdt, parent);
		if (size_ncells < 0)
			return -FDT_ERR_NOTFOUND;
	}

	cell_offset = index * (addr_ncells + size_ncells);

	if ((size_t)len < (cell_offset + addr_ncells) * sizeof(*reg))
		return -FDT_ERR_BADSTRUCTURE;

	if (base) {
		*base = fdt_read_paddr(reg + cell_offset, addr_ncells);
		if (*base == DT_INFO_INVALID_REG)
			return -FDT_ERR_NOTFOUND;
	}

	if (size) {
		if ((size_t)len <
		    (cell_offset + addr_ncells + size_ncells) * sizeof(*reg))
			return -FDT_ERR_BADSTRUCTURE;

		*size = fdt_read_size(reg + cell_offset + addr_ncells,
				      size_ncells);
		if (*size == DT_INFO_INVALID_REG_SIZE)
			return -FDT_ERR_NOTFOUND;
	}

	return 0;
}
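
/*
 * For illustration: reading the second "reg" entry of a node described with
 * placeholder values as reg = <0x50001000 0x100 0x50002000 0x20>; when the
 * parent uses one cell for both addresses and sizes:
 *
 *	paddr_t pa = 0;
 *	size_t sz = 0;
 *
 *	if (fdt_get_reg_props_by_index(fdt, offs, 1, &pa, &sz))
 *		return TEE_ERROR_ITEM_NOT_FOUND;
 *
 * Here pa is 0x50002000 and sz is 0x20.
 */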

int fdt_reg_info(const void *fdt, int offs, paddr_t *base, size_t *size)
{
	return fdt_get_reg_props_by_index(fdt, offs, 0, base, size);
}

paddr_t fdt_reg_base_address(const void *fdt, int offs)
{
	paddr_t base = 0;

	if (fdt_reg_info(fdt, offs, &base, NULL))
		return DT_INFO_INVALID_REG;

	return base;
}

size_t fdt_reg_size(const void *fdt, int offs)
{
	size_t size = 0;

	if (fdt_reg_info(fdt, offs, NULL, &size))
		return DT_INFO_INVALID_REG_SIZE;

	return size;
}

static bool is_okay(const char *st, int len)
{
	return !strncmp(st, "ok", len) || !strncmp(st, "okay", len);
}

int fdt_get_status(const void *fdt, int offs)
{
	const char *prop;
	int st = 0;
	int len;

	prop = fdt_getprop(fdt, offs, "status", &len);
	if (!prop || is_okay(prop, len)) {
		/* If status is not specified, it defaults to "okay" */
		st |= DT_STATUS_OK_NSEC;
	}

	prop = fdt_getprop(fdt, offs, "secure-status", &len);
	if (!prop) {
		/*
		 * When secure-status is not specified it defaults to the same
		 * value as status
		 */
		if (st & DT_STATUS_OK_NSEC)
			st |= DT_STATUS_OK_SEC;
	} else {
		if (is_okay(prop, len))
			st |= DT_STATUS_OK_SEC;
	}

	return st;
}
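
/*
 * Summary of the returned bit mask for the possible property combinations,
 * assuming DT_STATUS_DISABLED is the all-bits-clear value:
 *
 *   status       secure-status   fdt_get_status()
 *   (absent)     (absent)        DT_STATUS_OK_NSEC | DT_STATUS_OK_SEC
 *   "okay"       (absent)        DT_STATUS_OK_NSEC | DT_STATUS_OK_SEC
 *   "okay"       "disabled"      DT_STATUS_OK_NSEC
 *   "disabled"   "okay"          DT_STATUS_OK_SEC
 *   "disabled"   (absent)        DT_STATUS_DISABLED
 */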

void fdt_fill_device_info(const void *fdt, struct dt_node_info *info, int offs)
{
	struct dt_node_info dinfo = {
		.reg = DT_INFO_INVALID_REG,
		.reg_size = DT_INFO_INVALID_REG_SIZE,
		.clock = DT_INFO_INVALID_CLOCK,
		.reset = DT_INFO_INVALID_RESET,
		.interrupt = DT_INFO_INVALID_INTERRUPT,
	};
	const fdt32_t *cuint = NULL;

	/* Intentionally discard fdt_reg_info() return value */
	fdt_reg_info(fdt, offs, &dinfo.reg, &dinfo.reg_size);

	cuint = fdt_getprop(fdt, offs, "clocks", NULL);
	if (cuint) {
		cuint++;
		dinfo.clock = (int)fdt32_to_cpu(*cuint);
	}

	cuint = fdt_getprop(fdt, offs, "resets", NULL);
	if (cuint) {
		cuint++;
		dinfo.reset = (int)fdt32_to_cpu(*cuint);
	}

	dinfo.interrupt = dt_get_irq_type_prio(fdt, offs, &dinfo.type,
					       &dinfo.prio);

	dinfo.status = fdt_get_status(fdt, offs);

	*info = dinfo;
}

int fdt_read_uint32_array(const void *fdt, int node, const char *prop_name,
			  uint32_t *array, size_t count)
{
	const fdt32_t *cuint = NULL;
	int len = 0;
	uint32_t i = 0;

	cuint = fdt_getprop(fdt, node, prop_name, &len);
	if (!cuint)
		return len;

	if ((uint32_t)len != (count * sizeof(uint32_t)))
		return -FDT_ERR_BADLAYOUT;

	for (i = 0; i < ((uint32_t)len / sizeof(uint32_t)); i++) {
		*array = fdt32_to_cpu(*cuint);
		array++;
		cuint++;
	}

	return 0;
}
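
/*
 * Example use, assuming a hypothetical three-cell property "vendor,foo"
 * (the property name is a placeholder, not defined by this file); the
 * property length must match the requested count exactly:
 *
 *	uint32_t cells[3] = { };
 *
 *	if (fdt_read_uint32_array(fdt, node, "vendor,foo", cells, 3))
 *		panic();
 */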

int fdt_read_uint32_index(const void *fdt, int node, const char *prop_name,
			  int index, uint32_t *value)
{
	const fdt32_t *cuint = NULL;
	int len = 0;

	cuint = fdt_getprop(fdt, node, prop_name, &len);
	if (!cuint)
		return len;

	if ((uint32_t)len < (sizeof(uint32_t) * (index + 1)))
		return -FDT_ERR_BADLAYOUT;

	*value = fdt32_to_cpu(cuint[index]);

	return 0;
}

int fdt_read_uint32(const void *fdt, int node, const char *prop_name,
		    uint32_t *value)
{
	return fdt_read_uint32_array(fdt, node, prop_name, value, 1);
}

uint32_t fdt_read_uint32_default(const void *fdt, int node,
				 const char *prop_name, uint32_t dflt_value)
{
	uint32_t ret = dflt_value;

	fdt_read_uint32_index(fdt, node, prop_name, 0, &ret);

	return ret;
}

int fdt_get_reg_props_by_name(const void *fdt, int node, const char *name,
			      paddr_t *base, size_t *size)
{
	int index = 0;

	index = fdt_stringlist_search(fdt, node, "reg-names", name);
	if (index < 0)
		return index;

	return fdt_get_reg_props_by_index(fdt, node, index, base, size);
}

int dt_getprop_as_number(const void *fdt, int nodeoffset, const char *name,
			 uint64_t *num)
{
	const void *prop = NULL;
	int len = 0;

	prop = fdt_getprop(fdt, nodeoffset, name, &len);
	if (!prop)
		return len;

	switch (len) {
	case sizeof(uint32_t):
		*num = fdt32_ld(prop);
		return 0;
	case sizeof(uint64_t):
		*num = fdt64_ld(prop);
		return 0;
	default:
		return -FDT_ERR_BADVALUE;
	}
}
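
/*
 * Example: a property such as "clock-frequency" may be encoded as either one
 * or two cells, which this helper handles transparently (the fallback value
 * below is an arbitrary placeholder):
 *
 *	uint64_t freq = 0;
 *
 *	if (dt_getprop_as_number(fdt, node, "clock-frequency", &freq))
 *		freq = 24000000;
 */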

void *get_dt(void)
{
	void *fdt = get_embedded_dt();

	if (!fdt)
		fdt = get_external_dt();

	if (!fdt)
		fdt = get_manifest_dt();

	return fdt;
}

void *get_secure_dt(void)
{
	void *fdt = get_embedded_dt();

	if (!fdt && IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
		fdt = get_external_dt();

	if (!fdt)
		fdt = get_manifest_dt();

	return fdt;
}

#if defined(CFG_EMBED_DTB)
#ifdef CFG_DT_CACHED_NODE_INFO
/*
 * struct cached_node - Cached information of a DT node
 *
 * @node_offset: Offset of the node in the cached FDT
 * @parent_offset: Offset of @node_offset parent node
 * @address_cells: #address-cells property value of the parent node, or -1
 * @size_cells: #size-cells property value of the parent node, or -1
 * @phandle: Phandle associated to the node or 0 if none
 */
struct cached_node {
	int node_offset;
	int parent_offset;
	int8_t address_cells;
	int8_t size_cells;
	uint32_t phandle;
};

/*
 * struct dt_node_cache - Reference to cached information of DT nodes
 *
 * @array: Array of cached node information
 * @count: Number of initialized entries in @array
 * @alloced_count: Number of allocated entries in @array
 * @fdt: Reference to the FDT for which node information is cached
 */
struct dt_node_cache {
	struct cached_node *array;
	size_t count;
	size_t alloced_count;
	const void *fdt;
};

static struct dt_node_cache *dt_node_cache;

static bool fdt_node_info_are_cached(const void *fdt)
{
	return dt_node_cache && dt_node_cache->fdt == fdt;
}

static struct cached_node *find_cached_parent_node(const void *fdt,
						   int node_offset)
{
	struct cached_node *cell = NULL;
	size_t n = 0;

	if (!fdt_node_info_are_cached(fdt))
		return NULL;

	for (n = 0; n < dt_node_cache->count; n++)
		if (dt_node_cache->array[n].node_offset == node_offset)
			cell = dt_node_cache->array + n;

	return cell;
}

int fdt_find_cached_parent_node(const void *fdt, int node_offset,
				int *parent_offset)
{
	struct cached_node *cell = NULL;

	cell = find_cached_parent_node(fdt, node_offset);
	if (!cell)
		return -FDT_ERR_NOTFOUND;

	*parent_offset = cell->parent_offset;

	return 0;
}

int fdt_find_cached_parent_reg_cells(const void *fdt, int node_offset,
				     int *address_cells, int *size_cells)
{
	struct cached_node *cell = NULL;
	int rc = 0;

	cell = find_cached_parent_node(fdt, node_offset);
	if (!cell)
		return -FDT_ERR_NOTFOUND;

	if (address_cells) {
		if (cell->address_cells >= 0)
			*address_cells = cell->address_cells;
		else
			rc = -FDT_ERR_NOTFOUND;
	}

	if (size_cells) {
		if (cell->size_cells >= 0)
			*size_cells = cell->size_cells;
		else
			rc = -FDT_ERR_NOTFOUND;
	}

	return rc;
}

int fdt_find_cached_node_phandle(const void *fdt, uint32_t phandle,
				 int *node_offset)
{
	struct cached_node *cell = NULL;
	size_t n = 0;

	if (!fdt_node_info_are_cached(fdt))
		return -FDT_ERR_NOTFOUND;

	for (n = 0; n < dt_node_cache->count; n++)
		if (dt_node_cache->array[n].phandle == phandle)
			cell = dt_node_cache->array + n;

	if (!cell)
		return -FDT_ERR_NOTFOUND;

	*node_offset = cell->node_offset;

	return 0;
}

static TEE_Result realloc_cached_node_array(void)
{
	assert(dt_node_cache);

	if (dt_node_cache->count + 1 > dt_node_cache->alloced_count) {
		size_t new_count = dt_node_cache->alloced_count * 2;
		struct cached_node *new = NULL;

		if (!new_count)
			new_count = 4;

		new = realloc(dt_node_cache->array,
			      sizeof(*dt_node_cache->array) * new_count);
		if (!new)
			return TEE_ERROR_OUT_OF_MEMORY;

		dt_node_cache->array = new;
		dt_node_cache->alloced_count = new_count;
	}

	return TEE_SUCCESS;
}

static TEE_Result add_cached_node(int parent_offset,
				  int node_offset, int address_cells,
				  int size_cells)
{
	TEE_Result res = TEE_ERROR_GENERIC;

	res = realloc_cached_node_array();
	if (res)
		return res;

	dt_node_cache->array[dt_node_cache->count] = (struct cached_node){
		.node_offset = node_offset,
		.parent_offset = parent_offset,
		.address_cells = address_cells,
		.size_cells = size_cells,
		.phandle = fdt_get_phandle(dt_node_cache->fdt, node_offset),
	};

	dt_node_cache->count++;

	return TEE_SUCCESS;
}

static TEE_Result add_cached_node_subtree(int node_offset)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	const fdt32_t *cuint = NULL;
	int subnode_offset = 0;
	int8_t addr_cells = -1;
	int8_t size_cells = -1;

	cuint = fdt_getprop(dt_node_cache->fdt, node_offset, "#address-cells",
			    NULL);
	if (cuint)
		addr_cells = (int)fdt32_to_cpu(*cuint);

	cuint = fdt_getprop(dt_node_cache->fdt, node_offset, "#size-cells",
			    NULL);
	if (cuint)
		size_cells = (int)fdt32_to_cpu(*cuint);

	fdt_for_each_subnode(subnode_offset, dt_node_cache->fdt, node_offset) {
		res = add_cached_node(node_offset, subnode_offset, addr_cells,
				      size_cells);
		if (res)
			return res;

		res = add_cached_node_subtree(subnode_offset);
		if (res)
			return res;
	}

	return TEE_SUCCESS;
}

static TEE_Result release_node_cache_info(void)
{
	if (dt_node_cache) {
		free(dt_node_cache->array);
		free(dt_node_cache);
		dt_node_cache = NULL;
	}

	return TEE_SUCCESS;
}

release_init_resource(release_node_cache_info);

static void init_node_cache_info(const void *fdt)
{
	TEE_Result res = TEE_ERROR_GENERIC;

	assert(!dt_node_cache);

	dt_node_cache = calloc(1, sizeof(*dt_node_cache));
	if (dt_node_cache) {
		dt_node_cache->fdt = fdt;
		res = add_cached_node_subtree(0);
	} else {
		res = TEE_ERROR_OUT_OF_MEMORY;
	}

	if (res) {
		EMSG("Error %#"PRIx32", disable DT cached info", res);
		release_node_cache_info();
	}
}
#else
static void init_node_cache_info(const void *fdt __unused)
{
}
#endif /* CFG_DT_CACHED_NODE_INFO */

void *get_embedded_dt(void)
{
	static bool checked;

	assert(cpu_mmu_enabled());

	if (!checked) {
		IMSG("Embedded DTB found");

		if (fdt_check_header(embedded_secure_dtb))
			panic("Invalid embedded DTB");

		checked = true;

		init_node_cache_info(embedded_secure_dtb);
	}

	return embedded_secure_dtb;
}
#else
void *get_embedded_dt(void)
{
	return NULL;
}
#endif /*CFG_EMBED_DTB*/

#ifdef _CFG_USE_DTB_OVERLAY
static int add_dt_overlay_fragment(struct dt_descriptor *dt, int ioffs)
{
	char frag[32] = { };
	int offs = 0;
	int ret = 0;

	ret = snprintf(frag, sizeof(frag), "fragment@%d", dt->frag_id);
	if (ret < 0 || (size_t)ret >= sizeof(frag))
		return -1;

	offs = fdt_add_subnode(dt->blob, ioffs, frag);
	if (offs < 0)
		return offs;

	dt->frag_id += 1;

	ret = fdt_setprop_string(dt->blob, offs, "target-path", "/");
	if (ret < 0)
		return ret;

	return fdt_add_subnode(dt->blob, offs, "__overlay__");
}
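
/*
 * Each call appends one overlay fragment targeting the root node. With
 * frag_id == 0 the generated structure corresponds to this source form,
 * subsequent nodes then being added under __overlay__:
 *
 *	fragment@0 {
 *		target-path = "/";
 *		__overlay__ {
 *		};
 *	};
 */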

static int init_dt_overlay(struct dt_descriptor *dt, int __maybe_unused dt_size)
{
	int fragment = 0;

	if (IS_ENABLED(CFG_EXTERNAL_DTB_OVERLAY)) {
		if (!fdt_check_header(dt->blob)) {
			fdt_for_each_subnode(fragment, dt->blob, 0)
				dt->frag_id += 1;
			return 0;
		}
	}

	return fdt_create_empty_tree(dt->blob, dt_size);
}
#else
static int add_dt_overlay_fragment(struct dt_descriptor *dt __unused, int offs)
{
	return offs;
}

static int init_dt_overlay(struct dt_descriptor *dt __unused,
			   int dt_size __unused)
{
	return 0;
}
#endif /* _CFG_USE_DTB_OVERLAY */

struct dt_descriptor *get_external_dt_desc(void)
{
	if (!IS_ENABLED(CFG_EXTERNAL_DT))
		return NULL;

	return &external_dt;
}

void init_external_dt(unsigned long phys_dt, size_t dt_sz)
{
	struct dt_descriptor *dt = &external_dt;
	int ret = 0;
	enum teecore_memtypes mtype = MEM_AREA_MAXTYPE;

	if (!IS_ENABLED(CFG_EXTERNAL_DT))
		return;

	if (!phys_dt || !dt_sz) {
		/*
		 * No need to panic as we're not using the DT in OP-TEE
		 * yet, we're only adding some nodes for normal world use.
		 * This makes the switch to using DT easier as we can boot
		 * a newer OP-TEE with older boot loaders. Once we start to
		 * initialize devices based on DT we'll likely panic
		 * instead of returning here.
		 */
		IMSG("No non-secure external DT");
		return;
	}

	mtype = core_mmu_get_type_by_pa(phys_dt);
	if (mtype == MEM_AREA_MAXTYPE) {
		/* Map the DTB if it is not yet mapped */
		dt->blob = core_mmu_add_mapping(MEM_AREA_EXT_DT, phys_dt,
						dt_sz);
		if (!dt->blob)
			panic("Failed to map external DTB");
	} else {
		/* Get the DTB address if already mapped in a memory area */
		dt->blob = phys_to_virt(phys_dt, mtype, dt_sz);
		if (!dt->blob) {
			EMSG("Failed to get a mapped external DTB for PA %#lx",
			     phys_dt);
			panic();
		}
	}

	ret = init_dt_overlay(dt, dt_sz);
	if (ret < 0) {
		EMSG("Device Tree Overlay init fail @ %#lx: error %d", phys_dt,
		     ret);
		panic();
	}

	ret = fdt_open_into(dt->blob, dt->blob, dt_sz);
	if (ret < 0) {
		EMSG("Invalid Device Tree at %#lx: error %d", phys_dt, ret);
		panic();
	}

	IMSG("Non-secure external DT found");
}

void *get_external_dt(void)
{
	if (!IS_ENABLED(CFG_EXTERNAL_DT))
		return NULL;

	assert(cpu_mmu_enabled());
	return external_dt.blob;
}

static TEE_Result release_external_dt(void)
{
	int ret = 0;
	paddr_t pa_dt = 0;

	if (!IS_ENABLED(CFG_EXTERNAL_DT))
		return TEE_SUCCESS;

	if (!external_dt.blob)
		return TEE_SUCCESS;

	pa_dt = virt_to_phys(external_dt.blob);
	/*
	 * Skip packing and un-mapping operations if the external DTB is mapped
	 * in a different memory area
	 */
	if (core_mmu_get_type_by_pa(pa_dt) != MEM_AREA_EXT_DT)
		return TEE_SUCCESS;

	ret = fdt_pack(external_dt.blob);
	if (ret < 0) {
		EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
		     virt_to_phys(external_dt.blob), ret);
		panic();
	}

	if (core_mmu_remove_mapping(MEM_AREA_EXT_DT, external_dt.blob,
				    CFG_DTB_MAX_SIZE))
		panic("Failed to remove temporary Device Tree mapping");

	/* The external DTB is no longer reachable; reset the pointer */
	external_dt.blob = NULL;

	return TEE_SUCCESS;
}

boot_final(release_external_dt);

int add_dt_path_subnode(struct dt_descriptor *dt, const char *path,
			const char *subnode)
{
	int offs = 0;

	offs = fdt_path_offset(dt->blob, path);
	if (offs < 0)
		return offs;
	offs = add_dt_overlay_fragment(dt, offs);
	if (offs < 0)
		return offs;
	return fdt_add_subnode(dt->blob, offs, subnode);
}

static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
{
	if (cell_size == 1) {
		fdt32_t v = cpu_to_fdt32((uint32_t)val);

		memcpy(data, &v, sizeof(v));
	} else {
		fdt64_t v = cpu_to_fdt64(val);

		memcpy(data, &v, sizeof(v));
	}
}

int add_res_mem_dt_node(struct dt_descriptor *dt, const char *name,
			paddr_t pa, size_t size)
{
	int offs = 0;
	int ret = 0;
	int addr_size = -1;
	int len_size = -1;
	bool found = true;
	char subnode_name[80] = { };

	offs = fdt_path_offset(dt->blob, "/reserved-memory");

	if (offs < 0) {
		found = false;
		offs = 0;
	}

	if (IS_ENABLED2(_CFG_USE_DTB_OVERLAY)) {
		len_size = sizeof(paddr_t) / sizeof(uint32_t);
		addr_size = sizeof(paddr_t) / sizeof(uint32_t);
	} else {
		len_size = fdt_size_cells(dt->blob, offs);
		if (len_size < 0)
			return len_size;
		addr_size = fdt_address_cells(dt->blob, offs);
		if (addr_size < 0)
			return addr_size;
	}

	if (!found) {
		offs = add_dt_path_subnode(dt, "/", "reserved-memory");
		if (offs < 0)
			return offs;
		ret = fdt_setprop_cell(dt->blob, offs, "#address-cells",
				       addr_size);
		if (ret < 0)
			return ret;
		ret = fdt_setprop_cell(dt->blob, offs, "#size-cells", len_size);
		if (ret < 0)
			return ret;
		ret = fdt_setprop(dt->blob, offs, "ranges", NULL, 0);
		if (ret < 0)
			return ret;
	}

	ret = snprintf(subnode_name, sizeof(subnode_name),
		       "%s@%" PRIxPA, name, pa);
	if (ret < 0 || ret >= (int)sizeof(subnode_name))
		DMSG("truncated node \"%s@%" PRIxPA"\"", name, pa);
	offs = fdt_add_subnode(dt->blob, offs, subnode_name);
	if (offs >= 0) {
		uint32_t data[FDT_MAX_NCELLS * 2] = { };

		set_dt_val(data, addr_size, pa);
		set_dt_val(data + addr_size, len_size, size);
		ret = fdt_setprop(dt->blob, offs, "reg", data,
				  sizeof(uint32_t) * (addr_size + len_size));
		if (ret < 0)
			return ret;
		ret = fdt_setprop(dt->blob, offs, "no-map", NULL, 0);
		if (ret < 0)
			return ret;
	} else {
		return offs;
	}
	return 0;
}
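
/*
 * Example with illustrative values, not taken from a real platform:
 * reserving a 2 MiB carveout named "optee" at 0x4e000000,
 *
 *	add_res_mem_dt_node(dt, "optee", 0x4e000000, 0x00200000);
 *
 * yields a node equivalent to this source form when the parent uses one cell
 * for both addresses and sizes:
 *
 *	optee@4e000000 {
 *		reg = <0x4e000000 0x00200000>;
 *		no-map;
 *	};
 */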

#if defined(CFG_CORE_FFA)
void init_manifest_dt(void *fdt, size_t max_size)
{
	manifest_dt = fdt;
	manifest_max_size = max_size;
}

void reinit_manifest_dt(void)
{
	paddr_t end_pa = 0;
	void *fdt = NULL;
	paddr_t pa = 0;
	int ret = 0;

	if (!manifest_dt) {
		EMSG("No manifest DT found");
		return;
	}

	if (IS_ENABLED(CFG_CORE_SEL2_SPMC)) {
		pa = (unsigned long)manifest_dt;
		end_pa = pa + manifest_max_size;
		pa = ROUNDDOWN(pa, SMALL_PAGE_SIZE);
		end_pa = ROUNDUP(end_pa, SMALL_PAGE_SIZE);
		if (!nex_phys_mem_alloc2(pa, end_pa - pa)) {
			EMSG("Failed to reserve manifest DT physical memory %#"PRIxPA"..%#"PRIxPA" len %#zx",
			     pa, end_pa - 1, end_pa - pa);
			panic();
		}
	}

	pa = (unsigned long)manifest_dt;
	fdt = core_mmu_add_mapping(MEM_AREA_MANIFEST_DT, pa, manifest_max_size);
	if (!fdt)
		panic("Failed to map manifest DT");

	manifest_dt = fdt;

	ret = fdt_check_full(fdt, manifest_max_size);
	if (ret < 0) {
		EMSG("Invalid manifest Device Tree at %#lx: error %d", pa, ret);
		panic();
	}

	IMSG("manifest DT found");
}

void *get_manifest_dt(void)
{
	return manifest_dt;
}

static TEE_Result release_manifest_dt(void)
{
	paddr_t pa = 0;

	if (!manifest_dt)
		return TEE_SUCCESS;

	if (IS_ENABLED(CFG_CORE_SEL2_SPMC))
		pa = virt_to_phys(manifest_dt);

	if (core_mmu_remove_mapping(MEM_AREA_MANIFEST_DT, manifest_dt,
				    manifest_max_size))
		panic("Failed to remove temporary manifest DT mapping");
	manifest_dt = NULL;

	if (IS_ENABLED(CFG_CORE_SEL2_SPMC))
		tee_mm_free(nex_phys_mem_mm_find(pa));

	return TEE_SUCCESS;
}

boot_final(release_manifest_dt);
#else
void init_manifest_dt(void *fdt __unused, size_t max_size __unused)
{
}

void reinit_manifest_dt(void)
{
}

void *get_manifest_dt(void)
{
	return NULL;
}
#endif /*CFG_CORE_FFA*/