xref: /optee_os/core/kernel/boot.c (revision 89da7ffe58b51e694eef722a0db34b19531ef770)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015-2023, Linaro Limited
4  * Copyright (c) 2023, Arm Limited
5  */
6 
7 #include <crypto/crypto.h>
8 #include <kernel/boot.h>
9 #include <kernel/dt.h>
10 #include <libfdt.h>
11 #include <mm/core_memprot.h>
12 
13 #ifdef CFG_CORE_DYN_SHM
/*
 * Read one device-tree value at *@offs in @data and advance *@offs past it.
 * A @cell_size of 1 denotes a single 32-bit big-endian cell, any other
 * value a 64-bit big-endian quantity. The value is returned converted to
 * CPU endianness.
 */
static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
				       uint32_t cell_size)
{
	const uint8_t *src = (const uint8_t *)data + *offs;
	uint64_t ret = 0;

	if (cell_size == 1) {
		uint32_t v32 = 0;

		memcpy(&v32, src, sizeof(v32));
		*offs += sizeof(v32);
		ret = fdt32_to_cpu(v32);
	} else {
		uint64_t v64 = 0;

		memcpy(&v64, src, sizeof(v64));
		*offs += sizeof(v64);
		ret = fdt64_to_cpu(v64);
	}

	return ret;
}
35 
/*
 * Find all non-secure memory from DT. Memory marked inaccessible by Secure
 * World is ignored since it could not be mapped to be used as dynamic shared
 * memory.
 *
 * When @mem is NULL the function only counts the (address, size) pairs;
 * otherwise it fills @mem (which must be large enough) with one entry per
 * pair. Returns the number of pairs found, or 0 on error.
 */
static int __maybe_unused get_nsec_memory_helper(void *fdt, struct core_mmu_phys_mem *mem)
{
	const uint8_t *prop = NULL;
	uint64_t a = 0;		/* base address parsed from a "reg" entry */
	uint64_t l = 0;		/* size parsed from a "reg" entry */
	size_t prop_offs = 0;
	size_t prop_len = 0;
	int elems_total = 0;	/* running total of parsed pairs */
	int addr_size = 0;	/* #address-cells of the root node */
	int len_size = 0;	/* #size-cells of the root node */
	int offs = 0;
	size_t n = 0;
	int len = 0;

	/* Cell sizes come from the root node; give up if they are absent */
	addr_size = fdt_address_cells(fdt, 0);
	if (addr_size < 0)
		return 0;

	len_size = fdt_size_cells(fdt, 0);
	if (len_size < 0)
		return 0;

	while (true) {
		/* Walk every node with device_type = "memory" */
		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
						     "memory",
						     sizeof("memory"));
		if (offs < 0)
			break;

		/* Skip memory not accessible from both worlds */
		if (fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
						   DT_STATUS_OK_SEC))
			continue;

		prop = fdt_getprop(fdt, offs, "reg", &len);
		if (!prop)
			continue;

		prop_len = len;
		for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
			a = get_dt_val_and_advance(prop, &prop_offs, addr_size);
			if (prop_offs >= prop_len) {
				/*
				 * Truncated "reg" entry: an address with no
				 * size cells left. NOTE(review): decrementing
				 * here also discards one previously completed
				 * pair (n counts finished iterations) and
				 * wraps when n == 0 — confirm this is the
				 * intended handling of malformed DTs.
				 */
				n--;
				break;
			}

			l = get_dt_val_and_advance(prop, &prop_offs, len_size);
			/* Counting pass when @mem is NULL, fill pass otherwise */
			if (mem) {
				mem->type = MEM_AREA_DDR_OVERALL;
				mem->addr = a;
				mem->size = l;
				mem++;
			}
		}

		elems_total += n;
	}

	return elems_total;
}
100 
101 #ifdef CFG_DT
102 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems)
103 {
104 	struct core_mmu_phys_mem *mem = NULL;
105 	int elems_total = 0;
106 
107 	elems_total = get_nsec_memory_helper(fdt, NULL);
108 	if (elems_total <= 0)
109 		return NULL;
110 
111 	mem = nex_calloc(elems_total, sizeof(*mem));
112 	if (!mem)
113 		panic();
114 
115 	elems_total = get_nsec_memory_helper(fdt, mem);
116 	assert(elems_total > 0);
117 
118 	*nelems = elems_total;
119 
120 	return mem;
121 }
122 #else /*CFG_DT*/
123 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
124 						 size_t *nelems __unused)
125 {
126 	return NULL;
127 }
128 #endif /*!CFG_DT*/
129 
130 void discover_nsec_memory(void)
131 {
132 	struct core_mmu_phys_mem *mem;
133 	const struct core_mmu_phys_mem *mem_begin = NULL;
134 	const struct core_mmu_phys_mem *mem_end = NULL;
135 	size_t nelems;
136 	void *fdt = get_external_dt();
137 
138 	if (fdt) {
139 		mem = get_nsec_memory(fdt, &nelems);
140 		if (mem) {
141 			core_mmu_set_discovered_nsec_ddr(mem, nelems);
142 			return;
143 		}
144 
145 		DMSG("No non-secure memory found in FDT");
146 	}
147 
148 	mem_begin = phys_ddr_overall_begin;
149 	mem_end = phys_ddr_overall_end;
150 	nelems = mem_end - mem_begin;
151 	if (nelems) {
152 		/*
153 		 * Platform cannot use both register_ddr() and the now
154 		 * deprecated register_dynamic_shm().
155 		 */
156 		assert(phys_ddr_overall_compat_begin ==
157 		       phys_ddr_overall_compat_end);
158 	} else {
159 		mem_begin = phys_ddr_overall_compat_begin;
160 		mem_end = phys_ddr_overall_compat_end;
161 		nelems = mem_end - mem_begin;
162 		if (!nelems)
163 			return;
164 		DMSG("Warning register_dynamic_shm() is deprecated, "
165 		     "please use register_ddr() instead");
166 	}
167 
168 	mem = nex_calloc(nelems, sizeof(*mem));
169 	if (!mem)
170 		panic();
171 
172 	memcpy(mem, phys_ddr_overall_begin, sizeof(*mem) * nelems);
173 	core_mmu_set_discovered_nsec_ddr(mem, nelems);
174 }
#else /*CFG_CORE_DYN_SHM*/
/* Without dynamic shared memory support there is nothing to discover */
void discover_nsec_memory(void)
{
}
#endif /*!CFG_CORE_DYN_SHM*/
180 
181 #ifdef CFG_CORE_RESERVED_SHM
182 int mark_static_shm_as_reserved(struct dt_descriptor *dt)
183 {
184 	vaddr_t shm_start;
185 	vaddr_t shm_end;
186 
187 	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
188 	if (shm_start != shm_end)
189 		return add_res_mem_dt_node(dt, "optee_shm",
190 					   virt_to_phys((void *)shm_start),
191 					   shm_end - shm_start);
192 
193 	DMSG("No SHM configured");
194 	return -1;
195 }
196 #endif /*CFG_CORE_RESERVED_SHM*/
197 
198 #if defined(_CFG_CORE_STACK_PROTECTOR) || defined(CFG_WITH_STACK_CANARIES)
199 /* Generate random stack canary value on boot up */
200 __weak void plat_get_random_stack_canaries(void *buf, size_t ncan, size_t size)
201 {
202 	TEE_Result ret = TEE_ERROR_GENERIC;
203 	size_t i = 0;
204 
205 	assert(buf && ncan && size);
206 
207 	/*
208 	 * With virtualization the RNG is not initialized in Nexus core.
209 	 * Need to override with platform specific implementation.
210 	 */
211 	if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
212 		IMSG("WARNING: Using fixed value for stack canary");
213 		memset(buf, 0xab, ncan * size);
214 		goto out;
215 	}
216 
217 	ret = crypto_rng_read(buf, ncan * size);
218 	if (ret != TEE_SUCCESS)
219 		panic("Failed to generate random stack canary");
220 
221 out:
222 	/* Leave null byte in canary to prevent string base exploit */
223 	for (i = 0; i < ncan; i++)
224 		*((uint8_t *)buf + size * i) = 0;
225 }
226 #endif /* _CFG_CORE_STACK_PROTECTOR || CFG_WITH_STACK_CANARIES */
227