// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2015-2023, Linaro Limited
 * Copyright (c) 2023, Arm Limited
 */

#include <crypto/crypto.h>
#include <kernel/boot.h>
#include <kernel/dt.h>
#include <libfdt.h>
#include <mm/core_memprot.h>

#ifdef CFG_CORE_DYN_SHM
/*
 * Read one big-endian device tree cell value from @data at byte offset
 * *@offs and advance *@offs past it. A @cell_size of 1 selects a 32-bit
 * cell; any other value selects a 64-bit cell.
 */
static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
				       uint32_t cell_size)
{
	const uint8_t *src = (const uint8_t *)data + *offs;
	uint64_t val = 0;

	if (cell_size == 1) {
		uint32_t v32 = 0;

		/* memcpy() since the FDT blob gives no alignment guarantee */
		memcpy(&v32, src, sizeof(v32));
		val = fdt32_to_cpu(v32);
		*offs += sizeof(v32);
	} else {
		uint64_t v64 = 0;

		memcpy(&v64, src, sizeof(v64));
		val = fdt64_to_cpu(v64);
		*offs += sizeof(v64);
	}

	return val;
}

36 /*
37 * Find all non-secure memory from DT. Memory marked inaccessible by Secure
38 * World is ignored since it could not be mapped to be used as dynamic shared
39 * memory.
40 */
get_nsec_memory_helper(void * fdt,struct core_mmu_phys_mem * mem,const char * dev_type)41 static int __maybe_unused get_nsec_memory_helper(void *fdt,
42 struct core_mmu_phys_mem *mem,
43 const char *dev_type)
44 {
45 size_t dev_type_size = strlen(dev_type) + 1;
46 const uint8_t *prop = NULL;
47 uint64_t a = 0;
48 uint64_t l = 0;
49 size_t prop_offs = 0;
50 size_t prop_len = 0;
51 int elems_total = 0;
52 int addr_size = 0;
53 int len_size = 0;
54 int offs = 0;
55 size_t n = 0;
56 int len = 0;
57
58 addr_size = fdt_address_cells(fdt, 0);
59 if (addr_size < 0)
60 return 0;
61
62 len_size = fdt_size_cells(fdt, 0);
63 if (len_size < 0)
64 return 0;
65
66 while (true) {
67 offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
68 dev_type, dev_type_size);
69 if (offs < 0)
70 break;
71
72 if (fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
73 DT_STATUS_OK_SEC))
74 continue;
75
76 prop = fdt_getprop(fdt, offs, "reg", &len);
77 if (!prop)
78 continue;
79
80 prop_len = len;
81 for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
82 a = get_dt_val_and_advance(prop, &prop_offs, addr_size);
83 if (prop_offs >= prop_len) {
84 n--;
85 break;
86 }
87
88 l = get_dt_val_and_advance(prop, &prop_offs, len_size);
89 if (mem) {
90 mem->type = MEM_AREA_DDR_OVERALL;
91 mem->addr = a;
92 mem->size = l;
93 mem++;
94 }
95 }
96
97 elems_total += n;
98 }
99
100 return elems_total;
101 }

#ifdef CFG_DT
get_nsec_memory(void * fdt,size_t * nelems,const char * dev_type)104 static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems,
105 const char *dev_type)
106 {
107 struct core_mmu_phys_mem *mem = NULL;
108 int elems_total = 0;
109
110 elems_total = get_nsec_memory_helper(fdt, NULL, dev_type);
111 if (elems_total <= 0)
112 return NULL;
113
114 mem = nex_calloc(elems_total, sizeof(*mem));
115 if (!mem)
116 panic();
117
118 elems_total = get_nsec_memory_helper(fdt, mem, dev_type);
119 assert(elems_total > 0);
120
121 *nelems = elems_total;
122
123 return mem;
124 }
#else /*CFG_DT*/
/* Without CFG_DT there is no DT to parse: report no non-secure memory */
static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
						 size_t *nelems __unused,
						 const char *dev_type __unused)
{
	return NULL;
}
#endif /*!CFG_DT*/

discover_nsec_memory(void)134 void discover_nsec_memory(void)
135 {
136 struct core_mmu_phys_mem *mem = NULL;
137 const struct core_mmu_phys_mem *mem_begin = NULL;
138 const struct core_mmu_phys_mem *mem_end = NULL;
139 size_t nelems = 0;
140 void *fdt = NULL;
141
142 fdt = get_manifest_dt();
143 if (fdt) {
144 mem = get_nsec_memory(fdt, &nelems, "ns-memory");
145 if (mem) {
146 DMSG("Non-secure memory found in manifest DT");
147 core_mmu_set_discovered_nsec_ddr(mem, nelems);
148 return;
149 }
150
151 DMSG("No non-secure memory found in manifest DT");
152 }
153
154 fdt = get_external_dt();
155 if (fdt) {
156 mem = get_nsec_memory(fdt, &nelems, "memory");
157 if (mem) {
158 DMSG("Non-secure memory found in extern DT");
159 core_mmu_set_discovered_nsec_ddr(mem, nelems);
160 return;
161 }
162
163 DMSG("No non-secure memory found in external DT");
164 }
165
166 fdt = get_embedded_dt();
167 if (fdt) {
168 mem = get_nsec_memory(fdt, &nelems, "memory");
169 if (mem) {
170 DMSG("Non-secure memory found in embedded DT");
171 core_mmu_set_discovered_nsec_ddr(mem, nelems);
172 return;
173 }
174
175 DMSG("No non-secure memory found in embedded DT");
176 }
177
178 mem_begin = phys_ddr_overall_begin;
179 mem_end = phys_ddr_overall_end;
180 nelems = mem_end - mem_begin;
181 if (nelems) {
182 /*
183 * Platform cannot use both register_ddr() and the now
184 * deprecated register_dynamic_shm().
185 */
186 assert(phys_ddr_overall_compat_begin ==
187 phys_ddr_overall_compat_end);
188 } else {
189 mem_begin = phys_ddr_overall_compat_begin;
190 mem_end = phys_ddr_overall_compat_end;
191 nelems = mem_end - mem_begin;
192 if (!nelems)
193 return;
194 DMSG("Warning register_dynamic_shm() is deprecated, "
195 "please use register_ddr() instead");
196 }
197
198 mem = nex_calloc(nelems, sizeof(*mem));
199 if (!mem)
200 panic();
201
202 memcpy(mem, phys_ddr_overall_begin, sizeof(*mem) * nelems);
203 core_mmu_set_discovered_nsec_ddr(mem, nelems);
204 }
#else /*CFG_CORE_DYN_SHM*/
/* Dynamic shared memory disabled: nothing to discover */
void discover_nsec_memory(void)
{
}
#endif /*!CFG_CORE_DYN_SHM*/

#ifdef CFG_CORE_RESERVED_SHM
mark_static_shm_as_reserved(struct dt_descriptor * dt)212 int mark_static_shm_as_reserved(struct dt_descriptor *dt)
213 {
214 vaddr_t shm_start;
215 vaddr_t shm_end;
216
217 core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
218 if (shm_start != shm_end)
219 return add_res_mem_dt_node(dt, "optee_shm",
220 virt_to_phys((void *)shm_start),
221 shm_end - shm_start);
222
223 DMSG("No SHM configured");
224 return -1;
225 }
#endif /*CFG_CORE_RESERVED_SHM*/

#if defined(_CFG_CORE_STACK_PROTECTOR) || defined(CFG_WITH_STACK_CANARIES)
229 /* Generate random stack canary value on boot up */
plat_get_random_stack_canaries(void * buf,size_t ncan,size_t size)230 __weak void plat_get_random_stack_canaries(void *buf, size_t ncan, size_t size)
231 {
232 TEE_Result ret = TEE_ERROR_GENERIC;
233 size_t i = 0;
234
235 assert(buf && ncan && size);
236
237 /*
238 * With virtualization the RNG is not initialized in Nexus core.
239 * Need to override with platform specific implementation.
240 */
241 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
242 IMSG("WARNING: Using fixed value for stack canary");
243 memset(buf, 0xab, ncan * size);
244 goto out;
245 }
246
247 ret = crypto_rng_read(buf, ncan * size);
248 if (ret != TEE_SUCCESS)
249 panic("Failed to generate random stack canary");
250
251 out:
252 /* Leave null byte in canary to prevent string base exploit */
253 for (i = 0; i < ncan; i++)
254 *((uint8_t *)buf + size * i) = 0;
255 }
#endif /* _CFG_CORE_STACK_PROTECTOR || CFG_WITH_STACK_CANARIES */