xref: /optee_os/core/mm/boot_mem.c (revision bd8bea6f45b004aa153fa8d7922dec6a655f50e3)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2024-2025, Linaro Limited
 */

#include <assert.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/phys_mem.h>
#include <mm/tee_mm.h>
#include <stdalign.h>
#include <string.h>
#include <util.h>

/*
 * struct boot_mem_reloc - Pointers relocated in memory during boot
 * @ptrs: Array of addresses of pointers to relocate
 * @count: Number of cells used in @ptrs
 * @next: Next relocation array when @ptrs is fully used
 */
struct boot_mem_reloc {
	void **ptrs[64];
	size_t count;
	struct boot_mem_reloc *next;
};

/*
 * struct boot_mem_padding - Unused memory between allocations
 * @start: Start of padding
 * @len: Length of padding
 * @next: Next padding
 */
struct boot_mem_padding {
	vaddr_t start;
	size_t len;
	struct boot_mem_padding *next;
};

/*
 * struct boot_mem_desc - Stack-like boot memory allocation pool
 * @orig_mem_start: Boot memory stack base address
 * @orig_mem_end: Boot memory stack end address
 * @mem_start: Boot memory free space start address
 * @mem_end: Boot memory free space end address
 * @reloc: Boot memory pointers requiring relocation
 * @padding: Linked list of unused memory between allocated blocks
 */
struct boot_mem_desc {
	vaddr_t orig_mem_start;
	vaddr_t orig_mem_end;
	vaddr_t mem_start;
	vaddr_t mem_end;
	struct boot_mem_reloc *reloc;
	struct boot_mem_padding *padding;
};

static struct boot_mem_desc *boot_mem_desc;

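/*
 * Allocate @len bytes aligned to @align from the top of the pool by
 * lowering @mem_end. Such allocations are temporary and are expected to
 * be released with boot_mem_release_tmp_alloc() once boot has completed.
 */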
static void *mem_alloc_tmp(struct boot_mem_desc *desc, size_t len, size_t align)
{
	vaddr_t va = 0;

	if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS))
		align = MAX(align, ASAN_BLOCK_SIZE);

	assert(desc && desc->mem_start && desc->mem_end);
	assert(IS_POWER_OF_TWO(align) && !(len % align));
	if (SUB_OVERFLOW(desc->mem_end, len, &va))
		panic();
	va = ROUNDDOWN2(va, align);
	if (va < desc->mem_start)
		panic();
	desc->mem_end = va;

	asan_tag_access((void *)va, (void *)(va + len));

	return (void *)va;
}

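/*
 * Record the gap between the free space start and the rounded-up
 * allocation address @va as padding. The padding descriptor itself is
 * stored in-place at the start of the gap, provided the gap is large
 * enough to hold it.
 */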
static void add_padding(struct boot_mem_desc *desc, vaddr_t va)
{
	struct boot_mem_padding *pad = NULL;
	vaddr_t start = desc->mem_start;
	vaddr_t rounded = 0;

	if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS))
		start = ROUNDUP(start, ASAN_BLOCK_SIZE);

	rounded = ROUNDUP(start, alignof(*pad));
	if (rounded < va && va - rounded > sizeof(*pad)) {
		pad = (struct boot_mem_padding *)rounded;
		pad->start = start;
		pad->len = va - start;
		DMSG("%#"PRIxVA" len %#zx", pad->start, pad->len);
		pad->next = desc->padding;
		desc->padding = pad;
	}
}

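/*
 * Allocate @len bytes aligned to @align from the bottom of the pool by
 * advancing @mem_start. These allocations are permanent, so any gap
 * created by the alignment round-up is recorded with add_padding() for
 * later reuse. Not available with CFG_WITH_PAGER.
 */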
static void *mem_alloc(struct boot_mem_desc *desc, size_t len, size_t align)
{
	vaddr_t va = 0;
	vaddr_t ve = 0;

	if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS))
		align = MAX(align, ASAN_BLOCK_SIZE);

	runtime_assert(!IS_ENABLED(CFG_WITH_PAGER));
	assert(desc && desc->mem_start && desc->mem_end);
	assert(IS_POWER_OF_TWO(align) && !(len % align));
	va = ROUNDUP2(desc->mem_start, align);
	if (ADD_OVERFLOW(va, len, &ve))
		panic();
	if (ve > desc->mem_end)
		panic();
	add_padding(desc, va);
	desc->mem_start = ve;

	asan_tag_access((void *)va, (void *)(va + len));

	return (void *)va;
}

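/*
 * Initialize the boot memory pool. Permanent allocations grow upwards
 * from @start while temporary allocations grow downwards from @end,
 * with @orig_end recording the original end of the memory area. The
 * descriptor and the first relocation array are themselves carved from
 * the temporary end of the pool.
 */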
void boot_mem_init(vaddr_t start, vaddr_t end, vaddr_t orig_end)
{
	struct boot_mem_desc desc = {
		.orig_mem_start = start,
		.orig_mem_end = orig_end,
		.mem_start = start,
		.mem_end = end,
	};

	boot_mem_desc = mem_alloc_tmp(&desc, sizeof(desc), alignof(desc));
	*boot_mem_desc = desc;
	boot_mem_desc->reloc = mem_alloc_tmp(boot_mem_desc,
					     sizeof(*boot_mem_desc->reloc),
					     alignof(*boot_mem_desc->reloc));
	memset(boot_mem_desc->reloc, 0, sizeof(*boot_mem_desc->reloc));
}

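/* Callback for boot_mem_foreach_padding(), used by boot_mem_init_asan() */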
static bool tag_padding_no_access(vaddr_t va __unused, size_t len __unused,
				  void *ptr __unused)
{
	/*
	 * No need to do anything since boot_mem_foreach_padding() calls
	 * asan_tag_access() before this function is called and calls
	 * asan_tag_no_access() after this function has returned false.
	 */
	return false;
}

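/*
 * Tag the allocated low end and the temporary high end of the pool as
 * accessible for ASAN. The walk over the padding list leaves each
 * padding tagged as no-access since tag_padding_no_access() returns
 * false.
 */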
void boot_mem_init_asan(void)
{
	asan_tag_access((void *)ROUNDDOWN(boot_mem_desc->orig_mem_start,
					  ASAN_BLOCK_SIZE),
			(void *)boot_mem_desc->mem_start);
	asan_tag_access((void *)ROUNDDOWN(boot_mem_desc->mem_end,
					  ASAN_BLOCK_SIZE),
			(void *)boot_mem_desc->orig_mem_end);
	boot_mem_foreach_padding(tag_padding_no_access, NULL);
}

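/*
 * Register @ptr, the address of a pointer into boot memory, so that both
 * the location and the pointed-to address are updated when
 * boot_mem_relocate() is called.
 */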
void boot_mem_add_reloc(void *ptr)
{
	struct boot_mem_reloc *reloc = NULL;

	assert(boot_mem_desc && boot_mem_desc->reloc);
	reloc = boot_mem_desc->reloc;

	/* If the reloc struct is full, allocate a new one and link it first */
	if (reloc->count == ARRAY_SIZE(reloc->ptrs)) {
		reloc = boot_mem_alloc_tmp(sizeof(*reloc), alignof(*reloc));
		/* Zero the new array as boot_mem_init() does for the first */
		memset(reloc, 0, sizeof(*reloc));
		reloc->next = boot_mem_desc->reloc;
		boot_mem_desc->reloc = reloc;
	}

	reloc->ptrs[reloc->count] = ptr;
	reloc->count++;
}

static void *add_offs(void *p, size_t offs)
{
	assert(p);
	return (uint8_t *)p + offs;
}

static void *add_offs_or_null(void *p, size_t offs)
{
	if (!p)
		return NULL;
	return (uint8_t *)p + offs;
}

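/*
 * Relocate the boot memory pool by @offs bytes: the descriptor, the
 * linked lists of relocation arrays and paddings, and every registered
 * pointer are all updated to their new addresses.
 */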
void boot_mem_relocate(size_t offs)
{
	struct boot_mem_reloc *reloc = NULL;
	struct boot_mem_padding *pad = NULL;
	size_t n = 0;

	boot_mem_desc = add_offs(boot_mem_desc, offs);

	boot_mem_desc->orig_mem_start += offs;
	boot_mem_desc->orig_mem_end += offs;
	boot_mem_desc->mem_start += offs;
	boot_mem_desc->mem_end += offs;
	boot_mem_desc->reloc = add_offs(boot_mem_desc->reloc, offs);

	for (reloc = boot_mem_desc->reloc;; reloc = reloc->next) {
		for (n = 0; n < reloc->count; n++) {
			reloc->ptrs[n] = add_offs(reloc->ptrs[n], offs);
			*reloc->ptrs[n] = add_offs_or_null(*reloc->ptrs[n],
							   offs);
		}
		if (!reloc->next)
			break;
		reloc->next = add_offs(reloc->next, offs);
	}

	if (boot_mem_desc->padding) {
		boot_mem_desc->padding = add_offs(boot_mem_desc->padding, offs);
		pad = boot_mem_desc->padding;
		while (true) {
			pad->start += offs;
			if (!pad->next)
				break;
			pad->next = add_offs(pad->next, offs);
			pad = pad->next;
		}
	}
}

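/* Allocate permanent memory from the bottom of the boot memory pool */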
void *boot_mem_alloc(size_t len, size_t align)
{
	return mem_alloc(boot_mem_desc, len, align);
}

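/*
 * Allocate temporary memory from the top of the boot memory pool, to be
 * released with boot_mem_release_tmp_alloc()
 */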
void *boot_mem_alloc_tmp(size_t len, size_t align)
{
	return mem_alloc_tmp(boot_mem_desc, len, align);
}

/*
 * Calls the supplied @func() for each padding and removes the paddings
 * where @func() returns true.
 */
void boot_mem_foreach_padding(bool (*func)(vaddr_t va, size_t len, void *ptr),
			      void *ptr)
{
	struct boot_mem_padding **prev = NULL;
	struct boot_mem_padding *next = NULL;
	struct boot_mem_padding *pad = NULL;

	assert(boot_mem_desc);

	prev = &boot_mem_desc->padding;
	for (pad = boot_mem_desc->padding; pad; pad = next) {
		vaddr_t start = 0;
		size_t len = 0;

		asan_tag_access(pad, (uint8_t *)pad + sizeof(*pad));
		start = pad->start;
		len = pad->len;
		asan_tag_access((void *)start, (void *)(start + len));
		next = pad->next;
		if (func(start, len, ptr)) {
			DMSG("consumed %p %#"PRIxVA" len %#zx",
			     pad, start, len);
			*prev = next;
		} else {
			DMSG("keeping %p %#"PRIxVA" len %#zx",
			     pad, start, len);
			prev = &pad->next;
			asan_tag_no_access((void *)start,
					   (void *)(start + len));
		}
	}
}

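/*
 * Release the unused memory between the last permanent allocation and
 * the remaining temporary allocations. Unless CFG_WITH_PAGER is enabled,
 * the used head and tail of the pool are re-registered with the physical
 * memory allocator and the pages in between are unmapped. Further
 * allocations are stopped by setting @mem_start to @mem_end. Returns the
 * start of the unmapped range.
 */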
vaddr_t boot_mem_release_unused(void)
{
	tee_mm_entry_t *mm = NULL;
	paddr_t pa = 0;
	vaddr_t va = 0;
	size_t n = 0;
	vaddr_t tmp_va = 0;
	paddr_t tmp_pa = 0;
	size_t tmp_n = 0;

	assert(boot_mem_desc);

	n = boot_mem_desc->mem_start - boot_mem_desc->orig_mem_start;
	DMSG("Allocated %zu bytes at va %#"PRIxVA" pa %#"PRIxPA,
	     n, boot_mem_desc->orig_mem_start,
	     vaddr_to_phys(boot_mem_desc->orig_mem_start));

	DMSG("Tempalloc %zu bytes at va %#"PRIxVA,
	     (size_t)(boot_mem_desc->orig_mem_end - boot_mem_desc->mem_end),
	     boot_mem_desc->mem_end);

	if (IS_ENABLED(CFG_WITH_PAGER))
		goto out;

	pa = vaddr_to_phys(ROUNDUP(boot_mem_desc->orig_mem_start,
				   SMALL_PAGE_SIZE));
	mm = nex_phys_mem_mm_find(pa);
	if (!mm)
		panic();

	va = ROUNDUP(boot_mem_desc->mem_start, SMALL_PAGE_SIZE);

	tmp_va = ROUNDDOWN(boot_mem_desc->mem_end, SMALL_PAGE_SIZE);
	tmp_n = boot_mem_desc->orig_mem_end - tmp_va;
	tmp_pa = vaddr_to_phys(tmp_va);

	pa = tee_mm_get_smem(mm);
	n = vaddr_to_phys(boot_mem_desc->mem_start) - pa;
	tee_mm_free(mm);
	DMSG("Carving out %#"PRIxPA"..%#"PRIxPA, pa, pa + n - 1);
	mm = nex_phys_mem_alloc2(pa, n);
	if (!mm)
		panic();
	mm = nex_phys_mem_alloc2(tmp_pa, tmp_n);
	if (!mm)
		panic();

	n = tmp_va - boot_mem_desc->mem_start;
	DMSG("Releasing %zu bytes from va %#"PRIxVA, n, va);

	/* Unmap the now unused pages */
	core_mmu_unmap_pages(va, n / SMALL_PAGE_SIZE);

out:
	/* Stop further allocations. */
	boot_mem_desc->mem_start = boot_mem_desc->mem_end;
	return va;
}

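/*
 * Release the temporary allocations at the top of the boot memory pool,
 * unmapping their pages unless CFG_WITH_PAGER is enabled. This ends boot
 * memory allocation, so boot_mem_desc is cleared.
 */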
void boot_mem_release_tmp_alloc(void)
{
	tee_mm_entry_t *mm = NULL;
	vaddr_t va = 0;
	paddr_t pa = 0;
	size_t n = 0;

	assert(boot_mem_desc &&
	       boot_mem_desc->mem_start == boot_mem_desc->mem_end);

	if (IS_ENABLED(CFG_WITH_PAGER)) {
		n = boot_mem_desc->orig_mem_end - boot_mem_desc->mem_end;
		va = boot_mem_desc->mem_end;
		boot_mem_desc = NULL;
		DMSG("Releasing %zu bytes from va %#"PRIxVA, n, va);
		return;
	}

	va = ROUNDDOWN(boot_mem_desc->mem_end, SMALL_PAGE_SIZE);
	pa = vaddr_to_phys(va);

	mm = nex_phys_mem_mm_find(pa);
	if (!mm)
		panic();
	assert(pa == tee_mm_get_smem(mm));
	n = tee_mm_get_bytes(mm);

	/* Boot memory allocation is now done */
	boot_mem_desc = NULL;

	DMSG("Releasing %zu bytes from va %#"PRIxVA, n, va);

	/* Unmap the now unused pages */
	core_mmu_unmap_pages(va, n / SMALL_PAGE_SIZE);
}
375