xref: /optee_os/core/mm/boot_mem.c (revision c95d740ab3604844575dc99dad8bd512781c5d07)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2024, Linaro Limited
4  */
5 
6 #include <assert.h>
7 #include <kernel/boot.h>
8 #include <mm/core_memprot.h>
9 #include <mm/core_mmu.h>
10 #include <mm/phys_mem.h>
11 #include <mm/tee_mm.h>
12 #include <stdalign.h>
13 #include <string.h>
14 #include <util.h>
15 
/*
 * struct boot_mem_reloc - Pointers relocated in memory during boot
 * @ptrs: Array of pointers to pointers that need relocation
 * @count: Number of cells used in @ptrs
 * @next: Next relocation array when @ptrs is fully used
 */
struct boot_mem_reloc {
	void **ptrs[64];
	size_t count;
	struct boot_mem_reloc *next;
};
27 
/*
 * struct boot_mem_padding - Unused memory between allocations
 * @start: Start of padding
 * @len: Length of padding
 * @next: Next padding
 *
 * Note that the struct itself is stored inside the padding range it
 * describes (see add_padding()).
 */
struct boot_mem_padding {
	vaddr_t start;
	size_t len;
	struct boot_mem_padding *next;
};
39 
/*
 * struct boot_mem_desc - Stack like boot memory allocation pool
 * @orig_mem_start: Boot memory stack base address
 * @orig_mem_end: Boot memory stack original end address
 * @mem_start: Boot memory free space start address
 * @mem_end: Boot memory free space end address
 * @reloc: Boot memory pointers requiring relocation
 * @padding: Linked list of unused memory between allocated blocks
 *
 * Permanent allocations grow @mem_start upwards while temporary
 * allocations grow @mem_end downwards; the pool is exhausted when they
 * meet.
 */
struct boot_mem_desc {
	vaddr_t orig_mem_start;
	vaddr_t orig_mem_end;
	vaddr_t mem_start;
	vaddr_t mem_end;
	struct boot_mem_reloc *reloc;
	struct boot_mem_padding *padding;
};
57 
58 static struct boot_mem_desc *boot_mem_desc;
59 
60 static void *mem_alloc_tmp(struct boot_mem_desc *desc, size_t len, size_t align)
61 {
62 	vaddr_t va = 0;
63 
64 	assert(desc && desc->mem_start && desc->mem_end);
65 	assert(IS_POWER_OF_TWO(align) && !(len % align));
66 	if (SUB_OVERFLOW(desc->mem_end, len, &va))
67 		panic();
68 	va = ROUNDDOWN2(va, align);
69 	if (va < desc->mem_start)
70 		panic();
71 	desc->mem_end = va;
72 	return (void *)va;
73 }
74 
75 static void add_padding(struct boot_mem_desc *desc, vaddr_t va)
76 {
77 	struct boot_mem_padding *pad = NULL;
78 	vaddr_t rounded = ROUNDUP(desc->mem_start, alignof(*pad));
79 
80 	if (rounded < va && va - rounded > sizeof(*pad)) {
81 		pad = (struct boot_mem_padding *)rounded;
82 		pad->start = desc->mem_start;
83 		pad->len = va - desc->mem_start;
84 		DMSG("%#"PRIxVA" len %#zx", pad->start, pad->len);
85 		pad->next = desc->padding;
86 		desc->padding = pad;
87 	}
88 }
89 
90 static void *mem_alloc(struct boot_mem_desc *desc, size_t len, size_t align)
91 {
92 	vaddr_t va = 0;
93 	vaddr_t ve = 0;
94 
95 	runtime_assert(!IS_ENABLED(CFG_WITH_PAGER));
96 	assert(desc && desc->mem_start && desc->mem_end);
97 	assert(IS_POWER_OF_TWO(align) && !(len % align));
98 	va = ROUNDUP2(desc->mem_start, align);
99 	if (ADD_OVERFLOW(va, len, &ve))
100 		panic();
101 	if (ve > desc->mem_end)
102 		panic();
103 	add_padding(desc, va);
104 	desc->mem_start = ve;
105 	return (void *)va;
106 }
107 
108 void boot_mem_init(vaddr_t start, vaddr_t end, vaddr_t orig_end)
109 {
110 	struct boot_mem_desc desc = {
111 		.orig_mem_start = start,
112 		.orig_mem_end = orig_end,
113 		.mem_start = start,
114 		.mem_end = end,
115 	};
116 
117 	boot_mem_desc = mem_alloc_tmp(&desc, sizeof(desc), alignof(desc));
118 	*boot_mem_desc = desc;
119 	boot_mem_desc->reloc = mem_alloc_tmp(boot_mem_desc,
120 					     sizeof(*boot_mem_desc->reloc),
121 					     alignof(*boot_mem_desc->reloc));
122 	memset(boot_mem_desc->reloc, 0, sizeof(*boot_mem_desc->reloc));
123 }
124 
125 void boot_mem_add_reloc(void *ptr)
126 {
127 	struct boot_mem_reloc *reloc = NULL;
128 
129 	assert(boot_mem_desc && boot_mem_desc->reloc);
130 	reloc = boot_mem_desc->reloc;
131 
132 	/* If the reloc struct is full, allocate a new and link it first */
133 	if (reloc->count == ARRAY_SIZE(reloc->ptrs)) {
134 		reloc = boot_mem_alloc_tmp(sizeof(*reloc), alignof(*reloc));
135 		reloc->next = boot_mem_desc->reloc;
136 		boot_mem_desc->reloc = reloc;
137 	}
138 
139 	reloc->ptrs[reloc->count] = ptr;
140 	reloc->count++;
141 }
142 
/* Add @offs to the non-NULL pointer @p */
static void *add_offs(void *p, size_t offs)
{
	uint8_t *base = p;

	assert(base);
	return base + offs;
}
148 
/* Like add_offs() but passes a NULL @p through unchanged */
static void *add_offs_or_null(void *p, size_t offs)
{
	if (p)
		return (uint8_t *)p + offs;
	return NULL;
}
155 
/*
 * Adjust all boot memory bookkeeping and all registered pointers by
 * @offs after the memory has been moved/remapped. Note the ordering
 * constraints: each link pointer must be relocated before it is
 * followed, starting with boot_mem_desc itself.
 */
void boot_mem_relocate(size_t offs)
{
	struct boot_mem_reloc *reloc = NULL;
	struct boot_mem_padding *pad = NULL;
	size_t n = 0;

	/* The descriptor lives in the moved memory, relocate it first */
	boot_mem_desc = add_offs(boot_mem_desc, offs);

	boot_mem_desc->orig_mem_start += offs;
	boot_mem_desc->orig_mem_end += offs;
	boot_mem_desc->mem_start += offs;
	boot_mem_desc->mem_end += offs;
	boot_mem_desc->reloc = add_offs(boot_mem_desc->reloc, offs);

	for (reloc = boot_mem_desc->reloc;; reloc = reloc->next) {
		for (n = 0; n < reloc->count; n++) {
			/*
			 * Both the registered location and the pointer
			 * stored there moved: relocate the location, then
			 * the pointer it holds (which may legitimately be
			 * NULL).
			 */
			reloc->ptrs[n] = add_offs(reloc->ptrs[n], offs);
			*reloc->ptrs[n] = add_offs_or_null(*reloc->ptrs[n],
							   offs);
		}
		if (!reloc->next)
			break;
		/* Relocate the link before the loop header follows it */
		reloc->next = add_offs(reloc->next, offs);
	}

	/* Paddings live inside the moved memory too */
	if (boot_mem_desc->padding) {
		boot_mem_desc->padding = add_offs(boot_mem_desc->padding, offs);
		pad = boot_mem_desc->padding;
		while (true) {
			pad->start += offs;
			if (!pad->next)
				break;
			pad->next = add_offs(pad->next, offs);
			pad = pad->next;
		}
	}
}
193 
/*
 * Allocate permanent boot memory, growing upwards from the start of
 * the free range. Panics on exhaustion.
 */
void *boot_mem_alloc(size_t len, size_t align)
{
	return mem_alloc(boot_mem_desc, len, align);
}
198 
/*
 * Allocate temporary boot memory, growing downwards from the end of
 * the free range. Released with boot_mem_release_tmp_alloc().
 */
void *boot_mem_alloc_tmp(size_t len, size_t align)
{
	return mem_alloc_tmp(boot_mem_desc, len, align);
}
203 
204 /*
205  * Calls the supplied @func() for each padding and removes the paddings
206  * where @func() returns true.
207  */
208 void boot_mem_foreach_padding(bool (*func)(vaddr_t va, size_t len, void *ptr),
209 			      void *ptr)
210 {
211 	struct boot_mem_padding **prev = NULL;
212 	struct boot_mem_padding *next = NULL;
213 	struct boot_mem_padding *pad = NULL;
214 
215 	assert(boot_mem_desc);
216 
217 	prev = &boot_mem_desc->padding;
218 	for (pad = boot_mem_desc->padding; pad; pad = next) {
219 		vaddr_t start = pad->start;
220 		size_t len = pad->len;
221 
222 		next = pad->next;
223 		if (func(start, len, ptr)) {
224 			DMSG("consumed %p %#"PRIxVA" len %#zx",
225 			     pad, start, len);
226 			*prev = next;
227 		} else {
228 			DMSG("keeping %p %#"PRIxVA" len %#zx",
229 			     pad, start, len);
230 			prev = &pad->next;
231 		}
232 	}
233 }
234 
/*
 * Release the whole-page part of boot memory that is neither
 * permanently allocated nor still holding temporary allocations.
 * Registers the used head (permanent allocations) and tail (temporary
 * allocations) with the physical memory pool, unmaps the pages in
 * between and stops further boot_mem allocations.
 *
 * Returns the first released virtual address (0 with CFG_WITH_PAGER,
 * where nothing is unmapped here).
 */
vaddr_t boot_mem_release_unused(void)
{
	tee_mm_entry_t *mm = NULL;
	paddr_t pa = 0;
	vaddr_t va = 0;
	size_t n = 0;
	vaddr_t tmp_va = 0;
	paddr_t tmp_pa = 0;
	size_t tmp_n = 0;

	assert(boot_mem_desc);

	n = boot_mem_desc->mem_start - boot_mem_desc->orig_mem_start;
	DMSG("Allocated %zu bytes at va %#"PRIxVA" pa %#"PRIxPA,
	     n, boot_mem_desc->orig_mem_start,
	     vaddr_to_phys(boot_mem_desc->orig_mem_start));

	DMSG("Tempalloc %zu bytes at va %#"PRIxVA,
	     (size_t)(boot_mem_desc->orig_mem_end - boot_mem_desc->mem_end),
	     boot_mem_desc->mem_end);

	if (IS_ENABLED(CFG_WITH_PAGER))
		goto out;

	/* Find the tee_mm entry covering the boot memory pages */
	pa = vaddr_to_phys(ROUNDUP(boot_mem_desc->orig_mem_start,
				   SMALL_PAGE_SIZE));
	mm = nex_phys_mem_mm_find(pa);
	if (!mm)
		panic();

	/* First whole page past the permanent allocations */
	va = ROUNDUP(boot_mem_desc->mem_start, SMALL_PAGE_SIZE);

	/* Page-aligned range still occupied by temporary allocations */
	tmp_va = ROUNDDOWN(boot_mem_desc->mem_end, SMALL_PAGE_SIZE);
	tmp_n = boot_mem_desc->orig_mem_end - tmp_va;
	tmp_pa = vaddr_to_phys(tmp_va);

	/*
	 * Replace the single covering entry with two entries pinning only
	 * the still-used head and tail, leaving the middle free.
	 */
	pa = tee_mm_get_smem(mm);
	n = vaddr_to_phys(boot_mem_desc->mem_start) - pa;
	tee_mm_free(mm);
	DMSG("Carving out %#"PRIxPA"..%#"PRIxPA, pa, pa + n - 1);
	mm = nex_phys_mem_alloc2(pa, n);
	if (!mm)
		panic();
	mm = nex_phys_mem_alloc2(tmp_pa, tmp_n);
	if (!mm)
		panic();

	n = tmp_va - boot_mem_desc->mem_start;
	DMSG("Releasing %zu bytes from va %#"PRIxVA, n, va);

	/* Unmap the now unused pages */
	core_mmu_unmap_pages(va, n / SMALL_PAGE_SIZE);

out:
	/* Stop further allocations. */
	boot_mem_desc->mem_start = boot_mem_desc->mem_end;
	return va;
}
293 
/*
 * Release the pages that held the temporary boot allocations (the
 * tail pinned by boot_mem_release_unused()) and end boot memory
 * allocation for good by clearing boot_mem_desc.
 *
 * Must only be called once boot_mem_release_unused() has stopped
 * further allocations (mem_start == mem_end).
 */
void boot_mem_release_tmp_alloc(void)
{
	tee_mm_entry_t *mm = NULL;
	vaddr_t va = 0;
	paddr_t pa = 0;
	size_t n = 0;

	assert(boot_mem_desc &&
	       boot_mem_desc->mem_start == boot_mem_desc->mem_end);

	if (IS_ENABLED(CFG_WITH_PAGER)) {
		/* Nothing registered with the physical pool to unmap here */
		n = boot_mem_desc->orig_mem_end - boot_mem_desc->mem_end;
		va = boot_mem_desc->mem_end;
		boot_mem_desc = NULL;
		DMSG("Releasing %zu bytes from va %#"PRIxVA, n, va);
		return;
	}

	va = ROUNDDOWN(boot_mem_desc->mem_end, SMALL_PAGE_SIZE);
	pa = vaddr_to_phys(va);

	/* Entry created by boot_mem_release_unused() for the tail pages */
	mm = nex_phys_mem_mm_find(pa);
	if (!mm)
		panic();
	assert(pa == tee_mm_get_smem(mm));
	n = tee_mm_get_bytes(mm);

	/* Boot memory allocation is now done */
	boot_mem_desc = NULL;

	DMSG("Releasing %zu bytes from va %#"PRIxVA, n, va);

	/* Unmap the now unused pages */
	core_mmu_unmap_pages(va, n / SMALL_PAGE_SIZE);
}
329