// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2024, Linaro Limited
 */

#include <assert.h>
#include <kernel/boot.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/phys_mem.h>
#include <mm/tee_mm.h>
#include <stdalign.h>
#include <string.h>
#include <util.h>

/*
 * struct boot_mem_reloc - Pointers relocated in memory during boot
 * @ptrs: Array of pointers to relocate
 * @count: Number of cells used in @ptrs
 * @next: Next relocation array, used when @ptrs is full
 */
struct boot_mem_reloc {
	void **ptrs[64];
	size_t count;
	struct boot_mem_reloc *next;
};
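
/*
 * Each cell in @ptrs holds the address of a pointer variable, not the
 * pointer value itself, so that boot_mem_relocate() can update both the
 * cell and the value it refers to. A hedged usage sketch (names
 * hypothetical):
 *
 *	static struct my_ctx *ctx;
 *
 *	ctx = boot_mem_alloc(sizeof(*ctx), alignof(*ctx));
 *	boot_mem_add_reloc(&ctx);
 */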

/*
 * struct boot_mem_desc - Stack-like boot memory allocation pool
 * @orig_mem_start: Boot memory stack base address
 * @orig_mem_end: Boot memory stack end address
 * @mem_start: Boot memory free space start address
 * @mem_end: Boot memory free space end address
 * @reloc: Boot memory pointers requiring relocation
 */
struct boot_mem_desc {
	vaddr_t orig_mem_start;
	vaddr_t orig_mem_end;
	vaddr_t mem_start;
	vaddr_t mem_end;
	struct boot_mem_reloc *reloc;
};
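
/*
 * The pool is consumed from both ends, roughly like this (illustrative
 * sketch, addresses grow to the right):
 *
 *   orig_mem_start      mem_start        mem_end     orig_mem_end
 *   v                   v                v           v
 *   +-------------------+----------------+-----------+
 *   | permanent allocs  |   free space   | temporary |
 *   +-------------------+----------------+-----------+
 *
 * mem_alloc() moves mem_start up, mem_alloc_tmp() moves mem_end down,
 * and the pool is exhausted when the two meet.
 */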

static struct boot_mem_desc *boot_mem_desc;

static void *mem_alloc_tmp(struct boot_mem_desc *desc, size_t len, size_t align)
{
	vaddr_t va = 0;

	assert(desc && desc->mem_start && desc->mem_end);
	assert(IS_POWER_OF_TWO(align) && !(len % align));
	if (SUB_OVERFLOW(desc->mem_end, len, &va))
		panic();
	va = ROUNDDOWN2(va, align);
	if (va < desc->mem_start)
		panic();
	desc->mem_end = va;
	return (void *)va;
}
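
/*
 * Worked example (illustrative numbers only): with mem_end = 0x1038,
 * len = 0x20 and align = 0x10, SUB_OVERFLOW() gives va = 0x1018 and
 * ROUNDDOWN2() aligns it to 0x1010, so the allocation consumes 0x28
 * bytes and mem_end becomes 0x1010.
 */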

static void *mem_alloc(struct boot_mem_desc *desc, size_t len, size_t align)
{
	vaddr_t va = 0;
	vaddr_t ve = 0;

	runtime_assert(!IS_ENABLED(CFG_WITH_PAGER));
	assert(desc && desc->mem_start && desc->mem_end);
	assert(IS_POWER_OF_TWO(align) && !(len % align));
	va = ROUNDUP2(desc->mem_start, align);
	if (ADD_OVERFLOW(va, len, &ve))
		panic();
	if (ve > desc->mem_end)
		panic();
	desc->mem_start = ve;
	return (void *)va;
}
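
/*
 * Worked example (illustrative numbers only): with mem_start = 0x2008,
 * len = 0x20 and align = 0x10, ROUNDUP2() gives va = 0x2010 and the
 * allocation ends at ve = 0x2030, the new mem_start. Unlike
 * mem_alloc_tmp(), allocations made here are kept after boot, which is
 * why runtime_assert() rejects this path when CFG_WITH_PAGER is
 * enabled.
 */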

void boot_mem_init(vaddr_t start, vaddr_t end, vaddr_t orig_end)
{
	struct boot_mem_desc desc = {
		.orig_mem_start = start,
		.orig_mem_end = orig_end,
		.mem_start = start,
		.mem_end = end,
	};

	boot_mem_desc = mem_alloc_tmp(&desc, sizeof(desc), alignof(desc));
	*boot_mem_desc = desc;
	boot_mem_desc->reloc = mem_alloc_tmp(boot_mem_desc,
					     sizeof(*boot_mem_desc->reloc),
					     alignof(*boot_mem_desc->reloc));
	memset(boot_mem_desc->reloc, 0, sizeof(*boot_mem_desc->reloc));
}
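
/*
 * Note that the descriptor itself and its first relocation array are
 * bootstrapped from the pool: mem_alloc_tmp() is first called with the
 * stack-local desc, and the allocated copy then becomes the global
 * boot_mem_desc. A minimal usage sketch (variable names hypothetical;
 * in the simplest case end and orig_end coincide):
 *
 *	boot_mem_init(bmem_start, bmem_end, bmem_end);
 */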

void boot_mem_add_reloc(void *ptr)
{
	struct boot_mem_reloc *reloc = NULL;

	assert(boot_mem_desc && boot_mem_desc->reloc);
	reloc = boot_mem_desc->reloc;

	/* If the reloc struct is full, allocate a new one and link it first */
	if (reloc->count == ARRAY_SIZE(reloc->ptrs)) {
		reloc = boot_mem_alloc_tmp(sizeof(*reloc), alignof(*reloc));
		memset(reloc, 0, sizeof(*reloc));
		reloc->next = boot_mem_desc->reloc;
		boot_mem_desc->reloc = reloc;
	}

	reloc->ptrs[reloc->count] = ptr;
	reloc->count++;
}
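
/*
 * When an array fills up, a new struct boot_mem_reloc is linked in at
 * the head, so the list looks like this (newest first):
 *
 *	boot_mem_desc->reloc -> [64 ptrs] -> [64 ptrs] -> ... -> NULL
 *
 * The spill-over array is itself a temporary allocation, which works
 * because relocation can only happen while the boot pool is still
 * alive.
 */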

static void *add_offs(void *p, size_t offs)
{
	assert(p);
	return (uint8_t *)p + offs;
}

static void *add_offs_or_null(void *p, size_t offs)
{
	if (!p)
		return NULL;
	return (uint8_t *)p + offs;
}

void boot_mem_relocate(size_t offs)
{
	struct boot_mem_reloc *reloc = NULL;
	size_t n = 0;

	boot_mem_desc = add_offs(boot_mem_desc, offs);

	boot_mem_desc->orig_mem_start += offs;
	boot_mem_desc->orig_mem_end += offs;
	boot_mem_desc->mem_start += offs;
	boot_mem_desc->mem_end += offs;
	boot_mem_desc->reloc = add_offs(boot_mem_desc->reloc, offs);

	for (reloc = boot_mem_desc->reloc;; reloc = reloc->next) {
		for (n = 0; n < reloc->count; n++) {
			reloc->ptrs[n] = add_offs(reloc->ptrs[n], offs);
			*reloc->ptrs[n] = add_offs_or_null(*reloc->ptrs[n],
							   offs);
		}
		if (!reloc->next)
			break;
		reloc->next = add_offs(reloc->next, offs);
	}
}
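
/*
 * Worked example (illustrative numbers only): with offs = 0x1000 and a
 * registered cell at 0x4010 holding the value 0x4100, the inner loop
 * first moves the cell address to 0x5010 and then rewrites the value
 * stored there to 0x5100. The cell address must be updated before it is
 * dereferenced because the backing memory has already been moved to its
 * new location.
 */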

void *boot_mem_alloc(size_t len, size_t align)
{
	return mem_alloc(boot_mem_desc, len, align);
}

void *boot_mem_alloc_tmp(size_t len, size_t align)
{
	return mem_alloc_tmp(boot_mem_desc, len, align);
}

vaddr_t boot_mem_release_unused(void)
{
	tee_mm_entry_t *mm = NULL;
	paddr_t pa = 0;
	vaddr_t va = 0;
	size_t n = 0;
	vaddr_t tmp_va = 0;
	paddr_t tmp_pa = 0;
	size_t tmp_n = 0;

	assert(boot_mem_desc);

	n = boot_mem_desc->mem_start - boot_mem_desc->orig_mem_start;
	DMSG("Allocated %zu bytes at va %#"PRIxVA" pa %#"PRIxPA,
	     n, boot_mem_desc->orig_mem_start,
	     vaddr_to_phys(boot_mem_desc->orig_mem_start));

	DMSG("Tempalloc %zu bytes at va %#"PRIxVA,
	     (size_t)(boot_mem_desc->orig_mem_end - boot_mem_desc->mem_end),
	     boot_mem_desc->mem_end);

	if (IS_ENABLED(CFG_WITH_PAGER))
		goto out;

	pa = vaddr_to_phys(ROUNDUP(boot_mem_desc->orig_mem_start,
				   SMALL_PAGE_SIZE));
	mm = nex_phys_mem_mm_find(pa);
	if (!mm)
		panic();

	va = ROUNDUP(boot_mem_desc->mem_start, SMALL_PAGE_SIZE);

	tmp_va = ROUNDDOWN(boot_mem_desc->mem_end, SMALL_PAGE_SIZE);
	tmp_n = boot_mem_desc->orig_mem_end - tmp_va;
	tmp_pa = vaddr_to_phys(tmp_va);

	pa = tee_mm_get_smem(mm);
	n = vaddr_to_phys(boot_mem_desc->mem_start) - pa;
	tee_mm_free(mm);
	DMSG("Carving out %#"PRIxPA"..%#"PRIxPA, pa, pa + n - 1);
	mm = nex_phys_mem_alloc2(pa, n);
	if (!mm)
		panic();
	mm = nex_phys_mem_alloc2(tmp_pa, tmp_n);
	if (!mm)
		panic();

	n = tmp_va - boot_mem_desc->mem_start;
	DMSG("Releasing %zu bytes from va %#"PRIxVA, n, va);

	/* Unmap the now unused pages */
	core_mmu_unmap_pages(va, n / SMALL_PAGE_SIZE);

out:
	/* Stop further allocations. */
	boot_mem_desc->mem_start = boot_mem_desc->mem_end;
	return va;
}
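
/*
 * After boot_mem_release_unused() the pool looks roughly like this
 * (page-granular boundaries, illustrative sketch):
 *
 *   orig_mem_start      mem_start        tmp_va      orig_mem_end
 *   v                   v                v           v
 *   +-------------------+----------------+-----------+
 *   | kept (permanent)  |    unmapped    | kept tmp  |
 *   +-------------------+----------------+-----------+
 *
 * Both kept ranges are re-reserved with nex_phys_mem_alloc2() so the
 * physical memory allocator cannot hand them out; the temporary range
 * is only held until boot_mem_release_tmp_alloc() runs.
 */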

void boot_mem_release_tmp_alloc(void)
{
	tee_mm_entry_t *mm = NULL;
	vaddr_t va = 0;
	paddr_t pa = 0;
	size_t n = 0;

	assert(boot_mem_desc &&
	       boot_mem_desc->mem_start == boot_mem_desc->mem_end);

	if (IS_ENABLED(CFG_WITH_PAGER)) {
		n = boot_mem_desc->orig_mem_end - boot_mem_desc->mem_end;
		va = boot_mem_desc->mem_end;
		boot_mem_desc = NULL;
		DMSG("Releasing %zu bytes from va %#"PRIxVA, n, va);
		return;
	}

	va = ROUNDDOWN(boot_mem_desc->mem_end, SMALL_PAGE_SIZE);
	pa = vaddr_to_phys(va);

	mm = nex_phys_mem_mm_find(pa);
	if (!mm)
		panic();
	assert(pa == tee_mm_get_smem(mm));
	n = tee_mm_get_bytes(mm);

	/* Boot memory allocation is now done */
	boot_mem_desc = NULL;

	DMSG("Releasing %zu bytes from va %#"PRIxVA, n, va);

	/* Unmap the now unused pages */
	core_mmu_unmap_pages(va, n / SMALL_PAGE_SIZE);
}
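
/*
 * Expected call sequence over a boot, a hedged sketch of how the
 * functions above fit together (the exact call sites live in the boot
 * code):
 *
 *	boot_mem_init(start, end, orig_end);
 *	... boot_mem_alloc()/boot_mem_alloc_tmp()/boot_mem_add_reloc() ...
 *	boot_mem_relocate(offs);	   (only if the image was moved)
 *	... more allocations ...
 *	boot_mem_release_unused();	   (frees the gap, keeps both ends)
 *	boot_mem_release_tmp_alloc();	   (tears down the temporary area)
 */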