// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <assert.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_common.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <pta_stats.h>
#include <string.h>
#include <trace.h>
#include <util.h>

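/*
 * tee_mm_init() - initialize a pool of blocks covering [lo, lo + size).
 * The start is rounded up and the size rounded down to the pool granule
 * (1 << shift). A zero-initialized sentinel entry anchors the linked list
 * of allocations; for TEE_MM_POOL_HI_ALLOC pools its offset is set past the
 * last block since such pools allocate from the top downwards.
 * Returns false if @pool is NULL or the sentinel cannot be allocated.
 *
 * Illustrative call only, with made-up base address, size and flags:
 *   if (!tee_mm_init(&pool, 0x80000000, 0x100000, SMALL_PAGE_SHIFT, 0))
 *       panic();
 */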
bool tee_mm_init(tee_mm_pool_t *pool, paddr_t lo, paddr_size_t size,
		 uint8_t shift, uint32_t flags)
{
	paddr_size_t rounded = 0;
	paddr_t initial_lo = lo;

	if (pool == NULL)
		return false;

	lo = ROUNDUP2(lo, 1 << shift);
	rounded = lo - initial_lo;
	size = ROUNDDOWN2(size - rounded, 1 << shift);

	assert(((uint64_t)size >> shift) < (uint64_t)UINT32_MAX);

	*pool = (tee_mm_pool_t){
		.lo = lo,
		.size = size,
		.shift = shift,
		.flags = flags,
	};

	pool->entry = malloc_flags(pool->flags | MAF_ZERO_INIT, NULL,
				   MALLOC_DEFAULT_ALIGNMENT,
				   sizeof(tee_mm_entry_t));
	if (pool->entry == NULL)
		return false;

	if (pool->flags & TEE_MM_POOL_HI_ALLOC)
		pool->entry->offset = ((size - 1) >> shift) + 1;

	pool->entry->pool = pool;
	pool->lock = SPINLOCK_UNLOCK;

	return true;
}

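/*
 * tee_mm_final() - tear down a pool: free every remaining allocation and
 * the sentinel entry. Safe to call on an uninitialized or already
 * finalized pool.
 */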
void tee_mm_final(tee_mm_pool_t *pool)
{
	if (pool == NULL || pool->entry == NULL)
		return;

	while (pool->entry->next != NULL)
		tee_mm_free(pool->entry->next);
	free_flags(pool->flags, pool->entry);
	pool->entry = NULL;
}

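/* Link @nn into the pool's singly linked list immediately after @p */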
static void tee_mm_add(tee_mm_entry_t *p, tee_mm_entry_t *nn)
{
	/* add to list */
	nn->next = p->next;
	p->next = nn;
}

#ifdef CFG_WITH_STATS
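/*
 * Sum the sizes of all entries in the pool and convert the result from
 * blocks to bytes. Called with the pool lock held.
 */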
static size_t tee_mm_stats_allocated(tee_mm_pool_t *pool)
{
	tee_mm_entry_t *entry;
	uint32_t sz = 0;

	if (!pool)
		return 0;

	entry = pool->entry;
	while (entry) {
		sz += entry->size;
		entry = entry->next;
	}

	return sz << pool->shift;
}

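/*
 * Fill @stats with the pool size, currently allocated bytes and the high
 * watermark. When @reset is true the recorded maximum is cleared. Runs
 * under the pool spinlock with foreign interrupts masked.
 */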
void tee_mm_get_pool_stats(tee_mm_pool_t *pool, struct pta_stats_alloc *stats,
			   bool reset)
{
	uint32_t exceptions;

	if (!pool)
		return;

	memset(stats, 0, sizeof(*stats));

	exceptions = cpu_spin_lock_xsave(&pool->lock);

	stats->size = pool->size;
	stats->max_allocated = pool->max_allocated;
	stats->allocated = tee_mm_stats_allocated(pool);

	if (reset)
		pool->max_allocated = 0;
	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
}

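/* Track the high watermark of allocated bytes; called with the lock held */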
static void update_max_allocated(tee_mm_pool_t *pool)
{
	size_t sz = tee_mm_stats_allocated(pool);

	if (sz > pool->max_allocated)
		pool->max_allocated = sz;
}
#else /* CFG_WITH_STATS */
static inline void update_max_allocated(tee_mm_pool_t *pool __unused)
{
}
#endif /* CFG_WITH_STATS */

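/*
 * tee_mm_alloc() - reserve @size bytes anywhere in the pool.
 * The size is rounded up to a whole number of (1 << shift) blocks. The
 * entry list is kept sorted by offset, so the first gap between
 * consecutive entries that is large enough is used: from the top of the
 * pool downwards for TEE_MM_POOL_HI_ALLOC pools, from the bottom upwards
 * otherwise. Returns NULL if the pool is not initialized or no gap fits.
 */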
tee_mm_entry_t *tee_mm_alloc(tee_mm_pool_t *pool, size_t size)
{
	size_t psize;
	tee_mm_entry_t *entry;
	tee_mm_entry_t *nn;
	size_t remaining;
	uint32_t exceptions;

	/* Check that pool is initialized */
	if (!pool || !pool->entry)
		return NULL;

	nn = malloc_flags(pool->flags, NULL, MALLOC_DEFAULT_ALIGNMENT,
			  sizeof(tee_mm_entry_t));
	if (!nn)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&pool->lock);

	entry = pool->entry;
	if (!size)
		psize = 0;
	else
		psize = ((size - 1) >> pool->shift) + 1;

	/* find free slot */
	if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
		while (entry->next != NULL && psize >
		       (entry->offset - entry->next->offset -
			entry->next->size))
			entry = entry->next;
	} else {
		while (entry->next != NULL && psize >
		       (entry->next->offset - entry->size - entry->offset))
			entry = entry->next;
	}

	/* check if we have enough memory */
	if (entry->next == NULL) {
		if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
			/*
			 * entry->offset is a "block count" offset from
			 * pool->lo. The byte offset is
			 * (entry->offset << pool->shift).
			 * In the HI_ALLOC scheme memory is allocated from
			 * the end of the segment, so there is enough room
			 * only if (entry->offset << pool->shift) >= size.
			 */
			if ((entry->offset << pool->shift) < size) {
				/* out of memory */
				goto err;
			}
		} else {
			if (!pool->size)
				panic("invalid pool");

			remaining = pool->size;
			remaining -= ((entry->offset + entry->size) <<
				      pool->shift);

			if (remaining < size) {
				/* out of memory */
				goto err;
			}
		}
	}

	tee_mm_add(entry, nn);

	if (pool->flags & TEE_MM_POOL_HI_ALLOC)
		nn->offset = entry->offset - psize;
	else
		nn->offset = entry->offset + entry->size;
	nn->size = psize;
	nn->pool = pool;

	update_max_allocated(pool);

	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
	return nn;
err:
	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
	free_flags(pool->flags, nn);
	return NULL;
}

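/*
 * Check whether the block range [offslo, offshi) fits in the gap next to
 * @e without overlapping @e, its neighbour or the pool boundaries.
 */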
static inline bool fit_in_gap(tee_mm_pool_t *pool, tee_mm_entry_t *e,
			      paddr_t offslo, paddr_t offshi)
{
	if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
		if (offshi > e->offset ||
		    (e->next != NULL &&
		     (offslo < e->next->offset + e->next->size)) ||
		    (offshi << pool->shift) - 1 > pool->size)
			/* memory not available */
			return false;
	} else {
		if (offslo < (e->offset + e->size) ||
		    (e->next != NULL && (offshi > e->next->offset)) ||
		    (offshi << pool->shift) > pool->size)
			/* memory not available */
			return false;
	}

	return true;
}

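/*
 * tee_mm_alloc2() - reserve the fixed range [base, base + size) in the
 * pool. The range is converted to block offsets relative to pool->lo, the
 * sorted entry list is walked to the insertion point and the request fails
 * (returns NULL) if it would overlap an existing entry or fall outside the
 * pool.
 */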
tee_mm_entry_t *tee_mm_alloc2(tee_mm_pool_t *pool, paddr_t base, size_t size)
{
	tee_mm_entry_t *entry;
	paddr_t offslo;
	paddr_t offshi;
	tee_mm_entry_t *mm;
	uint32_t exceptions;

	/* Check that pool is initialized */
	if (!pool || !pool->entry)
		return NULL;

	/* Wrapping and sanity check */
	if ((base + size) < base || base < pool->lo)
		return NULL;

	mm = malloc_flags(pool->flags, NULL, MALLOC_DEFAULT_ALIGNMENT,
			  sizeof(tee_mm_entry_t));
	if (!mm)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&pool->lock);

	entry = pool->entry;
	offslo = (base - pool->lo) >> pool->shift;
	offshi = ((base - pool->lo + size - 1) >> pool->shift) + 1;

	/* find slot */
	if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
		while (entry->next != NULL &&
		       offshi < entry->next->offset + entry->next->size)
			entry = entry->next;
	} else {
		while (entry->next != NULL && offslo > entry->next->offset)
			entry = entry->next;
	}

	/* Check that memory is available */
	if (!fit_in_gap(pool, entry, offslo, offshi))
		goto err;

	tee_mm_add(entry, mm);

	mm->offset = offslo;
	mm->size = offshi - offslo;
	mm->pool = pool;

	update_max_allocated(pool);
	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
	return mm;
err:
	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
	free_flags(pool->flags, mm);
	return NULL;
}

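/*
 * tee_mm_free() - release an entry obtained from tee_mm_alloc() or
 * tee_mm_alloc2() by unlinking it from its pool and freeing it. Passing an
 * entry that is not in the pool list is a programming error and panics.
 */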
void tee_mm_free(tee_mm_entry_t *p)
{
	tee_mm_entry_t *entry;
	uint32_t exceptions;

	if (!p || !p->pool)
		return;

	exceptions = cpu_spin_lock_xsave(&p->pool->lock);
	entry = p->pool->entry;

	/* remove entry from list */
	while (entry->next != NULL && entry->next != p)
		entry = entry->next;

	if (!entry->next)
		panic("invalid mm_entry");

	entry->next = entry->next->next;
	cpu_spin_unlock_xrestore(&p->pool->lock, exceptions);

	free_flags(p->pool->flags, p);
}

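/* Return the size of the allocation in bytes, or 0 for an invalid entry */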
size_t tee_mm_get_bytes(const tee_mm_entry_t *mm)
{
	if (!mm || !mm->pool)
		return 0;
	else
		return mm->size << mm->pool->shift;
}

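/* Return true if @addr lies within the pool range [lo, lo + size) */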
bool tee_mm_addr_is_within_range(const tee_mm_pool_t *pool, paddr_t addr)
{
	return pool && addr >= pool->lo &&
		addr <= (pool->lo + (pool->size - 1));
}

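/*
 * Return true if the pool holds no allocations (only the sentinel entry)
 * or if the pool is not initialized.
 */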
bool tee_mm_is_empty(tee_mm_pool_t *pool)
{
	bool ret;
	uint32_t exceptions;

	if (pool == NULL || pool->entry == NULL)
		return true;

	exceptions = cpu_spin_lock_xsave(&pool->lock);
	ret = pool->entry == NULL || pool->entry->next == NULL;
	cpu_spin_unlock_xrestore(&pool->lock, exceptions);

	return ret;
}

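/*
 * tee_mm_find() - look up the entry covering address @addr, or NULL if
 * @addr is outside the pool or not allocated. The sentinel entry is
 * skipped while walking the list.
 */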
tee_mm_entry_t *tee_mm_find(const tee_mm_pool_t *pool, paddr_t addr)
{
	tee_mm_entry_t *entry = NULL;
	size_t offset = 0;
	uint32_t exceptions = 0;

	if (!tee_mm_addr_is_within_range(pool, addr))
		return NULL;

	/*
	 * Only dereference the pool once the range check above has accepted
	 * it (the check also rejects a NULL pool). A size_t offset avoids
	 * truncation in pools with more than 64K blocks.
	 */
	entry = pool->entry;
	offset = (addr - pool->lo) >> pool->shift;

	exceptions = cpu_spin_lock_xsave(&((tee_mm_pool_t *)pool)->lock);

	while (entry->next != NULL) {
		entry = entry->next;

		if ((offset >= entry->offset) &&
		    (offset < (entry->offset + entry->size))) {
			cpu_spin_unlock_xrestore(&((tee_mm_pool_t *)pool)->lock,
						 exceptions);
			return entry;
		}
	}

	cpu_spin_unlock_xrestore(&((tee_mm_pool_t *)pool)->lock, exceptions);
	return NULL;
}

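/* Return the start address (pool->lo + byte offset) of an allocation */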
uintptr_t tee_mm_get_smem(const tee_mm_entry_t *mm)
{
	return (mm->offset << mm->pool->shift) + mm->pool->lo;
}