// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <assert.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_common.h>
#include <malloc.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <pta_stats.h>
#include <string.h>
#include <trace.h>
#include <util.h>

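/*
 * Pool metadata (the tee_mm_entry_t list) is allocated from either the
 * regular heap or, when TEE_MM_POOL_NEX_MALLOC is set, the nexus heap,
 * which remains shared when virtualization partitions the other heaps.
 */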
static void *pmalloc(tee_mm_pool_t *pool, size_t size)
{
	if (pool->flags & TEE_MM_POOL_NEX_MALLOC)
		return nex_malloc(size);
	else
		return malloc(size);
}

static void *pcalloc(tee_mm_pool_t *pool, size_t num_el, size_t size)
{
	if (pool->flags & TEE_MM_POOL_NEX_MALLOC)
		return nex_calloc(num_el, size);
	else
		return calloc(num_el, size);
}

static void pfree(tee_mm_pool_t *pool, void *ptr)
{
	if (pool->flags & TEE_MM_POOL_NEX_MALLOC)
		nex_free(ptr);
	else
		free(ptr);
}

bool tee_mm_init(tee_mm_pool_t *pool, paddr_t lo, paddr_size_t size,
		 uint8_t shift, uint32_t flags)
{
	paddr_size_t rounded = 0;
	paddr_t initial_lo = lo;

	if (pool == NULL)
		return false;

	lo = ROUNDUP2(lo, 1 << shift);
	rounded = lo - initial_lo;
	size = ROUNDDOWN2(size - rounded, 1 << shift);

	assert(((uint64_t)size >> shift) < (uint64_t)UINT32_MAX);

	*pool = (tee_mm_pool_t){
		.lo = lo,
		.size = size,
		.shift = shift,
		.flags = flags,
	};

	pool->entry = pcalloc(pool, 1, sizeof(tee_mm_entry_t));
	if (pool->entry == NULL)
		return false;

	if (pool->flags & TEE_MM_POOL_HI_ALLOC)
		pool->entry->offset = ((size - 1) >> shift) + 1;

	pool->entry->pool = pool;
	pool->lock = SPINLOCK_UNLOCK;

	return true;
}
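
/*
 * Example (illustrative only; the base address, size and pool variable
 * are hypothetical): create a pool covering a 1 MiB region with 4 KiB
 * granularity, allocating from the low end:
 *
 *	static tee_mm_pool_t my_pool;
 *
 *	if (!tee_mm_init(&my_pool, 0x40000000, 0x100000, SMALL_PAGE_SHIFT,
 *			 TEE_MM_POOL_NO_FLAGS))
 *		panic();
 */

/*
 * Releases every entry, including the anchor entry, and leaves the pool
 * unusable until tee_mm_init() is called again.
 */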
void tee_mm_final(tee_mm_pool_t *pool)
{
	if (pool == NULL || pool->entry == NULL)
		return;

	while (pool->entry->next != NULL)
		tee_mm_free(pool->entry->next);
	pfree(pool, pool->entry);
	pool->entry = NULL;
}

static void tee_mm_add(tee_mm_entry_t *p, tee_mm_entry_t *nn)
{
	/* add to list */
	nn->next = p->next;
	p->next = nn;
}

#ifdef CFG_WITH_STATS
static size_t tee_mm_stats_allocated(tee_mm_pool_t *pool)
{
	tee_mm_entry_t *entry = NULL;
	/* size_t, not uint32_t: the shift below could overflow 32 bits */
	size_t sz = 0;

	if (!pool)
		return 0;

	entry = pool->entry;
	while (entry) {
		sz += entry->size;
		entry = entry->next;
	}

	return sz << pool->shift;
}

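/*
 * Snapshot the pool statistics under the pool spinlock. If reset is
 * true, the high-water mark is cleared after it has been reported.
 */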
void tee_mm_get_pool_stats(tee_mm_pool_t *pool, struct pta_stats_alloc *stats,
			   bool reset)
{
	uint32_t exceptions;

	if (!pool)
		return;

	memset(stats, 0, sizeof(*stats));

	exceptions = cpu_spin_lock_xsave(&pool->lock);

	stats->size = pool->size;
	stats->max_allocated = pool->max_allocated;
	stats->allocated = tee_mm_stats_allocated(pool);

	if (reset)
		pool->max_allocated = 0;
	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
}

static void update_max_allocated(tee_mm_pool_t *pool)
{
	size_t sz = tee_mm_stats_allocated(pool);

	if (sz > pool->max_allocated)
		pool->max_allocated = sz;
}
#else /* CFG_WITH_STATS */
static inline void update_max_allocated(tee_mm_pool_t *pool __unused)
{
}
#endif /* CFG_WITH_STATS */

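/*
 * First-fit allocation: the size is converted to a block count (one
 * block is 1 << pool->shift bytes) and the entry list is scanned for
 * the first gap large enough. With TEE_MM_POOL_HI_ALLOC the pool is
 * consumed from its high end downwards instead.
 */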
tee_mm_entry_t *tee_mm_alloc(tee_mm_pool_t *pool, size_t size)
{
	size_t psize;
	tee_mm_entry_t *entry;
	tee_mm_entry_t *nn;
	size_t remaining;
	uint32_t exceptions;

	/* Check that pool is initialized */
	if (!pool || !pool->entry)
		return NULL;

	nn = pmalloc(pool, sizeof(tee_mm_entry_t));
	if (!nn)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&pool->lock);

	entry = pool->entry;
	if (!size)
		psize = 0;
	else
		psize = ((size - 1) >> pool->shift) + 1;

	/* find free slot */
	if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
		while (entry->next != NULL && psize >
		       (entry->offset - entry->next->offset -
			entry->next->size))
			entry = entry->next;
	} else {
		while (entry->next != NULL && psize >
		       (entry->next->offset - entry->size - entry->offset))
			entry = entry->next;
	}

	/* check if we have enough memory */
	if (entry->next == NULL) {
		if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
			/*
			 * entry->offset is a "block count" offset from
			 * pool->lo. The byte offset is
			 * (entry->offset << pool->shift).
			 * In the HI_ALLOC allocation scheme the memory is
			 * allocated from the end of the segment, so to
			 * check that there is sufficient memory, verify
			 * that (entry->offset << pool->shift) >= size.
			 */
			if ((entry->offset << pool->shift) < size) {
				/* out of memory */
				goto err;
			}
		} else {
			if (!pool->size)
				panic("invalid pool");

			remaining = pool->size;
			remaining -= ((entry->offset + entry->size) <<
				      pool->shift);

			if (remaining < size) {
				/* out of memory */
				goto err;
			}
		}
	}

	tee_mm_add(entry, nn);

	if (pool->flags & TEE_MM_POOL_HI_ALLOC)
		nn->offset = entry->offset - psize;
	else
		nn->offset = entry->offset + entry->size;
	nn->size = psize;
	nn->pool = pool;

	update_max_allocated(pool);

	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
	return nn;
err:
	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
	pfree(pool, nn);
	return NULL;
}
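
/*
 * Example (illustrative only; my_pool is hypothetical): carve two pages
 * out of a pool, use the range, then release it:
 *
 *	tee_mm_entry_t *mm = tee_mm_alloc(&my_pool, 2 * SMALL_PAGE_SIZE);
 *
 *	if (mm) {
 *		paddr_t pa = tee_mm_get_smem(mm);
 *
 *		... use [pa, pa + tee_mm_get_bytes(mm)) ...
 *		tee_mm_free(mm);
 *	}
 */

/*
 * Check whether the block range [offslo, offshi) fits in the gap next
 * to entry e without overlapping its neighbour or the pool limits.
 */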
static inline bool fit_in_gap(tee_mm_pool_t *pool, tee_mm_entry_t *e,
			      paddr_t offslo, paddr_t offshi)
{
	if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
		if (offshi > e->offset ||
		    (e->next != NULL &&
		     (offslo < e->next->offset + e->next->size)) ||
		    (offshi << pool->shift) - 1 > pool->size)
			/* memory not available */
			return false;
	} else {
		if (offslo < (e->offset + e->size) ||
		    (e->next != NULL && (offshi > e->next->offset)) ||
		    (offshi << pool->shift) > pool->size)
			/* memory not available */
			return false;
	}

	return true;
}

tee_mm_entry_t *tee_mm_alloc2(tee_mm_pool_t *pool, paddr_t base, size_t size)
{
	tee_mm_entry_t *entry;
	paddr_t offslo;
	paddr_t offshi;
	tee_mm_entry_t *mm;
	uint32_t exceptions;

	/* Check that pool is initialized */
	if (!pool || !pool->entry)
		return NULL;

	/* Wrapping and sanity check */
	if ((base + size) < base || base < pool->lo)
		return NULL;

	mm = pmalloc(pool, sizeof(tee_mm_entry_t));
	if (!mm)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&pool->lock);

	entry = pool->entry;
	offslo = (base - pool->lo) >> pool->shift;
	offshi = ((base - pool->lo + size - 1) >> pool->shift) + 1;

	/* find slot */
	if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
		while (entry->next != NULL &&
		       offshi < entry->next->offset + entry->next->size)
			entry = entry->next;
	} else {
		while (entry->next != NULL && offslo > entry->next->offset)
			entry = entry->next;
	}

	/* Check that memory is available */
	if (!fit_in_gap(pool, entry, offslo, offshi))
		goto err;

	tee_mm_add(entry, mm);

	mm->offset = offslo;
	mm->size = offshi - offslo;
	mm->pool = pool;

	update_max_allocated(pool);
	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
	return mm;
err:
	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
	pfree(pool, mm);
	return NULL;
}
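
/*
 * Example (illustrative only; the address and my_pool are hypothetical):
 * reserve a fixed 128 KiB range at a known physical address, e.g. a
 * firmware-defined carveout, inside an existing pool:
 *
 *	tee_mm_entry_t *mm = tee_mm_alloc2(&my_pool, 0x40080000, 0x20000);
 */

/*
 * Unlink the entry from its pool's list and free the metadata. Panics
 * if the entry is not found in the pool.
 */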
void tee_mm_free(tee_mm_entry_t *p)
{
	tee_mm_entry_t *entry;
	uint32_t exceptions;

	if (!p || !p->pool)
		return;

	exceptions = cpu_spin_lock_xsave(&p->pool->lock);
	entry = p->pool->entry;

	/* remove entry from list */
	while (entry->next != NULL && entry->next != p)
		entry = entry->next;

	if (!entry->next)
		panic("invalid mm_entry");

	entry->next = entry->next->next;
	cpu_spin_unlock_xrestore(&p->pool->lock, exceptions);

	pfree(p->pool, p);
}

size_t tee_mm_get_bytes(const tee_mm_entry_t *mm)
{
	if (!mm || !mm->pool)
		return 0;
	else
		return mm->size << mm->pool->shift;
}

bool tee_mm_addr_is_within_range(const tee_mm_pool_t *pool, paddr_t addr)
{
	return pool && addr >= pool->lo &&
		addr <= (pool->lo + (pool->size - 1));
}

bool tee_mm_is_empty(tee_mm_pool_t *pool)
{
	bool ret;
	uint32_t exceptions;

	if (pool == NULL || pool->entry == NULL)
		return true;

	exceptions = cpu_spin_lock_xsave(&pool->lock);
	ret = pool->entry == NULL || pool->entry->next == NULL;
	cpu_spin_unlock_xrestore(&pool->lock, exceptions);

	return ret;
}

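/*
 * Return the entry covering addr, or NULL if addr is outside the pool
 * or falls in an unallocated gap.
 */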
tee_mm_entry_t *tee_mm_find(const tee_mm_pool_t *pool, paddr_t addr)
{
	tee_mm_entry_t *entry = NULL;
	size_t offset = 0;
	uint32_t exceptions;

	/* Also rejects a NULL pool, so don't dereference pool before this */
	if (!tee_mm_addr_is_within_range(pool, addr))
		return NULL;

	entry = pool->entry;
	/* size_t, not uint16_t: a large pool would truncate the offset */
	offset = (addr - pool->lo) >> pool->shift;

	exceptions = cpu_spin_lock_xsave(&((tee_mm_pool_t *)pool)->lock);

	while (entry->next != NULL) {
		entry = entry->next;

		if ((offset >= entry->offset) &&
		    (offset < (entry->offset + entry->size))) {
			cpu_spin_unlock_xrestore(&((tee_mm_pool_t *)pool)->lock,
						 exceptions);
			return entry;
		}
	}

	cpu_spin_unlock_xrestore(&((tee_mm_pool_t *)pool)->lock, exceptions);
	return NULL;
}

uintptr_t tee_mm_get_smem(const tee_mm_entry_t *mm)
{
	return (mm->offset << mm->pool->shift) + mm->pool->lo;
}