xref: /optee_os/core/mm/tee_mm.c (revision a0f3154cfa75eda772785dfcb586b916514d7007)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <assert.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_common.h>
#include <malloc.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <pta_stats.h>
#include <string.h>
#include <trace.h>
#include <util.h>

/* Heap helpers: pick the nexus heap or the default heap per pool flags */
static void *pmalloc(tee_mm_pool_t *pool, size_t size)
{
	if (pool->flags & TEE_MM_POOL_NEX_MALLOC)
		return nex_malloc(size);
	else
		return malloc(size);
}

static void *pcalloc(tee_mm_pool_t *pool, size_t num_el, size_t size)
{
	if (pool->flags & TEE_MM_POOL_NEX_MALLOC)
		return nex_calloc(num_el, size);
	else
		return calloc(num_el, size);
}

static void pfree(tee_mm_pool_t *pool, void *ptr)
{
	if (pool->flags & TEE_MM_POOL_NEX_MALLOC)
		nex_free(ptr);
	else
		free(ptr);
}

bool tee_mm_init(tee_mm_pool_t *pool, paddr_t lo, paddr_size_t size,
		 uint8_t shift, uint32_t flags)
{
	paddr_size_t rounded = 0;
	paddr_t initial_lo = lo;

	if (pool == NULL)
		return false;

	lo = ROUNDUP2(lo, 1 << shift);
	rounded = lo - initial_lo;
	size = ROUNDDOWN2(size - rounded, 1 << shift);

	assert(((uint64_t)size >> shift) < (uint64_t)UINT32_MAX);

	pool->lo = lo;
	pool->size = size;
	pool->shift = shift;
	pool->flags = flags;
	pool->entry = pcalloc(pool, 1, sizeof(tee_mm_entry_t));

	if (pool->entry == NULL)
		return false;

	if (pool->flags & TEE_MM_POOL_HI_ALLOC)
		pool->entry->offset = ((size - 1) >> shift) + 1;

	pool->entry->pool = pool;
	pool->lock = SPINLOCK_UNLOCK;

	return true;
}
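
/*
 * Usage sketch (illustrative only, not part of this file): set up a
 * low-to-high pool managing 1 MiB of physical memory in 4 KiB blocks.
 * The pool object and the base address below are hypothetical.
 *
 *	static tee_mm_pool_t example_pool;
 *
 *	void example_pool_init(void)
 *	{
 *		// shift = 12 gives a block granularity of 4 KiB
 *		if (!tee_mm_init(&example_pool, 0x42000000, 0x100000, 12,
 *				 TEE_MM_POOL_NO_FLAGS))
 *			panic("example_pool init");
 *	}
 */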

void tee_mm_final(tee_mm_pool_t *pool)
{
	if (pool == NULL || pool->entry == NULL)
		return;

	while (pool->entry->next != NULL)
		tee_mm_free(pool->entry->next);
	pfree(pool, pool->entry);
	pool->entry = NULL;
}

static void tee_mm_add(tee_mm_entry_t *p, tee_mm_entry_t *nn)
{
	/* add to list */
	nn->next = p->next;
	p->next = nn;
}

#ifdef CFG_WITH_STATS
static size_t tee_mm_stats_allocated(tee_mm_pool_t *pool)
{
	tee_mm_entry_t *entry;
	uint32_t sz = 0;

	if (!pool)
		return 0;

	entry = pool->entry;
	while (entry) {
		sz += entry->size;
		entry = entry->next;
	}

	return sz << pool->shift;
}

void tee_mm_get_pool_stats(tee_mm_pool_t *pool, struct pta_stats_alloc *stats,
			   bool reset)
{
	uint32_t exceptions;

	if (!pool)
		return;

	memset(stats, 0, sizeof(*stats));

	exceptions = cpu_spin_lock_xsave(&pool->lock);

	stats->size = pool->size;
	stats->max_allocated = pool->max_allocated;
	stats->allocated = tee_mm_stats_allocated(pool);

	if (reset)
		pool->max_allocated = 0;
	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
}

static void update_max_allocated(tee_mm_pool_t *pool)
{
	size_t sz = tee_mm_stats_allocated(pool);

	if (sz > pool->max_allocated)
		pool->max_allocated = sz;
}
#else /* CFG_WITH_STATS */
static inline void update_max_allocated(tee_mm_pool_t *pool __unused)
{
}
#endif /* CFG_WITH_STATS */
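
/*
 * Usage sketch (illustrative only, assumes CFG_WITH_STATS=y): snapshot
 * the counters filled in by tee_mm_get_pool_stats() and reset the
 * high-water mark. example_pool is the hypothetical pool above.
 *
 *	struct pta_stats_alloc stats = { };
 *
 *	tee_mm_get_pool_stats(&example_pool, &stats, true);
 *	DMSG("pool size %zu, in use %zu, peak %zu",
 *	     (size_t)stats.size, (size_t)stats.allocated,
 *	     (size_t)stats.max_allocated);
 */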

tee_mm_entry_t *tee_mm_alloc(tee_mm_pool_t *pool, size_t size)
{
	size_t psize;
	tee_mm_entry_t *entry;
	tee_mm_entry_t *nn;
	size_t remaining;
	uint32_t exceptions;

	/* Check that pool is initialized */
	if (!pool || !pool->entry)
		return NULL;

	nn = pmalloc(pool, sizeof(tee_mm_entry_t));
	if (!nn)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&pool->lock);

	entry = pool->entry;
	/* Convert the byte size to a block count, rounding up */
	if (!size)
		psize = 0;
	else
		psize = ((size - 1) >> pool->shift) + 1;

	/* find free slot */
	if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
		while (entry->next != NULL && psize >
		       (entry->offset - entry->next->offset -
			entry->next->size))
			entry = entry->next;
	} else {
		while (entry->next != NULL && psize >
		       (entry->next->offset - entry->size - entry->offset))
			entry = entry->next;
	}

	/* check if we have enough memory */
	if (entry->next == NULL) {
		if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
			/*
			 * entry->offset is a "block count" offset from
			 * pool->lo. The byte offset is
			 * (entry->offset << pool->shift).
			 * In the HI_ALLOC allocation scheme the memory is
			 * allocated from the end of the segment, thus to
			 * validate there is sufficient memory validate that
			 * (entry->offset << pool->shift) >= size.
			 */
			if ((entry->offset << pool->shift) < size) {
				/* out of memory */
				goto err;
			}
		} else {
			if (!pool->size)
				panic("invalid pool");

			remaining = pool->size;
			remaining -= ((entry->offset + entry->size) <<
				      pool->shift);

			if (remaining < size) {
				/* out of memory */
				goto err;
			}
		}
	}

	tee_mm_add(entry, nn);

	if (pool->flags & TEE_MM_POOL_HI_ALLOC)
		nn->offset = entry->offset - psize;
	else
		nn->offset = entry->offset + entry->size;
	nn->size = psize;
	nn->pool = pool;

	update_max_allocated(pool);

	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
	return nn;
err:
	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
	pfree(pool, nn);
	return NULL;
}
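
/*
 * Usage sketch (illustrative only): carve a 16 KiB region out of the
 * hypothetical example_pool, convert it to a physical address with
 * tee_mm_get_smem(), then release it with tee_mm_free().
 *
 *	tee_mm_entry_t *mm = tee_mm_alloc(&example_pool, 4 * 4096);
 *
 *	if (mm) {
 *		paddr_t pa = tee_mm_get_smem(mm);
 *
 *		// ... map/use [pa, pa + tee_mm_get_bytes(mm)) ...
 *		tee_mm_free(mm);
 *	}
 */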

/*
 * Check that the block range [offslo, offshi) fits in the gap between
 * entry e and the entry that follows it, and stays inside the pool.
 */
static inline bool fit_in_gap(tee_mm_pool_t *pool, tee_mm_entry_t *e,
			      paddr_t offslo, paddr_t offshi)
{
	if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
		if (offshi > e->offset ||
		    (e->next != NULL &&
		     (offslo < e->next->offset + e->next->size)) ||
		    (offshi << pool->shift) - 1 > pool->size)
			/* memory not available */
			return false;
	} else {
		if (offslo < (e->offset + e->size) ||
		    (e->next != NULL && (offshi > e->next->offset)) ||
		    (offshi << pool->shift) > pool->size)
			/* memory not available */
			return false;
	}

	return true;
}

tee_mm_entry_t *tee_mm_alloc2(tee_mm_pool_t *pool, paddr_t base, size_t size)
{
	tee_mm_entry_t *entry;
	paddr_t offslo;
	paddr_t offshi;
	tee_mm_entry_t *mm;
	uint32_t exceptions;

	/* Check that pool is initialized */
	if (!pool || !pool->entry)
		return NULL;

	/* Wrapping and sanity check */
	if ((base + size) < base || base < pool->lo)
		return NULL;

	mm = pmalloc(pool, sizeof(tee_mm_entry_t));
	if (!mm)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&pool->lock);

	entry = pool->entry;
	/* First block covered by the range, and one past the last block */
	offslo = (base - pool->lo) >> pool->shift;
	offshi = ((base - pool->lo + size - 1) >> pool->shift) + 1;

	/* find slot */
	if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
		while (entry->next != NULL &&
		       offshi < entry->next->offset + entry->next->size)
			entry = entry->next;
	} else {
		while (entry->next != NULL && offslo > entry->next->offset)
			entry = entry->next;
	}

	/* Check that memory is available */
	if (!fit_in_gap(pool, entry, offslo, offshi))
		goto err;

	tee_mm_add(entry, mm);

	mm->offset = offslo;
	mm->size = offshi - offslo;
	mm->pool = pool;

	update_max_allocated(pool);
	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
	return mm;
err:
	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
	pfree(pool, mm);
	return NULL;
}
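
/*
 * Usage sketch (illustrative only): reserve a fixed physical range with
 * tee_mm_alloc2(). Unlike tee_mm_alloc(), the caller picks the base
 * address; the entry still covers whole blocks. With shift = 12 and the
 * hypothetical pool at 0x42000000, reserving base 0x42001800 and size
 * 0x1000 yields offslo = 1 and offshi = 3, i.e. two 4 KiB blocks,
 * because the range straddles a block boundary.
 *
 *	tee_mm_entry_t *mm = tee_mm_alloc2(&example_pool, 0x42001800,
 *					   0x1000);
 *
 *	if (!mm)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 */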

void tee_mm_free(tee_mm_entry_t *p)
{
	tee_mm_entry_t *entry;
	uint32_t exceptions;

	if (!p || !p->pool)
		return;

	exceptions = cpu_spin_lock_xsave(&p->pool->lock);
	entry = p->pool->entry;

	/* remove entry from list */
	while (entry->next != NULL && entry->next != p)
		entry = entry->next;

	if (!entry->next)
		panic("invalid mm_entry");

	entry->next = entry->next->next;
	cpu_spin_unlock_xrestore(&p->pool->lock, exceptions);

	pfree(p->pool, p);
}

size_t tee_mm_get_bytes(const tee_mm_entry_t *mm)
{
	if (!mm || !mm->pool)
		return 0;
	else
		return mm->size << mm->pool->shift;
}

bool tee_mm_addr_is_within_range(const tee_mm_pool_t *pool, paddr_t addr)
{
	return pool && addr >= pool->lo &&
		addr <= (pool->lo + (pool->size - 1));
}

bool tee_mm_is_empty(tee_mm_pool_t *pool)
{
	bool ret;
	uint32_t exceptions;

	if (pool == NULL || pool->entry == NULL)
		return true;

	exceptions = cpu_spin_lock_xsave(&pool->lock);
	ret = pool->entry == NULL || pool->entry->next == NULL;
	cpu_spin_unlock_xrestore(&pool->lock, exceptions);

	return ret;
}

tee_mm_entry_t *tee_mm_find(const tee_mm_pool_t *pool, paddr_t addr)
{
	tee_mm_entry_t *entry = NULL;
	size_t offset = 0;
	uint32_t exceptions = 0;

	if (!tee_mm_addr_is_within_range(pool, addr))
		return NULL;

	/*
	 * Compute the block index only after the range check above, which
	 * also guards against a NULL pool. Use size_t rather than a 16-bit
	 * index so large pools do not truncate the offset.
	 */
	entry = pool->entry;
	offset = (addr - pool->lo) >> pool->shift;

	exceptions = cpu_spin_lock_xsave(&((tee_mm_pool_t *)pool)->lock);

	while (entry->next != NULL) {
		entry = entry->next;

		if ((offset >= entry->offset) &&
		    (offset < (entry->offset + entry->size))) {
			cpu_spin_unlock_xrestore(&((tee_mm_pool_t *)pool)->lock,
						 exceptions);
			return entry;
		}
	}

	cpu_spin_unlock_xrestore(&((tee_mm_pool_t *)pool)->lock, exceptions);
	return NULL;
}

uintptr_t tee_mm_get_smem(const tee_mm_entry_t *mm)
{
	return (mm->offset << mm->pool->shift) + mm->pool->lo;
}
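
/*
 * Worked example (illustrative only) of the offset math used above:
 * with pool->lo = 0x42000000 and pool->shift = 12, an entry with
 * offset = 1 maps to 0x42000000 + (1 << 12) = 0x42001000, and
 * tee_mm_find(pool, 0x42001FFF) returns that entry because block
 * index (0x42001FFF - 0x42000000) >> 12 = 1 falls inside it.
 */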
387