// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <assert.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_common.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <pta_stats.h>
#include <string.h>
#include <trace.h>
#include <util.h>

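/*
 * Initialize @pool to manage the range [lo, lo + size) in blocks of
 * (1 << shift) bytes: the start is rounded up and the size rounded down
 * to the block size, and a zero-initialized sentinel entry anchors the
 * allocation list. Returns false on invalid pool or allocation failure.
 */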
bool tee_mm_init(tee_mm_pool_t *pool, paddr_t lo, paddr_size_t size,
		 uint8_t shift, uint32_t flags)
{
	paddr_size_t rounded = 0;
	paddr_t initial_lo = lo;

	if (pool == NULL)
		return false;

	lo = ROUNDUP2(lo, 1 << shift);
	rounded = lo - initial_lo;
	size = ROUNDDOWN2(size - rounded, 1 << shift);

	assert(((uint64_t)size >> shift) < (uint64_t)UINT32_MAX);

	*pool = (tee_mm_pool_t){
		.lo = lo,
		.size = size,
		.shift = shift,
		.flags = flags,
	};

	pool->entry = malloc_flags(pool->flags | MAF_ZERO_INIT, NULL,
				   MALLOC_DEFAULT_ALIGNMENT,
				   sizeof(tee_mm_entry_t));
	if (pool->entry == NULL)
		return false;

	if (pool->flags & TEE_MM_POOL_HI_ALLOC)
		pool->entry->offset = ((size - 1) >> shift) + 1;

	pool->entry->pool = pool;
	pool->lock = SPINLOCK_UNLOCK;

	return true;
}

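/*
 * Release @pool: free all outstanding allocations and the sentinel
 * entry, leaving the pool unusable until tee_mm_init() is called again.
 */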
void tee_mm_final(tee_mm_pool_t *pool)
{
	if (pool == NULL || pool->entry == NULL)
		return;

	while (pool->entry->next != NULL)
		tee_mm_free(pool->entry->next);
	free_flags(pool->flags, pool->entry);
	pool->entry = NULL;
}

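/* Link @nn into the allocation list immediately after @p */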
static void tee_mm_add(tee_mm_entry_t *p, tee_mm_entry_t *nn)
{
	/* add to list */
	nn->next = p->next;
	p->next = nn;
}

#ifdef CFG_WITH_STATS
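/* Return the number of bytes currently allocated from @pool */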
static size_t tee_mm_stats_allocated(tee_mm_pool_t *pool)
{
	tee_mm_entry_t *entry;
	uint32_t sz = 0;

	if (!pool)
		return 0;

	entry = pool->entry;
	while (entry) {
		sz += entry->size;
		entry = entry->next;
	}

	return sz << pool->shift;
}

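/*
 * Copy pool size, current allocation and high-water mark into @stats;
 * clear the high-water mark if @reset is true.
 */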
void tee_mm_get_pool_stats(tee_mm_pool_t *pool, struct pta_stats_alloc *stats,
			   bool reset)
{
	uint32_t exceptions;

	if (!pool)
		return;

	memset(stats, 0, sizeof(*stats));

	exceptions = cpu_spin_lock_xsave(&pool->lock);

	stats->size = pool->size;
	stats->max_allocated = pool->max_allocated;
	stats->allocated = tee_mm_stats_allocated(pool);

	if (reset)
		pool->max_allocated = 0;
	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
}

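/* Record a new high-water mark if the pool usage just grew past it */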
static void update_max_allocated(tee_mm_pool_t *pool)
{
	size_t sz = tee_mm_stats_allocated(pool);

	if (sz > pool->max_allocated)
		pool->max_allocated = sz;
}
#else /* CFG_WITH_STATS */
static inline void update_max_allocated(tee_mm_pool_t *pool __unused)
{
}
#endif /* CFG_WITH_STATS */

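/*
 * Allocate @size bytes (rounded up to whole blocks) from @pool. The
 * entry list is kept sorted and is scanned for the first gap large
 * enough; with TEE_MM_POOL_HI_ALLOC the pool is filled from the top of
 * the range downwards. @flags supplies extra MAF_* flags for the
 * bookkeeping allocation (MAF_NEX is always taken from pool->flags).
 * Returns NULL if the pool is uninitialized or out of memory.
 */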
tee_mm_entry_t *tee_mm_alloc_flags(tee_mm_pool_t *pool, size_t size,
				   uint32_t flags)
{
	size_t psize = 0;
	tee_mm_entry_t *entry = NULL;
	tee_mm_entry_t *nn = NULL;
	size_t remaining = 0;
	uint32_t exceptions = 0;

	/* Check that pool is initialized */
	if (!pool || !pool->entry)
		return NULL;

	flags &= ~MAF_NEX; /* This flag must come from pool->flags */
	flags |= pool->flags;
	nn = malloc_flags(flags, NULL, MALLOC_DEFAULT_ALIGNMENT,
			  sizeof(tee_mm_entry_t));
	if (!nn)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&pool->lock);

	entry = pool->entry;
	if (!size)
		psize = 0;
	else
		psize = ((size - 1) >> pool->shift) + 1;

	/* find free slot */
	if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
		while (entry->next != NULL && psize >
		       (entry->offset - entry->next->offset -
			entry->next->size))
			entry = entry->next;
	} else {
		while (entry->next != NULL && psize >
		       (entry->next->offset - entry->size - entry->offset))
			entry = entry->next;
	}

	/* check if we have enough memory */
	if (entry->next == NULL) {
		if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
			/*
			 * entry->offset is a "block count" offset from
			 * pool->lo. The byte offset is
			 * (entry->offset << pool->shift).
			 * In the HI_ALLOC scheme memory is allocated from
			 * the end of the segment, so to check that there is
			 * sufficient memory, validate that
			 * (entry->offset << pool->shift) >= size.
			 */
			if ((entry->offset << pool->shift) < size) {
				/* out of memory */
				goto err;
			}
		} else {
			if (!pool->size)
				panic("invalid pool");

			remaining = pool->size;
			remaining -= ((entry->offset + entry->size) <<
				      pool->shift);

			if (remaining < size) {
				/* out of memory */
				goto err;
			}
		}
	}

	tee_mm_add(entry, nn);

	if (pool->flags & TEE_MM_POOL_HI_ALLOC)
		nn->offset = entry->offset - psize;
	else
		nn->offset = entry->offset + entry->size;
	nn->size = psize;
	nn->pool = pool;

	update_max_allocated(pool);

	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
	return nn;
err:
	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
	free_flags(flags, nn);
	return NULL;
}

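/*
 * Check whether the block range [offslo, offshi) fits in the gap
 * between @e and its successor (or the pool boundary) without
 * overlapping either neighbour.
 */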
static inline bool fit_in_gap(tee_mm_pool_t *pool, tee_mm_entry_t *e,
			      paddr_t offslo, paddr_t offshi)
{
	if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
		if (offshi > e->offset ||
		    (e->next != NULL &&
		     (offslo < e->next->offset + e->next->size)) ||
		    (offshi << pool->shift) - 1 > pool->size)
			/* memory not available */
			return false;
	} else {
		if (offslo < (e->offset + e->size) ||
		    (e->next != NULL && (offshi > e->next->offset)) ||
		    (offshi << pool->shift) > pool->size)
			/* memory not available */
			return false;
	}

	return true;
}

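/*
 * Allocate the specific byte range [base, base + size) from @pool at
 * block granularity. Returns NULL if the range is outside the pool or
 * overlaps an existing allocation.
 */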
tee_mm_entry_t *tee_mm_alloc2(tee_mm_pool_t *pool, paddr_t base, size_t size)
{
	tee_mm_entry_t *entry;
	paddr_t offslo;
	paddr_t offshi;
	tee_mm_entry_t *mm;
	uint32_t exceptions;

	/* Check that pool is initialized */
	if (!pool || !pool->entry)
		return NULL;

	/* Wrapping and sanity check */
	if ((base + size) < base || base < pool->lo)
		return NULL;

	mm = malloc_flags(pool->flags, NULL, MALLOC_DEFAULT_ALIGNMENT,
			  sizeof(tee_mm_entry_t));
	if (!mm)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&pool->lock);

	entry = pool->entry;
	offslo = (base - pool->lo) >> pool->shift;
	offshi = ((base - pool->lo + size - 1) >> pool->shift) + 1;

	/* find slot */
	if (pool->flags & TEE_MM_POOL_HI_ALLOC) {
		while (entry->next != NULL &&
		       offshi < entry->next->offset + entry->next->size)
			entry = entry->next;
	} else {
		while (entry->next != NULL && offslo > entry->next->offset)
			entry = entry->next;
	}

	/* Check that memory is available */
	if (!fit_in_gap(pool, entry, offslo, offshi))
		goto err;

	tee_mm_add(entry, mm);

	mm->offset = offslo;
	mm->size = offshi - offslo;
	mm->pool = pool;

	update_max_allocated(pool);
	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
	return mm;
err:
	cpu_spin_unlock_xrestore(&pool->lock, exceptions);
	free_flags(pool->flags, mm);
	return NULL;
}

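/*
 * Free an entry returned by tee_mm_alloc*(): unlink it from its pool
 * and release the bookkeeping structure. Panics if the entry is not
 * found in the pool list.
 */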
void tee_mm_free(tee_mm_entry_t *p)
{
	tee_mm_entry_t *entry;
	uint32_t exceptions;

	if (!p || !p->pool)
		return;

	exceptions = cpu_spin_lock_xsave(&p->pool->lock);
	entry = p->pool->entry;

	/* remove entry from list */
	while (entry->next != NULL && entry->next != p)
		entry = entry->next;

	if (!entry->next)
		panic("invalid mm_entry");

	entry->next = entry->next->next;
	cpu_spin_unlock_xrestore(&p->pool->lock, exceptions);

	free_flags(p->pool->flags, p);
}

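/* Return the size in bytes covered by @mm, or 0 for an invalid entry */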
size_t tee_mm_get_bytes(const tee_mm_entry_t *mm)
{
	if (!mm || !mm->pool)
		return 0;
	else
		return mm->size << mm->pool->shift;
}

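/* Return true if @addr lies within the range covered by @pool */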
bool tee_mm_addr_is_within_range(const tee_mm_pool_t *pool, paddr_t addr)
{
	return pool && addr >= pool->lo &&
	       addr <= (pool->lo + (pool->size - 1));
}

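/* Return true if @pool has no allocations (or is uninitialized) */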
bool tee_mm_is_empty(tee_mm_pool_t *pool)
{
	bool ret;
	uint32_t exceptions;

	if (pool == NULL || pool->entry == NULL)
		return true;

	exceptions = cpu_spin_lock_xsave(&pool->lock);
	ret = pool->entry == NULL || pool->entry->next == NULL;
	cpu_spin_unlock_xrestore(&pool->lock, exceptions);

	return ret;
}

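/*
 * Return the entry covering address @addr, or NULL if the address is
 * outside the pool or not allocated.
 */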
tee_mm_entry_t *tee_mm_find(const tee_mm_pool_t *pool, paddr_t addr)
{
	tee_mm_entry_t *entry = pool->entry;
	uint16_t offset = (addr - pool->lo) >> pool->shift;
	uint32_t exceptions;

	if (!tee_mm_addr_is_within_range(pool, addr))
		return NULL;

	exceptions = cpu_spin_lock_xsave(&((tee_mm_pool_t *)pool)->lock);

	while (entry->next != NULL) {
		entry = entry->next;

		if ((offset >= entry->offset) &&
		    (offset < (entry->offset + entry->size))) {
			cpu_spin_unlock_xrestore(&((tee_mm_pool_t *)pool)->lock,
						 exceptions);
			return entry;
		}
	}

	cpu_spin_unlock_xrestore(&((tee_mm_pool_t *)pool)->lock, exceptions);
	return NULL;
}

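/* Return the base address of the range covered by @mm */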
uintptr_t tee_mm_get_smem(const tee_mm_entry_t *mm)
{
	return (mm->offset << mm->pool->shift) + mm->pool->lo;
}