xref: /optee_os/lib/libutils/ext/mempool.c (revision 9fc2442cc66c279cb962c90c4375746fc9b28bb9)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * Copyright (c) 2018-2019, Linaro Limited
5  */
6 
7 
8 #include <assert.h>
9 #include <compiler.h>
10 #include <malloc.h>
11 #include <mempool.h>
12 #include <string.h>
13 #include <util.h>
14 
15 #if defined(__KERNEL__)
16 #include <kernel/mutex.h>
17 #include <kernel/panic.h>
18 #include <kernel/thread.h>
19 #endif
20 
21 /*
22  * Allocation of temporary memory buffers which are used in a stack like
 23  * fashion. One example is when a Big Number is needed for a temporary
24  * variable in a Big Number computation: Big Number operations (add,...),
 25  * crypto algorithms (rsa, ecc, ...).
26  *
27  *  The allocation algorithm takes memory buffers from a pool,
28  *  characterized by (cf. struct mempool):
29  * - the total size (in bytes) of the pool
30  * - the offset of the last item allocated in the pool (struct
 31  *   mempool_item). This offset is -1 if nothing is allocated yet.
32  *
33  * Each item consists of (struct mempool_item)
34  * - the size of the item
35  * - the offsets, in the pool, of the previous and next items
36  *
37  * The allocation allocates an item for a given size.
38  * The allocation is performed in the pool after the last
39  * allocated items. This means:
40  * - the heap is never used.
41  * - there is no assumption on the size of the allocated memory buffers. Only
42  *   the size of the pool will limit the allocation.
43  * - a constant time allocation and free as there is no list scan
44  * - but a potentially fragmented memory as the allocation does not take into
45  *   account "holes" in the pool (allocation is performed after the last
46  *   allocated variable). Indeed, this interface is supposed to be used
47  *   with stack like allocations to avoid this issue. This means that
48  *   allocated items:
49  *   - should have a short life cycle
50  *   - if an item A is allocated before another item B, then A should be
51  *     released after B.
52  *   So the potential fragmentation is mitigated.
53  */
54 
55 
/*
 * State of one memory pool. The pool's backing buffer is provided by the
 * caller of mempool_alloc_pool() and is not owned by this struct.
 */
struct mempool {
	size_t size;  /* size of the memory pool, in bytes */
	ssize_t last_offset;   /* offset to the last one, -1 when pool is empty */
	vaddr_t data;	/* virtual address of the backing buffer */
#ifdef CFG_MEMPOOL_REPORT_LAST_OFFSET
	ssize_t max_last_offset;	/* high-water mark of last_offset, for usage reports */
#endif
#if defined(__KERNEL__)
	/* Called from put_pool() when the last reference is dropped */
	void (*release_mem)(void *ptr, size_t size);
	/*
	 * Recursive so one thread can take the pool several times
	 * (one lock level per outstanding allocation)
	 */
	struct recursive_mutex mu;
#endif
};
68 
#if defined(__KERNEL__)
/*
 * Default shared pool; presumably assigned during kernel initialization by
 * code outside this file — NOTE(review): confirm against the initializer.
 */
struct mempool *mempool_default;
#endif
72 
/*
 * Take a reference on @pool. In kernel builds this locks the pool's
 * recursive mutex; the lock is held until the matching put_pool(), so a
 * successful mempool_alloc() leaves the pool locked until the item is
 * freed. In non-kernel builds this is a no-op.
 */
static void get_pool(struct mempool *pool __maybe_unused)
{
#if defined(__KERNEL__)
	mutex_lock_recursive(&pool->mu);
#endif
}
79 
/*
 * Drop a reference on @pool (kernel builds only; no-op otherwise).
 *
 * When this is the last reference (recursive lock depth 1), the pool must
 * be empty — any remaining item is a caller bug and triggers panic() —
 * and the optional release_mem() hook is invoked so the backing memory
 * can be reclaimed until the pool is used again.
 */
static void put_pool(struct mempool *pool __maybe_unused)
{
#if defined(__KERNEL__)
	if (mutex_get_recursive_lock_depth(&pool->mu) == 1) {
		/*
		 * As the refcount is about to become 0 there should be no items
		 * left
		 */
		if (pool->last_offset >= 0)
			panic();
		if (pool->release_mem)
			pool->release_mem((void *)pool->data, pool->size);
	}
	mutex_unlock_recursive(&pool->mu);
#endif
}
96 
97 struct mempool *
98 mempool_alloc_pool(void *data, size_t size,
99 		   void (*release_mem)(void *ptr, size_t size) __maybe_unused)
100 {
101 	struct mempool *pool = calloc(1, sizeof(*pool));
102 
103 	COMPILE_TIME_ASSERT(MEMPOOL_ALIGN >= __alignof__(struct mempool_item));
104 	assert(!((vaddr_t)data & (MEMPOOL_ALIGN - 1)));
105 
106 	if (pool) {
107 		pool->size = size;
108 		pool->data = (vaddr_t)data;
109 		pool->last_offset = -1;
110 #if defined(__KERNEL__)
111 		pool->release_mem = release_mem;
112 		mutex_init_recursive(&pool->mu);
113 #endif
114 	}
115 
116 	return pool;
117 }
118 
119 void *mempool_alloc(struct mempool *pool, size_t size)
120 {
121 	size_t offset;
122 	struct mempool_item *new_item;
123 	struct mempool_item *last_item = NULL;
124 
125 	get_pool(pool);
126 
127 	if (pool->last_offset < 0) {
128 		offset = 0;
129 	} else {
130 		last_item = (struct mempool_item *)(pool->data +
131 						    pool->last_offset);
132 		offset = pool->last_offset + last_item->size;
133 
134 		offset = ROUNDUP(offset, MEMPOOL_ALIGN);
135 		if (offset > pool->size)
136 			goto error;
137 	}
138 
139 	size = sizeof(struct mempool_item) + size;
140 	size = ROUNDUP(size, MEMPOOL_ALIGN);
141 	if (offset + size > pool->size)
142 		goto error;
143 
144 	new_item = (struct mempool_item *)(pool->data + offset);
145 	new_item->size = size;
146 	new_item->prev_item_offset = pool->last_offset;
147 	if (last_item)
148 		last_item->next_item_offset = offset;
149 	new_item->next_item_offset = -1;
150 	pool->last_offset = offset;
151 #ifdef CFG_MEMPOOL_REPORT_LAST_OFFSET
152 	if (pool->last_offset > pool->max_last_offset) {
153 		pool->max_last_offset = pool->last_offset;
154 		DMSG("Max memory usage increased to %zu",
155 		     (size_t)pool->max_last_offset);
156 	}
157 #endif
158 
159 	return new_item + 1;
160 
161 error:
162 	EMSG("Failed to allocate %zu bytes, please tune the pool size", size);
163 	put_pool(pool);
164 	return NULL;
165 }
166 
/*
 * Allocate a zero-initialized array of @nmemb elements of @size bytes
 * each from @pool. Returns NULL if the total size overflows size_t or
 * the pool cannot satisfy the request.
 */
void *mempool_calloc(struct mempool *pool, size_t nmemb, size_t size)
{
	size_t total = 0;
	void *buf = NULL;

	/* Reject nmemb * size overflow before allocating */
	if (MUL_OVERFLOW(nmemb, size, &total))
		return NULL;

	buf = mempool_alloc(pool, total);
	if (!buf)
		return NULL;

	return memset(buf, 0, total);
}
181 
/*
 * Release @ptr, previously returned by mempool_alloc()/mempool_calloc()
 * on @pool. Unlinks the item from the pool's doubly linked list, updates
 * the pool's last_offset and drops the pool reference taken at
 * allocation time. Freeing NULL is a no-op.
 */
void mempool_free(struct mempool *pool, void *ptr)
{
	struct mempool_item *item;
	struct mempool_item *prev_item;
	struct mempool_item *next_item;
	/* -1 == "pool becomes empty", overridden below when items remain */
	ssize_t last_offset = -1;

	if (!ptr)
		return;

	/* The item header sits immediately before the user pointer */
	item = (struct mempool_item *)((vaddr_t)ptr -
				       sizeof(struct mempool_item));
	if (item->prev_item_offset >= 0) {
		prev_item = (struct mempool_item *)(pool->data +
						    item->prev_item_offset);
		prev_item->next_item_offset = item->next_item_offset;
		/* If this was the last item, the previous one becomes last */
		last_offset = item->prev_item_offset;
	}

	if (item->next_item_offset >= 0) {
		next_item = (struct mempool_item *)(pool->data +
						    item->next_item_offset);
		next_item->prev_item_offset = item->prev_item_offset;
		/*
		 * A successor exists, so the freed item was not the last:
		 * deliberately override last_offset to keep the current tail.
		 */
		last_offset = pool->last_offset;
	}

	pool->last_offset = last_offset;
	/* Matches the get_pool() done by mempool_alloc() */
	put_pool(pool);
}
211