xref: /optee_os/lib/libutils/ext/mempool.c (revision 52c0b45cbb01f0d9e068cb3765c40c1b41d4337a)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * Copyright (c) 2018-2019, Linaro Limited
5  */
6 
7 
8 #include <assert.h>
9 #include <compiler.h>
10 #include <malloc.h>
11 #include <mempool.h>
12 #include <string.h>
13 #include <util.h>
14 
15 #if defined(__KERNEL__)
16 #include <kernel/mutex.h>
17 #include <kernel/panic.h>
18 #endif
19 
20 /*
21  * Allocation of temporary memory buffers which are used in a stack like
 * fashion. One example is when a Big Number is needed for a temporary
23  * variable in a Big Number computation: Big Number operations (add,...),
 * crypto algorithms (rsa, ecc, ...).
25  *
26  *  The allocation algorithm takes memory buffers from a pool,
27  *  characterized by (cf. struct mempool):
28  * - the total size (in bytes) of the pool
 * - the offset of the last item allocated in the pool (struct
 *   mempool_item). This offset is -1 if nothing is allocated yet.
31  *
32  * Each item consists of (struct mempool_item)
33  * - the size of the item
34  * - the offsets, in the pool, of the previous and next items
35  *
36  * The allocation allocates an item for a given size.
 * The allocation is performed in the pool after the last
 * allocated item. This means:
39  * - the heap is never used.
40  * - there is no assumption on the size of the allocated memory buffers. Only
41  *   the size of the pool will limit the allocation.
42  * - a constant time allocation and free as there is no list scan
43  * - but a potentially fragmented memory as the allocation does not take into
44  *   account "holes" in the pool (allocation is performed after the last
45  *   allocated variable). Indeed, this interface is supposed to be used
46  *   with stack like allocations to avoid this issue. This means that
47  *   allocated items:
48  *   - should have a short life cycle
49  *   - if an item A is allocated before another item B, then A should be
50  *     released after B.
51  *   So the potential fragmentation is mitigated.
52  */
53 
54 
/*
 * Pool descriptor; see the file header comment for the allocation strategy.
 * Allocated items form a doubly linked list of byte offsets into @data.
 */
struct mempool {
	size_t size;  /* size of the memory pool, in bytes */
	ssize_t last_offset;   /* offset of the last allocated item, -1 if empty */
	vaddr_t data;	/* base address of the pool's backing memory */
#ifdef CFG_MEMPOOL_REPORT_LAST_OFFSET
	ssize_t max_last_offset;	/* high-water mark of last_offset, for logging */
#endif
#if defined(__KERNEL__)
	/* Optional hook invoked by put_pool() when the pool becomes empty */
	void (*release_mem)(void *ptr, size_t size);
	/* Recursive: one lock level is held per outstanding allocation */
	struct recursive_mutex mu;
#endif
};
67 
#if defined(__KERNEL__)
/*
 * Pool shared by default users of this API; presumably assigned during
 * kernel/crypto initialization — the initializer is not in this file.
 */
struct mempool *mempool_default;
#endif
71 
/*
 * Take one level of the pool's recursive lock (kernel builds only).
 * Called once per allocation; on success mempool_alloc() keeps the lock
 * until the matching mempool_free() calls put_pool(), so the lock depth
 * tracks the number of outstanding items.
 */
static void get_pool(struct mempool *pool __maybe_unused)
{
#if defined(__KERNEL__)
	mutex_lock_recursive(&pool->mu);
#endif
}
78 
/*
 * Drop one level of the pool's recursive lock (kernel builds only),
 * balancing a get_pool().  When the last level is about to be released
 * (lock depth 1) the pool must be empty; the optional release_mem()
 * callback then hands the backing memory back to its provider.
 */
static void put_pool(struct mempool *pool __maybe_unused)
{
#if defined(__KERNEL__)
	if (mutex_get_recursive_lock_depth(&pool->mu) == 1) {
		/*
		 * As the refcount is about to become 0 there should be no items
		 * left
		 */
		if (pool->last_offset >= 0)
			panic();
		if (pool->release_mem)
			pool->release_mem((void *)pool->data, pool->size);
	}
	mutex_unlock_recursive(&pool->mu);
#endif
}
95 
96 struct mempool *
97 mempool_alloc_pool(void *data, size_t size,
98 		   void (*release_mem)(void *ptr, size_t size) __maybe_unused)
99 {
100 	struct mempool *pool = calloc(1, sizeof(*pool));
101 
102 	COMPILE_TIME_ASSERT(MEMPOOL_ALIGN >= __alignof__(struct mempool_item));
103 	assert(!((vaddr_t)data & (MEMPOOL_ALIGN - 1)));
104 
105 	if (pool) {
106 		pool->size = size;
107 		pool->data = (vaddr_t)data;
108 		pool->last_offset = -1;
109 #if defined(__KERNEL__)
110 		pool->release_mem = release_mem;
111 		mutex_init_recursive(&pool->mu);
112 #endif
113 	}
114 
115 	return pool;
116 }
117 
118 void *mempool_alloc(struct mempool *pool, size_t size)
119 {
120 	size_t offset;
121 	struct mempool_item *new_item;
122 	struct mempool_item *last_item = NULL;
123 
124 	get_pool(pool);
125 
126 	if (pool->last_offset < 0) {
127 		offset = 0;
128 	} else {
129 		last_item = (struct mempool_item *)(pool->data +
130 						    pool->last_offset);
131 		offset = pool->last_offset + last_item->size;
132 
133 		offset = ROUNDUP(offset, MEMPOOL_ALIGN);
134 		if (offset > pool->size)
135 			goto error;
136 	}
137 
138 	size = sizeof(struct mempool_item) + size;
139 	size = ROUNDUP(size, MEMPOOL_ALIGN);
140 	if (offset + size > pool->size)
141 		goto error;
142 
143 	new_item = (struct mempool_item *)(pool->data + offset);
144 	new_item->size = size;
145 	new_item->prev_item_offset = pool->last_offset;
146 	if (last_item)
147 		last_item->next_item_offset = offset;
148 	new_item->next_item_offset = -1;
149 	pool->last_offset = offset;
150 #ifdef CFG_MEMPOOL_REPORT_LAST_OFFSET
151 	if (pool->last_offset > pool->max_last_offset) {
152 		pool->max_last_offset = pool->last_offset;
153 		DMSG("Max memory usage increased to %zu",
154 		     (size_t)pool->max_last_offset);
155 	}
156 #endif
157 
158 	return new_item + 1;
159 
160 error:
161 	EMSG("Failed to allocate %zu bytes, please tune the pool size", size);
162 	put_pool(pool);
163 	return NULL;
164 }
165 
/*
 * Allocate a zero-initialized array of @nmemb elements of @size bytes
 * each from @pool.  Returns NULL if the element count overflows size_t
 * or if the underlying mempool_alloc() fails.
 */
void *mempool_calloc(struct mempool *pool, size_t nmemb, size_t size)
{
	size_t total = 0;
	void *ptr = NULL;

	if (MUL_OVERFLOW(nmemb, size, &total))
		return NULL;

	ptr = mempool_alloc(pool, total);
	if (ptr)
		memset(ptr, 0, total);

	return ptr;
}
180 
/*
 * Free @ptr, previously returned by mempool_alloc()/mempool_calloc(),
 * unlinking its item from the pool's offset-linked list and dropping the
 * lock level held since the matching allocation (via put_pool()).
 * A NULL @ptr is a no-op.
 *
 * Per the stack-like contract in the file header, last_offset only moves
 * back when the most recently allocated item is freed; freeing a middle
 * item leaves a hole that is not reused.
 */
void mempool_free(struct mempool *pool, void *ptr)
{
	struct mempool_item *item;
	struct mempool_item *prev_item;
	struct mempool_item *next_item;
	ssize_t last_offset = -1;

	if (!ptr)
		return;

	/* The item header sits immediately before the user buffer */
	item = (struct mempool_item *)((vaddr_t)ptr -
				       sizeof(struct mempool_item));
	if (item->prev_item_offset >= 0) {
		prev_item = (struct mempool_item *)(pool->data +
						    item->prev_item_offset);
		prev_item->next_item_offset = item->next_item_offset;
		/* If this was the last item, the previous one becomes last */
		last_offset = item->prev_item_offset;
	}

	if (item->next_item_offset >= 0) {
		next_item = (struct mempool_item *)(pool->data +
						    item->next_item_offset);
		next_item->prev_item_offset = item->prev_item_offset;
		/* Freed a middle item: the last item is unchanged */
		last_offset = pool->last_offset;
	}

	/* Both branches taken above: list was walked from a middle item */
	pool->last_offset = last_offset;
	put_pool(pool);
}
210