xref: /optee_os/core/arch/arm/mm/sp_mem.c (revision 6f3a5646acd1e87be4989bc165f06cf967f65201)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
 */
#include <assert.h>
#include <bitstring.h>
#include <ffa.h>
#include <kernel/spinlock.h>
#include <mm/mobj.h>
#include <mm/sp_mem.h>

#define NUM_SHARES	64

static bitstr_t bit_decl(share_bits, NUM_SHARES);
static unsigned int sp_mem_lock = SPINLOCK_UNLOCK;

/* mem_shares stores all active FF-A shares. */
SLIST_HEAD(sp_mem_head, sp_mem);
static struct sp_mem_head mem_shares = SLIST_HEAD_INITIALIZER(sp_mem_head);
/* The weak instance of mobj_sp_ops defined below mandates that it is not static */
const struct mobj_ops mobj_sp_ops;

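/* A mobj backed by an array of physical page addresses shared with an SP */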
struct mobj_sp {
	struct mobj mobj;
	uint32_t mem_type;
	paddr_t pages[];
};

static struct mobj_sp *to_mobj_sp(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_sp_ops);
	return container_of(mobj, struct mobj_sp, mobj);
}

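/* Allocation size for a mobj_sp holding num_pages pages, 0 on overflow */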
static size_t mobj_sp_size(size_t num_pages)
{
	size_t s = 0;

	if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))
		return 0;
	if (ADD_OVERFLOW(sizeof(struct mobj_sp), s, &s))
		return 0;
	return s;
}

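/* Allocate a mobj with room for the physical addresses of @pages pages */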
struct mobj *sp_mem_new_mobj(uint64_t pages, uint32_t cache_type)
{
	struct mobj_sp *m = NULL;
	size_t s = 0;

	s = mobj_sp_size(pages);
	if (!s)
		return NULL;

	m = calloc(1, s);
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_sp_ops;
	m->mobj.size = pages * SMALL_PAGE_SIZE;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;

	m->mem_type = cache_type;

	refcount_set(&m->mobj.refc, 1);
	return &m->mobj;
}

static size_t get_page_count(struct mobj_sp *ms)
{
	return ROUNDUP(ms->mobj.size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
}

/* Add physical pages to the mobj and advance *idx past the pages just added. */
int sp_mem_add_pages(struct mobj *mobj, unsigned int *idx,
		     paddr_t pa, unsigned int num_pages)
{
	struct mobj_sp *ms = to_mobj_sp(mobj);
	unsigned int n = 0;
	size_t tot_page_count = get_page_count(ms);

	if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE))
		return TEE_ERROR_BAD_PARAMETERS;

	for (n = 0; n < num_pages; n++)
		ms->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;

	*idx += n;
	return TEE_SUCCESS;
}

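/* Report the memory/cache attributes the pages were shared with */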
static TEE_Result sp_mem_get_cattr(struct mobj *mobj, uint32_t *cattr)
{
	struct mobj_sp *m = to_mobj_sp(mobj);

	*cattr = m->mem_type;

	return TEE_SUCCESS;
}

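/* SP shared memory matches non-secure and registered SHM buffers */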
static bool mobj_sp_matches(struct mobj *mobj __maybe_unused,
			    enum buf_is_attr attr)
{
	assert(mobj->ops == &mobj_sp_ops);

	return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
}

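/* Translate @offset within the mobj to a physical address */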
static TEE_Result get_pa(struct mobj *mobj, size_t offset,
			 size_t granule, paddr_t *pa)
{
	struct mobj_sp *ms = to_mobj_sp(mobj);
	paddr_t p = 0;

	if (!pa)
		return TEE_ERROR_GENERIC;

	if (offset >= mobj->size)
		return TEE_ERROR_GENERIC;

	switch (granule) {
	case 0:
		p = ms->pages[offset / SMALL_PAGE_SIZE] +
		    (offset & SMALL_PAGE_MASK);
		break;
	case SMALL_PAGE_SIZE:
		p = ms->pages[offset / SMALL_PAGE_SIZE];
		break;
	default:
		return TEE_ERROR_GENERIC;
	}
	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(get_pa);

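/* The backing pages are registered page aligned, so the physical offset is 0 */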
static size_t get_phys_offs(struct mobj *mobj __maybe_unused,
			    size_t granule __maybe_unused)
{
	return 0;
}

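/* The mobj .free op: only free if no new reference appeared meanwhile */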
static void inactivate(struct mobj *mobj)
{
	struct mobj_sp *ms = to_mobj_sp(mobj);
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&sp_mem_lock);
	/*
	 * If the refcount isn't 0, some other thread has taken a new
	 * reference on this mobj after the mobj_put() that brought us here
	 * and before we got the lock, so it must not be freed yet.
	 */
	if (!refcount_val(&mobj->refc))
		free(ms);

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
}

const struct mobj_ops mobj_sp_ops __weak __relrodata_unpaged("mobj_sp_ops") = {
	.get_pa = get_pa,
	.get_phys_offs = get_phys_offs,
	.get_cattr = sp_mem_get_cattr,
	.matches = mobj_sp_matches,
	.free = inactivate,
};

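/* Find the receiver with endpoint ID @s_id in @smem, NULL if not present */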
struct sp_mem_receiver *sp_mem_get_receiver(uint32_t s_id, struct sp_mem *smem)
{
	struct sp_mem_receiver *r = NULL;

	SLIST_FOREACH(r, &smem->receivers, link) {
		if (r->perm.endpoint_id == s_id)
			return r;
	}
	return NULL;
}

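/* Look up an active share by its FF-A global memory handle */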
struct sp_mem *sp_mem_get(uint64_t handle)
{
	struct sp_mem *smem = NULL;
	uint32_t exceptions = cpu_spin_lock_xsave(&sp_mem_lock);

	SLIST_FOREACH(smem, &mem_shares, link) {
		if (smem->global_handle == handle)
			break;
	}

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
	return smem;
}

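/* Return the VA where @mobj is mapped at @offset in @uctx, or NULL */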
void *sp_mem_get_va(const struct user_mode_ctx *uctx, size_t offset,
		    struct mobj *mobj)
{
	struct vm_region *region = NULL;

	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
		if (region->mobj == mobj && region->offset == offset)
			return (void *)region->va;
	}
	return NULL;
}

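/* Allocate a new share and assign it a free FF-A global memory handle */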
struct sp_mem *sp_mem_new(void)
{
	struct sp_mem *smem = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	smem = calloc(1, sizeof(*smem));
	if (!smem)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&sp_mem_lock);

	bit_ffc(share_bits, NUM_SHARES, &i);
	if (i == -1) {
		cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
		free(smem);
		return NULL;
	}

	bit_set(share_bits, i);
	/*
	 * OP-TEE SHAREs use bit 44, so use bit 45 instead.
	 */
	smem->global_handle = i | FFA_MEMORY_HANDLE_SECURE_BIT;
	SLIST_INIT(&smem->regions);
	SLIST_INIT(&smem->receivers);

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);

	return smem;
}

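/* Add the share to the global list of active shares */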
void sp_mem_add(struct sp_mem *smem)
{
	uint32_t exceptions = cpu_spin_lock_xsave(&sp_mem_lock);

	SLIST_INSERT_HEAD(&mem_shares, smem, link);

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
}

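/* Check whether @new_reg overlaps a region of an already active share */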
bool sp_mem_is_shared(struct sp_mem_map_region *new_reg)
{
	struct sp_mem *smem = NULL;
	uint32_t exceptions = cpu_spin_lock_xsave(&sp_mem_lock);
	uint64_t new_reg_end = new_reg->page_offset +
			       (new_reg->page_count * SMALL_PAGE_SIZE);

	SLIST_FOREACH(smem, &mem_shares, link) {
		struct sp_mem_map_region *reg = NULL;

		SLIST_FOREACH(reg, &smem->regions, link) {
			if (new_reg->mobj == reg->mobj) {
				uint64_t reg_end = 0;

				reg_end = reg->page_offset +
					  (reg->page_count * SMALL_PAGE_SIZE);

				if (new_reg->page_offset < reg_end &&
				    new_reg_end > reg->page_offset) {
					cpu_spin_unlock_xrestore(&sp_mem_lock,
								 exceptions);
					return true;
				}
			}
		}
	}

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
	return false;
}

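/* Free the receivers and regions of @smem, release its handle and free it */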
void sp_mem_remove(struct sp_mem *smem)
{
	uint32_t exceptions = 0;
	int i = 0;
	struct sp_mem *tsmem = NULL;

	if (!smem)
		return;

	/* Remove all receivers */
	while (!SLIST_EMPTY(&smem->receivers)) {
		struct sp_mem_receiver *receiver = NULL;

		receiver = SLIST_FIRST(&smem->receivers);
		SLIST_REMOVE_HEAD(&smem->receivers, link);
		free(receiver);
	}
	/* Remove all regions */
	while (!SLIST_EMPTY(&smem->regions)) {
		struct sp_mem_map_region *region = SLIST_FIRST(&smem->regions);

		mobj_put(region->mobj);

		SLIST_REMOVE_HEAD(&smem->regions, link);
		free(region);
	}

	exceptions = cpu_spin_lock_xsave(&sp_mem_lock);

	i = smem->global_handle & ~FFA_MEMORY_HANDLE_SECURE_BIT;
	assert(i < NUM_SHARES);

	bit_clear(share_bits, i);

	SLIST_FOREACH(tsmem, &mem_shares, link) {
		if (tsmem == smem) {
			SLIST_REMOVE(&mem_shares, smem, sp_mem, link);
			break;
		}
	}

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);

	free(smem);
}
326