xref: /optee_os/core/arch/arm/mm/sp_mem.c (revision 74bd878e0765e11f55580667e985bd408aed6167)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
 */
#include <assert.h>
#include <bitstring.h>
#include <ffa.h>
#include <kernel/spinlock.h>
#include <mm/mobj.h>
#include <mm/sp_mem.h>

#define NUM_SHARES	64

static bitstr_t bit_decl(share_bits, NUM_SHARES);
static unsigned int sp_mem_lock = SPINLOCK_UNLOCK;

/* mem_shares stores all active FF-A shares. */
SLIST_HEAD(sp_mem_head, sp_mem);
static struct sp_mem_head mem_shares = SLIST_HEAD_INITIALIZER(sp_mem_head);
/* Not static: the definition of mobj_sp_ops below is weak */
const struct mobj_ops mobj_sp_ops;

struct mobj_sp {
	struct mobj mobj;
	uint32_t mem_type;
	bool is_secure;
	paddr_t pages[];
};

static struct mobj_sp *to_mobj_sp(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_sp_ops);
	return container_of(mobj, struct mobj_sp, mobj);
}

static size_t mobj_sp_size(size_t num_pages)
{
	size_t s = 0;

	if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))
		return 0;
	if (ADD_OVERFLOW(sizeof(struct mobj_sp), s, &s))
		return 0;
	return s;
}

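/*
 * Allocate a struct mobj_sp able to track @pages physical pages of the
 * given cache type. The page array starts out empty; pages are
 * registered afterwards with sp_mem_add_pages().
 *
 * Rough usage sketch (values are illustrative, @pa stands for the
 * physical address of the shared buffer, error handling omitted):
 *
 *	struct mobj *m = sp_mem_new_mobj(2, TEE_MATTR_MEM_TYPE_CACHED,
 *					 false);
 *	unsigned int idx = 0;
 *
 *	sp_mem_add_pages(m, &idx, pa, 2);
 */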
struct mobj *sp_mem_new_mobj(uint64_t pages, uint32_t cache_type,
			     bool is_secure)
{
	struct mobj_sp *m = NULL;
	size_t s = 0;

	s = mobj_sp_size(pages);
	if (!s)
		return NULL;

	m = calloc(1, s);
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_sp_ops;
	m->mobj.size = pages * SMALL_PAGE_SIZE;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;

	m->mem_type = cache_type;
	m->is_secure = is_secure;

	refcount_set(&m->mobj.refc, 1);
	return &m->mobj;
}

static size_t get_page_count(struct mobj_sp *ms)
{
	return ROUNDUP(ms->mobj.size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
}

/*
 * Add physical pages to the mobj object. @idx is the index of the first
 * page to set; on success it is advanced past the pages just added.
 */
int sp_mem_add_pages(struct mobj *mobj, unsigned int *idx,
		     paddr_t pa, unsigned int num_pages)
{
	struct mobj_sp *ms = to_mobj_sp(mobj);
	unsigned int n = 0;
	size_t tot_page_count = get_page_count(ms);

	if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
		return TEE_ERROR_BAD_PARAMETERS;

	/* Don't check for device memory */
	if (ms->mem_type == TEE_MATTR_MEM_TYPE_CACHED) {
		if (ms->is_secure) {
			if (!tee_pbuf_is_sec(pa, num_pages * SMALL_PAGE_SIZE))
				return TEE_ERROR_BAD_PARAMETERS;
		} else {
			if (!tee_pbuf_is_non_sec(pa,
						 num_pages * SMALL_PAGE_SIZE))
				return TEE_ERROR_BAD_PARAMETERS;
		}
	}

	for (n = 0; n < num_pages; n++)
		ms->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;

	*idx += n;
	return TEE_SUCCESS;
}

static TEE_Result sp_mem_get_cattr(struct mobj *mobj, uint32_t *cattr)
{
	struct mobj_sp *m = to_mobj_sp(mobj);

	*cattr = m->mem_type;

	return TEE_SUCCESS;
}

static bool mobj_sp_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	struct mobj_sp *m = to_mobj_sp(mobj);

	if (m->is_secure)
		return attr == CORE_MEM_SEC;
	else
		return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
}

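/*
 * Look up the physical address at @offset bytes into the mobj. A
 * granule of 0 yields a byte-accurate address, SMALL_PAGE_SIZE yields
 * the address of the containing page; any other granule is rejected.
 */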
static TEE_Result get_pa(struct mobj *mobj, size_t offset,
			 size_t granule, paddr_t *pa)
{
	struct mobj_sp *ms = to_mobj_sp(mobj);
	paddr_t p = 0;

	if (!pa)
		return TEE_ERROR_GENERIC;

	if (offset >= mobj->size)
		return TEE_ERROR_GENERIC;

	switch (granule) {
	case 0:
		p = ms->pages[offset / SMALL_PAGE_SIZE] +
		    (offset & SMALL_PAGE_MASK);
		break;
	case SMALL_PAGE_SIZE:
		p = ms->pages[offset / SMALL_PAGE_SIZE];
		break;
	default:
		return TEE_ERROR_GENERIC;
	}
	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(get_pa);

static size_t get_phys_offs(struct mobj *mobj __maybe_unused,
			    size_t granule __maybe_unused)
{
	return 0;
}

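/*
 * Free callback of the mobj: called once the reference counter drops to
 * zero. The mobj_sp is only freed if no other thread revived the
 * reference count before the lock was taken.
 */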
static void inactivate(struct mobj *mobj)
{
	struct mobj_sp *ms = to_mobj_sp(mobj);
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&sp_mem_lock);
	/*
	 * If refcount isn't 0 some other thread has found this mobj in
	 * shm_head after the mobj_put() that put us here and before we got
	 * the lock.
	 */
	if (!refcount_val(&mobj->refc))
		free(ms);

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
}

const struct mobj_ops mobj_sp_ops __weak __relrodata_unpaged("mobj_sp_ops") = {
	.get_pa = get_pa,
	.get_phys_offs = get_phys_offs,
	.get_cattr = sp_mem_get_cattr,
	.matches = mobj_sp_matches,
	.free = inactivate,
};

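/*
 * Return the receiver entry of endpoint @s_id in @smem, or NULL if the
 * endpoint is not a receiver of this share.
 */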
struct sp_mem_receiver *sp_mem_get_receiver(uint32_t s_id, struct sp_mem *smem)
{
	struct sp_mem_receiver *r = NULL;

	SLIST_FOREACH(r, &smem->receivers, link) {
		if (r->perm.endpoint_id == s_id)
			return r;
	}
	return NULL;
}

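/* Find an active share by its FF-A global handle, or return NULL. */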
struct sp_mem *sp_mem_get(uint64_t handle)
{
	struct sp_mem *smem = NULL;
	uint32_t exceptions = cpu_spin_lock_xsave(&sp_mem_lock);

	SLIST_FOREACH(smem, &mem_shares, link) {
		if (smem->global_handle == handle)
			break;
	}

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
	return smem;
}

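/*
 * Return the virtual address at which (@mobj, @offset) is mapped in the
 * address space described by @uctx, or NULL if no such mapping exists.
 */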
void *sp_mem_get_va(const struct user_mode_ctx *uctx, size_t offset,
		    struct mobj *mobj)
{
	struct vm_region *region = NULL;

	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
		if (region->mobj == mobj && region->offset == offset)
			return (void *)region->va;
	}
	return NULL;
}

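/*
 * Allocate a new share descriptor and reserve a global handle for it.
 * Returns NULL when out of memory or when all NUM_SHARES handles are in
 * use.
 */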
struct sp_mem *sp_mem_new(void)
{
	struct sp_mem *smem = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	smem = calloc(sizeof(*smem), 1);
	if (!smem)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&sp_mem_lock);

	bit_ffc(share_bits, NUM_SHARES, &i);
	if (i == -1) {
		cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
		free(smem);
		return NULL;
	}

	bit_set(share_bits, i);
	/*
	 * OP-TEE's own shares use bit 44, so use bit 45 instead.
	 */
	smem->global_handle = i | FFA_MEMORY_HANDLE_SECURE_BIT;
	SLIST_INIT(&smem->regions);
	SLIST_INIT(&smem->receivers);

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);

	return smem;
}

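/* Publish a fully initialized share on the list of active shares. */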
void sp_mem_add(struct sp_mem *smem)
{
	uint32_t exceptions = cpu_spin_lock_xsave(&sp_mem_lock);

	SLIST_INSERT_HEAD(&mem_shares, smem, link);

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
}

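/*
 * Check whether @new_reg overlaps any region of any active share that
 * is backed by the same mobj.
 */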
bool sp_mem_is_shared(struct sp_mem_map_region *new_reg)
{
	struct sp_mem *smem = NULL;
	uint32_t exceptions = cpu_spin_lock_xsave(&sp_mem_lock);
	uint64_t new_reg_end = new_reg->page_offset +
			       (new_reg->page_count * SMALL_PAGE_SIZE);

	SLIST_FOREACH(smem, &mem_shares, link) {
		struct sp_mem_map_region *reg = NULL;

		SLIST_FOREACH(reg, &smem->regions, link) {
			if (new_reg->mobj == reg->mobj) {
				uint64_t reg_end = 0;

				reg_end = reg->page_offset +
					  (reg->page_count * SMALL_PAGE_SIZE);

				if (new_reg->page_offset < reg_end &&
				    new_reg_end > reg->page_offset) {
					cpu_spin_unlock_xrestore(&sp_mem_lock,
								 exceptions);
					return true;
				}
			}
		}
	}

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
	return false;
}

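/*
 * Tear down a share: free its receivers and regions, release its global
 * handle, unlink it from the list of active shares and free it.
 */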
void sp_mem_remove(struct sp_mem *smem)
{
	uint32_t exceptions = 0;
	int i = 0;
	struct sp_mem *tsmem = NULL;

	if (!smem)
		return;

	/* Remove all receivers */
	while (!SLIST_EMPTY(&smem->receivers)) {
		struct sp_mem_receiver *receiver = NULL;

		receiver = SLIST_FIRST(&smem->receivers);
		SLIST_REMOVE_HEAD(&smem->receivers, link);
		free(receiver);
	}
	/* Remove all regions */
	while (!SLIST_EMPTY(&smem->regions)) {
		struct sp_mem_map_region *region = SLIST_FIRST(&smem->regions);

		mobj_put(region->mobj);

		SLIST_REMOVE_HEAD(&smem->regions, link);
		free(region);
	}

	exceptions = cpu_spin_lock_xsave(&sp_mem_lock);

	i = smem->global_handle & ~FFA_MEMORY_HANDLE_SECURE_BIT;
	assert(i < NUM_SHARES);

	bit_clear(share_bits, i);

	SLIST_FOREACH(tsmem, &mem_shares, link) {
		if (tsmem == smem) {
			SLIST_REMOVE(&mem_shares, smem, sp_mem, link);
			break;
		}
	}

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);

	free(smem);
}