xref: /optee_os/core/arch/arm/mm/sp_mem.c (revision c44d734b6366cbf4d12610310e809872db65f89d)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
 */
#include <assert.h>
#include <bitstring.h>
#include <ffa.h>
#include <kernel/spinlock.h>
#include <mm/mobj.h>
#include <mm/sp_mem.h>

#define NUM_SHARES	64

static bitstr_t bit_decl(share_bits, NUM_SHARES);
static unsigned int sp_mem_lock = SPINLOCK_UNLOCK;

/* mem_shares stores all active FF-A shares. */
SLIST_HEAD(sp_mem_head, sp_mem);
static struct sp_mem_head mem_shares = SLIST_HEAD_INITIALIZER(sp_mem_head);
/* mobj_sp_ops is defined __weak below, so it cannot be declared static */
const struct mobj_ops mobj_sp_ops;

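/*
 * mobj covering the memory of an FF-A share: mem_type and is_secure hold
 * the attributes requested when the mobj was created, and pages[] holds
 * the physical address of each small page backing it.
 */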
struct mobj_sp {
	struct mobj mobj;
	uint32_t mem_type;
	bool is_secure;
	paddr_t pages[];
};

static struct mobj_sp *to_mobj_sp(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_sp_ops);
	return container_of(mobj, struct mobj_sp, mobj);
}

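/*
 * Return the allocation size needed for a struct mobj_sp tracking
 * num_pages pages, or 0 if the computation overflows.
 */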
static size_t mobj_sp_size(size_t num_pages)
{
	size_t s = 0;

	if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))
		return 0;
	if (ADD_OVERFLOW(sizeof(struct mobj_sp), s, &s))
		return 0;
	return s;
}

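/*
 * Allocate a mobj covering @pages small pages with the given memory type
 * and security attribute. The backing physical pages are not assigned
 * here; they are filled in afterwards with sp_mem_add_pages().
 *
 * A minimal usage sketch (values are only illustrative):
 *
 *	unsigned int idx = 0;
 *	struct mobj *m = sp_mem_new_mobj(2, TEE_MATTR_MEM_TYPE_CACHED, true);
 *
 *	if (m)
 *		sp_mem_add_pages(m, &idx, pa, 2);
 */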
struct mobj *sp_mem_new_mobj(uint64_t pages, uint32_t mem_type, bool is_secure)
{
	struct mobj_sp *m = NULL;
	size_t s = 0;

	s = mobj_sp_size(pages);
	if (!s)
		return NULL;

	m = calloc(1, s);
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_sp_ops;
	m->mobj.size = pages * SMALL_PAGE_SIZE;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;

	m->mem_type = mem_type;
	m->is_secure = is_secure;

	refcount_set(&m->mobj.refc, 1);
	return &m->mobj;
}

static size_t get_page_count(struct mobj_sp *ms)
{
	return ROUNDUP(ms->mobj.size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
}

/* Add physical pages to the mobj, starting at the page index *idx. */
int sp_mem_add_pages(struct mobj *mobj, unsigned int *idx,
		     paddr_t pa, unsigned int num_pages)
{
	struct mobj_sp *ms = to_mobj_sp(mobj);
	unsigned int n = 0;
	size_t tot_page_count = get_page_count(ms);

	if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
		return TEE_ERROR_BAD_PARAMETERS;

	/* The security attribute is only checked for normal cached memory */
	if (ms->mem_type == TEE_MATTR_MEM_TYPE_CACHED) {
		if (ms->is_secure) {
			if (!tee_pbuf_is_sec(pa, num_pages * SMALL_PAGE_SIZE))
				return TEE_ERROR_BAD_PARAMETERS;
		} else {
			if (!tee_pbuf_is_non_sec(pa,
						 num_pages * SMALL_PAGE_SIZE))
				return TEE_ERROR_BAD_PARAMETERS;
		}
	}

	for (n = 0; n < num_pages; n++)
		ms->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;

	*idx += n;
	return TEE_SUCCESS;
}

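/* mobj_ops get_mem_type(): report the memory type the mobj was created with */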
static TEE_Result get_mem_type(struct mobj *mobj, uint32_t *mt)
{
	struct mobj_sp *m = to_mobj_sp(mobj);

	*mt = m->mem_type;

	return TEE_SUCCESS;
}

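/*
 * mobj_ops matches(): secure mobjs match CORE_MEM_SEC, non-secure ones
 * match CORE_MEM_NON_SEC and CORE_MEM_REG_SHM.
 */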
static bool mobj_sp_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	struct mobj_sp *m = to_mobj_sp(mobj);

	if (m->is_secure)
		return attr == CORE_MEM_SEC;
	else
		return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
}

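/*
 * mobj_ops get_pa(): translate an offset into the mobj to a physical
 * address. Only granule values of 0 and SMALL_PAGE_SIZE are supported.
 */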
static TEE_Result get_pa(struct mobj *mobj, size_t offset,
			 size_t granule, paddr_t *pa)
{
	struct mobj_sp *ms = to_mobj_sp(mobj);
	paddr_t p = 0;

	if (!pa)
		return TEE_ERROR_GENERIC;

	if (offset >= mobj->size)
		return TEE_ERROR_GENERIC;

	switch (granule) {
	case 0:
		p = ms->pages[offset / SMALL_PAGE_SIZE] +
		    (offset & SMALL_PAGE_MASK);
		break;
	case SMALL_PAGE_SIZE:
		p = ms->pages[offset / SMALL_PAGE_SIZE];
		break;
	default:
		return TEE_ERROR_GENERIC;
	}
	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(get_pa);

static size_t get_phys_offs(struct mobj *mobj __maybe_unused,
			    size_t granule __maybe_unused)
{
	return 0;
}

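/*
 * mobj_ops free(): called through mobj_put() once the refcount drops to
 * zero, frees the mobj under sp_mem_lock.
 */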
static void inactivate(struct mobj *mobj)
{
	struct mobj_sp *ms = to_mobj_sp(mobj);
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&sp_mem_lock);
	/*
	 * If refcount isn't 0 some other thread has found this mobj via
	 * mem_shares after the mobj_put() that put us here and before we
	 * got the lock.
	 */
	if (!refcount_val(&mobj->refc))
		free(ms);

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
}

const struct mobj_ops mobj_sp_ops __weak __relrodata_unpaged("mobj_sp_ops") = {
	.get_pa = get_pa,
	.get_phys_offs = get_phys_offs,
	.get_mem_type = get_mem_type,
	.matches = mobj_sp_matches,
	.free = inactivate,
};

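/* Return the receiver with endpoint id @s_id in @smem, or NULL if absent */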
struct sp_mem_receiver *sp_mem_get_receiver(uint32_t s_id, struct sp_mem *smem)
{
	struct sp_mem_receiver *r = NULL;

	SLIST_FOREACH(r, &smem->receivers, link) {
		if (r->perm.endpoint_id == s_id)
			return r;
	}
	return NULL;
}

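/* Look up an active share by its global FF-A memory handle */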
struct sp_mem *sp_mem_get(uint64_t handle)
{
	struct sp_mem *smem = NULL;
	uint32_t exceptions = cpu_spin_lock_xsave(&sp_mem_lock);

	SLIST_FOREACH(smem, &mem_shares, link) {
		if (smem->global_handle == handle)
			break;
	}

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
	return smem;
}

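/*
 * Return the virtual address where @mobj is mapped at @offset in the given
 * user mode context, or NULL if no such mapping exists.
 */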
void *sp_mem_get_va(const struct user_mode_ctx *uctx, size_t offset,
		    struct mobj *mobj)
{
	struct vm_region *region = NULL;

	TAILQ_FOREACH(region, &uctx->vm_info.regions, link) {
		if (region->mobj == mobj && region->offset == offset)
			return (void *)region->va;
	}
	return NULL;
}

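/*
 * Allocate a new sp_mem descriptor and reserve a global handle for it.
 * Returns NULL when memory or all NUM_SHARES handles are exhausted.
 */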
struct sp_mem *sp_mem_new(void)
{
	struct sp_mem *smem = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	smem = calloc(sizeof(*smem), 1);
	if (!smem)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&sp_mem_lock);

	bit_ffc(share_bits, NUM_SHARES, &i);
	if (i == -1) {
		cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
		free(smem);
		return NULL;
	}

	bit_set(share_bits, i);
	/*
	 * Shares created by OP-TEE itself use bit 44 in the handle, so use
	 * bit 45 (FFA_MEMORY_HANDLE_SECURE_BIT) here instead to keep the
	 * two handle ranges from colliding.
	 */
	smem->global_handle = i | FFA_MEMORY_HANDLE_SECURE_BIT;
	SLIST_INIT(&smem->regions);
	SLIST_INIT(&smem->receivers);

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);

	return smem;
}

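/* Insert the share into the global list of active shares */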
void sp_mem_add(struct sp_mem *smem)
{
	uint32_t exceptions = cpu_spin_lock_xsave(&sp_mem_lock);

	SLIST_INSERT_HEAD(&mem_shares, smem, link);

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
}

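/*
 * Return true if @new_reg overlaps a region of any active share backed by
 * the same mobj.
 */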
bool sp_mem_is_shared(struct sp_mem_map_region *new_reg)
{
	struct sp_mem *smem = NULL;
	uint32_t exceptions = cpu_spin_lock_xsave(&sp_mem_lock);
	uint64_t new_reg_end = new_reg->page_offset +
			       (new_reg->page_count * SMALL_PAGE_SIZE);

	SLIST_FOREACH(smem, &mem_shares, link) {
		struct sp_mem_map_region *reg = NULL;

		SLIST_FOREACH(reg, &smem->regions, link) {
			if (new_reg->mobj == reg->mobj) {
				uint64_t reg_end = 0;

				reg_end = reg->page_offset +
					  (reg->page_count * SMALL_PAGE_SIZE);

				if (new_reg->page_offset < reg_end &&
				    new_reg_end > reg->page_offset) {
					cpu_spin_unlock_xrestore(&sp_mem_lock,
								 exceptions);
					return true;
				}
			}
		}
	}

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
	return false;
}

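/*
 * Tear down a share: free all receivers and regions, release the handle
 * bit, unlink the share from mem_shares and free it.
 */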
void sp_mem_remove(struct sp_mem *smem)
{
	uint32_t exceptions = 0;
	int i = 0;
	struct sp_mem *tsmem = NULL;

	if (!smem)
		return;

	/* Remove all receivers */
	while (!SLIST_EMPTY(&smem->receivers)) {
		struct sp_mem_receiver *receiver = NULL;

		receiver = SLIST_FIRST(&smem->receivers);
		SLIST_REMOVE_HEAD(&smem->receivers, link);
		free(receiver);
	}
	/* Remove all regions */
	while (!SLIST_EMPTY(&smem->regions)) {
		struct sp_mem_map_region *region = SLIST_FIRST(&smem->regions);

		mobj_put(region->mobj);

		SLIST_REMOVE_HEAD(&smem->regions, link);
		free(region);
	}

	exceptions = cpu_spin_lock_xsave(&sp_mem_lock);

	i = smem->global_handle & ~FFA_MEMORY_HANDLE_SECURE_BIT;
	assert(i < NUM_SHARES);

	bit_clear(share_bits, i);

	SLIST_FOREACH(tsmem, &mem_shares, link) {
		if (tsmem == smem) {
			SLIST_REMOVE(&mem_shares, smem, sp_mem, link);
			break;
		}
	}

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);

	free(smem);
}