xref: /optee_os/core/arch/arm/mm/sp_mem.c (revision 6a1b230ce97c0aaeb1bd27b0c363147449b90afd)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2021, Arm Limited. All rights reserved.
 */
#include <assert.h>
#include <bitstring.h>
#include <ffa.h>
#include <kernel/spinlock.h>
#include <mm/mobj.h>
#include <mm/sp_mem.h>

#define NUM_SHARES	64

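/*
 * share_bits tracks which of the NUM_SHARES global handle values are in
 * use; sp_mem_lock protects both the bitmap and the mem_shares list below.
 */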
static bitstr_t bit_decl(share_bits, NUM_SHARES);
static unsigned int sp_mem_lock = SPINLOCK_UNLOCK;

/* mem_shares stores all active FF-A shares. */
SLIST_HEAD(sp_mem_head, sp_mem);
static struct sp_mem_head mem_shares = SLIST_HEAD_INITIALIZER(sp_mem_head);
const struct mobj_ops mobj_sp_ops;

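/*
 * mobj implementation backing SP shares: the mobj header followed by a
 * flexible array holding the physical address of each small page.
 */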
struct mobj_sp {
	struct mobj mobj;
	paddr_t pages[];
};

static struct mobj_sp *to_mobj_sp(struct mobj *mobj)
{
	assert(mobj->ops == &mobj_sp_ops);
	return container_of(mobj, struct mobj_sp, mobj);
}

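/* Allocation size for a mobj_sp covering num_pages pages, or 0 on overflow. */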
static size_t mobj_sp_size(size_t num_pages)
{
	size_t s = 0;

	if (MUL_OVERFLOW(sizeof(paddr_t), num_pages, &s))
		return 0;
	if (ADD_OVERFLOW(sizeof(struct mobj_sp), s, &s))
		return 0;
	return s;
}

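/*
 * Allocate a mobj_sp large enough for @pages small pages. The physical
 * pages themselves are filled in later with sp_mem_add_pages().
 */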
struct mobj *sp_mem_new_mobj(uint64_t pages)
{
	struct mobj_sp *m = NULL;
	size_t s = 0;

	s = mobj_sp_size(pages);
	if (!s)
		return NULL;

	m = calloc(1, s);
	if (!m)
		return NULL;

	m->mobj.ops = &mobj_sp_ops;
	m->mobj.size = pages * SMALL_PAGE_SIZE;
	m->mobj.phys_granule = SMALL_PAGE_SIZE;

	refcount_set(&m->mobj.refc, 1);
	return &m->mobj;
}

static size_t get_page_count(struct mobj_sp *ms)
{
	return ROUNDUP(ms->mobj.size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
}

/* Add physical pages to the mobj, starting at index *idx. */
int sp_mem_add_pages(struct mobj *mobj, unsigned int *idx,
		     paddr_t pa, unsigned int num_pages)
{
	struct mobj_sp *ms = to_mobj_sp(mobj);
	unsigned int n = 0;
	size_t tot_page_count = get_page_count(ms);

	if (ADD_OVERFLOW(*idx, num_pages, &n) || n > tot_page_count)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, num_pages * SMALL_PAGE_SIZE))
		return TEE_ERROR_BAD_PARAMETERS;

	for (n = 0; n < num_pages; n++)
		ms->pages[n + *idx] = pa + n * SMALL_PAGE_SIZE;

	*idx += n;
	return TEE_SUCCESS;
}
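/*
 * Typical construction sketch (illustrative only, not taken from an in-tree
 * caller; error handling is trimmed and the 0x08000000 base address and the
 * page count are made up):
 *
 *	unsigned int idx = 0;
 *	struct mobj *m = sp_mem_new_mobj(2);
 *
 *	if (!m || sp_mem_add_pages(m, &idx, 0x08000000, 2))
 *		panic();
 */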

static TEE_Result sp_mem_get_cattr(struct mobj *mobj __unused, uint32_t *cattr)
{
	*cattr = TEE_MATTR_CACHE_CACHED;

	return TEE_SUCCESS;
}

static bool mobj_sp_matches(struct mobj *mobj __maybe_unused,
			    enum buf_is_attr attr)
{
	assert(mobj->ops == &mobj_sp_ops);

	return attr == CORE_MEM_NON_SEC || attr == CORE_MEM_REG_SHM;
}

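/*
 * Return the physical address at @offset into the share. A granule of 0
 * gives the exact byte address, SMALL_PAGE_SIZE gives the page base; any
 * other granule is rejected.
 */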
static TEE_Result get_pa(struct mobj *mobj, size_t offset,
			 size_t granule, paddr_t *pa)
{
	struct mobj_sp *ms = to_mobj_sp(mobj);
	paddr_t p = 0;

	if (!pa)
		return TEE_ERROR_GENERIC;

	if (offset >= mobj->size)
		return TEE_ERROR_GENERIC;

	switch (granule) {
	case 0:
		p = ms->pages[offset / SMALL_PAGE_SIZE] +
		    (offset & SMALL_PAGE_MASK);
		break;
	case SMALL_PAGE_SIZE:
		p = ms->pages[offset / SMALL_PAGE_SIZE];
		break;
	default:
		return TEE_ERROR_GENERIC;
	}
	*pa = p;

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(get_pa);

static size_t get_phys_offs(struct mobj *mobj __maybe_unused,
			    size_t granule __maybe_unused)
{
	return 0;
}

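/*
 * mobj free() callback: free the backing struct once the last reference is
 * gone, unless another thread has revived the mobj before we took the lock.
 */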
static void inactivate(struct mobj *mobj)
{
	struct mobj_sp *ms = to_mobj_sp(mobj);
	uint32_t exceptions = 0;

	exceptions = cpu_spin_lock_xsave(&sp_mem_lock);
	/*
	 * If refcount isn't 0 some other thread has found this mobj in
	 * mem_shares after the mobj_put() that put us here and before we
	 * got the lock.
	 */
	if (!refcount_val(&mobj->refc))
		free(ms);

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
}

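/* Operations for mobjs created with sp_mem_new_mobj(). */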
const struct mobj_ops mobj_sp_ops __weak __rodata_unpaged("mobj_sp_ops") = {
	.get_pa = get_pa,
	.get_phys_offs = get_phys_offs,
	.get_cattr = sp_mem_get_cattr,
	.matches = mobj_sp_matches,
	.free = inactivate,
};

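/*
 * Allocate a new share descriptor and reserve a unique global FF-A memory
 * handle for it. Returns NULL when no handle or memory is available.
 */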
struct sp_mem *sp_mem_new(void)
{
	struct sp_mem *smem = NULL;
	uint32_t exceptions = 0;
	int i = 0;

	smem = calloc(1, sizeof(*smem));
	if (!smem)
		return NULL;

	exceptions = cpu_spin_lock_xsave(&sp_mem_lock);

	bit_ffc(share_bits, NUM_SHARES, &i);
	if (i == -1) {
		cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
		free(smem);
		return NULL;
	}

	bit_set(share_bits, i);
	/*
	 * Handles for OP-TEE SHAREs use bit 44, so use bit 45
	 * (FFA_MEMORY_HANDLE_SECURE_BIT) here instead to keep the two
	 * handle ranges from colliding.
	 */
	smem->global_handle = i | FFA_MEMORY_HANDLE_SECURE_BIT;
	SLIST_INIT(&smem->regions);
	SLIST_INIT(&smem->receivers);

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);

	return smem;
}

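/* Publish the share by adding it to the global mem_shares list. */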
void sp_mem_add(struct sp_mem *smem)
{
	uint32_t exceptions = cpu_spin_lock_xsave(&sp_mem_lock);

	SLIST_INSERT_HEAD(&mem_shares, smem, link);

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
}

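/*
 * Return true if @new_reg overlaps a region of any share in mem_shares,
 * i.e. the same mobj with intersecting page ranges.
 */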
bool sp_mem_is_shared(struct sp_mem_map_region *new_reg)
{
	struct sp_mem *smem = NULL;
	uint32_t exceptions = cpu_spin_lock_xsave(&sp_mem_lock);
	uint64_t new_reg_end = new_reg->page_offset +
			       (new_reg->page_count * SMALL_PAGE_SIZE);

	SLIST_FOREACH(smem, &mem_shares, link) {
		struct sp_mem_map_region *reg = NULL;

		SLIST_FOREACH(reg, &smem->regions, link) {
			if (new_reg->mobj == reg->mobj) {
				uint64_t reg_end = 0;

				reg_end = reg->page_offset +
					  (reg->page_count * SMALL_PAGE_SIZE);

				if (new_reg->page_offset < reg_end &&
				    new_reg_end > reg->page_offset) {
					cpu_spin_unlock_xrestore(&sp_mem_lock,
								 exceptions);
					return true;
				}
			}
		}
	}

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);
	return false;
}

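/*
 * Tear down a share: free its receivers and regions (dropping the mobj
 * references), release its handle bit and unlink it from mem_shares.
 */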
void sp_mem_remove(struct sp_mem *smem)
{
	uint32_t exceptions = 0;
	int i = 0;
	struct sp_mem *tsmem = NULL;

	if (!smem)
		return;

	/* Remove all receivers */
	while (!SLIST_EMPTY(&smem->receivers)) {
		struct sp_mem_receiver *receiver = NULL;

		receiver = SLIST_FIRST(&smem->receivers);
		SLIST_REMOVE_HEAD(&smem->receivers, link);
		free(receiver);
	}
	/* Remove all regions */
	while (!SLIST_EMPTY(&smem->regions)) {
		struct sp_mem_map_region *region = SLIST_FIRST(&smem->regions);

		mobj_put(region->mobj);

		SLIST_REMOVE_HEAD(&smem->regions, link);
		free(region);
	}

	exceptions = cpu_spin_lock_xsave(&sp_mem_lock);

	i = smem->global_handle & ~FFA_MEMORY_HANDLE_SECURE_BIT;
	assert(i < NUM_SHARES);

	bit_clear(share_bits, i);

	SLIST_FOREACH(tsmem, &mem_shares, link) {
		if (tsmem == smem) {
			SLIST_REMOVE(&mem_shares, smem, sp_mem, link);
			break;
		}
	}

	cpu_spin_unlock_xrestore(&sp_mem_lock, exceptions);

	free(smem);
}