// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2019, Linaro Limited
 */

#include <crypto/crypto.h>
#include <crypto/internal_aes-gcm.h>
#include <kernel/panic.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/fobj.h>
#include <mm/tee_mm.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

#ifdef CFG_WITH_PAGER

#define RWP_AE_KEY_BITS		256

struct rwp_aes_gcm_iv {
	uint32_t iv[3];
};

#define RWP_AES_GCM_TAG_LEN	16

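/*
 * Per-page state for a read/write paged fobj: @iv is a counter bumped
 * each time the page is saved and @tag is the AES-GCM tag of the
 * encrypted page in the backing store.
 */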
struct rwp_state {
	uint64_t iv;
	uint8_t tag[RWP_AES_GCM_TAG_LEN];
};

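/*
 * A read/write paged fobj: @store is the backing store in secure DDR
 * (TA RAM) and @state holds one struct rwp_state per page.
 */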
struct fobj_rwp {
	uint8_t *store;
	struct rwp_state *state;
	struct fobj fobj;
};

static struct fobj_ops ops_rw_paged;

static struct internal_aes_gcm_key rwp_ae_key;

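/*
 * Generates the authenticated encryption key used to protect read/write
 * pages while they reside encrypted in the backing store.
 */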
void fobj_generate_authenc_key(void)
{
	uint8_t key[RWP_AE_KEY_BITS / 8] = { 0 };

	if (crypto_rng_read(key, sizeof(key)) != TEE_SUCCESS)
		panic("failed to generate random");
	if (internal_aes_gcm_expand_enc_key(key, sizeof(key), &rwp_ae_key))
		panic("failed to expand key");
}

static void fobj_init(struct fobj *fobj, const struct fobj_ops *ops,
		      unsigned int num_pages)
{
	fobj->ops = ops;
	fobj->num_pages = num_pages;
	refcount_set(&fobj->refc, 1);
	TAILQ_INIT(&fobj->areas);
}

static void fobj_uninit(struct fobj *fobj)
{
	assert(!refcount_val(&fobj->refc));
	assert(TAILQ_EMPTY(&fobj->areas));
	tee_pager_invalidate_fobj(fobj);
}

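/*
 * Allocates a read/write paged fobj together with its per-page state
 * array and a backing store in secure DDR.
 */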
struct fobj *fobj_rw_paged_alloc(unsigned int num_pages)
{
	tee_mm_entry_t *mm = NULL;
	struct fobj_rwp *rwp = NULL;
	size_t size = 0;

	assert(num_pages);

	rwp = calloc(1, sizeof(*rwp));
	if (!rwp)
		return NULL;

	rwp->state = calloc(num_pages, sizeof(*rwp->state));
	if (!rwp->state)
		goto err;

	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
		goto err;
	mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
	if (!mm)
		goto err;
	rwp->store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM);
	assert(rwp->store); /* to assist debugging should this ever happen */
	if (!rwp->store)
		goto err;

	fobj_init(&rwp->fobj, &ops_rw_paged, num_pages);

	return &rwp->fobj;

err:
	tee_mm_free(mm);
	free(rwp->state);
	free(rwp);

	return NULL;
}

static struct fobj_rwp *to_rwp(struct fobj *fobj)
{
	assert(fobj->ops == &ops_rw_paged);

	return container_of(fobj, struct fobj_rwp, fobj);
}

static void rwp_free(struct fobj *fobj)
{
	struct fobj_rwp *rwp = to_rwp(fobj);

	fobj_uninit(fobj);
	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rwp->store)));
	free(rwp->state);
	free(rwp);
}

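/*
 * Decrypts and authenticates a page from the backing store into @va.
 * A page whose IV is still zero has never been saved and is returned
 * zero-filled.
 */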
static TEE_Result rwp_load_page(struct fobj *fobj, unsigned int page_idx,
				void *va)
{
	struct fobj_rwp *rwp = to_rwp(fobj);
	struct rwp_state *state = rwp->state + page_idx;
	uint8_t *src = rwp->store + page_idx * SMALL_PAGE_SIZE;
	struct rwp_aes_gcm_iv iv = {
		.iv = { (vaddr_t)state, state->iv >> 32, state->iv }
	};

	assert(refcount_val(&fobj->refc));
	assert(page_idx < fobj->num_pages);

	if (!state->iv) {
		/*
		 * The IV is still zero, which means that this is a
		 * previously unused page.
		 */
		memset(va, 0, SMALL_PAGE_SIZE);
		return TEE_SUCCESS;
	}

	return internal_aes_gcm_dec(&rwp_ae_key, &iv, sizeof(iv),
				    NULL, 0, src, SMALL_PAGE_SIZE, va,
				    state->tag, sizeof(state->tag));
}
KEEP_PAGER(rwp_load_page);

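/*
 * Encrypts the page at @va into the backing store using a fresh IV and
 * records the resulting AES-GCM tag. Does nothing if the fobj is already
 * being released.
 */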
static TEE_Result rwp_save_page(struct fobj *fobj, unsigned int page_idx,
				const void *va)
{
	struct fobj_rwp *rwp = to_rwp(fobj);
	struct rwp_state *state = rwp->state + page_idx;
	size_t tag_len = sizeof(state->tag);
	uint8_t *dst = rwp->store + page_idx * SMALL_PAGE_SIZE;
	struct rwp_aes_gcm_iv iv;

	memset(&iv, 0, sizeof(iv));

	if (!refcount_val(&fobj->refc)) {
		/*
		 * This fobj is being torn down; tee_pager_invalidate_fobj()
		 * just hasn't been called yet.
		 */
		assert(TAILQ_EMPTY(&fobj->areas));
		return TEE_SUCCESS;
	}

	assert(page_idx < fobj->num_pages);
	assert(state->iv + 1 > state->iv);

	state->iv++;
	/*
	 * IV is constructed as recommended in section "8.2.1 Deterministic
	 * Construction" of "Recommendation for Block Cipher Modes of
	 * Operation: Galois/Counter Mode (GCM) and GMAC",
	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
	 */

	iv.iv[0] = (vaddr_t)state;
	iv.iv[1] = state->iv >> 32;
	iv.iv[2] = state->iv;

	return internal_aes_gcm_enc(&rwp_ae_key, &iv, sizeof(iv),
				    NULL, 0, va, SMALL_PAGE_SIZE, dst,
				    state->tag, &tag_len);
}
KEEP_PAGER(rwp_save_page);

static struct fobj_ops ops_rw_paged __rodata_unpaged = {
	.free = rwp_free,
	.load_page = rwp_load_page,
	.save_page = rwp_save_page,
};

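/*
 * A read-only paged fobj: pages are backed by @store and verified
 * against per-page SHA-256 digests in @hashes when loaded.
 */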
struct fobj_rop {
	uint8_t *hashes;
	uint8_t *store;
	struct fobj fobj;
};

static struct fobj_ops ops_ro_paged;

struct fobj *fobj_ro_paged_alloc(unsigned int num_pages, void *hashes,
				 void *store)
{
	struct fobj_rop *rop = NULL;

	assert(num_pages && hashes && store);

	rop = calloc(1, sizeof(*rop));
	if (!rop)
		return NULL;

	rop->hashes = hashes;
	rop->store = store;
	fobj_init(&rop->fobj, &ops_ro_paged, num_pages);

	return &rop->fobj;
}

static struct fobj_rop *to_rop(struct fobj *fobj)
{
	assert(fobj->ops == &ops_ro_paged);

	return container_of(fobj, struct fobj_rop, fobj);
}

static void rop_free(struct fobj *fobj)
{
	struct fobj_rop *rop = to_rop(fobj);

	fobj_uninit(fobj);
	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rop->store)));
	free(rop->hashes);
	free(rop);
}

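/*
 * Copies a page from the read-only store into @va and verifies it
 * against the expected SHA-256 hash.
 */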
static TEE_Result rop_load_page(struct fobj *fobj, unsigned int page_idx,
				void *va)
{
	struct fobj_rop *rop = to_rop(fobj);
	const uint8_t *hash = rop->hashes + page_idx * TEE_SHA256_HASH_SIZE;
	const uint8_t *src = rop->store + page_idx * SMALL_PAGE_SIZE;

	assert(refcount_val(&fobj->refc));
	assert(page_idx < fobj->num_pages);
	memcpy(va, src, SMALL_PAGE_SIZE);

	return hash_sha256_check(hash, va, SMALL_PAGE_SIZE);
}
KEEP_PAGER(rop_load_page);

static TEE_Result rop_save_page(struct fobj *fobj __unused,
				unsigned int page_idx __unused,
				const void *va __unused)
{
	return TEE_ERROR_GENERIC;
}
KEEP_PAGER(rop_save_page);

static struct fobj_ops ops_ro_paged __rodata_unpaged = {
	.free = rop_free,
	.load_page = rop_load_page,
	.save_page = rop_save_page,
};

static struct fobj_ops ops_locked_paged;

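/*
 * Locked paged fobjs have no backing store: pages are zero-initialized
 * when loaded and cannot be saved (lop_save_page() always fails), so
 * they are meant to stay resident while in use.
 */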
struct fobj *fobj_locked_paged_alloc(unsigned int num_pages)
{
	struct fobj *f = NULL;

	assert(num_pages);

	f = calloc(1, sizeof(*f));
	if (!f)
		return NULL;

	fobj_init(f, &ops_locked_paged, num_pages);

	return f;
}

static void lop_free(struct fobj *fobj)
{
	assert(fobj->ops == &ops_locked_paged);
	fobj_uninit(fobj);
	free(fobj);
}

static TEE_Result lop_load_page(struct fobj *fobj __maybe_unused,
				unsigned int page_idx __maybe_unused,
				void *va)
{
	assert(fobj->ops == &ops_locked_paged);
	assert(refcount_val(&fobj->refc));
	assert(page_idx < fobj->num_pages);

	memset(va, 0, SMALL_PAGE_SIZE);

	return TEE_SUCCESS;
}
KEEP_PAGER(lop_load_page);

static TEE_Result lop_save_page(struct fobj *fobj __unused,
				unsigned int page_idx __unused,
				const void *va __unused)
{
	return TEE_ERROR_GENERIC;
}
KEEP_PAGER(lop_save_page);

static struct fobj_ops ops_locked_paged __rodata_unpaged = {
	.free = lop_free,
	.load_page = lop_load_page,
	.save_page = lop_save_page,
};
#endif /*CFG_WITH_PAGER*/

#ifndef CFG_PAGED_USER_TA

struct fobj_sec_mem {
	tee_mm_entry_t *mm;
	struct fobj fobj;
};

static struct fobj_ops ops_sec_mem;

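/*
 * When user TAs are not paged, a fobj is simply a physically contiguous,
 * zero-initialized allocation from secure DDR exposed via get_pa().
 */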
struct fobj *fobj_sec_mem_alloc(unsigned int num_pages)
{
	struct fobj_sec_mem *f = calloc(1, sizeof(*f));
	size_t size = 0;
	void *va = NULL;

	if (!f)
		return NULL;

	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
		goto err;

	f->mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
	if (!f->mm)
		goto err;

	va = phys_to_virt(tee_mm_get_smem(f->mm), MEM_AREA_TA_RAM);
	if (!va)
		goto err;

	memset(va, 0, size);
	f->fobj.ops = &ops_sec_mem;
	f->fobj.num_pages = num_pages;
	refcount_set(&f->fobj.refc, 1);

	return &f->fobj;
err:
	tee_mm_free(f->mm);
	free(f);

	return NULL;
}

static struct fobj_sec_mem *to_sec_mem(struct fobj *fobj)
{
	assert(fobj->ops == &ops_sec_mem);

	return container_of(fobj, struct fobj_sec_mem, fobj);
}

static void sec_mem_free(struct fobj *fobj)
{
	struct fobj_sec_mem *f = to_sec_mem(fobj);

	assert(!refcount_val(&fobj->refc));
	tee_mm_free(f->mm);
	free(f);
}

static paddr_t sec_mem_get_pa(struct fobj *fobj, unsigned int page_idx)
{
	struct fobj_sec_mem *f = to_sec_mem(fobj);

	assert(refcount_val(&fobj->refc));
	assert(page_idx < fobj->num_pages);

	return tee_mm_get_smem(f->mm) + page_idx * SMALL_PAGE_SIZE;
}

static struct fobj_ops ops_sec_mem __rodata_unpaged = {
	.free = sec_mem_free,
	.get_pa = sec_mem_get_pa,
};

#endif /*!CFG_PAGED_USER_TA*/