xref: /optee_os/core/mm/fobj.c (revision 5b25c76ac40f830867e3d60800120ffd7874e8dc)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2019, Linaro Limited
4  */
5 
6 #include <crypto/crypto.h>
7 #include <crypto/internal_aes-gcm.h>
8 #include <kernel/generic_boot.h>
9 #include <kernel/panic.h>
10 #include <mm/core_memprot.h>
11 #include <mm/core_mmu.h>
12 #include <mm/fobj.h>
13 #include <mm/tee_mm.h>
14 #include <stdlib.h>
15 #include <string.h>
16 #include <tee_api_types.h>
17 #include <types_ext.h>
18 #include <util.h>
19 
20 #ifdef CFG_WITH_PAGER
21 
22 #define RWP_AE_KEY_BITS		256
23 
24 struct rwp_aes_gcm_iv {
25 	uint32_t iv[3];
26 };
27 
28 #define RWP_AES_GCM_TAG_LEN	16
29 
30 struct rwp_state {
31 	uint64_t iv;
32 	uint8_t tag[RWP_AES_GCM_TAG_LEN];
33 };
34 
35 struct fobj_rwp {
36 	uint8_t *store;
37 	struct rwp_state *state;
38 	struct fobj fobj;
39 };
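
/*
 * Layout sketch (derived from the alloc/load/save functions below): @store
 * holds num_pages * SMALL_PAGE_SIZE bytes of AES-GCM encrypted page content
 * in secure DDR, and @state holds one struct rwp_state (64-bit IV counter
 * plus authentication tag) per page, both indexed by page number.
 */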
40 
41 static const struct fobj_ops ops_rw_paged;
42 
43 static struct internal_aes_gcm_key rwp_ae_key;
44 
45 void fobj_generate_authenc_key(void)
46 {
47 	uint8_t key[RWP_AE_KEY_BITS / 8] = { 0 };
48 
49 	if (crypto_rng_read(key, sizeof(key)) != TEE_SUCCESS)
50 		panic("failed to generate random");
51 	if (crypto_aes_expand_enc_key(key, sizeof(key), rwp_ae_key.data,
52 				      sizeof(rwp_ae_key.data),
53 				      &rwp_ae_key.rounds))
54 		panic("failed to expand key");
55 }
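
/*
 * Note: fobj_generate_authenc_key() is expected to run once, early
 * (presumably from boot init), before any rw-paged page is saved or loaded,
 * since rwp_save_page() and rwp_load_page() below depend on rwp_ae_key.
 * RWP_AE_KEY_BITS / 8 = 32 bytes, i.e. an AES-256 key.
 */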
56 
57 static void fobj_init(struct fobj *fobj, const struct fobj_ops *ops,
58 		      unsigned int num_pages)
59 {
60 	fobj->ops = ops;
61 	fobj->num_pages = num_pages;
62 	refcount_set(&fobj->refc, 1);
63 	TAILQ_INIT(&fobj->areas);
64 }
65 
66 static void fobj_uninit(struct fobj *fobj)
67 {
68 	assert(!refcount_val(&fobj->refc));
69 	assert(TAILQ_EMPTY(&fobj->areas));
70 	tee_pager_invalidate_fobj(fobj);
71 }
72 
73 struct fobj *fobj_rw_paged_alloc(unsigned int num_pages)
74 {
75 	tee_mm_entry_t *mm = NULL;
76 	struct fobj_rwp *rwp = NULL;
77 	size_t size = 0;
78 
79 	assert(num_pages);
80 
81 	rwp = calloc(1, sizeof(*rwp));
82 	if (!rwp)
83 		return NULL;
84 
85 	rwp->state = calloc(num_pages, sizeof(*rwp->state));
86 	if (!rwp->state)
87 		goto err;
88 
89 	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
90 		goto err;
91 	mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
92 	if (!mm)
93 		goto err;
94 	rwp->store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM);
95 	assert(rwp->store); /* to assist debugging should this ever happen */
96 	if (!rwp->store)
97 		goto err;
98 
99 	fobj_init(&rwp->fobj, &ops_rw_paged, num_pages);
100 
101 	return &rwp->fobj;
102 
103 err:
104 	tee_mm_free(mm);
105 	free(rwp->state);
106 	free(rwp);
107 
108 	return NULL;
109 }
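
/*
 * Minimal usage sketch (hypothetical caller), assuming the fobj_put()
 * refcounting helper declared in <mm/fobj.h>:
 */
static TEE_Result __maybe_unused rwp_alloc_example(void)
{
	/* Back eight pageable pages with encrypted storage in secure DDR */
	struct fobj *f = fobj_rw_paged_alloc(8);

	if (!f)
		return TEE_ERROR_OUT_OF_MEMORY;

	/* ... hand the fobj to the pager, which calls ops->load/save_page ... */

	/* Drop the initial reference; rwp_free() runs when it reaches zero */
	fobj_put(f);

	return TEE_SUCCESS;
}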
110 
111 static struct fobj_rwp *to_rwp(struct fobj *fobj)
112 {
113 	assert(fobj->ops == &ops_rw_paged);
114 
115 	return container_of(fobj, struct fobj_rwp, fobj);
116 }
117 
118 static void rwp_free(struct fobj *fobj)
119 {
120 	struct fobj_rwp *rwp = to_rwp(fobj);
121 
122 	fobj_uninit(fobj);
123 	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rwp->store)));
124 	free(rwp->state);
125 	free(rwp);
126 }
127 
128 static TEE_Result rwp_load_page(struct fobj *fobj, unsigned int page_idx,
129 				void *va)
130 {
131 	struct fobj_rwp *rwp = to_rwp(fobj);
132 	struct rwp_state *state = rwp->state + page_idx;
133 	uint8_t *src = rwp->store + page_idx * SMALL_PAGE_SIZE;
134 	struct rwp_aes_gcm_iv iv = {
135 		.iv = { (vaddr_t)state, state->iv >> 32, state->iv }
136 	};
137 
138 	assert(refcount_val(&fobj->refc));
139 	assert(page_idx < fobj->num_pages);
140 
141 	if (!state->iv) {
142 		/*
143 		 * The IV is still zero, which means that this is a previously
144 		 * unused page.
145 		 */
146 		memset(va, 0, SMALL_PAGE_SIZE);
147 		return TEE_SUCCESS;
148 	}
149 
150 	return internal_aes_gcm_dec(&rwp_ae_key, &iv, sizeof(iv),
151 				    NULL, 0, src, SMALL_PAGE_SIZE, va,
152 				    state->tag, sizeof(state->tag));
153 }
154 KEEP_PAGER(rwp_load_page);
155 
156 static TEE_Result rwp_save_page(struct fobj *fobj, unsigned int page_idx,
157 				const void *va)
158 {
159 	struct fobj_rwp *rwp = to_rwp(fobj);
160 	struct rwp_state *state = rwp->state + page_idx;
161 	size_t tag_len = sizeof(state->tag);
162 	uint8_t *dst = rwp->store + page_idx * SMALL_PAGE_SIZE;
163 	struct rwp_aes_gcm_iv iv;
164 
165 	memset(&iv, 0, sizeof(iv));
166 
167 	if (!refcount_val(&fobj->refc)) {
168 		/*
169 		 * This fobj is being torn down; it just hasn't had time to
170 		 * call tee_pager_invalidate_fobj() yet.
171 		 */
172 		assert(TAILQ_EMPTY(&fobj->areas));
173 		return TEE_SUCCESS;
174 	}
175 
176 	assert(page_idx < fobj->num_pages);
177 	assert(state->iv + 1 > state->iv); /* the IV counter must not wrap */
178 
179 	state->iv++;
180 	/*
181 	 * IV is constructed as recommended in section "8.2.1 Deterministic
182 	 * Construction" of "Recommendation for Block Cipher Modes of
183 	 * Operation: Galois/Counter Mode (GCM) and GMAC",
184 	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
185 	 */
186 
187 	iv.iv[0] = (vaddr_t)state;
188 	iv.iv[1] = state->iv >> 32;
189 	iv.iv[2] = state->iv;
190 
191 	return internal_aes_gcm_enc(&rwp_ae_key, &iv, sizeof(iv),
192 				    NULL, 0, va, SMALL_PAGE_SIZE, dst,
193 				    state->tag, &tag_len);
194 }
195 KEEP_PAGER(rwp_save_page);
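
/*
 * Sketch of the deterministic IV used above (hypothetical helper mirroring
 * rwp_save_page() and rwp_load_page()): the 96-bit IV is a fixed field that
 * is unique per page (the address of its rwp_state) followed by a 64-bit
 * invocation field (the save counter), per section 8.2.1 of SP 800-38D.
 */
static struct rwp_aes_gcm_iv __maybe_unused rwp_make_iv(struct rwp_state *state)
{
	struct rwp_aes_gcm_iv iv = {
		.iv = {
			(vaddr_t)state,   /* fixed field: per-page identity */
			state->iv >> 32,  /* invocation field, high 32 bits */
			state->iv,        /* invocation field, low 32 bits */
		}
	};

	return iv;
}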
196 
197 static const struct fobj_ops ops_rw_paged __rodata_unpaged = {
198 	.free = rwp_free,
199 	.load_page = rwp_load_page,
200 	.save_page = rwp_save_page,
201 };
202 
203 struct fobj_rop {
204 	uint8_t *hashes;
205 	uint8_t *store;
206 	struct fobj fobj;
207 };
208 
209 static const struct fobj_ops ops_ro_paged;
210 
211 static void rop_init(struct fobj_rop *rop, const struct fobj_ops *ops,
212 		     unsigned int num_pages, void *hashes, void *store)
213 {
214 	rop->hashes = hashes;
215 	rop->store = store;
216 	fobj_init(&rop->fobj, ops, num_pages);
217 }
218 
219 struct fobj *fobj_ro_paged_alloc(unsigned int num_pages, void *hashes,
220 				 void *store)
221 {
222 	struct fobj_rop *rop = NULL;
223 
224 	assert(num_pages && hashes && store);
225 
226 	rop = calloc(1, sizeof(*rop));
227 	if (!rop)
228 		return NULL;
229 
230 	rop_init(rop, &ops_ro_paged, num_pages, hashes, store);
231 
232 	return &rop->fobj;
233 }
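
/*
 * Caller-side sizing sketch (hypothetical helpers): @hashes must hold one
 * SHA-256 hash per page and @store one page of content per page. Both stay
 * owned by the fobj; rop_uninit() below frees @hashes and returns @store to
 * tee_mm_sec_ddr.
 */
static size_t __maybe_unused rop_hashes_size(unsigned int num_pages)
{
	return (size_t)num_pages * TEE_SHA256_HASH_SIZE;
}

static size_t __maybe_unused rop_store_size(unsigned int num_pages)
{
	return (size_t)num_pages * SMALL_PAGE_SIZE;
}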
234 
235 static struct fobj_rop *to_rop(struct fobj *fobj)
236 {
237 	assert(fobj->ops == &ops_ro_paged);
238 
239 	return container_of(fobj, struct fobj_rop, fobj);
240 }
241 
242 static void rop_uninit(struct fobj_rop *rop)
243 {
244 	fobj_uninit(&rop->fobj);
245 	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rop->store)));
246 	free(rop->hashes);
247 }
248 
249 static void rop_free(struct fobj *fobj)
250 {
251 	struct fobj_rop *rop = to_rop(fobj);
252 
253 	rop_uninit(rop);
254 	free(rop);
255 }
256 
257 static TEE_Result rop_load_page_helper(struct fobj_rop *rop,
258 				       unsigned int page_idx, void *va)
259 {
260 	const uint8_t *hash = rop->hashes + page_idx * TEE_SHA256_HASH_SIZE;
261 	const uint8_t *src = rop->store + page_idx * SMALL_PAGE_SIZE;
262 
263 	assert(refcount_val(&rop->fobj.refc));
264 	assert(page_idx < rop->fobj.num_pages);
265 	memcpy(va, src, SMALL_PAGE_SIZE);
266 
267 	return hash_sha256_check(hash, va, SMALL_PAGE_SIZE);
268 }
269 
270 static TEE_Result rop_load_page(struct fobj *fobj, unsigned int page_idx,
271 				void *va)
272 {
273 	return rop_load_page_helper(to_rop(fobj), page_idx, va);
274 }
275 KEEP_PAGER(rop_load_page);
276 
277 static TEE_Result rop_save_page(struct fobj *fobj __unused,
278 				unsigned int page_idx __unused,
279 				const void *va __unused)
280 {
281 	return TEE_ERROR_GENERIC;
282 }
283 KEEP_PAGER(rop_save_page);
284 
285 static const struct fobj_ops ops_ro_paged __rodata_unpaged = {
286 	.free = rop_free,
287 	.load_page = rop_load_page,
288 	.save_page = rop_save_page,
289 };
290 
291 #ifdef CFG_CORE_ASLR
292 /*
293  * When using relocated pages the relocation information must be applied
294  * before the pages can be used. With read-only paging the content is only
295  * integrity protected, so the relocations cannot be applied to the pages
296  * in the less secure "store" or the load address selected by ASLR could
297  * be given away. This means that each time a page has been loaded and
298  * verified it has to have its relocation information applied before it
299  * can be used.
300  *
301  * Only relative relocations are supported, which allows a rather compact
302  * representation of the needed relocation information in this struct.
303  * r_offset is replaced with the offset into the page that needs to be
304  * updated; it can never exceed SMALL_PAGE_SIZE, so a uint16_t is enough.
305  *
306  * All relocations are converted and stored in @relocs. @page_reloc_idx is
307  * an array of length @rop.fobj.num_pages with an entry for each page. If
308  * @page_reloc_idx[page_idx] isn't UINT16_MAX it's an index into @relocs.
309  */
310 struct fobj_ro_reloc_paged {
311 	uint16_t *page_reloc_idx;
312 	uint16_t *relocs;
313 	unsigned int num_relocs;
314 	struct fobj_rop rop;
315 };
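
/*
 * Worked example with illustrative values: reloc_offs 0 and relative
 * relocations at offsets 0x30, 0x1008 and 0x1010 into a two page fobj give
 *
 *   page_reloc_idx[] = { 0, 1 }   relocs[] = { 0x30, 0x008, 0x010 }
 *
 * so page 0 owns relocs[0..0] and page 1 owns relocs[1..2], each entry being
 * an offset within its own page and therefore always fitting in a uint16_t.
 */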
316 
317 static const struct fobj_ops ops_ro_reloc_paged;
318 
319 static unsigned int get_num_rels(unsigned int num_pages,
320 				 unsigned int reloc_offs,
321 				 const uint32_t *reloc, unsigned int num_relocs)
322 {
323 	const unsigned int align_mask __maybe_unused = sizeof(long) - 1;
324 	unsigned int nrels = 0;
325 	unsigned int n = 0;
326 	vaddr_t offs = 0;
327 
328 	/*
329 	 * Count the number of relocations needed for these pages. Also
330 	 * check that the data is well formed: only expected relocations,
331 	 * sorted in order of the address they apply to.
332 	 */
333 	for (; n < num_relocs; n++) {
334 		assert(ALIGNMENT_IS_OK(reloc[n], unsigned long));
335 		assert(offs < reloc[n]);	/* check that it's sorted */
336 		offs = reloc[n];
337 		if (offs >= reloc_offs &&
338 		    offs <= reloc_offs + num_pages * SMALL_PAGE_SIZE)
339 			nrels++;
340 	}
341 
342 	return nrels;
343 }
344 
345 static void init_rels(struct fobj_ro_reloc_paged *rrp, unsigned int reloc_offs,
346 		      const uint32_t *reloc, unsigned int num_relocs)
347 {
348 	unsigned int npg = rrp->rop.fobj.num_pages;
349 	unsigned int pg_idx = 0;
350 	unsigned int reln = 0;
351 	unsigned int n = 0;
352 	uint32_t r = 0;
353 
354 	for (n = 0; n < npg; n++)
355 		rrp->page_reloc_idx[n] = UINT16_MAX;
356 
357 	for (n = 0; n < num_relocs ; n++) {
358 		if (reloc[n] < reloc_offs)
359 			continue;
360 
361 		/* r is the offset from the beginning of this fobj */
362 		r = reloc[n] - reloc_offs;
363 
364 		pg_idx = r / SMALL_PAGE_SIZE;
365 		if (pg_idx >= npg)
366 			break;
367 
368 		if (rrp->page_reloc_idx[pg_idx] == UINT16_MAX)
369 			rrp->page_reloc_idx[pg_idx] = reln;
370 		rrp->relocs[reln] = r - pg_idx * SMALL_PAGE_SIZE;
371 		reln++;
372 	}
373 
374 	assert(reln == rrp->num_relocs);
375 }
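
/*
 * Sketch (hypothetical helper) of how the table is consumed: the relocations
 * belonging to @page_idx are relocs[page_reloc_idx[page_idx] .. end), where
 * end is the entry of the next page that has relocations, or num_relocs for
 * the last populated page. This mirrors the lookup in rrp_load_page() below.
 */
static unsigned int __maybe_unused rrp_end_rel(struct fobj_ro_reloc_paged *rrp,
					       unsigned int page_idx)
{
	unsigned int n = 0;

	for (n = page_idx + 1; n < rrp->rop.fobj.num_pages; n++)
		if (rrp->page_reloc_idx[n] != UINT16_MAX)
			return rrp->page_reloc_idx[n];

	return rrp->num_relocs;
}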
376 
377 struct fobj *fobj_ro_reloc_paged_alloc(unsigned int num_pages, void *hashes,
378 				       unsigned int reloc_offs,
379 				       const void *reloc,
380 				       unsigned int reloc_len, void *store)
381 {
382 	struct fobj_ro_reloc_paged *rrp = NULL;
383 	const unsigned int num_relocs = reloc_len / sizeof(uint32_t);
384 	unsigned int nrels = 0;
385 
386 	assert(ALIGNMENT_IS_OK(reloc, uint32_t));
387 	assert(ALIGNMENT_IS_OK(reloc_len, uint32_t));
388 	assert(num_pages && hashes && store);
389 	if (!reloc_len) {
390 		assert(!reloc);
391 		return fobj_ro_paged_alloc(num_pages, hashes, store);
392 	}
393 	assert(reloc);
394 
395 	nrels = get_num_rels(num_pages, reloc_offs, reloc, num_relocs);
396 	if (!nrels)
397 		return fobj_ro_paged_alloc(num_pages, hashes, store);
398 
399 	rrp = calloc(1, sizeof(*rrp) + num_pages * sizeof(uint16_t) +
400 			nrels * sizeof(uint16_t));
401 	if (!rrp)
402 		return NULL;
403 	rop_init(&rrp->rop, &ops_ro_reloc_paged, num_pages, hashes, store);
404 	rrp->page_reloc_idx = (uint16_t *)(rrp + 1);
405 	rrp->relocs = rrp->page_reloc_idx + num_pages;
406 	rrp->num_relocs = nrels;
407 	init_rels(rrp, reloc_offs, reloc, num_relocs);
408 
409 	return &rrp->rop.fobj;
410 }
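
/*
 * Allocation layout sketch: the single calloc() above carries all three
 * parts, so rrp_free() below releases them with one free(rrp) (the hashes
 * and the store are handled by rop_uninit()):
 *
 *   [ struct fobj_ro_reloc_paged | page_reloc_idx[num_pages] | relocs[nrels] ]
 */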
411 
412 static struct fobj_ro_reloc_paged *to_rrp(struct fobj *fobj)
413 {
414 	assert(fobj->ops == &ops_ro_reloc_paged);
415 
416 	return container_of(fobj, struct fobj_ro_reloc_paged, rop.fobj);
417 }
418 
419 static void rrp_free(struct fobj *fobj)
420 {
421 	struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);
422 
423 	rop_uninit(&rrp->rop);
424 	free(rrp);
425 }
426 
427 static TEE_Result rrp_load_page(struct fobj *fobj, unsigned int page_idx,
428 				void *va)
429 {
430 	struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);
431 	unsigned int end_rel = rrp->num_relocs;
432 	TEE_Result res = TEE_SUCCESS;
433 	unsigned long *where = NULL;
434 	unsigned int n = 0;
435 
436 	res = rop_load_page_helper(&rrp->rop, page_idx, va);
437 	if (res)
438 		return res;
439 
440 	/* Find the reloc index of the next page to tell when we're done */
441 	for (n = page_idx + 1; n < fobj->num_pages; n++) {
442 		if (rrp->page_reloc_idx[n] != UINT16_MAX) {
443 			end_rel = rrp->page_reloc_idx[n];
444 			break;
445 		}
446 	}
447 
448 	for (n = rrp->page_reloc_idx[page_idx]; n < end_rel; n++) {
449 		where = (void *)((vaddr_t)va + rrp->relocs[n]);
450 		*where += boot_mmu_config.load_offset;
451 	}
452 
453 	return TEE_SUCCESS;
454 }
455 KEEP_PAGER(rrp_load_page);
456 
457 static const struct fobj_ops ops_ro_reloc_paged __rodata_unpaged = {
458 	.free = rrp_free,
459 	.load_page = rrp_load_page,
460 	.save_page = rop_save_page, /* Direct reuse */
461 };
462 #endif /*CFG_CORE_ASLR*/
463 
464 static const struct fobj_ops ops_locked_paged;
465 
466 struct fobj *fobj_locked_paged_alloc(unsigned int num_pages)
467 {
468 	struct fobj *f = NULL;
469 
470 	assert(num_pages);
471 
472 	f = calloc(1, sizeof(*f));
473 	if (!f)
474 		return NULL;
475 
476 	fobj_init(f, &ops_locked_paged, num_pages);
477 
478 	return f;
479 }
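
/*
 * Minimal usage sketch (hypothetical caller): a locked-paged fobj has no
 * backing store at all, lop_load_page() below only zero-fills the page and
 * lop_save_page() always fails, so the pager must keep such pages resident.
 * Assumes the fobj_put() helper declared in <mm/fobj.h>.
 */
static TEE_Result __maybe_unused lop_alloc_example(void)
{
	struct fobj *f = fobj_locked_paged_alloc(4);

	if (!f)
		return TEE_ERROR_OUT_OF_MEMORY;

	/* ... register the fobj with the pager as a locked region ... */

	fobj_put(f);

	return TEE_SUCCESS;
}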
480 
481 static void lop_free(struct fobj *fobj)
482 {
483 	assert(fobj->ops == &ops_locked_paged);
484 	fobj_uninit(fobj);
485 	free(fobj);
486 }
487 
488 static TEE_Result lop_load_page(struct fobj *fobj __maybe_unused,
489 				unsigned int page_idx __maybe_unused,
490 				void *va)
491 {
492 	assert(fobj->ops == &ops_locked_paged);
493 	assert(refcount_val(&fobj->refc));
494 	assert(page_idx < fobj->num_pages);
495 
496 	memset(va, 0, SMALL_PAGE_SIZE);
497 
498 	return TEE_SUCCESS;
499 }
500 KEEP_PAGER(lop_load_page);
501 
502 static TEE_Result lop_save_page(struct fobj *fobj __unused,
503 				unsigned int page_idx __unused,
504 				const void *va __unused)
505 {
506 	return TEE_ERROR_GENERIC;
507 }
508 KEEP_PAGER(lop_save_page);
509 
510 static const struct fobj_ops ops_locked_paged __rodata_unpaged = {
511 	.free = lop_free,
512 	.load_page = lop_load_page,
513 	.save_page = lop_save_page,
514 };
515 #endif /*CFG_WITH_PAGER*/
516 
517 #ifndef CFG_PAGED_USER_TA
518 
519 struct fobj_sec_mem {
520 	tee_mm_entry_t *mm;
521 	struct fobj fobj;
522 };
523 
524 static struct fobj_ops ops_sec_mem;
525 
526 struct fobj *fobj_sec_mem_alloc(unsigned int num_pages)
527 {
528 	struct fobj_sec_mem *f = calloc(1, sizeof(*f));
529 	size_t size = 0;
530 	void *va = NULL;
531 
532 	if (!f)
533 		return NULL;
534 
535 	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
536 		goto err;
537 
538 	f->mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
539 	if (!f->mm)
540 		goto err;
541 
542 	va = phys_to_virt(tee_mm_get_smem(f->mm), MEM_AREA_TA_RAM);
543 	if (!va)
544 		goto err;
545 
546 	memset(va, 0, size);
547 	f->fobj.ops = &ops_sec_mem;
548 	f->fobj.num_pages = num_pages;
549 	refcount_set(&f->fobj.refc, 1);
550 
551 	return &f->fobj;
552 err:
553 	tee_mm_free(f->mm);
554 	free(f);
555 
556 	return NULL;
557 }
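
/*
 * Minimal usage sketch (hypothetical caller): without paged user TAs the
 * pages are ordinary, always-resident secure memory, so the physical address
 * of each page can be queried directly through ops->get_pa(). Assumes the
 * fobj_put() helper declared in <mm/fobj.h>.
 */
static paddr_t __maybe_unused sec_mem_example(void)
{
	struct fobj *f = fobj_sec_mem_alloc(2);
	paddr_t pa = 0;

	if (!f)
		return 0;

	/* Physical address of the second zero-initialized page */
	pa = f->ops->get_pa(f, 1);

	/* Drop the initial reference; sec_mem_free() releases the memory */
	fobj_put(f);

	return pa; /* illustrative only; the pages were just freed */
}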
558 
559 static struct fobj_sec_mem *to_sec_mem(struct fobj *fobj)
560 {
561 	assert(fobj->ops == &ops_sec_mem);
562 
563 	return container_of(fobj, struct fobj_sec_mem, fobj);
564 }
565 
566 static void sec_mem_free(struct fobj *fobj)
567 {
568 	struct fobj_sec_mem *f = to_sec_mem(fobj);
569 
570 	assert(!refcount_val(&fobj->refc));
571 	tee_mm_free(f->mm);
572 	free(f);
573 }
574 
575 static paddr_t sec_mem_get_pa(struct fobj *fobj, unsigned int page_idx)
576 {
577 	struct fobj_sec_mem *f = to_sec_mem(fobj);
578 
579 	assert(refcount_val(&fobj->refc));
580 	assert(page_idx < fobj->num_pages);
581 
582 	return tee_mm_get_smem(f->mm) + page_idx * SMALL_PAGE_SIZE;
583 }
584 
585 static struct fobj_ops ops_sec_mem __rodata_unpaged = {
586 	.free = sec_mem_free,
587 	.get_pa = sec_mem_get_pa,
588 };
589 
590 #endif /*CFG_PAGED_USER_TA*/
591