xref: /optee_os/core/mm/fobj.c (revision df24e6517b6454cf906c16979ea0e7546c5c99d5)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2019, Linaro Limited
 */

#include <crypto/crypto.h>
#include <crypto/internal_aes-gcm.h>
#include <kernel/generic_boot.h>
#include <kernel/panic.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/fobj.h>
#include <mm/tee_mm.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

#ifdef CFG_WITH_PAGER

#define RWP_AE_KEY_BITS		256

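/*
 * The 96-bit GCM initialization vector, stored as three 32-bit words and
 * passed by pointer and size to internal_aes_gcm_enc()/_dec() below.
 */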
struct rwp_aes_gcm_iv {
	uint32_t iv[3];
};

#define RWP_AES_GCM_TAG_LEN	16

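/*
 * Per-page crypto state for a read/write paged fobj. @iv is a counter
 * increased for each encryption of the page and @tag holds the AES-GCM
 * authentication tag of the latest encrypted copy in the backing store.
 * An @iv of zero means the page has never been saved.
 */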
struct rwp_state {
	uint64_t iv;
	uint8_t tag[RWP_AES_GCM_TAG_LEN];
};

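/*
 * A read/write paged fobj. @store points at the encrypted backing store
 * in secure DDR (one SMALL_PAGE_SIZE slot per page) and @state at the
 * array of per-page rwp_state entries.
 */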
struct fobj_rwp {
	uint8_t *store;
	struct rwp_state *state;
	struct fobj fobj;
};

static const struct fobj_ops ops_rw_paged;

static struct internal_aes_gcm_key rwp_ae_key;

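/*
 * Generates the AES-GCM key used to authenticate and encrypt pages as
 * they are written back to the store. Panics if no entropy is available
 * or if the key cannot be expanded.
 */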
void fobj_generate_authenc_key(void)
{
	uint8_t key[RWP_AE_KEY_BITS / 8] = { 0 };

	if (crypto_rng_read(key, sizeof(key)) != TEE_SUCCESS)
		panic("failed to generate random");
	if (internal_aes_gcm_expand_enc_key(key, sizeof(key), &rwp_ae_key))
		panic("failed to expand key");
}

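/*
 * Common initialization of the embedded struct fobj: one initial
 * reference and an empty list of pager areas.
 */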
static void fobj_init(struct fobj *fobj, const struct fobj_ops *ops,
		      unsigned int num_pages)
{
	fobj->ops = ops;
	fobj->num_pages = num_pages;
	refcount_set(&fobj->refc, 1);
	TAILQ_INIT(&fobj->areas);
}

static void fobj_uninit(struct fobj *fobj)
{
	assert(!refcount_val(&fobj->refc));
	assert(TAILQ_EMPTY(&fobj->areas));
	tee_pager_invalidate_fobj(fobj);
}

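/*
 * Allocates a read/write paged fobj: a zeroed rwp_state entry per page
 * plus an encrypted backing store in secure DDR. Pages are decrypted on
 * demand by rwp_load_page() and re-encrypted by rwp_save_page() when the
 * pager evicts them.
 *
 * Illustrative use (error handling elided), assuming the fobj_put()
 * helper from <mm/fobj.h> is used to drop the initial reference:
 *
 *	struct fobj *f = fobj_rw_paged_alloc(4);
 *
 *	... hand f over to the pager, map it, use it ...
 *	fobj_put(f);
 */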
struct fobj *fobj_rw_paged_alloc(unsigned int num_pages)
{
	tee_mm_entry_t *mm = NULL;
	struct fobj_rwp *rwp = NULL;
	size_t size = 0;

	assert(num_pages);

	rwp = calloc(1, sizeof(*rwp));
	if (!rwp)
		return NULL;

	rwp->state = calloc(num_pages, sizeof(*rwp->state));
	if (!rwp->state)
		goto err;

	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
		goto err;
	mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
	if (!mm)
		goto err;
	rwp->store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM);
	assert(rwp->store); /* to assist debugging should this ever happen */
	if (!rwp->store)
		goto err;

	fobj_init(&rwp->fobj, &ops_rw_paged, num_pages);

	return &rwp->fobj;

err:
	tee_mm_free(mm);
	free(rwp->state);
	free(rwp);

	return NULL;
}

static struct fobj_rwp *to_rwp(struct fobj *fobj)
{
	assert(fobj->ops == &ops_rw_paged);

	return container_of(fobj, struct fobj_rwp, fobj);
}

static void rwp_free(struct fobj *fobj)
{
	struct fobj_rwp *rwp = to_rwp(fobj);

	fobj_uninit(fobj);
	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rwp->store)));
	free(rwp->state);
	free(rwp);
}

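/*
 * Loads page @page_idx into @va: pages that have never been saved (IV
 * still zero) are served as all zeros, otherwise the page is decrypted
 * and authenticated from the backing store with the IV and tag recorded
 * in its rwp_state.
 */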
static TEE_Result rwp_load_page(struct fobj *fobj, unsigned int page_idx,
				void *va)
{
	struct fobj_rwp *rwp = to_rwp(fobj);
	struct rwp_state *state = rwp->state + page_idx;
	uint8_t *src = rwp->store + page_idx * SMALL_PAGE_SIZE;
	struct rwp_aes_gcm_iv iv = {
		.iv = { (vaddr_t)state, state->iv >> 32, state->iv }
	};

	assert(refcount_val(&fobj->refc));
	assert(page_idx < fobj->num_pages);

	if (!state->iv) {
		/*
		 * IV still zero, which means that this is a previously
		 * unused page.
		 */
		memset(va, 0, SMALL_PAGE_SIZE);
		return TEE_SUCCESS;
	}

	return internal_aes_gcm_dec(&rwp_ae_key, &iv, sizeof(iv),
				    NULL, 0, src, SMALL_PAGE_SIZE, va,
				    state->tag, sizeof(state->tag));
}
KEEP_PAGER(rwp_load_page);

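/*
 * Encrypts @va into the backing store slot for @page_idx. The per-page
 * IV counter is increased before each encryption so that the same
 * (key, IV) pair is never reused, and the new authentication tag is
 * recorded for the next load. Saving is a no-op for a fobj whose
 * reference count has already reached zero.
 */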
static TEE_Result rwp_save_page(struct fobj *fobj, unsigned int page_idx,
				const void *va)
{
	struct fobj_rwp *rwp = to_rwp(fobj);
	struct rwp_state *state = rwp->state + page_idx;
	size_t tag_len = sizeof(state->tag);
	uint8_t *dst = rwp->store + page_idx * SMALL_PAGE_SIZE;
	struct rwp_aes_gcm_iv iv;

	memset(&iv, 0, sizeof(iv));

	if (!refcount_val(&fobj->refc)) {
		/*
		 * This fobj is being torn down; it just hasn't had the time
		 * to call tee_pager_invalidate_fobj() yet.
		 */
		assert(TAILQ_EMPTY(&fobj->areas));
		return TEE_SUCCESS;
	}

	assert(page_idx < fobj->num_pages);
	assert(state->iv + 1 > state->iv);

	state->iv++;
	/*
	 * IV is constructed as recommended in section "8.2.1 Deterministic
	 * Construction" of "Recommendation for Block Cipher Modes of
	 * Operation: Galois/Counter Mode (GCM) and GMAC",
	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
	 */

	iv.iv[0] = (vaddr_t)state;
	iv.iv[1] = state->iv >> 32;
	iv.iv[2] = state->iv;

	return internal_aes_gcm_enc(&rwp_ae_key, &iv, sizeof(iv),
				    NULL, 0, va, SMALL_PAGE_SIZE, dst,
				    state->tag, &tag_len);
}
KEEP_PAGER(rwp_save_page);

static const struct fobj_ops ops_rw_paged __rodata_unpaged = {
	.free = rwp_free,
	.load_page = rwp_load_page,
	.save_page = rwp_save_page,
};

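/*
 * A read-only paged fobj. The pages are stored in the clear in @store
 * and verified against the SHA-256 hashes in @hashes each time they are
 * loaded.
 */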
struct fobj_rop {
	uint8_t *hashes;
	uint8_t *store;
	struct fobj fobj;
};

static const struct fobj_ops ops_ro_paged;

static void rop_init(struct fobj_rop *rop, const struct fobj_ops *ops,
		     unsigned int num_pages, void *hashes, void *store)
{
	rop->hashes = hashes;
	rop->store = store;
	fobj_init(&rop->fobj, ops, num_pages);
}

struct fobj *fobj_ro_paged_alloc(unsigned int num_pages, void *hashes,
				 void *store)
{
	struct fobj_rop *rop = NULL;

	assert(num_pages && hashes && store);

	rop = calloc(1, sizeof(*rop));
	if (!rop)
		return NULL;

	rop_init(rop, &ops_ro_paged, num_pages, hashes, store);

	return &rop->fobj;
}

static struct fobj_rop *to_rop(struct fobj *fobj)
{
	assert(fobj->ops == &ops_ro_paged);

	return container_of(fobj, struct fobj_rop, fobj);
}

static void rop_uninit(struct fobj_rop *rop)
{
	fobj_uninit(&rop->fobj);
	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rop->store)));
	free(rop->hashes);
}

static void rop_free(struct fobj *fobj)
{
	struct fobj_rop *rop = to_rop(fobj);

	rop_uninit(rop);
	free(rop);
}

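/*
 * Copies page @page_idx from the store into @va and checks it against
 * its SHA-256 hash, returning an error if the content doesn't match.
 */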
static TEE_Result rop_load_page_helper(struct fobj_rop *rop,
				       unsigned int page_idx, void *va)
{
	const uint8_t *hash = rop->hashes + page_idx * TEE_SHA256_HASH_SIZE;
	const uint8_t *src = rop->store + page_idx * SMALL_PAGE_SIZE;

	assert(refcount_val(&rop->fobj.refc));
	assert(page_idx < rop->fobj.num_pages);
	memcpy(va, src, SMALL_PAGE_SIZE);

	return hash_sha256_check(hash, va, SMALL_PAGE_SIZE);
}

static TEE_Result rop_load_page(struct fobj *fobj, unsigned int page_idx,
				void *va)
{
	return rop_load_page_helper(to_rop(fobj), page_idx, va);
}
KEEP_PAGER(rop_load_page);

static TEE_Result rop_save_page(struct fobj *fobj __unused,
				unsigned int page_idx __unused,
				const void *va __unused)
{
	return TEE_ERROR_GENERIC;
}
KEEP_PAGER(rop_save_page);

static const struct fobj_ops ops_ro_paged __rodata_unpaged = {
	.free = rop_free,
	.load_page = rop_load_page,
	.save_page = rop_save_page,
};

#ifdef CFG_CORE_ASLR
/*
 * When using relocated pages the relocation information must be applied
 * before the pages can be used. With read-only paging the content is only
 * integrity protected, so relocations cannot be applied to pages in the
 * less secure "store" or the load address selected by ASLR could be given
 * away. This means that each time a page has been loaded and verified it
 * has to have its relocation information applied before it can be used.
 *
 * Only relative relocations are supported, which allows a rather compact
 * representation of the needed relocation information in this struct.
 * r_offset is replaced with the offset into the page that needs to be
 * updated. This number can never be larger than SMALL_PAGE_SIZE, so a
 * uint16_t is enough to represent it.
 *
 * All relocations are converted and stored in @relocs. @page_reloc_idx is
 * an array of length @rop.fobj.num_pages with an entry for each page. If
 * @page_reloc_idx[page_idx] isn't UINT16_MAX it's an index into @relocs.
 */
struct fobj_ro_reloc_paged {
	uint16_t *page_reloc_idx;
	uint16_t *relocs;
	unsigned int num_relocs;
	struct fobj_rop rop;
};

static const struct fobj_ops ops_ro_reloc_paged;

static unsigned int get_num_rels(unsigned int num_pages,
				 unsigned int reloc_offs,
				 const uint32_t *reloc, unsigned int num_relocs)
{
	const unsigned int align_mask __maybe_unused = sizeof(long) - 1;
	unsigned int nrels = 0;
	unsigned int n = 0;
	vaddr_t offs = 0;

	/*
	 * Count the number of relocations which are needed for these
	 * pages. Also check that the data is well formed: only expected
	 * relocation entries, sorted in order of the address they apply to.
	 */
	for (; n < num_relocs; n++) {
		assert(ALIGNMENT_IS_OK(reloc[n], unsigned long));
		assert(offs < reloc[n]);	/* check that it's sorted */
		offs = reloc[n];
		if (offs >= reloc_offs &&
		    offs <= reloc_offs + num_pages * SMALL_PAGE_SIZE)
			nrels++;
	}

	return nrels;
}

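/*
 * Converts the relocations that fall within this fobj into page-relative
 * offsets stored in @relocs and records in @page_reloc_idx where each
 * page's relocations start (UINT16_MAX for pages without any).
 */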
static void init_rels(struct fobj_ro_reloc_paged *rrp, unsigned int reloc_offs,
		      const uint32_t *reloc, unsigned int num_relocs)
{
	unsigned int npg = rrp->rop.fobj.num_pages;
	unsigned int pg_idx = 0;
	unsigned int reln = 0;
	unsigned int n = 0;
	uint32_t r = 0;

	for (n = 0; n < npg; n++)
		rrp->page_reloc_idx[n] = UINT16_MAX;

	for (n = 0; n < num_relocs ; n++) {
		if (reloc[n] < reloc_offs)
			continue;

		/* r is the offset from beginning of this fobj */
		r = reloc[n] - reloc_offs;

		pg_idx = r / SMALL_PAGE_SIZE;
		if (pg_idx >= npg)
			break;

		if (rrp->page_reloc_idx[pg_idx] == UINT16_MAX)
			rrp->page_reloc_idx[pg_idx] = reln;
		rrp->relocs[reln] = r - pg_idx * SMALL_PAGE_SIZE;
		reln++;
	}

	assert(reln == rrp->num_relocs);
}

struct fobj *fobj_ro_reloc_paged_alloc(unsigned int num_pages, void *hashes,
				       unsigned int reloc_offs,
				       const void *reloc,
				       unsigned int reloc_len, void *store)
{
	struct fobj_ro_reloc_paged *rrp = NULL;
	const unsigned int num_relocs = reloc_len / sizeof(uint32_t);
	unsigned int nrels = 0;

	assert(ALIGNMENT_IS_OK(reloc, uint32_t));
	assert(ALIGNMENT_IS_OK(reloc_len, uint32_t));
	assert(num_pages && hashes && store);
	if (!reloc_len) {
		assert(!reloc);
		return fobj_ro_paged_alloc(num_pages, hashes, store);
	}
	assert(reloc);

	nrels = get_num_rels(num_pages, reloc_offs, reloc, num_relocs);
	if (!nrels)
		return fobj_ro_paged_alloc(num_pages, hashes, store);

	rrp = calloc(1, sizeof(*rrp) + num_pages * sizeof(uint16_t) +
			nrels * sizeof(uint16_t));
	if (!rrp)
		return NULL;
	rop_init(&rrp->rop, &ops_ro_reloc_paged, num_pages, hashes, store);
	rrp->page_reloc_idx = (uint16_t *)(rrp + 1);
	rrp->relocs = rrp->page_reloc_idx + num_pages;
	rrp->num_relocs = nrels;
	init_rels(rrp, reloc_offs, reloc, num_relocs);

	return &rrp->rop.fobj;
}

static struct fobj_ro_reloc_paged *to_rrp(struct fobj *fobj)
{
	assert(fobj->ops == &ops_ro_reloc_paged);

	return container_of(fobj, struct fobj_ro_reloc_paged, rop.fobj);
}

static void rrp_free(struct fobj *fobj)
{
	struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);

	rop_uninit(&rrp->rop);
	free(rrp);
}

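/*
 * Loads and verifies the page just like rop_load_page() and then applies
 * the relative relocations recorded for that page by adding the ASLR
 * load offset to each location.
 */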
static TEE_Result rrp_load_page(struct fobj *fobj, unsigned int page_idx,
				void *va)
{
	struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);
	unsigned int end_rel = rrp->num_relocs;
	TEE_Result res = TEE_SUCCESS;
	unsigned long *where = NULL;
	unsigned int n = 0;

	res = rop_load_page_helper(&rrp->rop, page_idx, va);
	if (res)
		return res;

	/* Find the reloc index of the next page to tell when we're done */
	for (n = page_idx + 1; n < fobj->num_pages; n++) {
		if (rrp->page_reloc_idx[n] != UINT16_MAX) {
			end_rel = rrp->page_reloc_idx[n];
			break;
		}
	}

	for (n = rrp->page_reloc_idx[page_idx]; n < end_rel; n++) {
		where = (void *)((vaddr_t)va + rrp->relocs[n]);
		*where += boot_mmu_config.load_offset;
	}

	return TEE_SUCCESS;
}
KEEP_PAGER(rrp_load_page);

static const struct fobj_ops ops_ro_reloc_paged __rodata_unpaged = {
	.free = rrp_free,
	.load_page = rrp_load_page,
	.save_page = rop_save_page, /* Direct reuse */
};
#endif /*CFG_CORE_ASLR*/

static const struct fobj_ops ops_locked_paged;

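/*
 * Allocates a locked paged fobj. There is no backing store: pages are
 * zero-filled when first paged in and lop_save_page() always fails, so
 * the pager is expected to keep these pages resident for as long as
 * their content matters.
 */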
struct fobj *fobj_locked_paged_alloc(unsigned int num_pages)
{
	struct fobj *f = NULL;

	assert(num_pages);

	f = calloc(1, sizeof(*f));
	if (!f)
		return NULL;

	fobj_init(f, &ops_locked_paged, num_pages);

	return f;
}

static void lop_free(struct fobj *fobj)
{
	assert(fobj->ops == &ops_locked_paged);
	fobj_uninit(fobj);
	free(fobj);
}

static TEE_Result lop_load_page(struct fobj *fobj __maybe_unused,
				unsigned int page_idx __maybe_unused,
				void *va)
{
	assert(fobj->ops == &ops_locked_paged);
	assert(refcount_val(&fobj->refc));
	assert(page_idx < fobj->num_pages);

	memset(va, 0, SMALL_PAGE_SIZE);

	return TEE_SUCCESS;
}
KEEP_PAGER(lop_load_page);

static TEE_Result lop_save_page(struct fobj *fobj __unused,
				unsigned int page_idx __unused,
				const void *va __unused)
{
	return TEE_ERROR_GENERIC;
}
KEEP_PAGER(lop_save_page);

static const struct fobj_ops ops_locked_paged __rodata_unpaged = {
	.free = lop_free,
	.load_page = lop_load_page,
	.save_page = lop_save_page,
};
#endif /*CFG_WITH_PAGER*/

#ifndef CFG_PAGED_USER_TA

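/*
 * When user TAs aren't paged a fobj is simply a physically contiguous
 * allocation from secure DDR. sec_mem_get_pa() exposes the physical
 * address of each page directly instead of load/save callbacks.
 */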
struct fobj_sec_mem {
	tee_mm_entry_t *mm;
	struct fobj fobj;
};

static struct fobj_ops ops_sec_mem;

struct fobj *fobj_sec_mem_alloc(unsigned int num_pages)
{
	struct fobj_sec_mem *f = calloc(1, sizeof(*f));
	size_t size = 0;
	void *va = NULL;

	if (!f)
		return NULL;

	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
		goto err;

	f->mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
	if (!f->mm)
		goto err;

	va = phys_to_virt(tee_mm_get_smem(f->mm), MEM_AREA_TA_RAM);
	if (!va)
		goto err;

	memset(va, 0, size);
	f->fobj.ops = &ops_sec_mem;
	f->fobj.num_pages = num_pages;
	refcount_set(&f->fobj.refc, 1);

	return &f->fobj;
err:
	tee_mm_free(f->mm);
	free(f);

	return NULL;
}

static struct fobj_sec_mem *to_sec_mem(struct fobj *fobj)
{
	assert(fobj->ops == &ops_sec_mem);

	return container_of(fobj, struct fobj_sec_mem, fobj);
}

static void sec_mem_free(struct fobj *fobj)
{
	struct fobj_sec_mem *f = to_sec_mem(fobj);

	assert(!refcount_val(&fobj->refc));
	tee_mm_free(f->mm);
	free(f);
}

static paddr_t sec_mem_get_pa(struct fobj *fobj, unsigned int page_idx)
{
	struct fobj_sec_mem *f = to_sec_mem(fobj);

	assert(refcount_val(&fobj->refc));
	assert(page_idx < fobj->num_pages);

	return tee_mm_get_smem(f->mm) + page_idx * SMALL_PAGE_SIZE;
}

static struct fobj_ops ops_sec_mem __rodata_unpaged = {
	.free = sec_mem_free,
	.get_pa = sec_mem_get_pa,
};

#endif /*!CFG_PAGED_USER_TA*/