1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2019, Linaro Limited
4  */
5 
6 #include <crypto/crypto.h>
7 #include <crypto/internal_aes-gcm.h>
8 #include <initcall.h>
9 #include <kernel/boot.h>
10 #include <kernel/panic.h>
11 #include <mm/core_memprot.h>
12 #include <mm/core_mmu.h>
13 #include <mm/fobj.h>
14 #include <mm/tee_mm.h>
15 #include <stdlib.h>
16 #include <string.h>
17 #include <tee_api_types.h>
18 #include <types_ext.h>
19 #include <util.h>
20 
21 #ifdef CFG_WITH_PAGER
22 
23 #define RWP_AE_KEY_BITS		256
24 
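/*
 * 96-bit AES-GCM initialization vector. The three 32-bit words are filled
 * in by rwp_load_page() and rwp_save_page() from the virtual address of
 * the per-page state and the 64-bit counter kept in struct rwp_state.
 */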
25 struct rwp_aes_gcm_iv {
26 	uint32_t iv[3];
27 };
28 
29 #define RWP_AES_GCM_TAG_LEN	16
30 
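/*
 * Per-page paging state: @iv is the counter part of the AES-GCM IV, where
 * zero means the page has never been saved, and @tag is the authentication
 * tag produced when the page was last encrypted.
 */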
31 struct rwp_state {
32 	uint64_t iv;
33 	uint8_t tag[RWP_AES_GCM_TAG_LEN];
34 };
35 
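/*
 * Read/write paged fobj: @store points to the encrypted backing store in
 * TA RAM and @state is an array with one struct rwp_state per page.
 */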
36 struct fobj_rwp {
37 	uint8_t *store;
38 	struct rwp_state *state;
39 	struct fobj fobj;
40 };
41 
42 static const struct fobj_ops ops_rw_paged;
43 
44 static struct internal_aes_gcm_key rwp_ae_key;
45 
46 /*
47  * fobj_generate_authenc_key() - Generate authenticated encryption key
48  *
49  * Generates the authenticated encryption key protecting the contents of
50  * all fobjs allocated with fobj_rw_paged_alloc().
51  */
52 static TEE_Result fobj_generate_authenc_key(void)
53 {
54 	uint8_t key[RWP_AE_KEY_BITS / 8] = { 0 };
55 
56 	if (crypto_rng_read(key, sizeof(key)) != TEE_SUCCESS)
57 		panic("failed to generate random");
58 	if (crypto_aes_expand_enc_key(key, sizeof(key), rwp_ae_key.data,
59 				      sizeof(rwp_ae_key.data),
60 				      &rwp_ae_key.rounds))
61 		panic("failed to expand key");
62 
63 	return TEE_SUCCESS;
64 }
65 driver_init_late(fobj_generate_authenc_key);
66 
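/*
 * Common initialization of the generic part of a fobj: assigns the ops
 * vector and page count, takes the initial reference and initializes the
 * list of areas mapping the fobj.
 */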
67 static void fobj_init(struct fobj *fobj, const struct fobj_ops *ops,
68 		      unsigned int num_pages)
69 {
70 	fobj->ops = ops;
71 	fobj->num_pages = num_pages;
72 	refcount_set(&fobj->refc, 1);
73 	TAILQ_INIT(&fobj->areas);
74 }
75 
76 static void fobj_uninit(struct fobj *fobj)
77 {
78 	assert(!refcount_val(&fobj->refc));
79 	assert(TAILQ_EMPTY(&fobj->areas));
80 	tee_pager_invalidate_fobj(fobj);
81 }
82 
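/*
 * Allocates a read/write paged fobj with an encrypted backing store in TA
 * RAM. The per-page state is zero-initialized by calloc(), so the first
 * load of each page yields a zero-filled page rather than a decryption of
 * the store.
 */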
83 struct fobj *fobj_rw_paged_alloc(unsigned int num_pages)
84 {
85 	tee_mm_entry_t *mm = NULL;
86 	struct fobj_rwp *rwp = NULL;
87 	size_t size = 0;
88 
89 	assert(num_pages);
90 
91 	rwp = calloc(1, sizeof(*rwp));
92 	if (!rwp)
93 		return NULL;
94 
95 	rwp->state = calloc(num_pages, sizeof(*rwp->state));
96 	if (!rwp->state)
97 		goto err;
98 
99 	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
100 		goto err;
101 	mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
102 	if (!mm)
103 		goto err;
104 	rwp->store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM);
105 	assert(rwp->store); /* to assist debugging should this ever happen */
106 	if (!rwp->store)
107 		goto err;
108 
109 	fobj_init(&rwp->fobj, &ops_rw_paged, num_pages);
110 
111 	return &rwp->fobj;
112 
113 err:
114 	tee_mm_free(mm);
115 	free(rwp->state);
116 	free(rwp);
117 
118 	return NULL;
119 }
120 
121 static struct fobj_rwp *to_rwp(struct fobj *fobj)
122 {
123 	assert(fobj->ops == &ops_rw_paged);
124 
125 	return container_of(fobj, struct fobj_rwp, fobj);
126 }
127 
128 static void rwp_free(struct fobj *fobj)
129 {
130 	struct fobj_rwp *rwp = to_rwp(fobj);
131 
132 	fobj_uninit(fobj);
133 	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rwp->store)));
134 	free(rwp->state);
135 	free(rwp);
136 }
137 
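/*
 * Loads a page from the encrypted store by decrypting and authenticating
 * it with AES-GCM. A zero IV counter means the page has never been saved,
 * in which case a zero-filled page is supplied instead.
 */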
138 static TEE_Result rwp_load_page(struct fobj *fobj, unsigned int page_idx,
139 				void *va)
140 {
141 	struct fobj_rwp *rwp = to_rwp(fobj);
142 	struct rwp_state *state = rwp->state + page_idx;
143 	uint8_t *src = rwp->store + page_idx * SMALL_PAGE_SIZE;
144 	struct rwp_aes_gcm_iv iv = {
145 		.iv = { (vaddr_t)state, state->iv >> 32, state->iv }
146 	};
147 
148 	assert(refcount_val(&fobj->refc));
149 	assert(page_idx < fobj->num_pages);
150 
151 	if (!state->iv) {
152 		/*
153 		 * The IV is still zero, which means that this is a
154 		 * previously unused page.
155 		 */
156 		memset(va, 0, SMALL_PAGE_SIZE);
157 		return TEE_SUCCESS;
158 	}
159 
160 	return internal_aes_gcm_dec(&rwp_ae_key, &iv, sizeof(iv),
161 				    NULL, 0, src, SMALL_PAGE_SIZE, va,
162 				    state->tag, sizeof(state->tag));
163 }
164 DECLARE_KEEP_PAGER(rwp_load_page);
165 
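/*
 * Saves a page by encrypting it into the backing store. The IV counter is
 * incremented before each encryption so that no IV is ever reused with
 * the key generated by fobj_generate_authenc_key().
 */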
166 static TEE_Result rwp_save_page(struct fobj *fobj, unsigned int page_idx,
167 				const void *va)
168 {
169 	struct fobj_rwp *rwp = to_rwp(fobj);
170 	struct rwp_state *state = rwp->state + page_idx;
171 	size_t tag_len = sizeof(state->tag);
172 	uint8_t *dst = rwp->store + page_idx * SMALL_PAGE_SIZE;
173 	struct rwp_aes_gcm_iv iv;
174 
175 	memset(&iv, 0, sizeof(iv));
176 
177 	if (!refcount_val(&fobj->refc)) {
178 		/*
179 		 * This fobj is being torn down; it just hasn't had time to
180 		 * call tee_pager_invalidate_fobj() yet.
181 		 */
182 		assert(TAILQ_EMPTY(&fobj->areas));
183 		return TEE_SUCCESS;
184 	}
185 
186 	assert(page_idx < fobj->num_pages);
187 	assert(state->iv + 1 > state->iv);
188 
189 	state->iv++;
190 	/*
191 	 * IV is constructed as recommended in section "8.2.1 Deterministic
192 	 * Construction" of "Recommendation for Block Cipher Modes of
193 	 * Operation: Galois/Counter Mode (GCM) and GMAC",
194 	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
195 	 */
196 
197 	iv.iv[0] = (vaddr_t)state;
198 	iv.iv[1] = state->iv >> 32;
199 	iv.iv[2] = state->iv;
200 
201 	return internal_aes_gcm_enc(&rwp_ae_key, &iv, sizeof(iv),
202 				    NULL, 0, va, SMALL_PAGE_SIZE, dst,
203 				    state->tag, &tag_len);
204 }
205 DECLARE_KEEP_PAGER(rwp_save_page);
206 
207 static const struct fobj_ops ops_rw_paged __rodata_unpaged = {
208 	.free = rwp_free,
209 	.load_page = rwp_load_page,
210 	.save_page = rwp_save_page,
211 };
212 
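/*
 * Read-only paged fobj: @store holds the paged content and @hashes holds
 * one SHA-256 hash per page, used to verify each page as it is loaded.
 */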
213 struct fobj_rop {
214 	uint8_t *hashes;
215 	uint8_t *store;
216 	struct fobj fobj;
217 };
218 
219 static const struct fobj_ops ops_ro_paged;
220 
221 static void rop_init(struct fobj_rop *rop, const struct fobj_ops *ops,
222 		     unsigned int num_pages, void *hashes, void *store)
223 {
224 	rop->hashes = hashes;
225 	rop->store = store;
226 	fobj_init(&rop->fobj, ops, num_pages);
227 }
228 
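/*
 * Allocates a read-only paged fobj. The caller supplies the backing store
 * and the per-page hashes; both are taken over by the fobj and released
 * again by rop_free().
 */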
229 struct fobj *fobj_ro_paged_alloc(unsigned int num_pages, void *hashes,
230 				 void *store)
231 {
232 	struct fobj_rop *rop = NULL;
233 
234 	assert(num_pages && hashes && store);
235 
236 	rop = calloc(1, sizeof(*rop));
237 	if (!rop)
238 		return NULL;
239 
240 	rop_init(rop, &ops_ro_paged, num_pages, hashes, store);
241 
242 	return &rop->fobj;
243 }
244 
245 static struct fobj_rop *to_rop(struct fobj *fobj)
246 {
247 	assert(fobj->ops == &ops_ro_paged);
248 
249 	return container_of(fobj, struct fobj_rop, fobj);
250 }
251 
252 static void rop_uninit(struct fobj_rop *rop)
253 {
254 	fobj_uninit(&rop->fobj);
255 	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rop->store)));
256 	free(rop->hashes);
257 }
258 
259 static void rop_free(struct fobj *fobj)
260 {
261 	struct fobj_rop *rop = to_rop(fobj);
262 
263 	rop_uninit(rop);
264 	free(rop);
265 }
266 
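/*
 * Copies a page from the backing store into @va and verifies it against
 * the corresponding SHA-256 hash.
 */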
267 static TEE_Result rop_load_page_helper(struct fobj_rop *rop,
268 				       unsigned int page_idx, void *va)
269 {
270 	const uint8_t *hash = rop->hashes + page_idx * TEE_SHA256_HASH_SIZE;
271 	const uint8_t *src = rop->store + page_idx * SMALL_PAGE_SIZE;
272 
273 	assert(refcount_val(&rop->fobj.refc));
274 	assert(page_idx < rop->fobj.num_pages);
275 	memcpy(va, src, SMALL_PAGE_SIZE);
276 
277 	return hash_sha256_check(hash, va, SMALL_PAGE_SIZE);
278 }
279 
280 static TEE_Result rop_load_page(struct fobj *fobj, unsigned int page_idx,
281 				void *va)
282 {
283 	return rop_load_page_helper(to_rop(fobj), page_idx, va);
284 }
285 DECLARE_KEEP_PAGER(rop_load_page);
286 
287 static TEE_Result rop_save_page(struct fobj *fobj __unused,
288 				unsigned int page_idx __unused,
289 				const void *va __unused)
290 {
291 	return TEE_ERROR_GENERIC;
292 }
293 DECLARE_KEEP_PAGER(rop_save_page);
294 
295 static const struct fobj_ops ops_ro_paged __rodata_unpaged = {
296 	.free = rop_free,
297 	.load_page = rop_load_page,
298 	.save_page = rop_save_page,
299 };
300 
301 #ifdef CFG_CORE_ASLR
302 /*
303  * When using relocated pages the relocation information must be applied
304  * before the pages can be used. With read-only paging the content is only
305  * integrity protected, so relocations cannot be applied to pages in the less
306  * secure "store" or the load_address selected by ASLR could be given away.
307  * This means that each time a page has been loaded and verified it has to
308  * have its relocation information applied before it can be used.
309  *
310  * Only relative relocations are supported, which allows a rather compact
311  * representation of the needed relocation information in this struct.
312  * r_offset is replaced with the offset into the page that needs to be
313  * updated; this number can never be larger than SMALL_PAGE_SIZE so a
314  * uint16_t can be used to represent it.
315  *
316  * All relocations are converted and stored in @relocs. @page_reloc_idx is
317  * an array of length @rop.fobj.num_pages with an entry for each page. If
318  * @page_reloc_idx[page_idx] isn't UINT16_MAX it's an index into @relocs.
319  */
320 struct fobj_ro_reloc_paged {
321 	uint16_t *page_reloc_idx;
322 	uint16_t *relocs;
323 	unsigned int num_relocs;
324 	struct fobj_rop rop;
325 };
326 
327 static const struct fobj_ops ops_ro_reloc_paged;
328 
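/*
 * Returns the number of relocations that apply to the pages covered by
 * this fobj, that is, relocations with an offset within
 * [reloc_offs, reloc_offs + num_pages * SMALL_PAGE_SIZE].
 */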
329 static unsigned int get_num_rels(unsigned int num_pages,
330 				 unsigned int reloc_offs,
331 				 const uint32_t *reloc, unsigned int num_relocs)
332 {
333 	const unsigned int align_mask __maybe_unused = sizeof(long) - 1;
334 	unsigned int nrels = 0;
335 	unsigned int n = 0;
336 	vaddr_t offs = 0;
337 
338 	/*
339 	 * Count the number of relocations which are needed for these
340 	 * pages. Also check that the data is well formed: only expected
341 	 * relocations, sorted in order of the addresses they apply to.
342 	 */
343 	for (; n < num_relocs; n++) {
344 		assert(ALIGNMENT_IS_OK(reloc[n], unsigned long));
345 		assert(offs < reloc[n]);	/* check that it's sorted */
346 		offs = reloc[n];
347 		if (offs >= reloc_offs &&
348 		    offs <= reloc_offs + num_pages * SMALL_PAGE_SIZE)
349 			nrels++;
350 	}
351 
352 	return nrels;
353 }
354 
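/*
 * Converts the raw relocation offsets into the compact per-page
 * representation described above: @page_reloc_idx gets the index of the
 * first relocation for each page (or UINT16_MAX if there is none) and
 * @relocs gets the page-relative offsets.
 */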
355 static void init_rels(struct fobj_ro_reloc_paged *rrp, unsigned int reloc_offs,
356 		      const uint32_t *reloc, unsigned int num_relocs)
357 {
358 	unsigned int npg = rrp->rop.fobj.num_pages;
359 	unsigned int pg_idx = 0;
360 	unsigned int reln = 0;
361 	unsigned int n = 0;
362 	uint32_t r = 0;
363 
364 	for (n = 0; n < npg; n++)
365 		rrp->page_reloc_idx[n] = UINT16_MAX;
366 
367 	for (n = 0; n < num_relocs ; n++) {
368 		if (reloc[n] < reloc_offs)
369 			continue;
370 
371 		/* r is the offset from the beginning of this fobj */
372 		r = reloc[n] - reloc_offs;
373 
374 		pg_idx = r / SMALL_PAGE_SIZE;
375 		if (pg_idx >= npg)
376 			break;
377 
378 		if (rrp->page_reloc_idx[pg_idx] == UINT16_MAX)
379 			rrp->page_reloc_idx[pg_idx] = reln;
380 		rrp->relocs[reln] = r - pg_idx * SMALL_PAGE_SIZE;
381 		reln++;
382 	}
383 
384 	assert(reln == rrp->num_relocs);
385 }
386 
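/*
 * Allocates a read-only paged fobj with relocation support. If no
 * relocations apply to these pages a plain read-only paged fobj is
 * returned instead. The struct, @page_reloc_idx and @relocs are carved
 * out of a single allocation, with the two arrays placed directly after
 * the struct.
 */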
387 struct fobj *fobj_ro_reloc_paged_alloc(unsigned int num_pages, void *hashes,
388 				       unsigned int reloc_offs,
389 				       const void *reloc,
390 				       unsigned int reloc_len, void *store)
391 {
392 	struct fobj_ro_reloc_paged *rrp = NULL;
393 	const unsigned int num_relocs = reloc_len / sizeof(uint32_t);
394 	unsigned int nrels = 0;
395 
396 	assert(ALIGNMENT_IS_OK(reloc, uint32_t));
397 	assert(ALIGNMENT_IS_OK(reloc_len, uint32_t));
398 	assert(num_pages && hashes && store);
399 	if (!reloc_len) {
400 		assert(!reloc);
401 		return fobj_ro_paged_alloc(num_pages, hashes, store);
402 	}
403 	assert(reloc);
404 
405 	nrels = get_num_rels(num_pages, reloc_offs, reloc, num_relocs);
406 	if (!nrels)
407 		return fobj_ro_paged_alloc(num_pages, hashes, store);
408 
409 	rrp = calloc(1, sizeof(*rrp) + num_pages * sizeof(uint16_t) +
410 			nrels * sizeof(uint16_t));
411 	if (!rrp)
412 		return NULL;
413 	rop_init(&rrp->rop, &ops_ro_reloc_paged, num_pages, hashes, store);
414 	rrp->page_reloc_idx = (uint16_t *)(rrp + 1);
415 	rrp->relocs = rrp->page_reloc_idx + num_pages;
416 	rrp->num_relocs = nrels;
417 	init_rels(rrp, reloc_offs, reloc, num_relocs);
418 
419 	return &rrp->rop.fobj;
420 }
421 
422 static struct fobj_ro_reloc_paged *to_rrp(struct fobj *fobj)
423 {
424 	assert(fobj->ops == &ops_ro_reloc_paged);
425 
426 	return container_of(fobj, struct fobj_ro_reloc_paged, rop.fobj);
427 }
428 
429 static void rrp_free(struct fobj *fobj)
430 {
431 	struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);
432 
433 	rop_uninit(&rrp->rop);
434 	free(rrp);
435 }
436 
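/*
 * Loads and verifies a page like rop_load_page() and then applies the
 * relative relocations recorded for that page by adding the ASLR load
 * offset (boot_mmu_config.load_offset) at each relocated location.
 */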
437 static TEE_Result rrp_load_page(struct fobj *fobj, unsigned int page_idx,
438 				void *va)
439 {
440 	struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);
441 	unsigned int end_rel = rrp->num_relocs;
442 	TEE_Result res = TEE_SUCCESS;
443 	unsigned long *where = NULL;
444 	unsigned int n = 0;
445 
446 	res = rop_load_page_helper(&rrp->rop, page_idx, va);
447 	if (res)
448 		return res;
449 
450 	/* Find the reloc index of the next page to tell when we're done */
451 	for (n = page_idx + 1; n < fobj->num_pages; n++) {
452 		if (rrp->page_reloc_idx[n] != UINT16_MAX) {
453 			end_rel = rrp->page_reloc_idx[n];
454 			break;
455 		}
456 	}
457 
458 	for (n = rrp->page_reloc_idx[page_idx]; n < end_rel; n++) {
459 		where = (void *)((vaddr_t)va + rrp->relocs[n]);
460 		*where += boot_mmu_config.load_offset;
461 	}
462 
463 	return TEE_SUCCESS;
464 }
465 DECLARE_KEEP_PAGER(rrp_load_page);
466 
467 static const struct fobj_ops ops_ro_reloc_paged __rodata_unpaged = {
468 	.free = rrp_free,
469 	.load_page = rrp_load_page,
470 	.save_page = rop_save_page, /* Direct reuse */
471 };
472 #endif /*CFG_CORE_ASLR*/
473 
474 static const struct fobj_ops ops_locked_paged;
475 
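/*
 * Allocates a locked paged fobj. Pages are supplied zero-filled on load
 * and lop_save_page() always fails, so the content only ever lives in the
 * mapped pages themselves; presumably intended for data that must not be
 * paged out while in use.
 */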
476 struct fobj *fobj_locked_paged_alloc(unsigned int num_pages)
477 {
478 	struct fobj *f = NULL;
479 
480 	assert(num_pages);
481 
482 	f = calloc(1, sizeof(*f));
483 	if (!f)
484 		return NULL;
485 
486 	fobj_init(f, &ops_locked_paged, num_pages);
487 
488 	return f;
489 }
490 
491 static void lop_free(struct fobj *fobj)
492 {
493 	assert(fobj->ops == &ops_locked_paged);
494 	fobj_uninit(fobj);
495 	free(fobj);
496 }
497 
498 static TEE_Result lop_load_page(struct fobj *fobj __maybe_unused,
499 				unsigned int page_idx __maybe_unused,
500 				void *va)
501 {
502 	assert(fobj->ops == &ops_locked_paged);
503 	assert(refcount_val(&fobj->refc));
504 	assert(page_idx < fobj->num_pages);
505 
506 	memset(va, 0, SMALL_PAGE_SIZE);
507 
508 	return TEE_SUCCESS;
509 }
510 DECLARE_KEEP_PAGER(lop_load_page);
511 
512 static TEE_Result lop_save_page(struct fobj *fobj __unused,
513 				unsigned int page_idx __unused,
514 				const void *va __unused)
515 {
516 	return TEE_ERROR_GENERIC;
517 }
518 DECLARE_KEEP_PAGER(lop_save_page);
519 
520 static const struct fobj_ops ops_locked_paged __rodata_unpaged = {
521 	.free = lop_free,
522 	.load_page = lop_load_page,
523 	.save_page = lop_save_page,
524 };
525 #endif /*CFG_WITH_PAGER*/
526 
527 #ifndef CFG_PAGED_USER_TA
528 
529 struct fobj_sec_mem {
530 	tee_mm_entry_t *mm;
531 	struct fobj fobj;
532 };
533 
534 static const struct fobj_ops ops_sec_mem;
535 
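/*
 * Allocates a non-paged fobj backed by physically contiguous secure
 * memory from tee_mm_sec_ddr. Instead of load/save callbacks it provides
 * get_pa(), so the pages can be mapped directly.
 */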
536 struct fobj *fobj_sec_mem_alloc(unsigned int num_pages)
537 {
538 	struct fobj_sec_mem *f = calloc(1, sizeof(*f));
539 	size_t size = 0;
540 	void *va = NULL;
541 
542 	if (!f)
543 		return NULL;
544 
545 	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
546 		goto err;
547 
548 	f->mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
549 	if (!f->mm)
550 		goto err;
551 
552 	va = phys_to_virt(tee_mm_get_smem(f->mm), MEM_AREA_TA_RAM);
553 	if (!va)
554 		goto err;
555 
556 	memset(va, 0, size);
557 	f->fobj.ops = &ops_sec_mem;
558 	f->fobj.num_pages = num_pages;
559 	refcount_set(&f->fobj.refc, 1);
560 
561 	return &f->fobj;
562 err:
563 	tee_mm_free(f->mm);
564 	free(f);
565 
566 	return NULL;
567 }
568 
569 static struct fobj_sec_mem *to_sec_mem(struct fobj *fobj)
570 {
571 	assert(fobj->ops == &ops_sec_mem);
572 
573 	return container_of(fobj, struct fobj_sec_mem, fobj);
574 }
575 
576 static void sec_mem_free(struct fobj *fobj)
577 {
578 	struct fobj_sec_mem *f = to_sec_mem(fobj);
579 
580 	assert(!refcount_val(&fobj->refc));
581 	tee_mm_free(f->mm);
582 	free(f);
583 }
584 
585 static paddr_t sec_mem_get_pa(struct fobj *fobj, unsigned int page_idx)
586 {
587 	struct fobj_sec_mem *f = to_sec_mem(fobj);
588 
589 	assert(refcount_val(&fobj->refc));
590 	assert(page_idx < fobj->num_pages);
591 
592 	return tee_mm_get_smem(f->mm) + page_idx * SMALL_PAGE_SIZE;
593 }
594 
595 static const struct fobj_ops ops_sec_mem __rodata_unpaged = {
596 	.free = sec_mem_free,
597 	.get_pa = sec_mem_get_pa,
598 };
599 
600 #endif /*CFG_PAGED_USER_TA*/
601