xref: /optee_os/core/mm/fobj.c (revision aad1cf6bbe80ed145f48e8650b96119d835ee0a0)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2019-2021, Linaro Limited
4  */
5 
6 #include <crypto/crypto.h>
7 #include <crypto/internal_aes-gcm.h>
8 #include <initcall.h>
9 #include <kernel/boot.h>
10 #include <kernel/panic.h>
11 #include <mm/core_memprot.h>
12 #include <mm/core_mmu.h>
13 #include <mm/fobj.h>
14 #include <mm/tee_mm.h>
15 #include <stdlib.h>
16 #include <string.h>
17 #include <tee_api_types.h>
18 #include <types_ext.h>
19 #include <util.h>
20 
21 #ifdef CFG_WITH_PAGER
22 
23 #define RWP_AE_KEY_BITS		256
24 
25 struct rwp_aes_gcm_iv {
26 	uint32_t iv[3];
27 };
28 
29 #define RWP_AES_GCM_TAG_LEN	16
30 
31 struct rwp_state {
32 	uint64_t iv;
33 	uint8_t tag[RWP_AES_GCM_TAG_LEN];
34 };
35 
36 /*
37  * Note that this struct is padded to a size which is a power of 2. This
38  * guarantees that the state will not span two pages, which avoids a corner
39  * case in the pager when making the state available.
40  */
41 struct rwp_state_padded {
42 	struct rwp_state state;
43 	uint64_t pad;
44 };
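/*
 * Editorial sketch, not part of the original file: on a typical 32- or
 * 64-bit ABI sizeof(struct rwp_state) is 8 + 16 = 24 bytes, and the pad
 * member rounds sizeof(struct rwp_state_padded) up to 32, a power of two.
 * Since SMALL_PAGE_SIZE is also a power of two, 32 divides it evenly and
 * no element of an array of rwp_state_padded can straddle a page
 * boundary. Compile-time checks along the lines of the
 * COMPILE_TIME_ASSERT() used in fobj_rw_paged_alloc() below could make
 * both properties explicit:
 *
 *	COMPILE_TIME_ASSERT(IS_POWER_OF_TWO(sizeof(struct rwp_state_padded)));
 *	COMPILE_TIME_ASSERT(!(SMALL_PAGE_SIZE % sizeof(struct rwp_state_padded)));
 */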
45 
46 struct fobj_rwp_unpaged_iv {
47 	uint8_t *store;
48 	struct rwp_state *state;
49 	struct fobj fobj;
50 };
51 
52 struct fobj_rwp_paged_iv {
53 	size_t idx;
54 	struct fobj fobj;
55 };
56 
57 static const struct fobj_ops ops_rwp_paged_iv;
58 static const struct fobj_ops ops_rwp_unpaged_iv;
59 
60 static struct internal_aes_gcm_key rwp_ae_key;
61 
62 static struct rwp_state_padded *rwp_state_base;
63 static uint8_t *rwp_store_base;
64 
65 static void fobj_init(struct fobj *fobj, const struct fobj_ops *ops,
66 		      unsigned int num_pages)
67 {
68 	fobj->ops = ops;
69 	fobj->num_pages = num_pages;
70 	refcount_set(&fobj->refc, 1);
71 	TAILQ_INIT(&fobj->areas);
72 }
73 
74 static void fobj_uninit(struct fobj *fobj)
75 {
76 	assert(!refcount_val(&fobj->refc));
77 	assert(TAILQ_EMPTY(&fobj->areas));
78 	tee_pager_invalidate_fobj(fobj);
79 }
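/*
 * Editorial sketch, not part of the original file: a fobj starts with a
 * reference count of 1 from fobj_init() and is normally released via the
 * fobj_get()/fobj_put() helpers in <mm/fobj.h>; when the count reaches
 * zero, fobj_put() calls fobj->ops->free(), which ends up in
 * fobj_uninit(). Assumed usage pattern:
 *
 *	struct fobj *f = fobj_rw_paged_alloc(4);
 *
 *	if (f) {
 *		// map the pages via the pager, use them, then drop the ref
 *		fobj_put(f);	// ops->free() runs once refc hits zero
 *	}
 */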
80 
81 static TEE_Result rwp_load_page(void *va, struct rwp_state *state,
82 				const uint8_t *src)
83 {
84 	struct rwp_aes_gcm_iv iv = {
85 		.iv = { (vaddr_t)state, state->iv >> 32, state->iv }
86 	};
87 
88 	if (!state->iv) {
89 		/*
90 		 * The IV is still zero, which means that this is a
91 		 * previously unused page.
92 		 */
93 		memset(va, 0, SMALL_PAGE_SIZE);
94 		return TEE_SUCCESS;
95 	}
96 
97 	return internal_aes_gcm_dec(&rwp_ae_key, &iv, sizeof(iv),
98 				    NULL, 0, src, SMALL_PAGE_SIZE, va,
99 				    state->tag, sizeof(state->tag));
100 }
101 
102 static TEE_Result rwp_save_page(const void *va, struct rwp_state *state,
103 				uint8_t *dst)
104 {
105 	size_t tag_len = sizeof(state->tag);
106 	struct rwp_aes_gcm_iv iv = { };
107 
108 	assert(state->iv + 1 > state->iv);
109 
110 	state->iv++;
111 
112 	/*
113 	 * IV is constructed as recommended in section "8.2.1 Deterministic
114 	 * Construction" of "Recommendation for Block Cipher Modes of
115 	 * Operation: Galois/Counter Mode (GCM) and GMAC",
116 	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
117 	 */
118 	iv.iv[0] = (vaddr_t)state;
119 	iv.iv[1] = state->iv >> 32;
120 	iv.iv[2] = state->iv;
121 
122 	return internal_aes_gcm_enc(&rwp_ae_key, &iv, sizeof(iv),
123 				    NULL, 0, va, SMALL_PAGE_SIZE, dst,
124 				    state->tag, &tag_len);
125 }
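/*
 * Editorial sketch, not part of the original file: the 96-bit GCM IV
 * above follows the deterministic construction, with the virtual address
 * of the per-page state as the "fixed field" (unique per page) and the
 * monotonically increasing 64-bit state->iv as the "invocation field"
 * (unique per encryption of that page):
 *
 *	iv.iv[0] = (vaddr_t)state;	// fixed field: identifies the page
 *	iv.iv[1] = state->iv >> 32;	// invocation field, high 32 bits
 *	iv.iv[2] = state->iv;		// invocation field, low 32 bits
 *
 * A state->iv of 0 is reserved to mean "never saved", which is why
 * rwp_load_page() returns an all-zero page in that case and
 * rwp_save_page() increments the counter before encrypting.
 */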
126 
127 static struct rwp_state_padded *idx_to_state_padded(size_t idx)
128 {
129 	assert(rwp_state_base);
130 	return rwp_state_base + idx;
131 }
132 
133 static uint8_t *idx_to_store(size_t idx)
134 {
135 	assert(rwp_store_base);
136 	return rwp_store_base + idx * SMALL_PAGE_SIZE;
137 }
138 
139 struct fobj *fobj_rw_paged_alloc(unsigned int num_pages)
140 {
141 	struct fobj_rwp_paged_iv *rwp = NULL;
142 	tee_mm_entry_t *mm = NULL;
143 	size_t size = 0;
144 
145 	COMPILE_TIME_ASSERT(IS_POWER_OF_TWO(sizeof(struct rwp_state_padded)));
146 	assert(num_pages);
147 
148 	rwp = calloc(1, sizeof(*rwp));
149 	if (!rwp)
150 		return NULL;
151 
152 	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
153 		goto err;
154 	mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
155 	if (!mm)
156 		goto err;
157 	rwp->idx = (tee_mm_get_smem(mm) - tee_mm_sec_ddr.lo) / SMALL_PAGE_SIZE;
158 
159 	memset(idx_to_state_padded(rwp->idx), 0,
160 	       num_pages * sizeof(struct rwp_state_padded));
161 
162 	fobj_init(&rwp->fobj, &ops_rwp_paged_iv, num_pages);
163 
164 	return &rwp->fobj;
165 err:
166 	tee_mm_free(mm);
167 	free(rwp);
168 
169 	return NULL;
170 }
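/*
 * Editorial sketch, not part of the original file: rwp->idx is the page
 * index of the backing store inside the tee_mm_sec_ddr pool, so page
 * page_idx of this fobj is handled as
 *
 *	store = rwp_store_base + (rwp->idx + page_idx) * SMALL_PAGE_SIZE;
 *	state = &rwp_state_base[rwp->idx + page_idx].state;
 *
 * which is what idx_to_store() and idx_to_state_padded() combine to in
 * the load/save callbacks below.
 */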
171 
172 static struct fobj_rwp_paged_iv *to_rwp_paged_iv(struct fobj *fobj)
173 {
174 	assert(fobj->ops == &ops_rwp_paged_iv);
175 
176 	return container_of(fobj, struct fobj_rwp_paged_iv, fobj);
177 }
178 
179 static TEE_Result rwp_paged_iv_load_page(struct fobj *fobj,
180 					 unsigned int page_idx, void *va)
181 {
182 	struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
183 	uint8_t *src = idx_to_store(rwp->idx) + page_idx * SMALL_PAGE_SIZE;
184 	struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);
185 
186 	assert(refcount_val(&fobj->refc));
187 	assert(page_idx < fobj->num_pages);
188 
189 	return rwp_load_page(va, &st->state, src);
190 }
191 DECLARE_KEEP_PAGER(rwp_paged_iv_load_page);
192 
193 static TEE_Result rwp_paged_iv_save_page(struct fobj *fobj,
194 					 unsigned int page_idx, const void *va)
195 {
196 	struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
197 	uint8_t *dst = idx_to_store(rwp->idx) + page_idx * SMALL_PAGE_SIZE;
198 	struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);
199 
200 	assert(page_idx < fobj->num_pages);
201 
202 	if (!refcount_val(&fobj->refc)) {
203 		/*
204 		 * This fobj is being torn down; it just hasn't had the time
205 		 * to call tee_pager_invalidate_fobj() yet.
206 		 */
207 		assert(TAILQ_EMPTY(&fobj->areas));
208 		return TEE_SUCCESS;
209 	}
210 
211 	return rwp_save_page(va, &st->state, dst);
212 }
213 DECLARE_KEEP_PAGER(rwp_paged_iv_save_page);
214 
215 static void rwp_paged_iv_free(struct fobj *fobj)
216 {
217 	struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
218 	paddr_t pa = rwp->idx * SMALL_PAGE_SIZE + tee_mm_sec_ddr.lo;
219 	tee_mm_entry_t *mm = tee_mm_find(&tee_mm_sec_ddr, pa);
220 
221 	assert(mm);
222 
223 	fobj_uninit(fobj);
224 	tee_mm_free(mm);
225 	free(rwp);
226 }
227 
228 static vaddr_t rwp_paged_iv_get_iv_vaddr(struct fobj *fobj,
229 					 unsigned int page_idx)
230 {
231 	struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
232 	struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);
233 
234 	assert(page_idx < fobj->num_pages);
235 	return (vaddr_t)&st->state & ~SMALL_PAGE_MASK;
236 }
237 DECLARE_KEEP_PAGER(rwp_paged_iv_get_iv_vaddr);
238 
239 static const struct fobj_ops ops_rwp_paged_iv __rodata_unpaged = {
240 	.free = rwp_paged_iv_free,
241 	.load_page = rwp_paged_iv_load_page,
242 	.save_page = rwp_paged_iv_save_page,
243 	.get_iv_vaddr = rwp_paged_iv_get_iv_vaddr,
244 };
245 
246 static struct fobj_rwp_unpaged_iv *to_rwp_unpaged_iv(struct fobj *fobj)
247 {
248 	assert(fobj->ops == &ops_rwp_unpaged_iv);
249 
250 	return container_of(fobj, struct fobj_rwp_unpaged_iv, fobj);
251 }
252 
253 static TEE_Result rwp_unpaged_iv_load_page(struct fobj *fobj,
254 					   unsigned int page_idx, void *va)
255 {
256 	struct fobj_rwp_unpaged_iv *rwp = to_rwp_unpaged_iv(fobj);
257 	uint8_t *src = rwp->store + page_idx * SMALL_PAGE_SIZE;
258 
259 	assert(refcount_val(&fobj->refc));
260 	assert(page_idx < fobj->num_pages);
261 
262 	return rwp_load_page(va, rwp->state + page_idx, src);
263 }
264 DECLARE_KEEP_PAGER(rwp_unpaged_iv_load_page);
265 
266 static TEE_Result rwp_unpaged_iv_save_page(struct fobj *fobj,
267 					   unsigned int page_idx,
268 					   const void *va)
269 {
270 	struct fobj_rwp_unpaged_iv *rwp = to_rwp_unpaged_iv(fobj);
271 	uint8_t *dst = rwp->store + page_idx * SMALL_PAGE_SIZE;
272 
273 	assert(page_idx < fobj->num_pages);
274 
275 	if (!refcount_val(&fobj->refc)) {
276 		/*
277 		 * This fobj is being torn down; it just hasn't had the time
278 		 * to call tee_pager_invalidate_fobj() yet.
279 		 */
280 		assert(TAILQ_EMPTY(&fobj->areas));
281 		return TEE_SUCCESS;
282 	}
283 
284 	return rwp_save_page(va, rwp->state + page_idx, dst);
285 }
286 DECLARE_KEEP_PAGER(rwp_unpaged_iv_save_page);
287 
288 static void rwp_unpaged_iv_free(struct fobj *fobj __unused)
289 {
290 	panic();
291 }
292 
293 static const struct fobj_ops ops_rwp_unpaged_iv __rodata_unpaged = {
294 	.free = rwp_unpaged_iv_free,
295 	.load_page = rwp_unpaged_iv_load_page,
296 	.save_page = rwp_unpaged_iv_save_page,
297 };
298 
299 static TEE_Result rwp_init(void)
300 {
301 	uint8_t key[RWP_AE_KEY_BITS / 8] = { 0 };
302 	struct fobj_rwp_unpaged_iv *rwp = NULL;
303 	tee_mm_entry_t *mm = NULL;
304 	size_t num_pool_pages = 0;
305 	size_t num_fobj_pages = 0;
306 	size_t sz = 0;
307 
308 	if (crypto_rng_read(key, sizeof(key)) != TEE_SUCCESS)
309 		panic("failed to generate random");
310 	if (crypto_aes_expand_enc_key(key, sizeof(key), rwp_ae_key.data,
311 				      sizeof(rwp_ae_key.data),
312 				      &rwp_ae_key.rounds))
313 		panic("failed to expand key");
314 
315 	assert(tee_mm_sec_ddr.hi > tee_mm_sec_ddr.lo);
316 	sz = tee_mm_sec_ddr.hi - tee_mm_sec_ddr.lo;
317 	assert(!(sz & SMALL_PAGE_MASK));
318 
319 	num_pool_pages = sz / SMALL_PAGE_SIZE;
320 	num_fobj_pages = ROUNDUP(num_pool_pages * sizeof(*rwp_state_base),
321 				 SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
322 
323 	/*
324 	 * Each page in the pool needs a struct rwp_state.
325 	 *
326 	 * This isn't entirely true: pages not used by
327 	 * fobj_rw_paged_alloc() don't need one. A future optimization
328 	 * may try to avoid allocating for such pages.
329 	 */
330 
331 	rwp = calloc(1, sizeof(*rwp));
332 	if (!rwp)
333 		panic();
334 
335 	rwp->state = calloc(num_fobj_pages, sizeof(*rwp->state));
336 	if (!rwp->state)
337 		panic();
338 	mm = tee_mm_alloc(&tee_mm_sec_ddr, num_fobj_pages * SMALL_PAGE_SIZE);
339 	if (!mm)
340 		panic();
341 	rwp->store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM);
342 	assert(rwp->store);
343 
344 	fobj_init(&rwp->fobj, &ops_rwp_unpaged_iv, num_fobj_pages);
345 
346 	rwp_state_base = (void *)tee_pager_init_iv_area(&rwp->fobj);
347 	assert(rwp_state_base);
348 
349 	rwp_store_base = phys_to_virt(tee_mm_sec_ddr.lo, MEM_AREA_TA_RAM);
350 	assert(rwp_store_base);
351 
352 	return TEE_SUCCESS;
353 }
354 driver_init_late(rwp_init);
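/*
 * Editorial sketch, not part of the original file, assuming a
 * hypothetical 4 MiB tee_mm_sec_ddr pool, 4 KiB small pages and a
 * 32-byte struct rwp_state_padded:
 *
 *	num_pool_pages = 4 MiB / 4 KiB                   = 1024
 *	state array    = 1024 * 32 bytes                 = 32 KiB
 *	num_fobj_pages = ROUNDUP(32 KiB, 4 KiB) / 4 KiB  = 8
 *
 * that is, eight pages of the pool are claimed here to back the paged
 * rwp_state array and the rest remain available to fobj_rw_paged_alloc()
 * and fobj_sec_mem_alloc().
 */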
355 
356 struct fobj_rop {
357 	uint8_t *hashes;
358 	uint8_t *store;
359 	struct fobj fobj;
360 };
361 
362 static const struct fobj_ops ops_ro_paged;
363 
364 static void rop_init(struct fobj_rop *rop, const struct fobj_ops *ops,
365 		     unsigned int num_pages, void *hashes, void *store)
366 {
367 	rop->hashes = hashes;
368 	rop->store = store;
369 	fobj_init(&rop->fobj, ops, num_pages);
370 }
371 
372 struct fobj *fobj_ro_paged_alloc(unsigned int num_pages, void *hashes,
373 				 void *store)
374 {
375 	struct fobj_rop *rop = NULL;
376 
377 	assert(num_pages && hashes && store);
378 
379 	rop = calloc(1, sizeof(*rop));
380 	if (!rop)
381 		return NULL;
382 
383 	rop_init(rop, &ops_ro_paged, num_pages, hashes, store);
384 
385 	return &rop->fobj;
386 }
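/*
 * Editorial sketch, not part of the original file: hypothetical
 * caller-side usage, with hashes pointing at num_pages precomputed
 * SHA-256 digests (TEE_SHA256_HASH_SIZE bytes each) and store at the
 * backing copy of num_pages * SMALL_PAGE_SIZE bytes that
 * rop_load_page() verifies against:
 *
 *	struct fobj *f = fobj_ro_paged_alloc(num_pages, hashes, store);
 *
 *	if (!f)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *	// ops_ro_paged.load_page() copies the page from store and checks
 *	// the per-page hash each time the pager faults the page back in.
 */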
387 
388 static struct fobj_rop *to_rop(struct fobj *fobj)
389 {
390 	assert(fobj->ops == &ops_ro_paged);
391 
392 	return container_of(fobj, struct fobj_rop, fobj);
393 }
394 
395 static void rop_uninit(struct fobj_rop *rop)
396 {
397 	fobj_uninit(&rop->fobj);
398 	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rop->store)));
399 	free(rop->hashes);
400 }
401 
402 static void rop_free(struct fobj *fobj)
403 {
404 	struct fobj_rop *rop = to_rop(fobj);
405 
406 	rop_uninit(rop);
407 	free(rop);
408 }
409 
410 static TEE_Result rop_load_page_helper(struct fobj_rop *rop,
411 				       unsigned int page_idx, void *va)
412 {
413 	const uint8_t *hash = rop->hashes + page_idx * TEE_SHA256_HASH_SIZE;
414 	const uint8_t *src = rop->store + page_idx * SMALL_PAGE_SIZE;
415 
416 	assert(refcount_val(&rop->fobj.refc));
417 	assert(page_idx < rop->fobj.num_pages);
418 	memcpy(va, src, SMALL_PAGE_SIZE);
419 
420 	return hash_sha256_check(hash, va, SMALL_PAGE_SIZE);
421 }
422 
423 static TEE_Result rop_load_page(struct fobj *fobj, unsigned int page_idx,
424 				void *va)
425 {
426 	return rop_load_page_helper(to_rop(fobj), page_idx, va);
427 }
428 DECLARE_KEEP_PAGER(rop_load_page);
429 
430 static TEE_Result rop_save_page(struct fobj *fobj __unused,
431 				unsigned int page_idx __unused,
432 				const void *va __unused)
433 {
434 	return TEE_ERROR_GENERIC;
435 }
436 DECLARE_KEEP_PAGER(rop_save_page);
437 
438 static const struct fobj_ops ops_ro_paged __rodata_unpaged = {
439 	.free = rop_free,
440 	.load_page = rop_load_page,
441 	.save_page = rop_save_page,
442 };
443 
444 #ifdef CFG_CORE_ASLR
445 /*
446  * When using relocated pages, the relocation information must be applied
447  * before the pages can be used. With read-only paging the content is only
448  * integrity protected, so relocations cannot be applied to pages in the
449  * less secure "store" or the load_address selected by ASLR could be given
450  * away. This means that each time a page has been loaded and verified it
451  * has to have its relocation information applied before it can be used.
452  *
453  * Only relative relocations are supported, which allows a rather compact
454  * representation of the needed relocation information in this struct.
455  * r_offset is replaced with the offset into the page that needs to be
456  * updated; this number can never be larger than SMALL_PAGE_SIZE, so a
457  * uint16_t can be used to represent it.
458  *
459  * All relocations are converted and stored in @relocs. @page_reloc_idx is
460  * an array of length @rop.fobj.num_pages with an entry for each page. If
461  * @page_reloc_idx[page_idx] isn't UINT16_MAX it's an index into @relocs.
462  */
463 struct fobj_ro_reloc_paged {
464 	uint16_t *page_reloc_idx;
465 	uint16_t *relocs;
466 	unsigned int num_relocs;
467 	struct fobj_rop rop;
468 };
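/*
 * Editorial worked example, not part of the original file, assuming a
 * hypothetical fobj of 3 pages at reloc_offs 0x10000 with relative
 * relocations at 0x10008, 0x10010 and 0x12020 in the original table:
 *
 *	r (offset into fobj)  : 0x8, 0x10, 0x2020
 *	page_reloc_idx[]      : { 0, UINT16_MAX, 2 }	// page 1: no relocs
 *	relocs[]              : { 0x8, 0x10, 0x20 }	// offsets in a page
 *	num_relocs            : 3
 *
 * rrp_load_page() then applies relocs[] from page_reloc_idx[page_idx] up
 * to (but not including) the first index belonging to a later page,
 * adding boot_mmu_config.load_offset to each word.
 */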
469 
470 static const struct fobj_ops ops_ro_reloc_paged;
471 
472 static unsigned int get_num_rels(unsigned int num_pages,
473 				 unsigned int reloc_offs,
474 				 const uint32_t *reloc, unsigned int num_relocs)
475 {
476 	const unsigned int align_mask __maybe_unused = sizeof(long) - 1;
477 	unsigned int nrels = 0;
478 	unsigned int n = 0;
479 	vaddr_t offs = 0;
480 
481 	/*
482 	 * Count the number of relocations which are needed for these
483 	 * pages. Also check that the data is well formed: only expected
484 	 * relocations, sorted in order of the address they apply to.
485 	 */
486 	for (; n < num_relocs; n++) {
487 		assert(ALIGNMENT_IS_OK(reloc[n], unsigned long));
488 		assert(offs < reloc[n]);	/* check that it's sorted */
489 		offs = reloc[n];
490 		if (offs >= reloc_offs &&
491 		    offs <= reloc_offs + num_pages * SMALL_PAGE_SIZE)
492 			nrels++;
493 	}
494 
495 	return nrels;
496 }
497 
498 static void init_rels(struct fobj_ro_reloc_paged *rrp, unsigned int reloc_offs,
499 		      const uint32_t *reloc, unsigned int num_relocs)
500 {
501 	unsigned int npg = rrp->rop.fobj.num_pages;
502 	unsigned int pg_idx = 0;
503 	unsigned int reln = 0;
504 	unsigned int n = 0;
505 	uint32_t r = 0;
506 
507 	for (n = 0; n < npg; n++)
508 		rrp->page_reloc_idx[n] = UINT16_MAX;
509 
510 	for (n = 0; n < num_relocs ; n++) {
511 		if (reloc[n] < reloc_offs)
512 			continue;
513 
514 		/* r is the offset from beginning of this fobj */
515 		r = reloc[n] - reloc_offs;
516 
517 		pg_idx = r / SMALL_PAGE_SIZE;
518 		if (pg_idx >= npg)
519 			break;
520 
521 		if (rrp->page_reloc_idx[pg_idx] == UINT16_MAX)
522 			rrp->page_reloc_idx[pg_idx] = reln;
523 		rrp->relocs[reln] = r - pg_idx * SMALL_PAGE_SIZE;
524 		reln++;
525 	}
526 
527 	assert(reln == rrp->num_relocs);
528 }
529 
530 struct fobj *fobj_ro_reloc_paged_alloc(unsigned int num_pages, void *hashes,
531 				       unsigned int reloc_offs,
532 				       const void *reloc,
533 				       unsigned int reloc_len, void *store)
534 {
535 	struct fobj_ro_reloc_paged *rrp = NULL;
536 	const unsigned int num_relocs = reloc_len / sizeof(uint32_t);
537 	unsigned int nrels = 0;
538 
539 	assert(ALIGNMENT_IS_OK(reloc, uint32_t));
540 	assert(ALIGNMENT_IS_OK(reloc_len, uint32_t));
541 	assert(num_pages && hashes && store);
542 	if (!reloc_len) {
543 		assert(!reloc);
544 		return fobj_ro_paged_alloc(num_pages, hashes, store);
545 	}
546 	assert(reloc);
547 
548 	nrels = get_num_rels(num_pages, reloc_offs, reloc, num_relocs);
549 	if (!nrels)
550 		return fobj_ro_paged_alloc(num_pages, hashes, store);
551 
552 	rrp = calloc(1, sizeof(*rrp) + num_pages * sizeof(uint16_t) +
553 			nrels * sizeof(uint16_t));
554 	if (!rrp)
555 		return NULL;
556 	rop_init(&rrp->rop, &ops_ro_reloc_paged, num_pages, hashes, store);
557 	rrp->page_reloc_idx = (uint16_t *)(rrp + 1);
558 	rrp->relocs = rrp->page_reloc_idx + num_pages;
559 	rrp->num_relocs = nrels;
560 	init_rels(rrp, reloc_offs, reloc, num_relocs);
561 
562 	return &rrp->rop.fobj;
563 }
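/*
 * Editorial note, not part of the original file: the calloc() above packs
 * everything into a single allocation laid out as
 *
 *	[ struct fobj_ro_reloc_paged | page_reloc_idx[num_pages] | relocs[nrels] ]
 *
 * which is why page_reloc_idx is set to (uint16_t *)(rrp + 1) and relocs
 * to page_reloc_idx + num_pages.
 */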
564 
565 static struct fobj_ro_reloc_paged *to_rrp(struct fobj *fobj)
566 {
567 	assert(fobj->ops == &ops_ro_reloc_paged);
568 
569 	return container_of(fobj, struct fobj_ro_reloc_paged, rop.fobj);
570 }
571 
572 static void rrp_free(struct fobj *fobj)
573 {
574 	struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);
575 
576 	rop_uninit(&rrp->rop);
577 	free(rrp);
578 }
579 
580 static TEE_Result rrp_load_page(struct fobj *fobj, unsigned int page_idx,
581 				void *va)
582 {
583 	struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);
584 	unsigned int end_rel = rrp->num_relocs;
585 	TEE_Result res = TEE_SUCCESS;
586 	unsigned long *where = NULL;
587 	unsigned int n = 0;
588 
589 	res = rop_load_page_helper(&rrp->rop, page_idx, va);
590 	if (res)
591 		return res;
592 
593 	/* Find the reloc index of the next page to tell when we're done */
594 	for (n = page_idx + 1; n < fobj->num_pages; n++) {
595 		if (rrp->page_reloc_idx[n] != UINT16_MAX) {
596 			end_rel = rrp->page_reloc_idx[n];
597 			break;
598 		}
599 	}
600 
601 	for (n = rrp->page_reloc_idx[page_idx]; n < end_rel; n++) {
602 		where = (void *)((vaddr_t)va + rrp->relocs[n]);
603 		*where += boot_mmu_config.load_offset;
604 	}
605 
606 	return TEE_SUCCESS;
607 }
608 DECLARE_KEEP_PAGER(rrp_load_page);
609 
610 static const struct fobj_ops ops_ro_reloc_paged __rodata_unpaged = {
611 	.free = rrp_free,
612 	.load_page = rrp_load_page,
613 	.save_page = rop_save_page, /* Direct reuse */
614 };
615 #endif /*CFG_CORE_ASLR*/
616 
617 static const struct fobj_ops ops_locked_paged;
618 
619 struct fobj *fobj_locked_paged_alloc(unsigned int num_pages)
620 {
621 	struct fobj *f = NULL;
622 
623 	assert(num_pages);
624 
625 	f = calloc(1, sizeof(*f));
626 	if (!f)
627 		return NULL;
628 
629 	fobj_init(f, &ops_locked_paged, num_pages);
630 
631 	return f;
632 }
633 
634 static void lop_free(struct fobj *fobj)
635 {
636 	assert(fobj->ops == &ops_locked_paged);
637 	fobj_uninit(fobj);
638 	free(fobj);
639 }
640 
641 static TEE_Result lop_load_page(struct fobj *fobj __maybe_unused,
642 				unsigned int page_idx __maybe_unused,
643 				void *va)
644 {
645 	assert(fobj->ops == &ops_locked_paged);
646 	assert(refcount_val(&fobj->refc));
647 	assert(page_idx < fobj->num_pages);
648 
649 	memset(va, 0, SMALL_PAGE_SIZE);
650 
651 	return TEE_SUCCESS;
652 }
653 DECLARE_KEEP_PAGER(lop_load_page);
654 
655 static TEE_Result lop_save_page(struct fobj *fobj __unused,
656 				unsigned int page_idx __unused,
657 				const void *va __unused)
658 {
659 	return TEE_ERROR_GENERIC;
660 }
661 DECLARE_KEEP_PAGER(lop_save_page);
662 
663 static const struct fobj_ops ops_locked_paged __rodata_unpaged = {
664 	.free = lop_free,
665 	.load_page = lop_load_page,
666 	.save_page = lop_save_page,
667 };
668 #endif /*CFG_WITH_PAGER*/
669 
670 #ifndef CFG_PAGED_USER_TA
671 
672 struct fobj_sec_mem {
673 	tee_mm_entry_t *mm;
674 	struct fobj fobj;
675 };
676 
677 static const struct fobj_ops ops_sec_mem;
678 
679 struct fobj *fobj_sec_mem_alloc(unsigned int num_pages)
680 {
681 	struct fobj_sec_mem *f = calloc(1, sizeof(*f));
682 	size_t size = 0;
683 	void *va = NULL;
684 
685 	if (!f)
686 		return NULL;
687 
688 	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
689 		goto err;
690 
691 	f->mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
692 	if (!f->mm)
693 		goto err;
694 
695 	va = phys_to_virt(tee_mm_get_smem(f->mm), MEM_AREA_TA_RAM);
696 	if (!va)
697 		goto err;
698 
699 	memset(va, 0, size);
700 	f->fobj.ops = &ops_sec_mem;
701 	f->fobj.num_pages = num_pages;
702 	refcount_set(&f->fobj.refc, 1);
703 
704 	return &f->fobj;
705 err:
706 	tee_mm_free(f->mm);
707 	free(f);
708 
709 	return NULL;
710 }
711 
712 static struct fobj_sec_mem *to_sec_mem(struct fobj *fobj)
713 {
714 	assert(fobj->ops == &ops_sec_mem);
715 
716 	return container_of(fobj, struct fobj_sec_mem, fobj);
717 }
718 
719 static void sec_mem_free(struct fobj *fobj)
720 {
721 	struct fobj_sec_mem *f = to_sec_mem(fobj);
722 
723 	assert(!refcount_val(&fobj->refc));
724 	tee_mm_free(f->mm);
725 	free(f);
726 }
727 
728 static paddr_t sec_mem_get_pa(struct fobj *fobj, unsigned int page_idx)
729 {
730 	struct fobj_sec_mem *f = to_sec_mem(fobj);
731 
732 	assert(refcount_val(&fobj->refc));
733 	assert(page_idx < fobj->num_pages);
734 
735 	return tee_mm_get_smem(f->mm) + page_idx * SMALL_PAGE_SIZE;
736 }
737 
738 static const struct fobj_ops ops_sec_mem __rodata_unpaged = {
739 	.free = sec_mem_free,
740 	.get_pa = sec_mem_get_pa,
741 };
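/*
 * Editorial sketch, not part of the original file: hypothetical usage
 * when user TA paging (CFG_PAGED_USER_TA) is disabled; the pages are
 * plain, physically contiguous secure memory and the mapping code can
 * query the physical address directly:
 *
 *	struct fobj *f = fobj_sec_mem_alloc(num_pages);
 *	paddr_t pa = 0;
 *
 *	if (!f)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *	pa = f->ops->get_pa(f, 0);	// first page of the allocation
 *	...
 *	fobj_put(f);	// sec_mem_free() releases the tee_mm entry once
 *			// the refcount drops to zero
 */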
742 
743 #endif /*!CFG_PAGED_USER_TA*/
744