xref: /optee_os/core/mm/fobj.c (revision 77bdbf67c42209142ef43129e01113d29d9c62f6)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2019-2021, Linaro Limited
4  */
5 
6 #include <config.h>
7 #include <crypto/crypto.h>
8 #include <crypto/internal_aes-gcm.h>
9 #include <initcall.h>
10 #include <kernel/boot.h>
11 #include <kernel/panic.h>
12 #include <mm/core_memprot.h>
13 #include <mm/core_mmu.h>
14 #include <mm/fobj.h>
15 #include <mm/tee_mm.h>
16 #include <stdlib.h>
17 #include <string.h>
18 #include <tee_api_types.h>
19 #include <types_ext.h>
20 #include <util.h>
21 
22 #ifdef CFG_WITH_PAGER
23 
24 #define RWP_AE_KEY_BITS		256
25 
26 struct rwp_aes_gcm_iv {
27 	uint32_t iv[3];
28 };
29 
30 #define RWP_AES_GCM_TAG_LEN	16
31 
32 struct rwp_state {
33 	uint64_t iv;
34 	uint8_t tag[RWP_AES_GCM_TAG_LEN];
35 };
36 
37 /*
38  * Note that this struct is padded to a size which is a power of 2. This
39  * guarantees that the state will never span two pages, which avoids a
40  * corner case in the pager when making the state available.
41  */
42 struct rwp_state_padded {
43 	struct rwp_state state;
44 	uint64_t pad;
45 };
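
/*
 * Size check (illustrative, assuming the usual ABI layout): sizeof(struct
 * rwp_state) is 8 + 16 = 24 bytes, so the pad field brings struct
 * rwp_state_padded up to 32 bytes. Since SMALL_PAGE_SIZE is a multiple of
 * 32, element N of a page-aligned array of these structs always lies
 * entirely within one page, as required by the comment above (see also the
 * COMPILE_TIME_ASSERT() in rwp_paged_iv_alloc()).
 */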
46 
47 struct fobj_rwp_unpaged_iv {
48 	uint8_t *store;
49 	struct rwp_state *state;
50 	struct fobj fobj;
51 };
52 
53 struct fobj_rwp_paged_iv {
54 	size_t idx;
55 	struct fobj fobj;
56 };
57 
58 static const struct fobj_ops ops_rwp_paged_iv;
59 static const struct fobj_ops ops_rwp_unpaged_iv;
60 
61 static struct internal_aes_gcm_key rwp_ae_key;
62 
63 static struct rwp_state_padded *rwp_state_base;
64 static uint8_t *rwp_store_base;
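
/*
 * State used by the paged-IV flavor (CFG_CORE_PAGE_TAG_AND_IV=y):
 * rwp_state_base points at a pager-backed array with one rwp_state_padded
 * per page in the tee_mm_sec_ddr pool, and rwp_store_base is the virtual
 * address of the pool itself, where the encrypted pages are kept. Both are
 * set up by rwp_init() below. The unpaged-IV flavor keeps its state in
 * heap memory instead and does not use these.
 */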
65 
66 static void fobj_init(struct fobj *fobj, const struct fobj_ops *ops,
67 		      unsigned int num_pages)
68 {
69 	fobj->ops = ops;
70 	fobj->num_pages = num_pages;
71 	refcount_set(&fobj->refc, 1);
72 	TAILQ_INIT(&fobj->areas);
73 }
74 
75 static void fobj_uninit(struct fobj *fobj)
76 {
77 	assert(!refcount_val(&fobj->refc));
78 	assert(TAILQ_EMPTY(&fobj->areas));
79 	tee_pager_invalidate_fobj(fobj);
80 }
81 
82 static TEE_Result rwp_load_page(void *va, struct rwp_state *state,
83 				const uint8_t *src)
84 {
85 	struct rwp_aes_gcm_iv iv = {
86 		.iv = { (vaddr_t)state, state->iv >> 32, state->iv }
87 	};
88 
89 	if (!state->iv) {
90 		/*
91 		 * The IV is still zero, which means that this is a
92 		 * previously unused page.
93 		 */
94 		memset(va, 0, SMALL_PAGE_SIZE);
95 		return TEE_SUCCESS;
96 	}
97 
98 	return internal_aes_gcm_dec(&rwp_ae_key, &iv, sizeof(iv),
99 				    NULL, 0, src, SMALL_PAGE_SIZE, va,
100 				    state->tag, sizeof(state->tag));
101 }
102 
103 static TEE_Result rwp_save_page(const void *va, struct rwp_state *state,
104 				uint8_t *dst)
105 {
106 	size_t tag_len = sizeof(state->tag);
107 	struct rwp_aes_gcm_iv iv = { };
108 
109 	assert(state->iv + 1 > state->iv);
110 
111 	state->iv++;
112 
113 	/*
114 	 * IV is constructed as recommended in section "8.2.1 Deterministic
115 	 * Construction" of "Recommendation for Block Cipher Modes of
116 	 * Operation: Galois/Counter Mode (GCM) and GMAC",
117 	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
118 	 */
119 	iv.iv[0] = (vaddr_t)state;
120 	iv.iv[1] = state->iv >> 32;
121 	iv.iv[2] = state->iv;
122 
123 	return internal_aes_gcm_enc(&rwp_ae_key, &iv, sizeof(iv),
124 				    NULL, 0, va, SMALL_PAGE_SIZE, dst,
125 				    state->tag, &tag_len);
126 }
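
/*
 * IV layout used by rwp_save_page()/rwp_load_page() (96 bits in total),
 * following the deterministic construction referenced above:
 *
 *	iv[0] = (uint32_t)(vaddr_t)state   per-state "fixed" field
 *	iv[1] = state->iv >> 32            invocation counter, high word
 *	iv[2] = state->iv                  invocation counter, low word
 *
 * The counter is incremented before each encryption and the assert() in
 * rwp_save_page() catches (in debug builds) the counter wrapping, so an IV
 * is not reused with rwp_ae_key.
 */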
127 
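/*
 * Helpers for the paged-IV flavor: both arrays are indexed by the page's
 * position in the tee_mm_sec_ddr pool, that is, pool page N has its
 * encrypted copy at rwp_store_base + N * SMALL_PAGE_SIZE and its IV/tag
 * state in rwp_state_base[N].
 */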
128 static struct rwp_state_padded *idx_to_state_padded(size_t idx)
129 {
130 	assert(rwp_state_base);
131 	return rwp_state_base + idx;
132 }
133 
134 static uint8_t *idx_to_store(size_t idx)
135 {
136 	assert(rwp_store_base);
137 	return rwp_store_base + idx * SMALL_PAGE_SIZE;
138 }
139 
140 static struct fobj *rwp_paged_iv_alloc(unsigned int num_pages)
141 {
142 	struct fobj_rwp_paged_iv *rwp = NULL;
143 	tee_mm_entry_t *mm = NULL;
144 	size_t size = 0;
145 
146 	COMPILE_TIME_ASSERT(IS_POWER_OF_TWO(sizeof(struct rwp_state_padded)));
147 
148 	rwp = calloc(1, sizeof(*rwp));
149 	if (!rwp)
150 		return NULL;
151 
152 	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
153 		goto err;
154 	mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
155 	if (!mm)
156 		goto err;
157 	rwp->idx = (tee_mm_get_smem(mm) - tee_mm_sec_ddr.lo) / SMALL_PAGE_SIZE;
158 
159 	memset(idx_to_state_padded(rwp->idx), 0,
160 	       num_pages * sizeof(struct rwp_state_padded));
161 
162 	fobj_init(&rwp->fobj, &ops_rwp_paged_iv, num_pages);
163 
164 	return &rwp->fobj;
165 err:
166 	tee_mm_free(mm);
167 	free(rwp);
168 
169 	return NULL;
170 }
171 
172 static struct fobj_rwp_paged_iv *to_rwp_paged_iv(struct fobj *fobj)
173 {
174 	assert(fobj->ops == &ops_rwp_paged_iv);
175 
176 	return container_of(fobj, struct fobj_rwp_paged_iv, fobj);
177 }
178 
179 static TEE_Result rwp_paged_iv_load_page(struct fobj *fobj,
180 					 unsigned int page_idx, void *va)
181 {
182 	struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
183 	uint8_t *src = idx_to_store(rwp->idx) + page_idx * SMALL_PAGE_SIZE;
184 	struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);
185 
186 	assert(refcount_val(&fobj->refc));
187 	assert(page_idx < fobj->num_pages);
188 
189 	return rwp_load_page(va, &st->state, src);
190 }
191 DECLARE_KEEP_PAGER(rwp_paged_iv_load_page);
192 
193 static TEE_Result rwp_paged_iv_save_page(struct fobj *fobj,
194 					 unsigned int page_idx, const void *va)
195 {
196 	struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
197 	uint8_t *dst = idx_to_store(rwp->idx) + page_idx * SMALL_PAGE_SIZE;
198 	struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);
199 
200 	assert(page_idx < fobj->num_pages);
201 
202 	if (!refcount_val(&fobj->refc)) {
203 		/*
204 		 * This fobj is being torn down; tee_pager_invalidate_fobj()
205 		 * just hasn't been called yet.
206 		 */
207 		assert(TAILQ_EMPTY(&fobj->areas));
208 		return TEE_SUCCESS;
209 	}
210 
211 	return rwp_save_page(va, &st->state, dst);
212 }
213 DECLARE_KEEP_PAGER(rwp_paged_iv_save_page);
214 
215 static void rwp_paged_iv_free(struct fobj *fobj)
216 {
217 	struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
218 	paddr_t pa = rwp->idx * SMALL_PAGE_SIZE + tee_mm_sec_ddr.lo;
219 	tee_mm_entry_t *mm = tee_mm_find(&tee_mm_sec_ddr, pa);
220 
221 	assert(mm);
222 
223 	fobj_uninit(fobj);
224 	tee_mm_free(mm);
225 	free(rwp);
226 }
227 
228 static vaddr_t rwp_paged_iv_get_iv_vaddr(struct fobj *fobj,
229 					 unsigned int page_idx)
230 {
231 	struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
232 	struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);
233 
234 	assert(page_idx < fobj->num_pages);
235 	return (vaddr_t)&st->state & ~SMALL_PAGE_MASK;
236 }
237 DECLARE_KEEP_PAGER(rwp_paged_iv_get_iv_vaddr);
238 
239 static const struct fobj_ops ops_rwp_paged_iv __rodata_unpaged = {
240 	.free = rwp_paged_iv_free,
241 	.load_page = rwp_paged_iv_load_page,
242 	.save_page = rwp_paged_iv_save_page,
243 	.get_iv_vaddr = rwp_paged_iv_get_iv_vaddr,
244 };
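
/*
 * Compared to ops_rwp_unpaged_iv below, this variant additionally provides
 * get_iv_vaddr() so the pager can find the (paged) page holding the IV/tag
 * state and make it available before load_page()/save_page() need to
 * dereference it.
 */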
245 
246 static struct fobj *rwp_unpaged_iv_alloc(unsigned int num_pages)
247 {
248 	struct fobj_rwp_unpaged_iv *rwp = NULL;
249 	tee_mm_entry_t *mm = NULL;
250 	size_t size = 0;
251 
252 	rwp = calloc(1, sizeof(*rwp));
253 	if (!rwp)
254 		return NULL;
255 
256 	rwp->state = calloc(num_pages, sizeof(*rwp->state));
257 	if (!rwp->state)
258 		goto err_free_rwp;
259 
260 	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
261 		goto err_free_state;
262 	mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
263 	if (!mm)
264 		goto err_free_state;
265 	rwp->store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM);
266 	assert(rwp->store);
267 
268 	fobj_init(&rwp->fobj, &ops_rwp_unpaged_iv, num_pages);
269 
270 	return &rwp->fobj;
271 
272 err_free_state:
273 	free(rwp->state);
274 err_free_rwp:
275 	free(rwp);
276 	return NULL;
277 }
278 
279 static struct fobj_rwp_unpaged_iv *to_rwp_unpaged_iv(struct fobj *fobj)
280 {
281 	assert(fobj->ops == &ops_rwp_unpaged_iv);
282 
283 	return container_of(fobj, struct fobj_rwp_unpaged_iv, fobj);
284 }
285 
286 static TEE_Result rwp_unpaged_iv_load_page(struct fobj *fobj,
287 					   unsigned int page_idx, void *va)
288 {
289 	struct fobj_rwp_unpaged_iv *rwp = to_rwp_unpaged_iv(fobj);
290 	uint8_t *src = rwp->store + page_idx * SMALL_PAGE_SIZE;
291 
292 	assert(refcount_val(&fobj->refc));
293 	assert(page_idx < fobj->num_pages);
294 
295 	return rwp_load_page(va, rwp->state + page_idx, src);
296 }
297 DECLARE_KEEP_PAGER(rwp_unpaged_iv_load_page);
298 
299 static TEE_Result rwp_unpaged_iv_save_page(struct fobj *fobj,
300 					   unsigned int page_idx,
301 					   const void *va)
302 {
303 	struct fobj_rwp_unpaged_iv *rwp = to_rwp_unpaged_iv(fobj);
304 	uint8_t *dst = rwp->store + page_idx * SMALL_PAGE_SIZE;
305 
306 	assert(page_idx < fobj->num_pages);
307 
308 	if (!refcount_val(&fobj->refc)) {
309 		/*
310 		 * This fobj is being torn down; tee_pager_invalidate_fobj()
311 		 * just hasn't been called yet.
312 		 */
313 		assert(TAILQ_EMPTY(&fobj->areas));
314 		return TEE_SUCCESS;
315 	}
316 
317 	return rwp_save_page(va, rwp->state + page_idx, dst);
318 }
319 DECLARE_KEEP_PAGER(rwp_unpaged_iv_save_page);
320 
321 static void rwp_unpaged_iv_free(struct fobj *fobj)
322 {
323 	struct fobj_rwp_unpaged_iv *rwp = NULL;
324 	tee_mm_entry_t *mm = NULL;
325 
326 	if (IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV))
327 		panic();
328 
329 	rwp = to_rwp_unpaged_iv(fobj);
330 	mm = tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rwp->store));
331 
332 	assert(mm);
333 
334 	fobj_uninit(fobj);
335 	tee_mm_free(mm);
336 	free(rwp->state);
337 	free(rwp);
338 }
339 
340 static const struct fobj_ops ops_rwp_unpaged_iv __rodata_unpaged = {
341 	.free = rwp_unpaged_iv_free,
342 	.load_page = rwp_unpaged_iv_load_page,
343 	.save_page = rwp_unpaged_iv_save_page,
344 };
345 
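/*
 * Registered as a late driver initcall so that crypto_rng_read() is
 * usable. The AES-GCM key is generated freshly on every boot, so pages
 * encrypted into the store during an earlier boot cannot be decrypted or
 * replayed. With CFG_CORE_PAGE_TAG_AND_IV=y it also sets up the
 * pager-backed IV area and records the base addresses used by the
 * paged-IV helpers above.
 */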
346 static TEE_Result rwp_init(void)
347 {
348 	uint8_t key[RWP_AE_KEY_BITS / 8] = { 0 };
349 	struct fobj *fobj = NULL;
350 	size_t num_pool_pages = 0;
351 	size_t num_fobj_pages = 0;
352 	size_t sz = 0;
353 
354 	if (crypto_rng_read(key, sizeof(key)) != TEE_SUCCESS)
355 		panic("failed to generate random");
356 	if (crypto_aes_expand_enc_key(key, sizeof(key), rwp_ae_key.data,
357 				      sizeof(rwp_ae_key.data),
358 				      &rwp_ae_key.rounds))
359 		panic("failed to expand key");
360 
361 	if (!IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV))
362 		return TEE_SUCCESS;
363 
364 	assert(tee_mm_sec_ddr.hi > tee_mm_sec_ddr.lo);
365 	sz = tee_mm_sec_ddr.hi - tee_mm_sec_ddr.lo;
366 	assert(!(sz & SMALL_PAGE_MASK));
367 
368 	num_pool_pages = sz / SMALL_PAGE_SIZE;
369 	num_fobj_pages = ROUNDUP(num_pool_pages * sizeof(*rwp_state_base),
370 				 SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
371 
372 	/*
373 	 * Each page in the pool needs a struct rwp_state.
374 	 *
375 	 * This isn't entirely true: the pages not used by
376 	 * fobj_rw_paged_alloc() don't need any. A future optimization
377 	 * may avoid allocating state for such pages.
378 	 */
379 	fobj = rwp_unpaged_iv_alloc(num_fobj_pages);
380 	if (!fobj)
381 		panic();
382 
383 	rwp_state_base = (void *)tee_pager_init_iv_area(fobj);
384 	assert(rwp_state_base);
385 
386 	rwp_store_base = phys_to_virt(tee_mm_sec_ddr.lo, MEM_AREA_TA_RAM);
387 	assert(rwp_store_base);
388 
389 	return TEE_SUCCESS;
390 }
391 driver_init_late(rwp_init);
392 
393 struct fobj *fobj_rw_paged_alloc(unsigned int num_pages)
394 {
395 	assert(num_pages);
396 
397 	if (IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV))
398 		return rwp_paged_iv_alloc(num_pages);
399 	else
400 		return rwp_unpaged_iv_alloc(num_pages);
401 }
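
/*
 * Sketch of how a caller might use this (hypothetical, for illustration
 * only; the real callers live in the pager code):
 *
 *	struct fobj *f = fobj_rw_paged_alloc(npages);
 *
 *	if (!f)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *	// hand the fobj over to the pager, which takes its own reference
 *	...
 *	fobj_put(f);
 */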
402 
403 struct fobj_rop {
404 	uint8_t *hashes;
405 	uint8_t *store;
406 	struct fobj fobj;
407 };
408 
409 static const struct fobj_ops ops_ro_paged;
410 
411 static void rop_init(struct fobj_rop *rop, const struct fobj_ops *ops,
412 		     unsigned int num_pages, void *hashes, void *store)
413 {
414 	rop->hashes = hashes;
415 	rop->store = store;
416 	fobj_init(&rop->fobj, ops, num_pages);
417 }
418 
419 struct fobj *fobj_ro_paged_alloc(unsigned int num_pages, void *hashes,
420 				 void *store)
421 {
422 	struct fobj_rop *rop = NULL;
423 
424 	assert(num_pages && hashes && store);
425 
426 	rop = calloc(1, sizeof(*rop));
427 	if (!rop)
428 		return NULL;
429 
430 	rop_init(rop, &ops_ro_paged, num_pages, hashes, store);
431 
432 	return &rop->fobj;
433 }
434 
435 static struct fobj_rop *to_rop(struct fobj *fobj)
436 {
437 	assert(fobj->ops == &ops_ro_paged);
438 
439 	return container_of(fobj, struct fobj_rop, fobj);
440 }
441 
442 static void rop_uninit(struct fobj_rop *rop)
443 {
444 	fobj_uninit(&rop->fobj);
445 	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rop->store)));
446 	free(rop->hashes);
447 }
448 
449 static void rop_free(struct fobj *fobj)
450 {
451 	struct fobj_rop *rop = to_rop(fobj);
452 
453 	rop_uninit(rop);
454 	free(rop);
455 }
456 
457 static TEE_Result rop_load_page_helper(struct fobj_rop *rop,
458 				       unsigned int page_idx, void *va)
459 {
460 	const uint8_t *hash = rop->hashes + page_idx * TEE_SHA256_HASH_SIZE;
461 	const uint8_t *src = rop->store + page_idx * SMALL_PAGE_SIZE;
462 
463 	assert(refcount_val(&rop->fobj.refc));
464 	assert(page_idx < rop->fobj.num_pages);
465 	memcpy(va, src, SMALL_PAGE_SIZE);
466 
467 	return hash_sha256_check(hash, va, SMALL_PAGE_SIZE);
468 }
469 
470 static TEE_Result rop_load_page(struct fobj *fobj, unsigned int page_idx,
471 				void *va)
472 {
473 	return rop_load_page_helper(to_rop(fobj), page_idx, va);
474 }
475 DECLARE_KEEP_PAGER(rop_load_page);
476 
477 static TEE_Result rop_save_page(struct fobj *fobj __unused,
478 				unsigned int page_idx __unused,
479 				const void *va __unused)
480 {
481 	return TEE_ERROR_GENERIC;
482 }
483 DECLARE_KEEP_PAGER(rop_save_page);
484 
485 static const struct fobj_ops ops_ro_paged __rodata_unpaged = {
486 	.free = rop_free,
487 	.load_page = rop_load_page,
488 	.save_page = rop_save_page,
489 };
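
/*
 * Read-only paged fobjs are integrity protected only: load_page() copies
 * the page from the store and verifies it against its per-page SHA-256
 * hash, and pages are never written back, which is why rop_save_page()
 * fails unconditionally.
 */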
490 
491 #ifdef CFG_CORE_ASLR
492 /*
493  * When using relocated pages the relocation information must be applied
494  * before the pages can be used. With read-only paging the content is only
495  * integrity protected, so relocations cannot be applied to the pages in the
496  * less secure "store", or the load address selected by ASLR could be given
497  * away. This means that each time a page has been loaded and verified it
498  * has to have its relocation information applied before it can be used.
499  *
500  * Only relative relocations are supported, which allows a rather compact
501  * representation of the needed relocation information in this struct.
502  * r_offset is replaced with the offset into the page that needs to be
503  * updated. This number can never be larger than SMALL_PAGE_SIZE, so a
504  * uint16_t can be used to represent it.
505  *
506  * All relocations are converted and stored in @relocs. @page_reloc_idx is
507  * an array of length @rop.fobj.num_pages with an entry for each page. If
508  * @page_reloc_idx[page_idx] isn't UINT16_MAX it's an index into @relocs.
509  */
510 struct fobj_ro_reloc_paged {
511 	uint16_t *page_reloc_idx;
512 	uint16_t *relocs;
513 	unsigned int num_relocs;
514 	struct fobj_rop rop;
515 };
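
/*
 * Worked example (hypothetical numbers): with three pages and relocations
 * at fobj offsets 0x18, 0x40 and 0x1008, the arrays become
 *
 *	page_reloc_idx[] = { 0, 2, UINT16_MAX }
 *	relocs[]         = { 0x18, 0x40, 0x8 }
 *	num_relocs       = 3
 *
 * that is, page 0 owns relocs[0..1], page 1 owns relocs[2] and page 2 has
 * no relocations at all.
 */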
516 
517 static const struct fobj_ops ops_ro_reloc_paged;
518 
519 static unsigned int get_num_rels(unsigned int num_pages,
520 				 unsigned int reloc_offs,
521 				 const uint32_t *reloc, unsigned int num_relocs)
522 {
523 	const unsigned int align_mask __maybe_unused = sizeof(long) - 1;
524 	unsigned int nrels = 0;
525 	unsigned int n = 0;
526 	vaddr_t offs = 0;
527 
528 	/*
529 	 * Count the number of relocations which are needed for these
530 	 * pages. Also check that the data is well formed: only the expected
531 	 * relocations, sorted in order of the address each one applies to.
532 	 */
533 	for (; n < num_relocs; n++) {
534 		assert(ALIGNMENT_IS_OK(reloc[n], unsigned long));
535 		assert(offs < reloc[n]);	/* check that it's sorted */
536 		offs = reloc[n];
537 		if (offs >= reloc_offs &&
538 		    offs <= reloc_offs + num_pages * SMALL_PAGE_SIZE)
539 			nrels++;
540 	}
541 
542 	return nrels;
543 }
544 
545 static void init_rels(struct fobj_ro_reloc_paged *rrp, unsigned int reloc_offs,
546 		      const uint32_t *reloc, unsigned int num_relocs)
547 {
548 	unsigned int npg = rrp->rop.fobj.num_pages;
549 	unsigned int pg_idx = 0;
550 	unsigned int reln = 0;
551 	unsigned int n = 0;
552 	uint32_t r = 0;
553 
554 	for (n = 0; n < npg; n++)
555 		rrp->page_reloc_idx[n] = UINT16_MAX;
556 
557 	for (n = 0; n < num_relocs ; n++) {
558 		if (reloc[n] < reloc_offs)
559 			continue;
560 
561 		/* r is the offset from beginning of this fobj */
562 		r = reloc[n] - reloc_offs;
563 
564 		pg_idx = r / SMALL_PAGE_SIZE;
565 		if (pg_idx >= npg)
566 			break;
567 
568 		if (rrp->page_reloc_idx[pg_idx] == UINT16_MAX)
569 			rrp->page_reloc_idx[pg_idx] = reln;
570 		rrp->relocs[reln] = r - pg_idx * SMALL_PAGE_SIZE;
571 		reln++;
572 	}
573 
574 	assert(reln == rrp->num_relocs);
575 }
576 
577 struct fobj *fobj_ro_reloc_paged_alloc(unsigned int num_pages, void *hashes,
578 				       unsigned int reloc_offs,
579 				       const void *reloc,
580 				       unsigned int reloc_len, void *store)
581 {
582 	struct fobj_ro_reloc_paged *rrp = NULL;
583 	const unsigned int num_relocs = reloc_len / sizeof(uint32_t);
584 	unsigned int nrels = 0;
585 
586 	assert(ALIGNMENT_IS_OK(reloc, uint32_t));
587 	assert(ALIGNMENT_IS_OK(reloc_len, uint32_t));
588 	assert(num_pages && hashes && store);
589 	if (!reloc_len) {
590 		assert(!reloc);
591 		return fobj_ro_paged_alloc(num_pages, hashes, store);
592 	}
593 	assert(reloc);
594 
595 	nrels = get_num_rels(num_pages, reloc_offs, reloc, num_relocs);
596 	if (!nrels)
597 		return fobj_ro_paged_alloc(num_pages, hashes, store);
598 
599 	rrp = calloc(1, sizeof(*rrp) + num_pages * sizeof(uint16_t) +
600 			nrels * sizeof(uint16_t));
601 	if (!rrp)
602 		return NULL;
603 	rop_init(&rrp->rop, &ops_ro_reloc_paged, num_pages, hashes, store);
604 	rrp->page_reloc_idx = (uint16_t *)(rrp + 1);
605 	rrp->relocs = rrp->page_reloc_idx + num_pages;
606 	rrp->num_relocs = nrels;
607 	init_rels(rrp, reloc_offs, reloc, num_relocs);
608 
609 	return &rrp->rop.fobj;
610 }
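
/*
 * Layout of the single allocation made above (for reference):
 *
 *	[ struct fobj_ro_reloc_paged | page_reloc_idx[num_pages] | relocs[nrels] ]
 *
 * Both uint16_t arrays are carved out of the tail of the calloc() block,
 * so free(rrp) in rrp_free() releases them together with the struct.
 */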
611 
612 static struct fobj_ro_reloc_paged *to_rrp(struct fobj *fobj)
613 {
614 	assert(fobj->ops == &ops_ro_reloc_paged);
615 
616 	return container_of(fobj, struct fobj_ro_reloc_paged, rop.fobj);
617 }
618 
619 static void rrp_free(struct fobj *fobj)
620 {
621 	struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);
622 
623 	rop_uninit(&rrp->rop);
624 	free(rrp);
625 }
626 
627 static TEE_Result rrp_load_page(struct fobj *fobj, unsigned int page_idx,
628 				void *va)
629 {
630 	struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);
631 	unsigned int end_rel = rrp->num_relocs;
632 	TEE_Result res = TEE_SUCCESS;
633 	unsigned long *where = NULL;
634 	unsigned int n = 0;
635 
636 	res = rop_load_page_helper(&rrp->rop, page_idx, va);
637 	if (res)
638 		return res;
639 
640 	/* Find the reloc index of the next page to tell when we're done */
641 	for (n = page_idx + 1; n < fobj->num_pages; n++) {
642 		if (rrp->page_reloc_idx[n] != UINT16_MAX) {
643 			end_rel = rrp->page_reloc_idx[n];
644 			break;
645 		}
646 	}
647 
648 	for (n = rrp->page_reloc_idx[page_idx]; n < end_rel; n++) {
649 		where = (void *)((vaddr_t)va + rrp->relocs[n]);
650 		*where += boot_mmu_config.load_offset;
651 	}
652 
653 	return TEE_SUCCESS;
654 }
655 DECLARE_KEEP_PAGER(rrp_load_page);
656 
657 static const struct fobj_ops ops_ro_reloc_paged __rodata_unpaged = {
658 	.free = rrp_free,
659 	.load_page = rrp_load_page,
660 	.save_page = rop_save_page, /* Direct reuse */
661 };
662 #endif /*CFG_CORE_ASLR*/
663 
664 static const struct fobj_ops ops_locked_paged;
665 
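/*
 * Locked paged fobjs back pages which are meant to stay resident once the
 * pager has populated them: a page starts out zero-filled via
 * lop_load_page() and is never paged out again, so lop_save_page() simply
 * returns TEE_ERROR_GENERIC.
 */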
666 struct fobj *fobj_locked_paged_alloc(unsigned int num_pages)
667 {
668 	struct fobj *f = NULL;
669 
670 	assert(num_pages);
671 
672 	f = calloc(1, sizeof(*f));
673 	if (!f)
674 		return NULL;
675 
676 	fobj_init(f, &ops_locked_paged, num_pages);
677 
678 	return f;
679 }
680 
681 static void lop_free(struct fobj *fobj)
682 {
683 	assert(fobj->ops == &ops_locked_paged);
684 	fobj_uninit(fobj);
685 	free(fobj);
686 }
687 
688 static TEE_Result lop_load_page(struct fobj *fobj __maybe_unused,
689 				unsigned int page_idx __maybe_unused,
690 				void *va)
691 {
692 	assert(fobj->ops == &ops_locked_paged);
693 	assert(refcount_val(&fobj->refc));
694 	assert(page_idx < fobj->num_pages);
695 
696 	memset(va, 0, SMALL_PAGE_SIZE);
697 
698 	return TEE_SUCCESS;
699 }
700 DECLARE_KEEP_PAGER(lop_load_page);
701 
702 static TEE_Result lop_save_page(struct fobj *fobj __unused,
703 				unsigned int page_idx __unused,
704 				const void *va __unused)
705 {
706 	return TEE_ERROR_GENERIC;
707 }
708 DECLARE_KEEP_PAGER(lop_save_page);
709 
710 static const struct fobj_ops ops_locked_paged __rodata_unpaged = {
711 	.free = lop_free,
712 	.load_page = lop_load_page,
713 	.save_page = lop_save_page,
714 };
715 #endif /*CFG_WITH_PAGER*/
716 
717 #ifndef CFG_PAGED_USER_TA
718 
719 struct fobj_sec_mem {
720 	tee_mm_entry_t *mm;
721 	struct fobj fobj;
722 };
723 
724 static const struct fobj_ops ops_sec_mem;
725 
726 struct fobj *fobj_sec_mem_alloc(unsigned int num_pages)
727 {
728 	struct fobj_sec_mem *f = calloc(1, sizeof(*f));
729 	size_t size = 0;
730 	void *va = NULL;
731 
732 	if (!f)
733 		return NULL;
734 
735 	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
736 		goto err;
737 
738 	f->mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
739 	if (!f->mm)
740 		goto err;
741 
742 	va = phys_to_virt(tee_mm_get_smem(f->mm), MEM_AREA_TA_RAM);
743 	if (!va)
744 		goto err;
745 
746 	memset(va, 0, size);
747 	f->fobj.ops = &ops_sec_mem;
748 	f->fobj.num_pages = num_pages;
749 	refcount_set(&f->fobj.refc, 1);
750 
751 	return &f->fobj;
752 err:
753 	tee_mm_free(f->mm);
754 	free(f);
755 
756 	return NULL;
757 }
758 
759 static struct fobj_sec_mem *to_sec_mem(struct fobj *fobj)
760 {
761 	assert(fobj->ops == &ops_sec_mem);
762 
763 	return container_of(fobj, struct fobj_sec_mem, fobj);
764 }
765 
766 static void sec_mem_free(struct fobj *fobj)
767 {
768 	struct fobj_sec_mem *f = to_sec_mem(fobj);
769 
770 	assert(!refcount_val(&fobj->refc));
771 	tee_mm_free(f->mm);
772 	free(f);
773 }
774 
775 static paddr_t sec_mem_get_pa(struct fobj *fobj, unsigned int page_idx)
776 {
777 	struct fobj_sec_mem *f = to_sec_mem(fobj);
778 
779 	assert(refcount_val(&fobj->refc));
780 	assert(page_idx < fobj->num_pages);
781 
782 	return tee_mm_get_smem(f->mm) + page_idx * SMALL_PAGE_SIZE;
783 }
784 
785 static const struct fobj_ops ops_sec_mem __rodata_unpaged = {
786 	.free = sec_mem_free,
787 	.get_pa = sec_mem_get_pa,
788 };
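
/*
 * Without CFG_PAGED_USER_TA the memory backing these fobjs is always
 * resident, so this variant only needs get_pa(): there is nothing to page
 * in or out and no load_page()/save_page() hooks are provided.
 */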
789 
790 #endif /*!CFG_PAGED_USER_TA*/
791