xref: /optee_os/core/mm/fobj.c (revision 9f34db38245c9b3a4e6e7e63eb78a75e23ab2da3)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2019-2022, Linaro Limited
4  */
5 
6 #include <config.h>
7 #include <crypto/crypto.h>
8 #include <crypto/internal_aes-gcm.h>
9 #include <initcall.h>
10 #include <kernel/boot.h>
11 #include <kernel/panic.h>
12 #include <memtag.h>
13 #include <mm/core_memprot.h>
14 #include <mm/core_mmu.h>
15 #include <mm/fobj.h>
16 #include <mm/phys_mem.h>
17 #include <mm/tee_mm.h>
18 #include <stdlib.h>
19 #include <string.h>
20 #include <tee_api_types.h>
21 #include <types_ext.h>
22 #include <util.h>
23 
24 #ifdef CFG_WITH_PAGER
25 
26 #define RWP_AE_KEY_BITS		256
27 
28 struct rwp_aes_gcm_iv {
29 	uint32_t iv[3];
30 };
31 
32 #define RWP_AES_GCM_TAG_LEN	16
33 
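/*
 * Per-page encryption state for a read/write paged fobj: @iv is a 64-bit
 * counter that is stepped on every save of the page and combined with the
 * address of this state to form the AES-GCM IV, and @tag holds the GCM
 * authentication tag of the most recently saved ciphertext.
 */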
34 struct rwp_state {
35 	uint64_t iv;
36 	uint8_t tag[RWP_AES_GCM_TAG_LEN];
37 };
38 
39 /*
40  * Note that this struct is padded to a size which is a power of two, which
41  * guarantees that the state will not span two pages. This avoids a corner
42  * case in the pager when making the state available.
43  */
44 struct rwp_state_padded {
45 	struct rwp_state state;
46 	uint64_t pad;
47 };
48 
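/*
 * Two variants of read/write paged fobjs are used below:
 *
 * struct fobj_rwp_unpaged_iv keeps its rwp_state array in unpaged heap
 * memory and records the virtual address of its backing store directly.
 *
 * struct fobj_rwp_paged_iv (used with CFG_CORE_PAGE_TAG_AND_IV=y) only
 * records @idx, the page index into the nexus TA pool; both the backing
 * store and the rwp_state entries are derived from that index, and the
 * state entries themselves live in a paged region (see rwp_init()).
 */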
49 struct fobj_rwp_unpaged_iv {
50 	uint8_t *store;
51 	struct rwp_state *state;
52 	struct fobj fobj;
53 };
54 
55 struct fobj_rwp_paged_iv {
56 	size_t idx;
57 	struct fobj fobj;
58 };
59 
60 const struct fobj_ops ops_rwp_paged_iv;
61 const struct fobj_ops ops_rwp_unpaged_iv;
62 
63 static struct internal_aes_gcm_key rwp_ae_key;
64 
65 static struct rwp_state_padded *rwp_state_base;
66 static uint8_t *rwp_store_base;
67 
68 static void fobj_init(struct fobj *fobj, const struct fobj_ops *ops,
69 		      unsigned int num_pages)
70 {
71 	fobj->ops = ops;
72 	fobj->num_pages = num_pages;
73 	refcount_set(&fobj->refc, 1);
74 	TAILQ_INIT(&fobj->regions);
75 }
76 
77 static void fobj_uninit(struct fobj *fobj)
78 {
79 	assert(!refcount_val(&fobj->refc));
80 	assert(TAILQ_EMPTY(&fobj->regions));
81 	tee_pager_invalidate_fobj(fobj);
82 }
83 
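/*
 * Decrypt and authenticate one page from the encrypted backing store into
 * @va using the per-page state. A still-zero IV counter means the page has
 * never been saved, so a zero-filled page is produced instead.
 */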
84 static TEE_Result rwp_load_page(void *va, struct rwp_state *state,
85 				const uint8_t *src)
86 {
87 	struct rwp_aes_gcm_iv iv = {
88 		.iv = { (vaddr_t)state, state->iv >> 32, state->iv }
89 	};
90 
91 	if (!state->iv) {
92 		/*
93 		 * IV still zero, which means that this is a previously
94 		 * unused page.
95 		 */
96 		memset(va, 0, SMALL_PAGE_SIZE);
97 		return TEE_SUCCESS;
98 	}
99 
100 	return internal_aes_gcm_dec(&rwp_ae_key, &iv, sizeof(iv),
101 				    NULL, 0, src, SMALL_PAGE_SIZE, va,
102 				    state->tag, sizeof(state->tag));
103 }
104 
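/*
 * Encrypt one page from @va into the backing store at @dst. The IV counter
 * is incremented before each save so that an IV is never reused with the
 * key, and the resulting AES-GCM tag is kept in @state for the next load.
 * The assert() below guards against the counter wrapping.
 */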
105 static TEE_Result rwp_save_page(const void *va, struct rwp_state *state,
106 				uint8_t *dst)
107 {
108 	size_t tag_len = sizeof(state->tag);
109 	struct rwp_aes_gcm_iv iv = { };
110 
111 	assert(state->iv + 1 > state->iv);
112 
113 	state->iv++;
114 
115 	/*
116 	 * IV is constructed as recommended in section "8.2.1 Deterministic
117 	 * Construction" of "Recommendation for Block Cipher Modes of
118 	 * Operation: Galois/Counter Mode (GCM) and GMAC",
119 	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
120 	 */
121 	iv.iv[0] = (vaddr_t)state;
122 	iv.iv[1] = state->iv >> 32;
123 	iv.iv[2] = state->iv;
124 
125 	return internal_aes_gcm_enc(&rwp_ae_key, &iv, sizeof(iv),
126 				    NULL, 0, va, SMALL_PAGE_SIZE, dst,
127 				    state->tag, &tag_len);
128 }
129 
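/*
 * Helpers translating a page index within the nexus TA pool into its
 * rwp_state_padded entry and its slot in the encrypted backing store.
 * Only meaningful once rwp_init() has set up the base pointers.
 */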
130 static struct rwp_state_padded *idx_to_state_padded(size_t idx)
131 {
132 	assert(rwp_state_base);
133 	return rwp_state_base + idx;
134 }
135 
136 static uint8_t *idx_to_store(size_t idx)
137 {
138 	assert(rwp_store_base);
139 	return rwp_store_base + idx * SMALL_PAGE_SIZE;
140 }
141 
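/*
 * Allocate a read/write paged fobj whose tag/IV state is itself paged.
 * The backing store is carved out of the nexus TA pool and @idx is the
 * pool-relative page index of that allocation, which also indexes the
 * corresponding rwp_state_padded entries.
 */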
142 static struct fobj *rwp_paged_iv_alloc(unsigned int num_pages)
143 {
144 	struct fobj_rwp_paged_iv *rwp = NULL;
145 	tee_mm_entry_t *mm = NULL;
146 	size_t size = 0;
147 
148 	COMPILE_TIME_ASSERT(IS_POWER_OF_TWO(sizeof(struct rwp_state_padded)));
149 
150 	rwp = calloc(1, sizeof(*rwp));
151 	if (!rwp)
152 		return NULL;
153 
154 	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
155 		goto err;
156 	mm = nex_phys_mem_ta_alloc(size);
157 	if (!mm)
158 		goto err;
159 	rwp->idx = (tee_mm_get_smem(mm) - nex_phys_mem_get_ta_base()) /
160 		   SMALL_PAGE_SIZE;
161 
162 	memset(idx_to_state_padded(rwp->idx), 0,
163 	       num_pages * sizeof(struct rwp_state_padded));
164 
165 	fobj_init(&rwp->fobj, &ops_rwp_paged_iv, num_pages);
166 
167 	return &rwp->fobj;
168 err:
169 	tee_mm_free(mm);
170 	free(rwp);
171 
172 	return NULL;
173 }
174 
175 static struct fobj_rwp_paged_iv *to_rwp_paged_iv(struct fobj *fobj)
176 {
177 	assert(fobj->ops == &ops_rwp_paged_iv);
178 
179 	return container_of(fobj, struct fobj_rwp_paged_iv, fobj);
180 }
181 
182 static TEE_Result rwp_paged_iv_load_page(struct fobj *fobj,
183 					 unsigned int page_idx, void *va)
184 {
185 	struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
186 	uint8_t *src = idx_to_store(rwp->idx) + page_idx * SMALL_PAGE_SIZE;
187 	struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);
188 
189 	assert(refcount_val(&fobj->refc));
190 	assert(page_idx < fobj->num_pages);
191 
192 	return rwp_load_page(va, &st->state, src);
193 }
194 DECLARE_KEEP_PAGER(rwp_paged_iv_load_page);
195 
196 static TEE_Result rwp_paged_iv_save_page(struct fobj *fobj,
197 					 unsigned int page_idx, const void *va)
198 {
199 	struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
200 	uint8_t *dst = idx_to_store(rwp->idx) + page_idx * SMALL_PAGE_SIZE;
201 	struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);
202 
203 	assert(page_idx < fobj->num_pages);
204 
205 	if (!refcount_val(&fobj->refc)) {
206 		/*
207 		 * This fobj is being torn down; it just hasn't had time
208 		 * to call tee_pager_invalidate_fobj() yet.
209 		 */
210 		assert(TAILQ_EMPTY(&fobj->regions));
211 		return TEE_SUCCESS;
212 	}
213 
214 	return rwp_save_page(va, &st->state, dst);
215 }
216 DECLARE_KEEP_PAGER(rwp_paged_iv_save_page);
217 
218 static void rwp_paged_iv_free(struct fobj *fobj)
219 {
220 	struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
221 	paddr_t pa = rwp->idx * SMALL_PAGE_SIZE + nex_phys_mem_get_ta_base();
222 	tee_mm_entry_t *mm = nex_phys_mem_mm_find(pa);
223 
224 	assert(mm);
225 
226 	fobj_uninit(fobj);
227 	tee_mm_free(mm);
228 	free(rwp);
229 }
230 
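/*
 * Return the page-aligned virtual address of the page holding this page's
 * tag/IV state so the pager can make sure that page is present before the
 * state is accessed. The power-of-two padding of rwp_state_padded ensures
 * the state never straddles two pages.
 */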
231 static vaddr_t rwp_paged_iv_get_iv_vaddr(struct fobj *fobj,
232 					 unsigned int page_idx)
233 {
234 	struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
235 	struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);
236 
237 	assert(page_idx < fobj->num_pages);
238 	return (vaddr_t)&st->state & ~SMALL_PAGE_MASK;
239 }
240 DECLARE_KEEP_PAGER(rwp_paged_iv_get_iv_vaddr);
241 
242 /*
243  * Note: this variable is weak just to ease breaking its dependency chain
244  * when added to the unpaged area.
245  */
246 const struct fobj_ops ops_rwp_paged_iv
247 __weak __relrodata_unpaged("ops_rwp_paged_iv") = {
248 	.free = rwp_paged_iv_free,
249 	.load_page = rwp_paged_iv_load_page,
250 	.save_page = rwp_paged_iv_save_page,
251 	.get_iv_vaddr = rwp_paged_iv_get_iv_vaddr,
252 };
253 
254 static struct fobj *rwp_unpaged_iv_alloc(unsigned int num_pages)
255 {
256 	struct fobj_rwp_unpaged_iv *rwp = NULL;
257 	tee_mm_entry_t *mm = NULL;
258 	size_t size = 0;
259 
260 	rwp = calloc(1, sizeof(*rwp));
261 	if (!rwp)
262 		return NULL;
263 
264 	rwp->state = calloc(num_pages, sizeof(*rwp->state));
265 	if (!rwp->state)
266 		goto err_free_rwp;
267 
268 	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
269 		goto err_free_state;
270 	mm = nex_phys_mem_ta_alloc(size);
271 	if (!mm)
272 		goto err_free_state;
273 	rwp->store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM, size);
274 	assert(rwp->store);
275 
276 	fobj_init(&rwp->fobj, &ops_rwp_unpaged_iv, num_pages);
277 
278 	return &rwp->fobj;
279 
280 err_free_state:
281 	free(rwp->state);
282 err_free_rwp:
283 	free(rwp);
284 	return NULL;
285 }
286 
287 static struct fobj_rwp_unpaged_iv *to_rwp_unpaged_iv(struct fobj *fobj)
288 {
289 	assert(fobj->ops == &ops_rwp_unpaged_iv);
290 
291 	return container_of(fobj, struct fobj_rwp_unpaged_iv, fobj);
292 }
293 
294 static TEE_Result rwp_unpaged_iv_load_page(struct fobj *fobj,
295 					   unsigned int page_idx, void *va)
296 {
297 	struct fobj_rwp_unpaged_iv *rwp = to_rwp_unpaged_iv(fobj);
298 	uint8_t *src = rwp->store + page_idx * SMALL_PAGE_SIZE;
299 
300 	assert(refcount_val(&fobj->refc));
301 	assert(page_idx < fobj->num_pages);
302 
303 	return rwp_load_page(va, rwp->state + page_idx, src);
304 }
305 DECLARE_KEEP_PAGER(rwp_unpaged_iv_load_page);
306 
307 static TEE_Result rwp_unpaged_iv_save_page(struct fobj *fobj,
308 					   unsigned int page_idx,
309 					   const void *va)
310 {
311 	struct fobj_rwp_unpaged_iv *rwp = to_rwp_unpaged_iv(fobj);
312 	uint8_t *dst = rwp->store + page_idx * SMALL_PAGE_SIZE;
313 
314 	assert(page_idx < fobj->num_pages);
315 
316 	if (!refcount_val(&fobj->refc)) {
317 		/*
318 		 * This fobj is being torn down; it just hasn't had time
319 		 * to call tee_pager_invalidate_fobj() yet.
320 		 */
321 		assert(TAILQ_EMPTY(&fobj->regions));
322 		return TEE_SUCCESS;
323 	}
324 
325 	return rwp_save_page(va, rwp->state + page_idx, dst);
326 }
327 DECLARE_KEEP_PAGER(rwp_unpaged_iv_save_page);
328 
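/*
 * With CFG_CORE_PAGE_TAG_AND_IV enabled the only unpaged-IV fobj is
 * expected to be the one backing the tag/IV region created in rwp_init(),
 * which must never be freed, hence the panic below.
 */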
329 static void rwp_unpaged_iv_free(struct fobj *fobj)
330 {
331 	struct fobj_rwp_unpaged_iv *rwp = NULL;
332 	tee_mm_entry_t *mm = NULL;
333 
334 	if (IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV))
335 		panic();
336 
337 	rwp = to_rwp_unpaged_iv(fobj);
338 	mm = nex_phys_mem_mm_find(virt_to_phys(rwp->store));
339 
340 	assert(mm);
341 
342 	fobj_uninit(fobj);
343 	tee_mm_free(mm);
344 	free(rwp->state);
345 	free(rwp);
346 }
347 
348 /*
349  * Note: this variable is weak just to ease breaking its dependency chain
350  * when added to the unpaged area.
351  */
352 const struct fobj_ops ops_rwp_unpaged_iv
353 __weak __relrodata_unpaged("ops_rwp_unpaged_iv") = {
354 	.free = rwp_unpaged_iv_free,
355 	.load_page = rwp_unpaged_iv_load_page,
356 	.save_page = rwp_unpaged_iv_save_page,
357 };
358 
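/*
 * Boot-time initialization of read/write paging: generate an ephemeral
 * AES-GCM key for encrypting paged-out pages and, when
 * CFG_CORE_PAGE_TAG_AND_IV is enabled, allocate one rwp_state_padded entry
 * per page in the nexus TA pool and hand that array to the pager as the
 * tag/IV region.
 */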
359 static TEE_Result rwp_init(void)
360 {
361 	paddr_size_t ta_size = nex_phys_mem_get_ta_size();
362 	uint8_t key[RWP_AE_KEY_BITS / 8] = { 0 };
363 	struct fobj *fobj = NULL;
364 	size_t num_pool_pages = 0;
365 	size_t num_fobj_pages = 0;
366 
367 	if (crypto_rng_read(key, sizeof(key)) != TEE_SUCCESS)
368 		panic("failed to generate random");
369 	if (crypto_aes_expand_enc_key(key, sizeof(key), rwp_ae_key.data,
370 				      sizeof(rwp_ae_key.data),
371 				      &rwp_ae_key.rounds))
372 		panic("failed to expand key");
373 
374 	if (!IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV))
375 		return TEE_SUCCESS;
376 
377 	assert(ta_size && !(ta_size & SMALL_PAGE_MASK));
378 
379 	num_pool_pages = ta_size / SMALL_PAGE_SIZE;
380 	num_fobj_pages = ROUNDUP(num_pool_pages * sizeof(*rwp_state_base),
381 				 SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
382 
383 	/*
384 	 * Each page in the pool needs a struct rwp_state.
385 	 *
386 	 * This isn't entirely true, the pages not used by
387 	 * fobj_rw_paged_alloc() don't need any. A future optimization
388 	 * may try to avoid allocating for such pages.
389 	 */
390 	fobj = rwp_unpaged_iv_alloc(num_fobj_pages);
391 	if (!fobj)
392 		panic();
393 
394 	rwp_state_base = (void *)tee_pager_init_iv_region(fobj);
395 	assert(rwp_state_base);
396 
397 	rwp_store_base = phys_to_virt(nex_phys_mem_get_ta_base(),
398 				      MEM_AREA_TA_RAM, ta_size);
399 	assert(rwp_store_base);
400 
401 	return TEE_SUCCESS;
402 }
403 driver_init_late(rwp_init);
404 
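/*
 * Allocate a read/write paged fobj, choosing the implementation based on
 * whether the tag/IV state itself should be pageable.
 *
 * Illustrative (hypothetical) usage by a pager client; the exact pager API
 * used alongside it is not shown in this file:
 *
 *	struct fobj *f = fobj_rw_paged_alloc(num_pages);
 *
 *	if (!f)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *	// ...register @f with the pager, which takes its own reference...
 *	fobj_put(f);
 */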
405 struct fobj *fobj_rw_paged_alloc(unsigned int num_pages)
406 {
407 	assert(num_pages);
408 
409 	if (IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV))
410 		return rwp_paged_iv_alloc(num_pages);
411 	else
412 		return rwp_unpaged_iv_alloc(num_pages);
413 }
414 
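/*
 * Read-only paged fobj: @store holds the (unencrypted) backing pages and
 * @hashes holds one SHA-256 hash per page, used to verify each page as it
 * is loaded. Pages are never saved back.
 */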
415 struct fobj_rop {
416 	uint8_t *hashes;
417 	uint8_t *store;
418 	struct fobj fobj;
419 };
420 
421 const struct fobj_ops ops_ro_paged;
422 
423 static void rop_init(struct fobj_rop *rop, const struct fobj_ops *ops,
424 		     unsigned int num_pages, void *hashes, void *store)
425 {
426 	rop->hashes = hashes;
427 	rop->store = store;
428 	fobj_init(&rop->fobj, ops, num_pages);
429 }
430 
431 struct fobj *fobj_ro_paged_alloc(unsigned int num_pages, void *hashes,
432 				 void *store)
433 {
434 	struct fobj_rop *rop = NULL;
435 
436 	assert(num_pages && hashes && store);
437 
438 	rop = calloc(1, sizeof(*rop));
439 	if (!rop)
440 		return NULL;
441 
442 	rop_init(rop, &ops_ro_paged, num_pages, hashes, store);
443 
444 	return &rop->fobj;
445 }
446 
447 static struct fobj_rop *to_rop(struct fobj *fobj)
448 {
449 	assert(fobj->ops == &ops_ro_paged);
450 
451 	return container_of(fobj, struct fobj_rop, fobj);
452 }
453 
454 static void rop_uninit(struct fobj_rop *rop)
455 {
456 	fobj_uninit(&rop->fobj);
457 	tee_mm_free(nex_phys_mem_mm_find(virt_to_phys(rop->store)));
458 	free(rop->hashes);
459 }
460 
461 static void rop_free(struct fobj *fobj)
462 {
463 	struct fobj_rop *rop = to_rop(fobj);
464 
465 	rop_uninit(rop);
466 	free(rop);
467 }
468 
469 static TEE_Result rop_load_page_helper(struct fobj_rop *rop,
470 				       unsigned int page_idx, void *va)
471 {
472 	const uint8_t *hash = rop->hashes + page_idx * TEE_SHA256_HASH_SIZE;
473 	const uint8_t *src = rop->store + page_idx * SMALL_PAGE_SIZE;
474 
475 	assert(refcount_val(&rop->fobj.refc));
476 	assert(page_idx < rop->fobj.num_pages);
477 	memcpy(va, src, SMALL_PAGE_SIZE);
478 
479 	return hash_sha256_check(hash, va, SMALL_PAGE_SIZE);
480 }
481 
482 static TEE_Result rop_load_page(struct fobj *fobj, unsigned int page_idx,
483 				void *va)
484 {
485 	return rop_load_page_helper(to_rop(fobj), page_idx, va);
486 }
487 DECLARE_KEEP_PAGER(rop_load_page);
488 
489 static TEE_Result rop_save_page(struct fobj *fobj __unused,
490 				unsigned int page_idx __unused,
491 				const void *va __unused)
492 {
493 	return TEE_ERROR_GENERIC;
494 }
495 DECLARE_KEEP_PAGER(rop_save_page);
496 
497 /*
498  * Note: this variable is weak just to ease breaking its dependency chain
499  * when added to the unpaged area.
500  */
501 const struct fobj_ops ops_ro_paged
502 __weak __relrodata_unpaged("ops_ro_paged") = {
503 	.free = rop_free,
504 	.load_page = rop_load_page,
505 	.save_page = rop_save_page,
506 };
507 
508 #ifdef CFG_CORE_ASLR
509 /*
510  * When using relocated pages the relocation information must be applied
511  * before the pages can be used. With read-only paging the content is only
512  * integrity protected so relocation cannot be applied on pages in the less
513  * integrity protected so relocations cannot be applied to pages in the less
514  * secure "store", or the load address selected by ASLR could be given away.
515  * have its relocation information applied before it can be used.
516  *
517  * Only relative relocations are supported, which allows a rather compact
518  * representation of the needed relocation information in this struct.
519  * r_offset is replaced with the offset into the page that needs to be
520  * updated; this number can never be larger than SMALL_PAGE_SIZE so a
521  * uint16_t can be used to represent it.
522  *
523  * All relocations are converted and stored in @relocs. @page_reloc_idx is
524  * an array of length @rop.fobj.num_pages with an entry for each page. If
525  * @page_reloc_idx[page_idx] isn't UINT16_MAX it's an index into @relocs.
526  */
527 struct fobj_ro_reloc_paged {
528 	uint16_t *page_reloc_idx;
529 	uint16_t *relocs;
530 	unsigned int num_relocs;
531 	struct fobj_rop rop;
532 };
533 
534 const struct fobj_ops ops_ro_reloc_paged;
535 
536 static unsigned int get_num_rels(unsigned int num_pages,
537 				 unsigned int reloc_offs,
538 				 const uint32_t *reloc, unsigned int num_relocs)
539 {
540 	const unsigned int align_mask __maybe_unused = sizeof(long) - 1;
541 	unsigned int nrels = 0;
542 	unsigned int n = 0;
543 	vaddr_t offs = 0;
544 
545 	/*
546 	 * Count the number of relocations which are needed for these
547 	 * pages. Also check that the data is well formed: only expected
548 	 * relocations, sorted in order of the address they apply to.
549 	 */
550 	for (; n < num_relocs; n++) {
551 		assert(IS_ALIGNED_WITH_TYPE(reloc[n], unsigned long));
552 		assert(offs < reloc[n]);	/* check that it's sorted */
553 		offs = reloc[n];
554 		if (offs >= reloc_offs &&
555 		    offs <= reloc_offs + num_pages * SMALL_PAGE_SIZE)
556 			nrels++;
557 	}
558 
559 	return nrels;
560 }
561 
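/*
 * Convert the absolute relocation offsets that fall within this fobj into
 * per-page offsets in @relocs and record, for each page, the index of its
 * first relocation in @page_reloc_idx (UINT16_MAX for pages without any).
 */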
562 static void init_rels(struct fobj_ro_reloc_paged *rrp, unsigned int reloc_offs,
563 		      const uint32_t *reloc, unsigned int num_relocs)
564 {
565 	unsigned int npg = rrp->rop.fobj.num_pages;
566 	unsigned int pg_idx = 0;
567 	unsigned int reln = 0;
568 	unsigned int n = 0;
569 	uint32_t r = 0;
570 
571 	for (n = 0; n < npg; n++)
572 		rrp->page_reloc_idx[n] = UINT16_MAX;
573 
574 	for (n = 0; n < num_relocs ; n++) {
575 		if (reloc[n] < reloc_offs)
576 			continue;
577 
578 		/* r is the offset from beginning of this fobj */
579 		r = reloc[n] - reloc_offs;
580 
581 		pg_idx = r / SMALL_PAGE_SIZE;
582 		if (pg_idx >= npg)
583 			break;
584 
585 		if (rrp->page_reloc_idx[pg_idx] == UINT16_MAX)
586 			rrp->page_reloc_idx[pg_idx] = reln;
587 		rrp->relocs[reln] = r - pg_idx * SMALL_PAGE_SIZE;
588 		reln++;
589 	}
590 
591 	assert(reln == rrp->num_relocs);
592 }
593 
594 struct fobj *fobj_ro_reloc_paged_alloc(unsigned int num_pages, void *hashes,
595 				       unsigned int reloc_offs,
596 				       const void *reloc,
597 				       unsigned int reloc_len, void *store)
598 {
599 	struct fobj_ro_reloc_paged *rrp = NULL;
600 	const unsigned int num_relocs = reloc_len / sizeof(uint32_t);
601 	unsigned int nrels = 0;
602 
603 	assert(IS_ALIGNED_WITH_TYPE(reloc, uint32_t));
604 	assert(IS_ALIGNED_WITH_TYPE(reloc_len, uint32_t));
605 	assert(num_pages && hashes && store);
606 	if (!reloc_len) {
607 		assert(!reloc);
608 		return fobj_ro_paged_alloc(num_pages, hashes, store);
609 	}
610 	assert(reloc);
611 
612 	nrels = get_num_rels(num_pages, reloc_offs, reloc, num_relocs);
613 	if (!nrels)
614 		return fobj_ro_paged_alloc(num_pages, hashes, store);
615 
616 	rrp = calloc(1, sizeof(*rrp) + num_pages * sizeof(uint16_t) +
617 			nrels * sizeof(uint16_t));
618 	if (!rrp)
619 		return NULL;
620 	rop_init(&rrp->rop, &ops_ro_reloc_paged, num_pages, hashes, store);
621 	rrp->page_reloc_idx = (uint16_t *)(rrp + 1);
622 	rrp->relocs = rrp->page_reloc_idx + num_pages;
623 	rrp->num_relocs = nrels;
624 	init_rels(rrp, reloc_offs, reloc, num_relocs);
625 
626 	return &rrp->rop.fobj;
627 }
628 
629 static struct fobj_ro_reloc_paged *to_rrp(struct fobj *fobj)
630 {
631 	assert(fobj->ops == &ops_ro_reloc_paged);
632 
633 	return container_of(fobj, struct fobj_ro_reloc_paged, rop.fobj);
634 }
635 
636 static void rrp_free(struct fobj *fobj)
637 {
638 	struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);
639 
640 	rop_uninit(&rrp->rop);
641 	free(rrp);
642 }
643 
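/*
 * Load and verify the page like the plain read-only case, then apply the
 * relative relocations recorded for this page by adding the ASLR map
 * offset to each recorded location.
 */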
644 static TEE_Result rrp_load_page(struct fobj *fobj, unsigned int page_idx,
645 				void *va)
646 {
647 	struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);
648 	unsigned int end_rel = rrp->num_relocs;
649 	TEE_Result res = TEE_SUCCESS;
650 	unsigned long *where = NULL;
651 	unsigned int n = 0;
652 
653 	res = rop_load_page_helper(&rrp->rop, page_idx, va);
654 	if (res)
655 		return res;
656 
657 	/* Find the reloc index of the next page to tell when we're done */
658 	for (n = page_idx + 1; n < fobj->num_pages; n++) {
659 		if (rrp->page_reloc_idx[n] != UINT16_MAX) {
660 			end_rel = rrp->page_reloc_idx[n];
661 			break;
662 		}
663 	}
664 
665 	for (n = rrp->page_reloc_idx[page_idx]; n < end_rel; n++) {
666 		where = (void *)((vaddr_t)va + rrp->relocs[n]);
667 		*where += boot_mmu_config.map_offset;
668 	}
669 
670 	return TEE_SUCCESS;
671 }
672 DECLARE_KEEP_PAGER(rrp_load_page);
673 
674 /*
675  * Note: this variable is weak just to ease breaking its dependency chain
676  * when added to the unpaged area.
677  */
678 const struct fobj_ops ops_ro_reloc_paged
679 __weak __relrodata_unpaged("ops_ro_reloc_paged") = {
680 	.free = rrp_free,
681 	.load_page = rrp_load_page,
682 	.save_page = rop_save_page, /* Direct reuse */
683 };
684 #endif /*CFG_CORE_ASLR*/
685 
686 const struct fobj_ops ops_locked_paged;
687 
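/*
 * Locked paged fobjs have no backing store at all: a page is supplied
 * zero-initialized when first paged in and can never be paged out again
 * (save_page always fails), so its content stays resident until the fobj
 * is freed.
 */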
688 struct fobj *fobj_locked_paged_alloc(unsigned int num_pages)
689 {
690 	struct fobj *f = NULL;
691 
692 	assert(num_pages);
693 
694 	f = calloc(1, sizeof(*f));
695 	if (!f)
696 		return NULL;
697 
698 	fobj_init(f, &ops_locked_paged, num_pages);
699 
700 	return f;
701 }
702 
703 static void lop_free(struct fobj *fobj)
704 {
705 	assert(fobj->ops == &ops_locked_paged);
706 	fobj_uninit(fobj);
707 	free(fobj);
708 }
709 
710 static TEE_Result lop_load_page(struct fobj *fobj __maybe_unused,
711 				unsigned int page_idx __maybe_unused,
712 				void *va)
713 {
714 	assert(fobj->ops == &ops_locked_paged);
715 	assert(refcount_val(&fobj->refc));
716 	assert(page_idx < fobj->num_pages);
717 
718 	memset(va, 0, SMALL_PAGE_SIZE);
719 
720 	return TEE_SUCCESS;
721 }
722 DECLARE_KEEP_PAGER(lop_load_page);
723 
724 static TEE_Result lop_save_page(struct fobj *fobj __unused,
725 				unsigned int page_idx __unused,
726 				const void *va __unused)
727 {
728 	return TEE_ERROR_GENERIC;
729 }
730 DECLARE_KEEP_PAGER(lop_save_page);
731 
732 /*
733  * Note: this variable is weak just to ease breaking its dependency chain
734  * when added to the unpaged area.
735  */
736 const struct fobj_ops ops_locked_paged
737 __weak __relrodata_unpaged("ops_locked_paged") = {
738 	.free = lop_free,
739 	.load_page = lop_load_page,
740 	.save_page = lop_save_page,
741 };
742 #endif /*CFG_WITH_PAGER*/
743 
744 #ifndef CFG_PAGED_USER_TA
745 
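/*
 * Without user TA paging each fobj is simply backed by physically
 * contiguous secure memory from the TA pool: @mm tracks the allocation and
 * get_pa() returns the physical address of a page directly, so there are
 * no load/save hooks.
 */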
746 struct fobj_sec_mem {
747 	tee_mm_entry_t *mm;
748 	struct fobj fobj;
749 };
750 
751 const struct fobj_ops ops_sec_mem;
752 
753 struct fobj *fobj_sec_mem_alloc(unsigned int num_pages)
754 {
755 	struct fobj_sec_mem *f = calloc(1, sizeof(*f));
756 	size_t size = 0;
757 	void *va = NULL;
758 
759 	if (!f)
760 		return NULL;
761 
762 	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
763 		goto err;
764 
765 	f->mm = phys_mem_ta_alloc(size);
766 	if (!f->mm)
767 		goto err;
768 
769 	va = phys_to_virt(tee_mm_get_smem(f->mm), MEM_AREA_TA_RAM, size);
770 	if (!va)
771 		goto err;
772 
773 	memtag_clear_mem(va, size);
774 	f->fobj.ops = &ops_sec_mem;
775 	f->fobj.num_pages = num_pages;
776 	refcount_set(&f->fobj.refc, 1);
777 
778 	return &f->fobj;
779 err:
780 	tee_mm_free(f->mm);
781 	free(f);
782 
783 	return NULL;
784 }
785 
786 static struct fobj_sec_mem *to_sec_mem(struct fobj *fobj)
787 {
788 	assert(fobj->ops == &ops_sec_mem);
789 
790 	return container_of(fobj, struct fobj_sec_mem, fobj);
791 }
792 
793 static void sec_mem_free(struct fobj *fobj)
794 {
795 	struct fobj_sec_mem *f = to_sec_mem(fobj);
796 
797 	assert(!refcount_val(&fobj->refc));
798 	tee_mm_free(f->mm);
799 	free(f);
800 }
801 
802 static paddr_t sec_mem_get_pa(struct fobj *fobj, unsigned int page_idx)
803 {
804 	struct fobj_sec_mem *f = to_sec_mem(fobj);
805 
806 	assert(refcount_val(&fobj->refc));
807 	assert(page_idx < fobj->num_pages);
808 
809 	return tee_mm_get_smem(f->mm) + page_idx * SMALL_PAGE_SIZE;
810 }
811 
812 /*
813  * Note: this variable is weak just to ease breaking its dependency chain
814  * when added to the unpaged area.
815  */
816 const struct fobj_ops ops_sec_mem __weak __relrodata_unpaged("ops_sec_mem") = {
817 	.free = sec_mem_free,
818 	.get_pa = sec_mem_get_pa,
819 };
820 
821 #endif /*CFG_PAGED_USER_TA*/
822