xref: /optee_os/core/mm/fobj.c (revision c282ebd61200b0cb0830399c1c33514dbd129dfd)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2019-2021, Linaro Limited
4  */
5 
6 #include <config.h>
7 #include <crypto/crypto.h>
8 #include <crypto/internal_aes-gcm.h>
9 #include <initcall.h>
10 #include <kernel/boot.h>
11 #include <kernel/panic.h>
12 #include <mm/core_memprot.h>
13 #include <mm/core_mmu.h>
14 #include <mm/fobj.h>
15 #include <mm/tee_mm.h>
16 #include <stdlib.h>
17 #include <string.h>
18 #include <tee_api_types.h>
19 #include <types_ext.h>
20 #include <util.h>
21 
22 #ifdef CFG_WITH_PAGER
23 
24 #define RWP_AE_KEY_BITS		256
25 
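/*
 * AES-GCM uses a 96-bit IV, kept here as three 32-bit words. See
 * rwp_save_page() for how it is constructed.
 */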
26 struct rwp_aes_gcm_iv {
27 	uint32_t iv[3];
28 };
29 
30 #define RWP_AES_GCM_TAG_LEN	16
31 
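/*
 * Per-page encryption state: @iv is a counter which is increased for each
 * save of the page and @tag is the authentication tag from the last save,
 * checked again when the page is loaded.
 */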
32 struct rwp_state {
33 	uint64_t iv;
34 	uint8_t tag[RWP_AES_GCM_TAG_LEN];
35 };
36 
37 /*
38  * Note that this struct is padded to a size which is a power of 2; this
39  * guarantees that this state will not span two pages. This avoids a corner
40  * case in the pager when making the state available.
41  */
42 struct rwp_state_padded {
43 	struct rwp_state state;
44 	uint64_t pad;
45 };
46 
47 struct fobj_rwp_unpaged_iv {
48 	uint8_t *store;
49 	struct rwp_state *state;
50 	struct fobj fobj;
51 };
52 
53 struct fobj_rwp_paged_iv {
54 	size_t idx;
55 	struct fobj fobj;
56 };
57 
58 const struct fobj_ops ops_rwp_paged_iv;
59 const struct fobj_ops ops_rwp_unpaged_iv;
60 
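/* AES-GCM key protecting paged-out rw pages, generated at boot in rwp_init() */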
61 static struct internal_aes_gcm_key rwp_ae_key;
62 
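/*
 * Base addresses used with CFG_CORE_PAGE_TAG_AND_IV=y, assigned in
 * rwp_init(): rwp_state_base[] holds one struct rwp_state_padded per page
 * in the tee_mm_sec_ddr pool and rwp_store_base is the virtual address of
 * the start of that pool.
 */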
63 static struct rwp_state_padded *rwp_state_base;
64 static uint8_t *rwp_store_base;
65 
66 static void fobj_init(struct fobj *fobj, const struct fobj_ops *ops,
67 		      unsigned int num_pages)
68 {
69 	fobj->ops = ops;
70 	fobj->num_pages = num_pages;
71 	refcount_set(&fobj->refc, 1);
72 	TAILQ_INIT(&fobj->regions);
73 }
74 
75 static void fobj_uninit(struct fobj *fobj)
76 {
77 	assert(!refcount_val(&fobj->refc));
78 	assert(TAILQ_EMPTY(&fobj->regions));
79 	tee_pager_invalidate_fobj(fobj);
80 }
81 
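/*
 * Decrypts and authenticates a page from the backing store @src into @va.
 * A zero IV means the page has never been saved, so it is simply returned
 * zero-filled.
 */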
82 static TEE_Result rwp_load_page(void *va, struct rwp_state *state,
83 				const uint8_t *src)
84 {
85 	struct rwp_aes_gcm_iv iv = {
86 		.iv = { (vaddr_t)state, state->iv >> 32, state->iv }
87 	};
88 
89 	if (!state->iv) {
90 		/*
91 		 * IV still zero, which means that this is a previously
92 		 * unused page.
93 		 */
94 		memset(va, 0, SMALL_PAGE_SIZE);
95 		return TEE_SUCCESS;
96 	}
97 
98 	return internal_aes_gcm_dec(&rwp_ae_key, &iv, sizeof(iv),
99 				    NULL, 0, src, SMALL_PAGE_SIZE, va,
100 				    state->tag, sizeof(state->tag));
101 }
102 
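/*
 * Encrypts a page from @va into the backing store @dst. The IV counter is
 * increased before each save so a (key, IV) pair is never reused, and the
 * resulting tag is kept in @state for the next load.
 */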
103 static TEE_Result rwp_save_page(const void *va, struct rwp_state *state,
104 				uint8_t *dst)
105 {
106 	size_t tag_len = sizeof(state->tag);
107 	struct rwp_aes_gcm_iv iv = { };
108 
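	/* Guard against IV counter wrap; reusing an IV would break AES-GCM */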
109 	assert(state->iv + 1 > state->iv);
110 
111 	state->iv++;
112 
113 	/*
114 	 * IV is constructed as recommended in section "8.2.1 Deterministic
115 	 * Construction" of "Recommendation for Block Cipher Modes of
116 	 * Operation: Galois/Counter Mode (GCM) and GMAC",
117 	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
118 	 */
119 	iv.iv[0] = (vaddr_t)state;
120 	iv.iv[1] = state->iv >> 32;
121 	iv.iv[2] = state->iv;
122 
123 	return internal_aes_gcm_enc(&rwp_ae_key, &iv, sizeof(iv),
124 				    NULL, 0, va, SMALL_PAGE_SIZE, dst,
125 				    state->tag, &tag_len);
126 }
127 
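/*
 * With CFG_CORE_PAGE_TAG_AND_IV=y each page in the tee_mm_sec_ddr pool is
 * identified by its index, and the two helpers below translate such an
 * index into the matching encryption state and backing store location.
 */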
128 static struct rwp_state_padded *idx_to_state_padded(size_t idx)
129 {
130 	assert(rwp_state_base);
131 	return rwp_state_base + idx;
132 }
133 
134 static uint8_t *idx_to_store(size_t idx)
135 {
136 	assert(rwp_store_base);
137 	return rwp_store_base + idx * SMALL_PAGE_SIZE;
138 }
139 
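/*
 * Allocates a read/write paged fobj where the per-page state (tag and IV)
 * lives in the paged rwp_state_base[] area. Only the index of the first
 * page in the tee_mm_sec_ddr pool needs to be recorded in the fobj.
 */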
140 static struct fobj *rwp_paged_iv_alloc(unsigned int num_pages)
141 {
142 	struct fobj_rwp_paged_iv *rwp = NULL;
143 	tee_mm_entry_t *mm = NULL;
144 	size_t size = 0;
145 
146 	COMPILE_TIME_ASSERT(IS_POWER_OF_TWO(sizeof(struct rwp_state_padded)));
147 
148 	rwp = calloc(1, sizeof(*rwp));
149 	if (!rwp)
150 		return NULL;
151 
152 	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
153 		goto err;
154 	mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
155 	if (!mm)
156 		goto err;
157 	rwp->idx = (tee_mm_get_smem(mm) - tee_mm_sec_ddr.lo) / SMALL_PAGE_SIZE;
158 
159 	memset(idx_to_state_padded(rwp->idx), 0,
160 	       num_pages * sizeof(struct rwp_state_padded));
161 
162 	fobj_init(&rwp->fobj, &ops_rwp_paged_iv, num_pages);
163 
164 	return &rwp->fobj;
165 err:
166 	tee_mm_free(mm);
167 	free(rwp);
168 
169 	return NULL;
170 }
171 
172 static struct fobj_rwp_paged_iv *to_rwp_paged_iv(struct fobj *fobj)
173 {
174 	assert(fobj->ops == &ops_rwp_paged_iv);
175 
176 	return container_of(fobj, struct fobj_rwp_paged_iv, fobj);
177 }
178 
179 static TEE_Result rwp_paged_iv_load_page(struct fobj *fobj,
180 					 unsigned int page_idx, void *va)
181 {
182 	struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
183 	uint8_t *src = idx_to_store(rwp->idx) + page_idx * SMALL_PAGE_SIZE;
184 	struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);
185 
186 	assert(refcount_val(&fobj->refc));
187 	assert(page_idx < fobj->num_pages);
188 
189 	return rwp_load_page(va, &st->state, src);
190 }
191 DECLARE_KEEP_PAGER(rwp_paged_iv_load_page);
192 
193 static TEE_Result rwp_paged_iv_save_page(struct fobj *fobj,
194 					 unsigned int page_idx, const void *va)
195 {
196 	struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
197 	uint8_t *dst = idx_to_store(rwp->idx) + page_idx * SMALL_PAGE_SIZE;
198 	struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);
199 
200 	assert(page_idx < fobj->num_pages);
201 
202 	if (!refcount_val(&fobj->refc)) {
203 		/*
204 		 * This fobj is being torn down, it just hasn't had the time
205 		 * to call tee_pager_invalidate_fobj() yet.
206 		 */
207 		assert(TAILQ_EMPTY(&fobj->regions));
208 		return TEE_SUCCESS;
209 	}
210 
211 	return rwp_save_page(va, &st->state, dst);
212 }
213 DECLARE_KEEP_PAGER(rwp_paged_iv_save_page);
214 
215 static void rwp_paged_iv_free(struct fobj *fobj)
216 {
217 	struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
218 	paddr_t pa = rwp->idx * SMALL_PAGE_SIZE + tee_mm_sec_ddr.lo;
219 	tee_mm_entry_t *mm = tee_mm_find(&tee_mm_sec_ddr, pa);
220 
221 	assert(mm);
222 
223 	fobj_uninit(fobj);
224 	tee_mm_free(mm);
225 	free(rwp);
226 }
227 
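/*
 * Returns the page-aligned virtual address of the page holding the
 * encryption state, so the pager can make sure that page is present
 * before the state is accessed.
 */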
228 static vaddr_t rwp_paged_iv_get_iv_vaddr(struct fobj *fobj,
229 					 unsigned int page_idx)
230 {
231 	struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
232 	struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);
233 
234 	assert(page_idx < fobj->num_pages);
235 	return (vaddr_t)&st->state & ~SMALL_PAGE_MASK;
236 }
237 DECLARE_KEEP_PAGER(rwp_paged_iv_get_iv_vaddr);
238 
239 /*
240  * Note: this variable is weak just to ease breaking its dependency chain
241  * when added to the unpaged area.
242  */
243 const struct fobj_ops ops_rwp_paged_iv
244 __weak __rodata_unpaged("ops_rwp_paged_iv") = {
245 	.free = rwp_paged_iv_free,
246 	.load_page = rwp_paged_iv_load_page,
247 	.save_page = rwp_paged_iv_save_page,
248 	.get_iv_vaddr = rwp_paged_iv_get_iv_vaddr,
249 };
250 
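/*
 * Allocates a read/write paged fobj where the per-page state (tag and IV)
 * is kept in unpaged heap memory, while the encrypted page content still
 * goes into the tee_mm_sec_ddr pool.
 */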
251 static struct fobj *rwp_unpaged_iv_alloc(unsigned int num_pages)
252 {
253 	struct fobj_rwp_unpaged_iv *rwp = NULL;
254 	tee_mm_entry_t *mm = NULL;
255 	size_t size = 0;
256 
257 	rwp = calloc(1, sizeof(*rwp));
258 	if (!rwp)
259 		return NULL;
260 
261 	rwp->state = calloc(num_pages, sizeof(*rwp->state));
262 	if (!rwp->state)
263 		goto err_free_rwp;
264 
265 	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
266 		goto err_free_state;
267 	mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
268 	if (!mm)
269 		goto err_free_state;
270 	rwp->store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM, size);
271 	assert(rwp->store);
272 
273 	fobj_init(&rwp->fobj, &ops_rwp_unpaged_iv, num_pages);
274 
275 	return &rwp->fobj;
276 
277 err_free_state:
278 	free(rwp->state);
279 err_free_rwp:
280 	free(rwp);
281 	return NULL;
282 }
283 
284 static struct fobj_rwp_unpaged_iv *to_rwp_unpaged_iv(struct fobj *fobj)
285 {
286 	assert(fobj->ops == &ops_rwp_unpaged_iv);
287 
288 	return container_of(fobj, struct fobj_rwp_unpaged_iv, fobj);
289 }
290 
291 static TEE_Result rwp_unpaged_iv_load_page(struct fobj *fobj,
292 					   unsigned int page_idx, void *va)
293 {
294 	struct fobj_rwp_unpaged_iv *rwp = to_rwp_unpaged_iv(fobj);
295 	uint8_t *src = rwp->store + page_idx * SMALL_PAGE_SIZE;
296 
297 	assert(refcount_val(&fobj->refc));
298 	assert(page_idx < fobj->num_pages);
299 
300 	return rwp_load_page(va, rwp->state + page_idx, src);
301 }
302 DECLARE_KEEP_PAGER(rwp_unpaged_iv_load_page);
303 
304 static TEE_Result rwp_unpaged_iv_save_page(struct fobj *fobj,
305 					   unsigned int page_idx,
306 					   const void *va)
307 {
308 	struct fobj_rwp_unpaged_iv *rwp = to_rwp_unpaged_iv(fobj);
309 	uint8_t *dst = rwp->store + page_idx * SMALL_PAGE_SIZE;
310 
311 	assert(page_idx < fobj->num_pages);
312 
313 	if (!refcount_val(&fobj->refc)) {
314 		/*
315 		 * This fobj is being torn down, it just hasn't had the time
316 		 * to call tee_pager_invalidate_fobj() yet.
317 		 */
318 		assert(TAILQ_EMPTY(&fobj->regions));
319 		return TEE_SUCCESS;
320 	}
321 
322 	return rwp_save_page(va, rwp->state + page_idx, dst);
323 }
324 DECLARE_KEEP_PAGER(rwp_unpaged_iv_save_page);
325 
326 static void rwp_unpaged_iv_free(struct fobj *fobj)
327 {
328 	struct fobj_rwp_unpaged_iv *rwp = NULL;
329 	tee_mm_entry_t *mm = NULL;
330 
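	/*
	 * With CFG_CORE_PAGE_TAG_AND_IV=y the only unpaged-IV fobj is the
	 * one holding the IV/tag area itself, created in rwp_init() and
	 * never freed, so ending up here would be a bug.
	 */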
331 	if (IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV))
332 		panic();
333 
334 	rwp = to_rwp_unpaged_iv(fobj);
335 	mm = tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rwp->store));
336 
337 	assert(mm);
338 
339 	fobj_uninit(fobj);
340 	tee_mm_free(mm);
341 	free(rwp->state);
342 	free(rwp);
343 }
344 
345 /*
346  * Note: this variable is weak just to ease breaking its dependency chain
347  * when added to the unpaged area.
348  */
349 const struct fobj_ops ops_rwp_unpaged_iv
350 __weak __rodata_unpaged("ops_rwp_unpaged_iv") = {
351 	.free = rwp_unpaged_iv_free,
352 	.load_page = rwp_unpaged_iv_load_page,
353 	.save_page = rwp_unpaged_iv_save_page,
354 };
355 
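/*
 * Generates the ephemeral AES-GCM key used by the pager. With
 * CFG_CORE_PAGE_TAG_AND_IV=y it also sets aside one struct rwp_state_padded
 * (32 bytes with the current layout) per page in the tee_mm_sec_ddr pool,
 * backed by an unpaged-IV fobj registered with the pager.
 */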
356 static TEE_Result rwp_init(void)
357 {
358 	uint8_t key[RWP_AE_KEY_BITS / 8] = { 0 };
359 	struct fobj *fobj = NULL;
360 	size_t num_pool_pages = 0;
361 	size_t num_fobj_pages = 0;
362 	size_t sz = 0;
363 
364 	if (crypto_rng_read(key, sizeof(key)) != TEE_SUCCESS)
365 		panic("failed to generate random");
366 	if (crypto_aes_expand_enc_key(key, sizeof(key), rwp_ae_key.data,
367 				      sizeof(rwp_ae_key.data),
368 				      &rwp_ae_key.rounds))
369 		panic("failed to expand key");
370 
371 	if (!IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV))
372 		return TEE_SUCCESS;
373 
374 	assert(tee_mm_sec_ddr.hi > tee_mm_sec_ddr.lo);
375 	sz = tee_mm_sec_ddr.hi - tee_mm_sec_ddr.lo;
376 	assert(!(sz & SMALL_PAGE_MASK));
377 
378 	num_pool_pages = sz / SMALL_PAGE_SIZE;
379 	num_fobj_pages = ROUNDUP(num_pool_pages * sizeof(*rwp_state_base),
380 				 SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE;
381 
382 	/*
383 	 * Each page in the pool needs a struct rwp_state.
384 	 *
385 	 * This isn't entirely true; the pages not used by
386 	 * fobj_rw_paged_alloc() don't need any. A future optimization
387 	 * may try to avoid allocating for such pages.
388 	 */
389 	fobj = rwp_unpaged_iv_alloc(num_fobj_pages);
390 	if (!fobj)
391 		panic();
392 
393 	rwp_state_base = (void *)tee_pager_init_iv_region(fobj);
394 	assert(rwp_state_base);
395 
396 	rwp_store_base = phys_to_virt(tee_mm_sec_ddr.lo, MEM_AREA_TA_RAM, sz);
397 	assert(rwp_store_base);
398 
399 	return TEE_SUCCESS;
400 }
401 driver_init_late(rwp_init);
402 
403 struct fobj *fobj_rw_paged_alloc(unsigned int num_pages)
404 {
405 	assert(num_pages);
406 
407 	if (IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV))
408 		return rwp_paged_iv_alloc(num_pages);
409 	else
410 		return rwp_unpaged_iv_alloc(num_pages);
411 }
412 
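/*
 * Read-only paged fobj: @store holds the page content and @hashes one
 * SHA-256 hash per page, checked each time a page is loaded.
 */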
413 struct fobj_rop {
414 	uint8_t *hashes;
415 	uint8_t *store;
416 	struct fobj fobj;
417 };
418 
419 const struct fobj_ops ops_ro_paged;
420 
421 static void rop_init(struct fobj_rop *rop, const struct fobj_ops *ops,
422 		     unsigned int num_pages, void *hashes, void *store)
423 {
424 	rop->hashes = hashes;
425 	rop->store = store;
426 	fobj_init(&rop->fobj, ops, num_pages);
427 }
428 
429 struct fobj *fobj_ro_paged_alloc(unsigned int num_pages, void *hashes,
430 				 void *store)
431 {
432 	struct fobj_rop *rop = NULL;
433 
434 	assert(num_pages && hashes && store);
435 
436 	rop = calloc(1, sizeof(*rop));
437 	if (!rop)
438 		return NULL;
439 
440 	rop_init(rop, &ops_ro_paged, num_pages, hashes, store);
441 
442 	return &rop->fobj;
443 }
444 
445 static struct fobj_rop *to_rop(struct fobj *fobj)
446 {
447 	assert(fobj->ops == &ops_ro_paged);
448 
449 	return container_of(fobj, struct fobj_rop, fobj);
450 }
451 
452 static void rop_uninit(struct fobj_rop *rop)
453 {
454 	fobj_uninit(&rop->fobj);
455 	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rop->store)));
456 	free(rop->hashes);
457 }
458 
459 static void rop_free(struct fobj *fobj)
460 {
461 	struct fobj_rop *rop = to_rop(fobj);
462 
463 	rop_uninit(rop);
464 	free(rop);
465 }
466 
467 static TEE_Result rop_load_page_helper(struct fobj_rop *rop,
468 				       unsigned int page_idx, void *va)
469 {
470 	const uint8_t *hash = rop->hashes + page_idx * TEE_SHA256_HASH_SIZE;
471 	const uint8_t *src = rop->store + page_idx * SMALL_PAGE_SIZE;
472 
473 	assert(refcount_val(&rop->fobj.refc));
474 	assert(page_idx < rop->fobj.num_pages);
475 	memcpy(va, src, SMALL_PAGE_SIZE);
476 
477 	return hash_sha256_check(hash, va, SMALL_PAGE_SIZE);
478 }
479 
480 static TEE_Result rop_load_page(struct fobj *fobj, unsigned int page_idx,
481 				void *va)
482 {
483 	return rop_load_page_helper(to_rop(fobj), page_idx, va);
484 }
485 DECLARE_KEEP_PAGER(rop_load_page);
486 
487 static TEE_Result rop_save_page(struct fobj *fobj __unused,
488 				unsigned int page_idx __unused,
489 				const void *va __unused)
490 {
491 	return TEE_ERROR_GENERIC;
492 }
493 DECLARE_KEEP_PAGER(rop_save_page);
494 
495 /*
496  * Note: this variable is weak just to ease breaking its dependency chain
497  * when added to the unpaged area.
498  */
499 const struct fobj_ops ops_ro_paged __weak __rodata_unpaged("ops_ro_paged") = {
500 	.free = rop_free,
501 	.load_page = rop_load_page,
502 	.save_page = rop_save_page,
503 };
504 
505 #ifdef CFG_CORE_ASLR
506 /*
507  * When using relocated pages the relocation information must be applied
508  * before the pages can be used. With read-only paging the content is only
509  * integrity protected so relocation cannot be applied on pages in the less
510  * integrity protected, so relocation cannot be applied to pages in the less
511  * secure "store" or the load_address selected by ASLR could be given away.
512  * This means that each time a page has been loaded and verified it has to
513  * have its relocation information applied before it can be used.
514  *
515  * Only the relative relocations are supported, which allows a rather compact
516  * representation of the needed relocation information in this struct.
517  * r_offset is replaced with the offset into the page that needs to be updated;
518  * used to represent it.
519  *
520  * All relocations are converted and stored in @relocs. @page_reloc_idx is
521  * an array of length @rop.fobj.num_pages with an entry for each page. If
522  * @page_reloc_idx[page_idx] isn't UINT16_MAX it's an index into @relocs.
523  */
524 struct fobj_ro_reloc_paged {
525 	uint16_t *page_reloc_idx;
526 	uint16_t *relocs;
527 	unsigned int num_relocs;
528 	struct fobj_rop rop;
529 };
530 
531 const struct fobj_ops ops_ro_reloc_paged;
532 
533 static unsigned int get_num_rels(unsigned int num_pages,
534 				 unsigned int reloc_offs,
535 				 const uint32_t *reloc, unsigned int num_relocs)
536 {
537 	const unsigned int align_mask __maybe_unused = sizeof(long) - 1;
538 	unsigned int nrels = 0;
539 	unsigned int n = 0;
540 	vaddr_t offs = 0;
541 
542 	/*
543 	 * Count the number of relocations which are needed for these
544 	 * pages. Also check that the data is well formed: only expected
545 	 * relocations, sorted in order of the address they apply to.
546 	 */
547 	for (; n < num_relocs; n++) {
548 		assert(IS_ALIGNED_WITH_TYPE(reloc[n], unsigned long));
549 		assert(offs < reloc[n]);	/* check that it's sorted */
550 		offs = reloc[n];
551 		if (offs >= reloc_offs &&
552 		    offs <= reloc_offs + num_pages * SMALL_PAGE_SIZE)
553 			nrels++;
554 	}
555 
556 	return nrels;
557 }
558 
559 static void init_rels(struct fobj_ro_reloc_paged *rrp, unsigned int reloc_offs,
560 		      const uint32_t *reloc, unsigned int num_relocs)
561 {
562 	unsigned int npg = rrp->rop.fobj.num_pages;
563 	unsigned int pg_idx = 0;
564 	unsigned int reln = 0;
565 	unsigned int n = 0;
566 	uint32_t r = 0;
567 
568 	for (n = 0; n < npg; n++)
569 		rrp->page_reloc_idx[n] = UINT16_MAX;
570 
571 	for (n = 0; n < num_relocs ; n++) {
572 		if (reloc[n] < reloc_offs)
573 			continue;
574 
575 		/* r is the offset from beginning of this fobj */
576 		r = reloc[n] - reloc_offs;
577 
578 		pg_idx = r / SMALL_PAGE_SIZE;
579 		if (pg_idx >= npg)
580 			break;
581 
582 		if (rrp->page_reloc_idx[pg_idx] == UINT16_MAX)
583 			rrp->page_reloc_idx[pg_idx] = reln;
584 		rrp->relocs[reln] = r - pg_idx * SMALL_PAGE_SIZE;
585 		reln++;
586 	}
587 
588 	assert(reln == rrp->num_relocs);
589 }
590 
591 struct fobj *fobj_ro_reloc_paged_alloc(unsigned int num_pages, void *hashes,
592 				       unsigned int reloc_offs,
593 				       const void *reloc,
594 				       unsigned int reloc_len, void *store)
595 {
596 	struct fobj_ro_reloc_paged *rrp = NULL;
597 	const unsigned int num_relocs = reloc_len / sizeof(uint32_t);
598 	unsigned int nrels = 0;
599 
600 	assert(IS_ALIGNED_WITH_TYPE(reloc, uint32_t));
601 	assert(IS_ALIGNED_WITH_TYPE(reloc_len, uint32_t));
602 	assert(num_pages && hashes && store);
603 	if (!reloc_len) {
604 		assert(!reloc);
605 		return fobj_ro_paged_alloc(num_pages, hashes, store);
606 	}
607 	assert(reloc);
608 
609 	nrels = get_num_rels(num_pages, reloc_offs, reloc, num_relocs);
610 	if (!nrels)
611 		return fobj_ro_paged_alloc(num_pages, hashes, store);
612 
613 	rrp = calloc(1, sizeof(*rrp) + num_pages * sizeof(uint16_t) +
614 			nrels * sizeof(uint16_t));
615 	if (!rrp)
616 		return NULL;
617 	rop_init(&rrp->rop, &ops_ro_reloc_paged, num_pages, hashes, store);
618 	rrp->page_reloc_idx = (uint16_t *)(rrp + 1);
619 	rrp->relocs = rrp->page_reloc_idx + num_pages;
620 	rrp->num_relocs = nrels;
621 	init_rels(rrp, reloc_offs, reloc, num_relocs);
622 
623 	return &rrp->rop.fobj;
624 }
625 
626 static struct fobj_ro_reloc_paged *to_rrp(struct fobj *fobj)
627 {
628 	assert(fobj->ops == &ops_ro_reloc_paged);
629 
630 	return container_of(fobj, struct fobj_ro_reloc_paged, rop.fobj);
631 }
632 
633 static void rrp_free(struct fobj *fobj)
634 {
635 	struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);
636 
637 	rop_uninit(&rrp->rop);
638 	free(rrp);
639 }
640 
641 static TEE_Result rrp_load_page(struct fobj *fobj, unsigned int page_idx,
642 				void *va)
643 {
644 	struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);
645 	unsigned int end_rel = rrp->num_relocs;
646 	TEE_Result res = TEE_SUCCESS;
647 	unsigned long *where = NULL;
648 	unsigned int n = 0;
649 
650 	res = rop_load_page_helper(&rrp->rop, page_idx, va);
651 	if (res)
652 		return res;
653 
654 	/* Find the reloc index of the next page to tell when we're done */
655 	for (n = page_idx + 1; n < fobj->num_pages; n++) {
656 		if (rrp->page_reloc_idx[n] != UINT16_MAX) {
657 			end_rel = rrp->page_reloc_idx[n];
658 			break;
659 		}
660 	}
661 
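	/*
	 * Apply all relative relocations belonging to this page by adding
	 * the load offset selected by ASLR. If the page has no relocations
	 * page_reloc_idx[page_idx] is UINT16_MAX and the loop below does
	 * not execute.
	 */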
662 	for (n = rrp->page_reloc_idx[page_idx]; n < end_rel; n++) {
663 		where = (void *)((vaddr_t)va + rrp->relocs[n]);
664 		*where += boot_mmu_config.load_offset;
665 	}
666 
667 	return TEE_SUCCESS;
668 }
669 DECLARE_KEEP_PAGER(rrp_load_page);
670 
671 /*
672  * Note: this variable is weak just to ease breaking its dependency chain
673  * when added to the unpaged area.
674  */
675 const struct fobj_ops ops_ro_reloc_paged
676 __weak __rodata_unpaged("ops_ro_reloc_paged") = {
677 	.free = rrp_free,
678 	.load_page = rrp_load_page,
679 	.save_page = rop_save_page, /* Direct reuse */
680 };
681 #endif /*CFG_CORE_ASLR*/
682 
683 const struct fobj_ops ops_locked_paged;
684 
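/*
 * Locked paged fobjs supply zero-initialized pages, intended for memory
 * the pager keeps locked once paged in; their content is never written
 * back, so save_page() always fails.
 */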
685 struct fobj *fobj_locked_paged_alloc(unsigned int num_pages)
686 {
687 	struct fobj *f = NULL;
688 
689 	assert(num_pages);
690 
691 	f = calloc(1, sizeof(*f));
692 	if (!f)
693 		return NULL;
694 
695 	fobj_init(f, &ops_locked_paged, num_pages);
696 
697 	return f;
698 }
699 
700 static void lop_free(struct fobj *fobj)
701 {
702 	assert(fobj->ops == &ops_locked_paged);
703 	fobj_uninit(fobj);
704 	free(fobj);
705 }
706 
707 static TEE_Result lop_load_page(struct fobj *fobj __maybe_unused,
708 				unsigned int page_idx __maybe_unused,
709 				void *va)
710 {
711 	assert(fobj->ops == &ops_locked_paged);
712 	assert(refcount_val(&fobj->refc));
713 	assert(page_idx < fobj->num_pages);
714 
715 	memset(va, 0, SMALL_PAGE_SIZE);
716 
717 	return TEE_SUCCESS;
718 }
719 DECLARE_KEEP_PAGER(lop_load_page);
720 
721 static TEE_Result lop_save_page(struct fobj *fobj __unused,
722 				unsigned int page_idx __unused,
723 				const void *va __unused)
724 {
725 	return TEE_ERROR_GENERIC;
726 }
727 DECLARE_KEEP_PAGER(lop_save_page);
728 
729 /*
730  * Note: this variable is weak just to ease breaking its dependency chain
731  * when added to the unpaged area.
732  */
733 const struct fobj_ops ops_locked_paged
734 __weak __rodata_unpaged("ops_locked_paged") = {
735 	.free = lop_free,
736 	.load_page = lop_load_page,
737 	.save_page = lop_save_page,
738 };
739 #endif /*CFG_WITH_PAGER*/
740 
741 #ifndef CFG_PAGED_USER_TA
742 
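/*
 * Without user TA paging the memory is allocated as physically contiguous
 * secure DDR straight from the tee_mm_sec_ddr pool; get_pa() simply
 * returns the corresponding physical address.
 */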
743 struct fobj_sec_mem {
744 	tee_mm_entry_t *mm;
745 	struct fobj fobj;
746 };
747 
748 const struct fobj_ops ops_sec_mem;
749 
750 struct fobj *fobj_sec_mem_alloc(unsigned int num_pages)
751 {
752 	struct fobj_sec_mem *f = calloc(1, sizeof(*f));
753 	size_t size = 0;
754 	void *va = NULL;
755 
756 	if (!f)
757 		return NULL;
758 
759 	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
760 		goto err;
761 
762 	f->mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
763 	if (!f->mm)
764 		goto err;
765 
766 	va = phys_to_virt(tee_mm_get_smem(f->mm), MEM_AREA_TA_RAM, size);
767 	if (!va)
768 		goto err;
769 
770 	memset(va, 0, size);
771 	f->fobj.ops = &ops_sec_mem;
772 	f->fobj.num_pages = num_pages;
773 	refcount_set(&f->fobj.refc, 1);
774 
775 	return &f->fobj;
776 err:
777 	tee_mm_free(f->mm);
778 	free(f);
779 
780 	return NULL;
781 }
782 
783 static struct fobj_sec_mem *to_sec_mem(struct fobj *fobj)
784 {
785 	assert(fobj->ops == &ops_sec_mem);
786 
787 	return container_of(fobj, struct fobj_sec_mem, fobj);
788 }
789 
790 static void sec_mem_free(struct fobj *fobj)
791 {
792 	struct fobj_sec_mem *f = to_sec_mem(fobj);
793 
794 	assert(!refcount_val(&fobj->refc));
795 	tee_mm_free(f->mm);
796 	free(f);
797 }
798 
799 static paddr_t sec_mem_get_pa(struct fobj *fobj, unsigned int page_idx)
800 {
801 	struct fobj_sec_mem *f = to_sec_mem(fobj);
802 
803 	assert(refcount_val(&fobj->refc));
804 	assert(page_idx < fobj->num_pages);
805 
806 	return tee_mm_get_smem(f->mm) + page_idx * SMALL_PAGE_SIZE;
807 }
808 
809 /*
810  * Note: this variable is weak just to ease breaking its dependency chain
811  * when added to the unpaged area.
812  */
813 const struct fobj_ops ops_sec_mem __weak __rodata_unpaged("ops_sec_mem") = {
814 	.free = sec_mem_free,
815 	.get_pa = sec_mem_get_pa,
816 };
817 
818 #endif /*CFG_PAGED_USER_TA*/
819