// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2019-2022, Linaro Limited
 */

#include <config.h>
#include <crypto/crypto.h>
#include <crypto/internal_aes-gcm.h>
#include <initcall.h>
#include <kernel/boot.h>
#include <kernel/panic.h>
#include <memtag.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/fobj.h>
#include <mm/phys_mem.h>
#include <mm/tee_mm.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

#ifdef CFG_WITH_PAGER

#define RWP_AE_KEY_BITS 256

struct rwp_aes_gcm_iv {
	uint32_t iv[3];
};

#define RWP_AES_GCM_TAG_LEN 16

struct rwp_state {
	uint64_t iv;
	uint8_t tag[RWP_AES_GCM_TAG_LEN];
};

/*
 * Note that this struct is padded to a size which is a power of 2; this
 * guarantees that the state will not span two pages and avoids a corner
 * case in the pager when making the state available.
 */
struct rwp_state_padded {
	struct rwp_state state;
	uint64_t pad;
};
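
/*
 * Illustrative sizing sketch (editorial note, not part of the original
 * code): with the 16-byte GCM tag, sizeof(struct rwp_state) is 8 + 16 = 24
 * bytes, so the explicit pad rounds struct rwp_state_padded up to 32 bytes.
 * Since 32 is a power of two that divides SMALL_PAGE_SIZE, an array of
 * these structs can never place one element across a page boundary, e.g.:
 *
 *	COMPILE_TIME_ASSERT(sizeof(struct rwp_state_padded) == 32);
 *	COMPILE_TIME_ASSERT(!(SMALL_PAGE_SIZE %
 *			      sizeof(struct rwp_state_padded)));
 *
 * The checks above are only a sketch; rwp_paged_iv_alloc() below asserts
 * the power-of-two property, which is what the pager relies on.
 */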

struct fobj_rwp_unpaged_iv {
	uint8_t *store;
	struct rwp_state *state;
	struct fobj fobj;
};

struct fobj_rwp_paged_iv {
	size_t idx;
	struct fobj fobj;
};

const struct fobj_ops ops_rwp_paged_iv;
const struct fobj_ops ops_rwp_unpaged_iv;

static struct internal_aes_gcm_key rwp_ae_key;

static struct rwp_state_padded *rwp_state_base;
static uint8_t *rwp_store_base;

static void fobj_init(struct fobj *fobj, const struct fobj_ops *ops,
		      unsigned int num_pages)
{
	fobj->ops = ops;
	fobj->num_pages = num_pages;
	refcount_set(&fobj->refc, 1);
	TAILQ_INIT(&fobj->regions);
}

static void fobj_uninit(struct fobj *fobj)
{
	assert(!refcount_val(&fobj->refc));
	assert(TAILQ_EMPTY(&fobj->regions));
	tee_pager_invalidate_fobj(fobj);
}

static TEE_Result rwp_load_page(void *va, struct rwp_state *state,
				const uint8_t *src)
{
	struct rwp_aes_gcm_iv iv = {
		.iv = { (vaddr_t)state, state->iv >> 32, state->iv }
	};

	if (!state->iv) {
		/*
		 * IV still zero, which means that this is a previously
		 * unused page.
		 */
		memset(va, 0, SMALL_PAGE_SIZE);
		return TEE_SUCCESS;
	}

	return internal_aes_gcm_dec(&rwp_ae_key, &iv, sizeof(iv),
				    NULL, 0, src, SMALL_PAGE_SIZE, va,
				    state->tag, sizeof(state->tag));
}

static TEE_Result rwp_save_page(const void *va, struct rwp_state *state,
				uint8_t *dst)
{
	size_t tag_len = sizeof(state->tag);
	struct rwp_aes_gcm_iv iv = { };

	assert(state->iv + 1 > state->iv);

	state->iv++;

	/*
	 * IV is constructed as recommended in section "8.2.1 Deterministic
	 * Construction" of "Recommendation for Block Cipher Modes of
	 * Operation: Galois/Counter Mode (GCM) and GMAC",
	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
	 */
	iv.iv[0] = (vaddr_t)state;
	iv.iv[1] = state->iv >> 32;
	iv.iv[2] = state->iv;

	return internal_aes_gcm_enc(&rwp_ae_key, &iv, sizeof(iv),
				    NULL, 0, va, SMALL_PAGE_SIZE, dst,
				    state->tag, &tag_len);
}
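
/*
 * Illustrative layout of the 96-bit deterministic IV used above (editorial
 * sketch, not part of the original code). The "fixed field" is the virtual
 * address of the per-page state, distinct per page within this pool, and
 * the "invocation field" is the 64-bit save counter incremented before
 * every encryption:
 *
 *	iv[0] = (vaddr_t)state;		fixed field, distinct per page
 *	iv[1] = state->iv >> 32;	invocation counter, high 32 bits
 *	iv[2] = state->iv;		invocation counter, low 32 bits
 *
 * Because the counter only ever increases (rwp_save_page() asserts it does
 * not wrap), a (key, IV) pair is never reused for two different
 * plaintexts, which is what GCM requires.
 */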

static struct rwp_state_padded *idx_to_state_padded(size_t idx)
{
	assert(rwp_state_base);
	return rwp_state_base + idx;
}

static uint8_t *idx_to_store(size_t idx)
{
	assert(rwp_store_base);
	return rwp_store_base + idx * SMALL_PAGE_SIZE;
}

static struct fobj *rwp_paged_iv_alloc(unsigned int num_pages)
{
	struct fobj_rwp_paged_iv *rwp = NULL;
	tee_mm_entry_t *mm = NULL;
	size_t size = 0;

	COMPILE_TIME_ASSERT(IS_POWER_OF_TWO(sizeof(struct rwp_state_padded)));

	rwp = calloc(1, sizeof(*rwp));
	if (!rwp)
		return NULL;

	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
		goto err;
	mm = nex_phys_mem_ta_alloc(size);
	if (!mm)
		goto err;
	rwp->idx = (tee_mm_get_smem(mm) - nex_phys_mem_get_ta_base()) /
		   SMALL_PAGE_SIZE;

	memset(idx_to_state_padded(rwp->idx), 0,
	       num_pages * sizeof(struct rwp_state_padded));

	fobj_init(&rwp->fobj, &ops_rwp_paged_iv, num_pages);

	return &rwp->fobj;
err:
	tee_mm_free(mm);
	free(rwp);

	return NULL;
}

static struct fobj_rwp_paged_iv *to_rwp_paged_iv(struct fobj *fobj)
{
	assert(fobj->ops == &ops_rwp_paged_iv);

	return container_of(fobj, struct fobj_rwp_paged_iv, fobj);
}

static TEE_Result rwp_paged_iv_load_page(struct fobj *fobj,
					 unsigned int page_idx, void *va)
{
	struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
	uint8_t *src = idx_to_store(rwp->idx) + page_idx * SMALL_PAGE_SIZE;
	struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);

	assert(refcount_val(&fobj->refc));
	assert(page_idx < fobj->num_pages);

	return rwp_load_page(va, &st->state, src);
}
DECLARE_KEEP_PAGER(rwp_paged_iv_load_page);

static TEE_Result rwp_paged_iv_save_page(struct fobj *fobj,
					 unsigned int page_idx, const void *va)
{
	struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
	uint8_t *dst = idx_to_store(rwp->idx) + page_idx * SMALL_PAGE_SIZE;
	struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);

	assert(page_idx < fobj->num_pages);

	if (!refcount_val(&fobj->refc)) {
		/*
		 * This fobj is being torn down, it just hasn't had the time
		 * to call tee_pager_invalidate_fobj() yet.
		 */
		assert(TAILQ_EMPTY(&fobj->regions));
		return TEE_SUCCESS;
	}

	return rwp_save_page(va, &st->state, dst);
}
DECLARE_KEEP_PAGER(rwp_paged_iv_save_page);

static void rwp_paged_iv_free(struct fobj *fobj)
{
	struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
	paddr_t pa = rwp->idx * SMALL_PAGE_SIZE + nex_phys_mem_get_ta_base();
	tee_mm_entry_t *mm = nex_phys_mem_mm_find(pa);

	assert(mm);

	fobj_uninit(fobj);
	tee_mm_free(mm);
	free(rwp);
}

static vaddr_t rwp_paged_iv_get_iv_vaddr(struct fobj *fobj,
					 unsigned int page_idx)
{
	struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
	struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);

	assert(page_idx < fobj->num_pages);
	return (vaddr_t)&st->state & ~SMALL_PAGE_MASK;
}
DECLARE_KEEP_PAGER(rwp_paged_iv_get_iv_vaddr);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_rwp_paged_iv
	__weak __relrodata_unpaged("ops_rwp_paged_iv") = {
	.free = rwp_paged_iv_free,
	.load_page = rwp_paged_iv_load_page,
	.save_page = rwp_paged_iv_save_page,
	.get_iv_vaddr = rwp_paged_iv_get_iv_vaddr,
};

static struct fobj *rwp_unpaged_iv_alloc(unsigned int num_pages)
{
	struct fobj_rwp_unpaged_iv *rwp = NULL;
	tee_mm_entry_t *mm = NULL;
	size_t size = 0;

	rwp = calloc(1, sizeof(*rwp));
	if (!rwp)
		return NULL;

	rwp->state = calloc(num_pages, sizeof(*rwp->state));
	if (!rwp->state)
		goto err_free_rwp;

	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
		goto err_free_state;
	mm = nex_phys_mem_ta_alloc(size);
	if (!mm)
		goto err_free_state;
	rwp->store = phys_to_virt(tee_mm_get_smem(mm),
				  MEM_AREA_SEC_RAM_OVERALL, size);
	assert(rwp->store);

	fobj_init(&rwp->fobj, &ops_rwp_unpaged_iv, num_pages);

	return &rwp->fobj;

err_free_state:
	free(rwp->state);
err_free_rwp:
	free(rwp);
	return NULL;
}

static struct fobj_rwp_unpaged_iv *to_rwp_unpaged_iv(struct fobj *fobj)
{
	assert(fobj->ops == &ops_rwp_unpaged_iv);

	return container_of(fobj, struct fobj_rwp_unpaged_iv, fobj);
}

static TEE_Result rwp_unpaged_iv_load_page(struct fobj *fobj,
					   unsigned int page_idx, void *va)
{
	struct fobj_rwp_unpaged_iv *rwp = to_rwp_unpaged_iv(fobj);
	uint8_t *src = rwp->store + page_idx * SMALL_PAGE_SIZE;

	assert(refcount_val(&fobj->refc));
	assert(page_idx < fobj->num_pages);

	return rwp_load_page(va, rwp->state + page_idx, src);
}
DECLARE_KEEP_PAGER(rwp_unpaged_iv_load_page);

static TEE_Result rwp_unpaged_iv_save_page(struct fobj *fobj,
					   unsigned int page_idx,
					   const void *va)
{
	struct fobj_rwp_unpaged_iv *rwp = to_rwp_unpaged_iv(fobj);
	uint8_t *dst = rwp->store + page_idx * SMALL_PAGE_SIZE;

	assert(page_idx < fobj->num_pages);

	if (!refcount_val(&fobj->refc)) {
		/*
		 * This fobj is being torn down, it just hasn't had the time
		 * to call tee_pager_invalidate_fobj() yet.
		 */
		assert(TAILQ_EMPTY(&fobj->regions));
		return TEE_SUCCESS;
	}

	return rwp_save_page(va, rwp->state + page_idx, dst);
}
DECLARE_KEEP_PAGER(rwp_unpaged_iv_save_page);

static void rwp_unpaged_iv_free(struct fobj *fobj)
{
	struct fobj_rwp_unpaged_iv *rwp = NULL;
	tee_mm_entry_t *mm = NULL;

	if (IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV))
		panic();

	rwp = to_rwp_unpaged_iv(fobj);
	mm = nex_phys_mem_mm_find(virt_to_phys(rwp->store));

	assert(mm);

	fobj_uninit(fobj);
	tee_mm_free(mm);
	free(rwp->state);
	free(rwp);
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_rwp_unpaged_iv
	__weak __relrodata_unpaged("ops_rwp_unpaged_iv") = {
	.free = rwp_unpaged_iv_free,
	.load_page = rwp_unpaged_iv_load_page,
	.save_page = rwp_unpaged_iv_save_page,
};

static TEE_Result rwp_init(void)
{
	paddr_size_t ta_size = nex_phys_mem_get_ta_size();
	uint8_t key[RWP_AE_KEY_BITS / 8] = { 0 };
	struct fobj *fobj = NULL;
	size_t num_pool_pages = 0;
	size_t num_fobj_pages = 0;

	if (crypto_rng_read(key, sizeof(key)) != TEE_SUCCESS)
		panic("failed to generate random");
	if (crypto_aes_expand_enc_key(key, sizeof(key), rwp_ae_key.data,
				      sizeof(rwp_ae_key.data),
				      &rwp_ae_key.rounds))
		panic("failed to expand key");

	if (!IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV))
		return TEE_SUCCESS;

	assert(ta_size && !(ta_size & SMALL_PAGE_MASK));

	num_pool_pages = ta_size / SMALL_PAGE_SIZE;
	num_fobj_pages = ROUNDUP_DIV(num_pool_pages * sizeof(*rwp_state_base),
				     SMALL_PAGE_SIZE);

	/*
	 * Each page in the pool needs a struct rwp_state.
	 *
	 * This isn't entirely true: the pages not used by
	 * fobj_rw_paged_alloc() don't need any. A future optimization
	 * may try to avoid allocating for such pages.
	 */
	fobj = rwp_unpaged_iv_alloc(num_fobj_pages);
	if (!fobj)
		panic();

	rwp_state_base = (void *)tee_pager_init_iv_region(fobj);
	assert(rwp_state_base);

	rwp_store_base = phys_to_virt(nex_phys_mem_get_ta_base(),
				      MEM_AREA_SEC_RAM_OVERALL, ta_size);
	assert(rwp_store_base);

	return TEE_SUCCESS;
}
driver_init_late(rwp_init);
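
/*
 * Worked sizing example (editorial sketch, the pool size is made up): with
 * a 16 MiB TA pool, ta_size / SMALL_PAGE_SIZE gives 4096 pool pages. At 32
 * bytes per struct rwp_state_padded that is 128 KiB of state, so
 * ROUNDUP_DIV(4096 * 32, SMALL_PAGE_SIZE) = 32 pages are reserved up front
 * by rwp_unpaged_iv_alloc() above to hold the per-page IVs and tags.
 */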

struct fobj *fobj_rw_paged_alloc(unsigned int num_pages)
{
	assert(num_pages);

	if (IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV))
		return rwp_paged_iv_alloc(num_pages);
	else
		return rwp_unpaged_iv_alloc(num_pages);
}
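
/*
 * Editorial usage sketch (an assumption about the caller, not part of the
 * original code): the pager drives these objects through struct fobj_ops.
 * Roughly:
 *
 *	struct fobj *f = fobj_rw_paged_alloc(4);
 *
 *	f->ops->save_page(f, n, va);	// on eviction: encrypt + tag page n
 *					// from va into the backing store
 *	f->ops->load_page(f, n, va);	// on page fault: decrypt + verify
 *					// page n into va, or zero-fill it
 *
 * With CFG_CORE_PAGE_TAG_AND_IV=y the per-page state itself lives in the
 * pageable IV region (ops_rwp_paged_iv), otherwise it is kept in unpaged
 * heap memory (ops_rwp_unpaged_iv).
 */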

struct fobj_rop {
	uint8_t *hashes;
	uint8_t *store;
	struct fobj fobj;
};

const struct fobj_ops ops_ro_paged;

static void rop_init(struct fobj_rop *rop, const struct fobj_ops *ops,
		     unsigned int num_pages, void *hashes, void *store)
{
	rop->hashes = hashes;
	rop->store = store;
	fobj_init(&rop->fobj, ops, num_pages);
}

struct fobj *fobj_ro_paged_alloc(unsigned int num_pages, void *hashes,
				 void *store)
{
	struct fobj_rop *rop = NULL;

	assert(num_pages && hashes && store);

	rop = calloc(1, sizeof(*rop));
	if (!rop)
		return NULL;

	rop_init(rop, &ops_ro_paged, num_pages, hashes, store);

	return &rop->fobj;
}

static struct fobj_rop *to_rop(struct fobj *fobj)
{
	assert(fobj->ops == &ops_ro_paged);

	return container_of(fobj, struct fobj_rop, fobj);
}

static void rop_uninit(struct fobj_rop *rop)
{
	fobj_uninit(&rop->fobj);
	tee_mm_free(nex_phys_mem_mm_find(virt_to_phys(rop->store)));
	free(rop->hashes);
}

static void rop_free(struct fobj *fobj)
{
	struct fobj_rop *rop = to_rop(fobj);

	rop_uninit(rop);
	free(rop);
}

static TEE_Result rop_load_page_helper(struct fobj_rop *rop,
				       unsigned int page_idx, void *va)
{
	const uint8_t *hash = rop->hashes + page_idx * TEE_SHA256_HASH_SIZE;
	const uint8_t *src = rop->store + page_idx * SMALL_PAGE_SIZE;

	assert(refcount_val(&rop->fobj.refc));
	assert(page_idx < rop->fobj.num_pages);
	memcpy(va, src, SMALL_PAGE_SIZE);

	return hash_sha256_check(hash, va, SMALL_PAGE_SIZE);
}

static TEE_Result rop_load_page(struct fobj *fobj, unsigned int page_idx,
				void *va)
{
	return rop_load_page_helper(to_rop(fobj), page_idx, va);
}
DECLARE_KEEP_PAGER(rop_load_page);

static TEE_Result rop_save_page(struct fobj *fobj __unused,
				unsigned int page_idx __unused,
				const void *va __unused)
{
	return TEE_ERROR_GENERIC;
}
DECLARE_KEEP_PAGER(rop_save_page);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_ro_paged
	__weak __relrodata_unpaged("ops_ro_paged") = {
	.free = rop_free,
	.load_page = rop_load_page,
	.save_page = rop_save_page,
};

#ifdef CFG_CORE_ASLR
/*
 * When using relocated pages the relocation information must be applied
 * before the pages can be used. With read-only paging the content is only
 * integrity protected, so relocations cannot be applied to pages in the
 * less secure "store" or the load address selected by ASLR could be given
 * away. This means that each time a page has been loaded and verified it
 * has to have its relocation information applied before it can be used.
 *
 * Only relative relocations are supported, which allows a rather compact
 * representation of the needed relocation information in this struct.
 * r_offset is replaced with the offset into the page that needs to be
 * updated; this number can never be larger than SMALL_PAGE_SIZE so a
 * uint16_t can be used to represent it.
 *
 * All relocations are converted and stored in @relocs. @page_reloc_idx is
 * an array of length @rop.fobj.num_pages with an entry for each page. If
 * @page_reloc_idx[page_idx] isn't UINT16_MAX it's an index into @relocs.
 */
struct fobj_ro_reloc_paged {
	uint16_t *page_reloc_idx;
	uint16_t *relocs;
	unsigned int num_relocs;
	struct fobj_rop rop;
};
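
/*
 * Worked example of the bookkeeping above (editorial sketch, the values
 * are made up): assume a 3-page fobj with reloc_offs = 0x10000 and
 * relative relocations at 0x10010, 0x10018 and 0x12004. init_rels() then
 * produces
 *
 *	relocs[]         = { 0x010, 0x018, 0x004 }
 *	page_reloc_idx[] = { 0, UINT16_MAX, 2 }
 *
 * When page 0 is loaded, rrp_load_page() finds that the next page with
 * relocations is page 2 (index 2), so it applies relocs[0] and relocs[1]
 * by adding boot_mmu_config.map_offset to the words at va + 0x10 and
 * va + 0x18. Page 1 has index UINT16_MAX, which is never below end_rel,
 * so no relocation is applied to it.
 */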

const struct fobj_ops ops_ro_reloc_paged;

static unsigned int get_num_rels(unsigned int num_pages,
				 unsigned int reloc_offs,
				 const uint32_t *reloc, unsigned int num_relocs)
{
	const unsigned int align_mask __maybe_unused = sizeof(long) - 1;
	unsigned int nrels = 0;
	unsigned int n = 0;
	vaddr_t offs = 0;

	/*
	 * Count the number of relocations which are needed for these
	 * pages. Also check that the data is well formed: only expected
	 * relocations, sorted in order of the address they apply to.
	 */
	for (; n < num_relocs; n++) {
		assert(IS_ALIGNED_WITH_TYPE(reloc[n], unsigned long));
		assert(offs < reloc[n]); /* check that it's sorted */
		offs = reloc[n];
		if (offs >= reloc_offs &&
		    offs <= reloc_offs + num_pages * SMALL_PAGE_SIZE)
			nrels++;
	}

	return nrels;
}

static void init_rels(struct fobj_ro_reloc_paged *rrp, unsigned int reloc_offs,
		      const uint32_t *reloc, unsigned int num_relocs)
{
	unsigned int npg = rrp->rop.fobj.num_pages;
	unsigned int pg_idx = 0;
	unsigned int reln = 0;
	unsigned int n = 0;
	uint32_t r = 0;

	for (n = 0; n < npg; n++)
		rrp->page_reloc_idx[n] = UINT16_MAX;

	for (n = 0; n < num_relocs; n++) {
		if (reloc[n] < reloc_offs)
			continue;

		/* r is the offset from beginning of this fobj */
		r = reloc[n] - reloc_offs;

		pg_idx = r / SMALL_PAGE_SIZE;
		if (pg_idx >= npg)
			break;

		if (rrp->page_reloc_idx[pg_idx] == UINT16_MAX)
			rrp->page_reloc_idx[pg_idx] = reln;
		rrp->relocs[reln] = r - pg_idx * SMALL_PAGE_SIZE;
		reln++;
	}

	assert(reln == rrp->num_relocs);
}

struct fobj *fobj_ro_reloc_paged_alloc(unsigned int num_pages, void *hashes,
				       unsigned int reloc_offs,
				       const void *reloc,
				       unsigned int reloc_len, void *store)
{
	struct fobj_ro_reloc_paged *rrp = NULL;
	const unsigned int num_relocs = reloc_len / sizeof(uint32_t);
	unsigned int nrels = 0;

	assert(IS_ALIGNED_WITH_TYPE(reloc, uint32_t));
	assert(IS_ALIGNED_WITH_TYPE(reloc_len, uint32_t));
	assert(num_pages && hashes && store);
	if (!reloc_len) {
		assert(!reloc);
		return fobj_ro_paged_alloc(num_pages, hashes, store);
	}
	assert(reloc);

	nrels = get_num_rels(num_pages, reloc_offs, reloc, num_relocs);
	if (!nrels)
		return fobj_ro_paged_alloc(num_pages, hashes, store);

	rrp = calloc(1, sizeof(*rrp) + num_pages * sizeof(uint16_t) +
			nrels * sizeof(uint16_t));
	if (!rrp)
		return NULL;
	rop_init(&rrp->rop, &ops_ro_reloc_paged, num_pages, hashes, store);
	rrp->page_reloc_idx = (uint16_t *)(rrp + 1);
	rrp->relocs = rrp->page_reloc_idx + num_pages;
	rrp->num_relocs = nrels;
	init_rels(rrp, reloc_offs, reloc, num_relocs);

	return &rrp->rop.fobj;
}

static struct fobj_ro_reloc_paged *to_rrp(struct fobj *fobj)
{
	assert(fobj->ops == &ops_ro_reloc_paged);

	return container_of(fobj, struct fobj_ro_reloc_paged, rop.fobj);
}

static void rrp_free(struct fobj *fobj)
{
	struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);

	rop_uninit(&rrp->rop);
	free(rrp);
}

static TEE_Result rrp_load_page(struct fobj *fobj, unsigned int page_idx,
				void *va)
{
	struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);
	unsigned int end_rel = rrp->num_relocs;
	TEE_Result res = TEE_SUCCESS;
	unsigned long *where = NULL;
	unsigned int n = 0;

	res = rop_load_page_helper(&rrp->rop, page_idx, va);
	if (res)
		return res;

	/* Find the reloc index of the next page to tell when we're done */
	for (n = page_idx + 1; n < fobj->num_pages; n++) {
		if (rrp->page_reloc_idx[n] != UINT16_MAX) {
			end_rel = rrp->page_reloc_idx[n];
			break;
		}
	}

	for (n = rrp->page_reloc_idx[page_idx]; n < end_rel; n++) {
		where = (void *)((vaddr_t)va + rrp->relocs[n]);
		*where += boot_mmu_config.map_offset;
	}

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(rrp_load_page);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_ro_reloc_paged
	__weak __relrodata_unpaged("ops_ro_reloc_paged") = {
	.free = rrp_free,
	.load_page = rrp_load_page,
	.save_page = rop_save_page, /* Direct reuse */
};
#endif /*CFG_CORE_ASLR*/

const struct fobj_ops ops_locked_paged;

struct fobj *fobj_locked_paged_alloc(unsigned int num_pages)
{
	struct fobj *f = NULL;

	assert(num_pages);

	f = calloc(1, sizeof(*f));
	if (!f)
		return NULL;

	fobj_init(f, &ops_locked_paged, num_pages);

	return f;
}

static void lop_free(struct fobj *fobj)
{
	assert(fobj->ops == &ops_locked_paged);
	fobj_uninit(fobj);
	free(fobj);
}

static TEE_Result lop_load_page(struct fobj *fobj __maybe_unused,
				unsigned int page_idx __maybe_unused,
				void *va)
{
	assert(fobj->ops == &ops_locked_paged);
	assert(refcount_val(&fobj->refc));
	assert(page_idx < fobj->num_pages);

	memset(va, 0, SMALL_PAGE_SIZE);

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(lop_load_page);

static TEE_Result lop_save_page(struct fobj *fobj __unused,
				unsigned int page_idx __unused,
				const void *va __unused)
{
	return TEE_ERROR_GENERIC;
}
DECLARE_KEEP_PAGER(lop_save_page);

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_locked_paged
	__weak __relrodata_unpaged("ops_locked_paged") = {
	.free = lop_free,
	.load_page = lop_load_page,
	.save_page = lop_save_page,
};
#endif /*CFG_WITH_PAGER*/

#ifndef CFG_PAGED_USER_TA

struct fobj_sec_mem {
	tee_mm_entry_t *mm;
	struct fobj fobj;
};

const struct fobj_ops ops_sec_mem;

struct fobj *fobj_sec_mem_alloc(unsigned int num_pages)
{
	struct fobj_sec_mem *f = calloc(1, sizeof(*f));
	size_t size = 0;
	void *va = NULL;

	if (!f)
		return NULL;

	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
		goto err;

	f->mm = phys_mem_ta_alloc(size);
	if (!f->mm)
		goto err;

	va = phys_to_virt(tee_mm_get_smem(f->mm), MEM_AREA_SEC_RAM_OVERALL,
			  size);
	if (!va)
		goto err;

	memtag_clear_mem(va, size);
	f->fobj.ops = &ops_sec_mem;
	f->fobj.num_pages = num_pages;
	refcount_set(&f->fobj.refc, 1);

	return &f->fobj;
err:
	tee_mm_free(f->mm);
	free(f);

	return NULL;
}

static struct fobj_sec_mem *to_sec_mem(struct fobj *fobj)
{
	assert(fobj->ops == &ops_sec_mem);

	return container_of(fobj, struct fobj_sec_mem, fobj);
}

static void sec_mem_free(struct fobj *fobj)
{
	struct fobj_sec_mem *f = to_sec_mem(fobj);

	assert(!refcount_val(&fobj->refc));
	tee_mm_free(f->mm);
	free(f);
}

static paddr_t sec_mem_get_pa(struct fobj *fobj, unsigned int page_idx)
{
	struct fobj_sec_mem *f = to_sec_mem(fobj);

	assert(refcount_val(&fobj->refc));
	assert(page_idx < fobj->num_pages);

	return tee_mm_get_smem(f->mm) + page_idx * SMALL_PAGE_SIZE;
}

/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_sec_mem __weak __relrodata_unpaged("ops_sec_mem") = {
	.free = sec_mem_free,
	.get_pa = sec_mem_get_pa,
};

#endif /*!CFG_PAGED_USER_TA*/