1ee546289SJens Wiklander // SPDX-License-Identifier: BSD-2-Clause
2ee546289SJens Wiklander /*
36105aa86SJens Wiklander * Copyright (c) 2019-2022, Linaro Limited
4ee546289SJens Wiklander */
5ee546289SJens Wiklander
6b757e307SJens Wiklander #include <config.h>
7ee546289SJens Wiklander #include <crypto/crypto.h>
8ee546289SJens Wiklander #include <crypto/internal_aes-gcm.h>
9cfde90a6SJens Wiklander #include <initcall.h>
1065401337SJens Wiklander #include <kernel/boot.h>
11ee546289SJens Wiklander #include <kernel/panic.h>
12a0e8ffe9SJens Wiklander #include <memtag.h>
13ee546289SJens Wiklander #include <mm/core_memprot.h>
14ee546289SJens Wiklander #include <mm/core_mmu.h>
15ee546289SJens Wiklander #include <mm/fobj.h>
16de19cacbSJens Wiklander #include <mm/phys_mem.h>
17ee546289SJens Wiklander #include <mm/tee_mm.h>
18ee546289SJens Wiklander #include <stdlib.h>
19ee546289SJens Wiklander #include <string.h>
20ee546289SJens Wiklander #include <tee_api_types.h>
21ee546289SJens Wiklander #include <types_ext.h>
22ee546289SJens Wiklander #include <util.h>
23ee546289SJens Wiklander
24ee546289SJens Wiklander #ifdef CFG_WITH_PAGER
25ee546289SJens Wiklander
/* Size of the ephemeral AES key protecting paged-out rw pages */
#define RWP_AE_KEY_BITS		256

/* 96-bit AES-GCM IV, built from the rwp_state address and its counter */
struct rwp_aes_gcm_iv {
	uint32_t iv[3];
};

#define RWP_AES_GCM_TAG_LEN	16

/*
 * Per-page paging state. @iv is a monotonically increasing counter used
 * to build a unique AES-GCM IV for each encryption; iv == 0 means the
 * page has never been saved (see rwp_load_page()). @tag holds the
 * authentication tag of the most recently saved page content.
 */
struct rwp_state {
	uint64_t iv;
	uint8_t tag[RWP_AES_GCM_TAG_LEN];
};
38ee546289SJens Wiklander
/*
 * Note that this struct is padded to a size which is a power of 2, this
 * guarantees that this state will not span two pages. This avoids a corner
 * case in the pager when making the state available.
 */
struct rwp_state_padded {
	struct rwp_state state;
	uint64_t pad;
};

/*
 * rw-paged fobj without a pre-allocated IV area: the per-page states are
 * heap allocated (@state) and the encrypted backing store is @store.
 */
struct fobj_rwp_unpaged_iv {
	uint8_t *store;
	struct rwp_state *state;
	struct fobj fobj;
};

/*
 * rw-paged fobj whose state and store live in the global arrays set up by
 * rwp_init(); @idx is the first page's index into those arrays (see
 * idx_to_state_padded() and idx_to_store()).
 */
struct fobj_rwp_paged_iv {
	size_t idx;
	struct fobj fobj;
};
59aad1cf6bSJens Wiklander
/* Forward declarations, the ops are defined further down in this file */
const struct fobj_ops ops_rwp_paged_iv;
const struct fobj_ops ops_rwp_unpaged_iv;

/* AES-GCM key protecting all paged-out rw pages, generated in rwp_init() */
static struct internal_aes_gcm_key rwp_ae_key;

/*
 * Bases of the per-page state array and the backing store covering the
 * whole TA physical memory pool. Only set up (by rwp_init()) when
 * CFG_CORE_PAGE_TAG_AND_IV is enabled.
 */
static struct rwp_state_padded *rwp_state_base;
static uint8_t *rwp_store_base;
67ee546289SJens Wiklander
fobj_init(struct fobj * fobj,const struct fobj_ops * ops,unsigned int num_pages)68ee546289SJens Wiklander static void fobj_init(struct fobj *fobj, const struct fobj_ops *ops,
69ee546289SJens Wiklander unsigned int num_pages)
70ee546289SJens Wiklander {
71ee546289SJens Wiklander fobj->ops = ops;
72ee546289SJens Wiklander fobj->num_pages = num_pages;
73ee546289SJens Wiklander refcount_set(&fobj->refc, 1);
74d5ad7ccfSJens Wiklander TAILQ_INIT(&fobj->regions);
75ee546289SJens Wiklander }
76ee546289SJens Wiklander
/*
 * Common fobj teardown. The fobj must already be unreferenced and have no
 * regions mapping it; any stale pager references are invalidated here.
 */
static void fobj_uninit(struct fobj *fobj)
{
	assert(!refcount_val(&fobj->refc));
	assert(TAILQ_EMPTY(&fobj->regions));
	tee_pager_invalidate_fobj(fobj);
}
83ee546289SJens Wiklander
rwp_load_page(void * va,struct rwp_state * state,const uint8_t * src)84aad1cf6bSJens Wiklander static TEE_Result rwp_load_page(void *va, struct rwp_state *state,
85aad1cf6bSJens Wiklander const uint8_t *src)
86ee546289SJens Wiklander {
87ee546289SJens Wiklander struct rwp_aes_gcm_iv iv = {
88ee546289SJens Wiklander .iv = { (vaddr_t)state, state->iv >> 32, state->iv }
89ee546289SJens Wiklander };
90ee546289SJens Wiklander
91ee546289SJens Wiklander if (!state->iv) {
92ee546289SJens Wiklander /*
93aad1cf6bSJens Wiklander * IV still zero which means that this is previously unused
94ee546289SJens Wiklander * page.
95ee546289SJens Wiklander */
96ee546289SJens Wiklander memset(va, 0, SMALL_PAGE_SIZE);
97ee546289SJens Wiklander return TEE_SUCCESS;
98ee546289SJens Wiklander }
99ee546289SJens Wiklander
100ee546289SJens Wiklander return internal_aes_gcm_dec(&rwp_ae_key, &iv, sizeof(iv),
101ee546289SJens Wiklander NULL, 0, src, SMALL_PAGE_SIZE, va,
102ee546289SJens Wiklander state->tag, sizeof(state->tag));
103ee546289SJens Wiklander }
104ee546289SJens Wiklander
rwp_save_page(const void * va,struct rwp_state * state,uint8_t * dst)105aad1cf6bSJens Wiklander static TEE_Result rwp_save_page(const void *va, struct rwp_state *state,
106aad1cf6bSJens Wiklander uint8_t *dst)
107ee546289SJens Wiklander {
108ee546289SJens Wiklander size_t tag_len = sizeof(state->tag);
109aad1cf6bSJens Wiklander struct rwp_aes_gcm_iv iv = { };
110ee546289SJens Wiklander
111aad1cf6bSJens Wiklander assert(state->iv + 1 > state->iv);
112aad1cf6bSJens Wiklander
113aad1cf6bSJens Wiklander state->iv++;
114aad1cf6bSJens Wiklander
115aad1cf6bSJens Wiklander /*
116aad1cf6bSJens Wiklander * IV is constructed as recommended in section "8.2.1 Deterministic
117aad1cf6bSJens Wiklander * Construction" of "Recommendation for Block Cipher Modes of
118aad1cf6bSJens Wiklander * Operation: Galois/Counter Mode (GCM) and GMAC",
119aad1cf6bSJens Wiklander * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
120aad1cf6bSJens Wiklander */
121aad1cf6bSJens Wiklander iv.iv[0] = (vaddr_t)state;
122aad1cf6bSJens Wiklander iv.iv[1] = state->iv >> 32;
123aad1cf6bSJens Wiklander iv.iv[2] = state->iv;
124aad1cf6bSJens Wiklander
125aad1cf6bSJens Wiklander return internal_aes_gcm_enc(&rwp_ae_key, &iv, sizeof(iv),
126aad1cf6bSJens Wiklander NULL, 0, va, SMALL_PAGE_SIZE, dst,
127aad1cf6bSJens Wiklander state->tag, &tag_len);
128aad1cf6bSJens Wiklander }
129aad1cf6bSJens Wiklander
idx_to_state_padded(size_t idx)130aad1cf6bSJens Wiklander static struct rwp_state_padded *idx_to_state_padded(size_t idx)
131aad1cf6bSJens Wiklander {
132aad1cf6bSJens Wiklander assert(rwp_state_base);
133aad1cf6bSJens Wiklander return rwp_state_base + idx;
134aad1cf6bSJens Wiklander }
135aad1cf6bSJens Wiklander
idx_to_store(size_t idx)136aad1cf6bSJens Wiklander static uint8_t *idx_to_store(size_t idx)
137aad1cf6bSJens Wiklander {
138aad1cf6bSJens Wiklander assert(rwp_store_base);
139aad1cf6bSJens Wiklander return rwp_store_base + idx * SMALL_PAGE_SIZE;
140aad1cf6bSJens Wiklander }
141aad1cf6bSJens Wiklander
rwp_paged_iv_alloc(unsigned int num_pages)142b757e307SJens Wiklander static struct fobj *rwp_paged_iv_alloc(unsigned int num_pages)
143aad1cf6bSJens Wiklander {
144aad1cf6bSJens Wiklander struct fobj_rwp_paged_iv *rwp = NULL;
145aad1cf6bSJens Wiklander tee_mm_entry_t *mm = NULL;
146aad1cf6bSJens Wiklander size_t size = 0;
147aad1cf6bSJens Wiklander
148aad1cf6bSJens Wiklander COMPILE_TIME_ASSERT(IS_POWER_OF_TWO(sizeof(struct rwp_state_padded)));
149aad1cf6bSJens Wiklander
150aad1cf6bSJens Wiklander rwp = calloc(1, sizeof(*rwp));
151aad1cf6bSJens Wiklander if (!rwp)
152aad1cf6bSJens Wiklander return NULL;
153aad1cf6bSJens Wiklander
154aad1cf6bSJens Wiklander if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
155aad1cf6bSJens Wiklander goto err;
156de19cacbSJens Wiklander mm = nex_phys_mem_ta_alloc(size);
157aad1cf6bSJens Wiklander if (!mm)
158aad1cf6bSJens Wiklander goto err;
159de19cacbSJens Wiklander rwp->idx = (tee_mm_get_smem(mm) - nex_phys_mem_get_ta_base()) /
160de19cacbSJens Wiklander SMALL_PAGE_SIZE;
161aad1cf6bSJens Wiklander
162aad1cf6bSJens Wiklander memset(idx_to_state_padded(rwp->idx), 0,
163aad1cf6bSJens Wiklander num_pages * sizeof(struct rwp_state_padded));
164aad1cf6bSJens Wiklander
165aad1cf6bSJens Wiklander fobj_init(&rwp->fobj, &ops_rwp_paged_iv, num_pages);
166aad1cf6bSJens Wiklander
167aad1cf6bSJens Wiklander return &rwp->fobj;
168aad1cf6bSJens Wiklander err:
169aad1cf6bSJens Wiklander tee_mm_free(mm);
170aad1cf6bSJens Wiklander free(rwp);
171aad1cf6bSJens Wiklander
172aad1cf6bSJens Wiklander return NULL;
173aad1cf6bSJens Wiklander }
174aad1cf6bSJens Wiklander
to_rwp_paged_iv(struct fobj * fobj)175aad1cf6bSJens Wiklander static struct fobj_rwp_paged_iv *to_rwp_paged_iv(struct fobj *fobj)
176aad1cf6bSJens Wiklander {
177aad1cf6bSJens Wiklander assert(fobj->ops == &ops_rwp_paged_iv);
178aad1cf6bSJens Wiklander
179aad1cf6bSJens Wiklander return container_of(fobj, struct fobj_rwp_paged_iv, fobj);
180aad1cf6bSJens Wiklander }
181aad1cf6bSJens Wiklander
rwp_paged_iv_load_page(struct fobj * fobj,unsigned int page_idx,void * va)182aad1cf6bSJens Wiklander static TEE_Result rwp_paged_iv_load_page(struct fobj *fobj,
183aad1cf6bSJens Wiklander unsigned int page_idx, void *va)
184aad1cf6bSJens Wiklander {
185aad1cf6bSJens Wiklander struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
186aad1cf6bSJens Wiklander uint8_t *src = idx_to_store(rwp->idx) + page_idx * SMALL_PAGE_SIZE;
187aad1cf6bSJens Wiklander struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);
188aad1cf6bSJens Wiklander
189aad1cf6bSJens Wiklander assert(refcount_val(&fobj->refc));
190aad1cf6bSJens Wiklander assert(page_idx < fobj->num_pages);
191aad1cf6bSJens Wiklander
192aad1cf6bSJens Wiklander return rwp_load_page(va, &st->state, src);
193aad1cf6bSJens Wiklander }
194aad1cf6bSJens Wiklander DECLARE_KEEP_PAGER(rwp_paged_iv_load_page);
195aad1cf6bSJens Wiklander
rwp_paged_iv_save_page(struct fobj * fobj,unsigned int page_idx,const void * va)196aad1cf6bSJens Wiklander static TEE_Result rwp_paged_iv_save_page(struct fobj *fobj,
197aad1cf6bSJens Wiklander unsigned int page_idx, const void *va)
198aad1cf6bSJens Wiklander {
199aad1cf6bSJens Wiklander struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
200aad1cf6bSJens Wiklander uint8_t *dst = idx_to_store(rwp->idx) + page_idx * SMALL_PAGE_SIZE;
201aad1cf6bSJens Wiklander struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);
202aad1cf6bSJens Wiklander
203aad1cf6bSJens Wiklander assert(page_idx < fobj->num_pages);
204b83c0d5fSJens Wiklander
205b83c0d5fSJens Wiklander if (!refcount_val(&fobj->refc)) {
206b83c0d5fSJens Wiklander /*
207b83c0d5fSJens Wiklander * This fobj is being teared down, it just hasn't had the time
208b83c0d5fSJens Wiklander * to call tee_pager_invalidate_fobj() yet.
209b83c0d5fSJens Wiklander */
210d5ad7ccfSJens Wiklander assert(TAILQ_EMPTY(&fobj->regions));
211b83c0d5fSJens Wiklander return TEE_SUCCESS;
212b83c0d5fSJens Wiklander }
213b83c0d5fSJens Wiklander
214aad1cf6bSJens Wiklander return rwp_save_page(va, &st->state, dst);
215aad1cf6bSJens Wiklander }
216aad1cf6bSJens Wiklander DECLARE_KEEP_PAGER(rwp_paged_iv_save_page);
217ee546289SJens Wiklander
rwp_paged_iv_free(struct fobj * fobj)218aad1cf6bSJens Wiklander static void rwp_paged_iv_free(struct fobj *fobj)
219aad1cf6bSJens Wiklander {
220aad1cf6bSJens Wiklander struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
221de19cacbSJens Wiklander paddr_t pa = rwp->idx * SMALL_PAGE_SIZE + nex_phys_mem_get_ta_base();
222de19cacbSJens Wiklander tee_mm_entry_t *mm = nex_phys_mem_mm_find(pa);
223aad1cf6bSJens Wiklander
224aad1cf6bSJens Wiklander assert(mm);
225aad1cf6bSJens Wiklander
226aad1cf6bSJens Wiklander fobj_uninit(fobj);
227aad1cf6bSJens Wiklander tee_mm_free(mm);
228aad1cf6bSJens Wiklander free(rwp);
229aad1cf6bSJens Wiklander }
230aad1cf6bSJens Wiklander
rwp_paged_iv_get_iv_vaddr(struct fobj * fobj,unsigned int page_idx)231aad1cf6bSJens Wiklander static vaddr_t rwp_paged_iv_get_iv_vaddr(struct fobj *fobj,
232aad1cf6bSJens Wiklander unsigned int page_idx)
233aad1cf6bSJens Wiklander {
234aad1cf6bSJens Wiklander struct fobj_rwp_paged_iv *rwp = to_rwp_paged_iv(fobj);
235aad1cf6bSJens Wiklander struct rwp_state_padded *st = idx_to_state_padded(rwp->idx + page_idx);
236aad1cf6bSJens Wiklander
237aad1cf6bSJens Wiklander assert(page_idx < fobj->num_pages);
238aad1cf6bSJens Wiklander return (vaddr_t)&st->state & ~SMALL_PAGE_MASK;
239aad1cf6bSJens Wiklander }
240aad1cf6bSJens Wiklander DECLARE_KEEP_PAGER(rwp_paged_iv_get_iv_vaddr);
241aad1cf6bSJens Wiklander
/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_rwp_paged_iv
	__weak __relrodata_unpaged("ops_rwp_paged_iv") = {
	.free = rwp_paged_iv_free,
	.load_page = rwp_paged_iv_load_page,
	.save_page = rwp_paged_iv_save_page,
	.get_iv_vaddr = rwp_paged_iv_get_iv_vaddr,
};
253aad1cf6bSJens Wiklander
rwp_unpaged_iv_alloc(unsigned int num_pages)254b757e307SJens Wiklander static struct fobj *rwp_unpaged_iv_alloc(unsigned int num_pages)
255b757e307SJens Wiklander {
256b757e307SJens Wiklander struct fobj_rwp_unpaged_iv *rwp = NULL;
257b757e307SJens Wiklander tee_mm_entry_t *mm = NULL;
258b757e307SJens Wiklander size_t size = 0;
259b757e307SJens Wiklander
260b757e307SJens Wiklander rwp = calloc(1, sizeof(*rwp));
261b757e307SJens Wiklander if (!rwp)
262b757e307SJens Wiklander return NULL;
263b757e307SJens Wiklander
264b757e307SJens Wiklander rwp->state = calloc(num_pages, sizeof(*rwp->state));
265b757e307SJens Wiklander if (!rwp->state)
266b757e307SJens Wiklander goto err_free_rwp;
267b757e307SJens Wiklander
268b757e307SJens Wiklander if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
269b757e307SJens Wiklander goto err_free_state;
270de19cacbSJens Wiklander mm = nex_phys_mem_ta_alloc(size);
271b757e307SJens Wiklander if (!mm)
272b757e307SJens Wiklander goto err_free_state;
2732f2f69dfSJens Wiklander rwp->store = phys_to_virt(tee_mm_get_smem(mm),
2742f2f69dfSJens Wiklander MEM_AREA_SEC_RAM_OVERALL, size);
275b757e307SJens Wiklander assert(rwp->store);
276b757e307SJens Wiklander
277b757e307SJens Wiklander fobj_init(&rwp->fobj, &ops_rwp_unpaged_iv, num_pages);
278b757e307SJens Wiklander
279b757e307SJens Wiklander return &rwp->fobj;
280b757e307SJens Wiklander
281b757e307SJens Wiklander err_free_state:
282b757e307SJens Wiklander free(rwp->state);
283b757e307SJens Wiklander err_free_rwp:
284b757e307SJens Wiklander free(rwp);
285b757e307SJens Wiklander return NULL;
286b757e307SJens Wiklander }
287b757e307SJens Wiklander
to_rwp_unpaged_iv(struct fobj * fobj)288aad1cf6bSJens Wiklander static struct fobj_rwp_unpaged_iv *to_rwp_unpaged_iv(struct fobj *fobj)
289aad1cf6bSJens Wiklander {
290aad1cf6bSJens Wiklander assert(fobj->ops == &ops_rwp_unpaged_iv);
291aad1cf6bSJens Wiklander
292aad1cf6bSJens Wiklander return container_of(fobj, struct fobj_rwp_unpaged_iv, fobj);
293aad1cf6bSJens Wiklander }
294aad1cf6bSJens Wiklander
rwp_unpaged_iv_load_page(struct fobj * fobj,unsigned int page_idx,void * va)295aad1cf6bSJens Wiklander static TEE_Result rwp_unpaged_iv_load_page(struct fobj *fobj,
296aad1cf6bSJens Wiklander unsigned int page_idx, void *va)
297aad1cf6bSJens Wiklander {
298aad1cf6bSJens Wiklander struct fobj_rwp_unpaged_iv *rwp = to_rwp_unpaged_iv(fobj);
299aad1cf6bSJens Wiklander uint8_t *src = rwp->store + page_idx * SMALL_PAGE_SIZE;
300aad1cf6bSJens Wiklander
301aad1cf6bSJens Wiklander assert(refcount_val(&fobj->refc));
302aad1cf6bSJens Wiklander assert(page_idx < fobj->num_pages);
303aad1cf6bSJens Wiklander
304aad1cf6bSJens Wiklander return rwp_load_page(va, rwp->state + page_idx, src);
305aad1cf6bSJens Wiklander }
306aad1cf6bSJens Wiklander DECLARE_KEEP_PAGER(rwp_unpaged_iv_load_page);
307aad1cf6bSJens Wiklander
rwp_unpaged_iv_save_page(struct fobj * fobj,unsigned int page_idx,const void * va)308aad1cf6bSJens Wiklander static TEE_Result rwp_unpaged_iv_save_page(struct fobj *fobj,
309aad1cf6bSJens Wiklander unsigned int page_idx,
310aad1cf6bSJens Wiklander const void *va)
311aad1cf6bSJens Wiklander {
312aad1cf6bSJens Wiklander struct fobj_rwp_unpaged_iv *rwp = to_rwp_unpaged_iv(fobj);
313aad1cf6bSJens Wiklander uint8_t *dst = rwp->store + page_idx * SMALL_PAGE_SIZE;
314aad1cf6bSJens Wiklander
315aad1cf6bSJens Wiklander assert(page_idx < fobj->num_pages);
316aad1cf6bSJens Wiklander
317aad1cf6bSJens Wiklander if (!refcount_val(&fobj->refc)) {
318ee546289SJens Wiklander /*
319aad1cf6bSJens Wiklander * This fobj is being teared down, it just hasn't had the time
320aad1cf6bSJens Wiklander * to call tee_pager_invalidate_fobj() yet.
321aad1cf6bSJens Wiklander */
322d5ad7ccfSJens Wiklander assert(TAILQ_EMPTY(&fobj->regions));
323aad1cf6bSJens Wiklander return TEE_SUCCESS;
324aad1cf6bSJens Wiklander }
325aad1cf6bSJens Wiklander
326aad1cf6bSJens Wiklander return rwp_save_page(va, rwp->state + page_idx, dst);
327aad1cf6bSJens Wiklander }
328aad1cf6bSJens Wiklander DECLARE_KEEP_PAGER(rwp_unpaged_iv_save_page);
329aad1cf6bSJens Wiklander
rwp_unpaged_iv_free(struct fobj * fobj)330b757e307SJens Wiklander static void rwp_unpaged_iv_free(struct fobj *fobj)
331aad1cf6bSJens Wiklander {
332b757e307SJens Wiklander struct fobj_rwp_unpaged_iv *rwp = NULL;
333b757e307SJens Wiklander tee_mm_entry_t *mm = NULL;
334b757e307SJens Wiklander
335b757e307SJens Wiklander if (IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV))
336aad1cf6bSJens Wiklander panic();
337b757e307SJens Wiklander
338b757e307SJens Wiklander rwp = to_rwp_unpaged_iv(fobj);
339de19cacbSJens Wiklander mm = nex_phys_mem_mm_find(virt_to_phys(rwp->store));
340b757e307SJens Wiklander
341b757e307SJens Wiklander assert(mm);
342b757e307SJens Wiklander
343b757e307SJens Wiklander fobj_uninit(fobj);
344b757e307SJens Wiklander tee_mm_free(mm);
345b757e307SJens Wiklander free(rwp->state);
346b757e307SJens Wiklander free(rwp);
347aad1cf6bSJens Wiklander }
348aad1cf6bSJens Wiklander
/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_rwp_unpaged_iv
	__weak __relrodata_unpaged("ops_rwp_unpaged_iv") = {
	.free = rwp_unpaged_iv_free,
	.load_page = rwp_unpaged_iv_load_page,
	.save_page = rwp_unpaged_iv_save_page,
};
359aad1cf6bSJens Wiklander
rwp_init(void)360aad1cf6bSJens Wiklander static TEE_Result rwp_init(void)
361aad1cf6bSJens Wiklander {
362de19cacbSJens Wiklander paddr_size_t ta_size = nex_phys_mem_get_ta_size();
363aad1cf6bSJens Wiklander uint8_t key[RWP_AE_KEY_BITS / 8] = { 0 };
364b757e307SJens Wiklander struct fobj *fobj = NULL;
365aad1cf6bSJens Wiklander size_t num_pool_pages = 0;
366aad1cf6bSJens Wiklander size_t num_fobj_pages = 0;
367aad1cf6bSJens Wiklander
368aad1cf6bSJens Wiklander if (crypto_rng_read(key, sizeof(key)) != TEE_SUCCESS)
369aad1cf6bSJens Wiklander panic("failed to generate random");
370aad1cf6bSJens Wiklander if (crypto_aes_expand_enc_key(key, sizeof(key), rwp_ae_key.data,
371aad1cf6bSJens Wiklander sizeof(rwp_ae_key.data),
372aad1cf6bSJens Wiklander &rwp_ae_key.rounds))
373aad1cf6bSJens Wiklander panic("failed to expand key");
374aad1cf6bSJens Wiklander
375b757e307SJens Wiklander if (!IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV))
376b757e307SJens Wiklander return TEE_SUCCESS;
377b757e307SJens Wiklander
378de19cacbSJens Wiklander assert(ta_size && !(ta_size & SMALL_PAGE_SIZE));
379aad1cf6bSJens Wiklander
380de19cacbSJens Wiklander num_pool_pages = ta_size / SMALL_PAGE_SIZE;
381*04e46975SEtienne Carriere num_fobj_pages = ROUNDUP_DIV(num_pool_pages * sizeof(*rwp_state_base),
382*04e46975SEtienne Carriere SMALL_PAGE_SIZE);
383aad1cf6bSJens Wiklander
384aad1cf6bSJens Wiklander /*
385aad1cf6bSJens Wiklander * Each page in the pool needs a struct rwp_state.
386aad1cf6bSJens Wiklander *
387aad1cf6bSJens Wiklander * This isn't entirely true, the pages not used by
388aad1cf6bSJens Wiklander * fobj_rw_paged_alloc() don't need any. A future optimization
389aad1cf6bSJens Wiklander * may try to avoid allocating for such pages.
390ee546289SJens Wiklander */
391b757e307SJens Wiklander fobj = rwp_unpaged_iv_alloc(num_fobj_pages);
392b757e307SJens Wiklander if (!fobj)
393aad1cf6bSJens Wiklander panic();
394ee546289SJens Wiklander
395d5ad7ccfSJens Wiklander rwp_state_base = (void *)tee_pager_init_iv_region(fobj);
396aad1cf6bSJens Wiklander assert(rwp_state_base);
397aad1cf6bSJens Wiklander
398de19cacbSJens Wiklander rwp_store_base = phys_to_virt(nex_phys_mem_get_ta_base(),
3992f2f69dfSJens Wiklander MEM_AREA_SEC_RAM_OVERALL, ta_size);
400aad1cf6bSJens Wiklander assert(rwp_store_base);
401aad1cf6bSJens Wiklander
402aad1cf6bSJens Wiklander return TEE_SUCCESS;
403ee546289SJens Wiklander }
404aad1cf6bSJens Wiklander driver_init_late(rwp_init);
405ee546289SJens Wiklander
fobj_rw_paged_alloc(unsigned int num_pages)406b757e307SJens Wiklander struct fobj *fobj_rw_paged_alloc(unsigned int num_pages)
407b757e307SJens Wiklander {
408b757e307SJens Wiklander assert(num_pages);
409b757e307SJens Wiklander
410b757e307SJens Wiklander if (IS_ENABLED(CFG_CORE_PAGE_TAG_AND_IV))
411b757e307SJens Wiklander return rwp_paged_iv_alloc(num_pages);
412b757e307SJens Wiklander else
413b757e307SJens Wiklander return rwp_unpaged_iv_alloc(num_pages);
414b757e307SJens Wiklander }
415b757e307SJens Wiklander
/*
 * Read-only paged fobj: page content is copied from @store and verified
 * against the per-page SHA-256 digests in @hashes on load; saving is
 * rejected (see rop_save_page()).
 */
struct fobj_rop {
	uint8_t *hashes;
	uint8_t *store;
	struct fobj fobj;
};

const struct fobj_ops ops_ro_paged;
423c6744caaSJens Wiklander
rop_init(struct fobj_rop * rop,const struct fobj_ops * ops,unsigned int num_pages,void * hashes,void * store)424c6744caaSJens Wiklander static void rop_init(struct fobj_rop *rop, const struct fobj_ops *ops,
425c6744caaSJens Wiklander unsigned int num_pages, void *hashes, void *store)
426c6744caaSJens Wiklander {
427c6744caaSJens Wiklander rop->hashes = hashes;
428c6744caaSJens Wiklander rop->store = store;
429c6744caaSJens Wiklander fobj_init(&rop->fobj, ops, num_pages);
430c6744caaSJens Wiklander }
431ee546289SJens Wiklander
fobj_ro_paged_alloc(unsigned int num_pages,void * hashes,void * store)432ee546289SJens Wiklander struct fobj *fobj_ro_paged_alloc(unsigned int num_pages, void *hashes,
433ee546289SJens Wiklander void *store)
434ee546289SJens Wiklander {
435ee546289SJens Wiklander struct fobj_rop *rop = NULL;
436ee546289SJens Wiklander
437ee546289SJens Wiklander assert(num_pages && hashes && store);
438ee546289SJens Wiklander
439ee546289SJens Wiklander rop = calloc(1, sizeof(*rop));
440ee546289SJens Wiklander if (!rop)
441ee546289SJens Wiklander return NULL;
442ee546289SJens Wiklander
443c6744caaSJens Wiklander rop_init(rop, &ops_ro_paged, num_pages, hashes, store);
444ee546289SJens Wiklander
445ee546289SJens Wiklander return &rop->fobj;
446ee546289SJens Wiklander }
447ee546289SJens Wiklander
to_rop(struct fobj * fobj)448ee546289SJens Wiklander static struct fobj_rop *to_rop(struct fobj *fobj)
449ee546289SJens Wiklander {
450ee546289SJens Wiklander assert(fobj->ops == &ops_ro_paged);
451ee546289SJens Wiklander
452ee546289SJens Wiklander return container_of(fobj, struct fobj_rop, fobj);
453ee546289SJens Wiklander }
454ee546289SJens Wiklander
rop_uninit(struct fobj_rop * rop)455c6744caaSJens Wiklander static void rop_uninit(struct fobj_rop *rop)
456c6744caaSJens Wiklander {
457c6744caaSJens Wiklander fobj_uninit(&rop->fobj);
458de19cacbSJens Wiklander tee_mm_free(nex_phys_mem_mm_find(virt_to_phys(rop->store)));
459c6744caaSJens Wiklander free(rop->hashes);
460c6744caaSJens Wiklander }
461c6744caaSJens Wiklander
/* fobj_ops::free for plain read-only paged fobjs. */
static void rop_free(struct fobj *fobj)
{
	struct fobj_rop *rop = to_rop(fobj);

	rop_uninit(rop);
	free(rop);
}
469ee546289SJens Wiklander
rop_load_page_helper(struct fobj_rop * rop,unsigned int page_idx,void * va)470c6744caaSJens Wiklander static TEE_Result rop_load_page_helper(struct fobj_rop *rop,
471c6744caaSJens Wiklander unsigned int page_idx, void *va)
472c6744caaSJens Wiklander {
473c6744caaSJens Wiklander const uint8_t *hash = rop->hashes + page_idx * TEE_SHA256_HASH_SIZE;
474c6744caaSJens Wiklander const uint8_t *src = rop->store + page_idx * SMALL_PAGE_SIZE;
475c6744caaSJens Wiklander
476c6744caaSJens Wiklander assert(refcount_val(&rop->fobj.refc));
477c6744caaSJens Wiklander assert(page_idx < rop->fobj.num_pages);
478c6744caaSJens Wiklander memcpy(va, src, SMALL_PAGE_SIZE);
479c6744caaSJens Wiklander
480c6744caaSJens Wiklander return hash_sha256_check(hash, va, SMALL_PAGE_SIZE);
481c6744caaSJens Wiklander }
482c6744caaSJens Wiklander
rop_load_page(struct fobj * fobj,unsigned int page_idx,void * va)483ee546289SJens Wiklander static TEE_Result rop_load_page(struct fobj *fobj, unsigned int page_idx,
484ee546289SJens Wiklander void *va)
485ee546289SJens Wiklander {
486c6744caaSJens Wiklander return rop_load_page_helper(to_rop(fobj), page_idx, va);
487ee546289SJens Wiklander }
4883639b55fSJerome Forissier DECLARE_KEEP_PAGER(rop_load_page);
489ee546289SJens Wiklander
/*
 * fobj_ops::save_page stub: read-only pages are never dirty, so asking
 * to save one is always an error.
 */
static TEE_Result rop_save_page(struct fobj *fobj __unused,
				unsigned int page_idx __unused,
				const void *va __unused)
{
	return TEE_ERROR_GENERIC;
}
DECLARE_KEEP_PAGER(rop_save_page);
497ee546289SJens Wiklander
/*
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_ro_paged
	__weak __relrodata_unpaged("ops_ro_paged") = {
	.free = rop_free,
	.load_page = rop_load_page,
	.save_page = rop_save_page,
};
508ee546289SJens Wiklander
#ifdef CFG_CORE_ASLR
/*
 * When using relocated pages the relocation information must be applied
 * before the pages can be used. With read-only paging the content is only
 * integrity protected so relocation cannot be applied on pages in the less
 * secure "store" or the load_address selected by ASLR could be given away.
 * This means that each time a page has been loaded and verified it has to
 * have its relocation information applied before it can be used.
 *
 * Only the relative relocations are supported, this allows a rather compact
 * representation of the needed relocation information in this struct.
 * r_offset is replaced with the offset into the page that need to be updated,
 * this number can never be larger than SMALL_PAGE_SIZE so a uint16_t can be
 * used to represent it.
 *
 * All relocations are converted and stored in @relocs. @page_reloc_idx is
 * an array of length @rop.fobj.num_pages with an entry for each page. If
 * @page_reloc_idx[page_idx] isn't UINT16_MAX it's an index into @relocs.
 */
struct fobj_ro_reloc_paged {
	uint16_t *page_reloc_idx;
	uint16_t *relocs;
	unsigned int num_relocs;
	struct fobj_rop rop;
};

const struct fobj_ops ops_ro_reloc_paged;
536c6744caaSJens Wiklander
/*
 * Count how many entries of the raw relocation table @reloc apply to the
 * @num_pages pages starting at offset @reloc_offs. Also sanity checks
 * (debug builds) that the entries are pointer aligned and strictly
 * ascending, as init_rels() relies on that ordering.
 */
static unsigned int get_num_rels(unsigned int num_pages,
				 unsigned int reloc_offs,
				 const uint32_t *reloc, unsigned int num_relocs)
{
	const unsigned int align_mask __maybe_unused = sizeof(long) - 1;
	unsigned int nrels = 0;
	unsigned int n = 0;
	vaddr_t offs = 0;

	/*
	 * Count the number of relocations which are needed for these
	 * pages. Also check that the data is well formed, only expected
	 * relocations and sorted in order of address which it applies to.
	 */
	for (; n < num_relocs; n++) {
		assert(IS_ALIGNED_WITH_TYPE(reloc[n], unsigned long));
		assert(offs < reloc[n]); /* check that it's sorted */
		offs = reloc[n];
		/*
		 * NOTE(review): the upper bound is inclusive (<=); an
		 * entry at exactly reloc_offs + num_pages * SMALL_PAGE_SIZE
		 * would be counted here but skipped by init_rels() —
		 * confirm such an entry cannot occur in practice.
		 */
		if (offs >= reloc_offs &&
		    offs <= reloc_offs + num_pages * SMALL_PAGE_SIZE)
			nrels++;
	}

	return nrels;
}
562c6744caaSJens Wiklander
/*
 * Fill in @rrp->page_reloc_idx and @rrp->relocs from the raw relocation
 * table. Each applicable relocation is rebased to its page and stored as
 * a 16-bit in-page offset; page_reloc_idx[pg] holds the index into
 * @relocs of the first relocation for page pg, or UINT16_MAX when the
 * page has none. Relies on @reloc being sorted (checked in
 * get_num_rels()).
 */
static void init_rels(struct fobj_ro_reloc_paged *rrp, unsigned int reloc_offs,
		      const uint32_t *reloc, unsigned int num_relocs)
{
	unsigned int npg = rrp->rop.fobj.num_pages;
	unsigned int pg_idx = 0;
	unsigned int reln = 0;
	unsigned int n = 0;
	uint32_t r = 0;

	/* Default: no relocations on any page */
	for (n = 0; n < npg; n++)
		rrp->page_reloc_idx[n] = UINT16_MAX;

	for (n = 0; n < num_relocs ; n++) {
		if (reloc[n] < reloc_offs)
			continue;

		/* r is the offset from beginning of this fobj */
		r = reloc[n] - reloc_offs;

		/* Sorted input: once past the last page we're done */
		pg_idx = r / SMALL_PAGE_SIZE;
		if (pg_idx >= npg)
			break;

		if (rrp->page_reloc_idx[pg_idx] == UINT16_MAX)
			rrp->page_reloc_idx[pg_idx] = reln;
		rrp->relocs[reln] = r - pg_idx * SMALL_PAGE_SIZE;
		reln++;
	}

	/* Every relocation counted by get_num_rels() must be consumed */
	assert(reln == rrp->num_relocs);
}
594c6744caaSJens Wiklander
/*
 * fobj_ro_reloc_paged_alloc() - Allocate a read-only paged fobj which has
 * relocations applied as pages are loaded.
 * @num_pages:  Number of pages covered by the fobj
 * @hashes:     Hashes used to authenticate the pages when loaded
 * @reloc_offs: Offset of this fobj inside the relocated image
 * @reloc:      Sorted array of uint32_t relocation offsets, or NULL
 * @reloc_len:  Size of @reloc in bytes
 * @store:      Backing store for the paged data
 *
 * Falls back to a plain fobj_ro_paged_alloc() when no relocation applies
 * to this range. Returns the new fobj or NULL on allocation failure.
 */
struct fobj *fobj_ro_reloc_paged_alloc(unsigned int num_pages, void *hashes,
				       unsigned int reloc_offs,
				       const void *reloc,
				       unsigned int reloc_len, void *store)
{
	struct fobj_ro_reloc_paged *rrp = NULL;
	const unsigned int num_relocs = reloc_len / sizeof(uint32_t);
	unsigned int nrels = 0;

	assert(IS_ALIGNED_WITH_TYPE(reloc, uint32_t));
	assert(IS_ALIGNED_WITH_TYPE(reloc_len, uint32_t));
	assert(num_pages && hashes && store);
	/* No relocation table at all: a plain read-only paged fobj will do */
	if (!reloc_len) {
		assert(!reloc);
		return fobj_ro_paged_alloc(num_pages, hashes, store);
	}
	assert(reloc);

	nrels = get_num_rels(num_pages, reloc_offs, reloc, num_relocs);
	if (!nrels)
		return fobj_ro_paged_alloc(num_pages, hashes, store);

	/*
	 * Single allocation: the struct is followed by the per-page index
	 * array (num_pages uint16_t) and the relocation array (nrels
	 * uint16_t), carved out below.
	 */
	rrp = calloc(1, sizeof(*rrp) + num_pages * sizeof(uint16_t) +
			nrels * sizeof(uint16_t));
	if (!rrp)
		return NULL;
	rop_init(&rrp->rop, &ops_ro_reloc_paged, num_pages, hashes, store);
	rrp->page_reloc_idx = (uint16_t *)(rrp + 1);
	rrp->relocs = rrp->page_reloc_idx + num_pages;
	rrp->num_relocs = nrels;
	init_rels(rrp, reloc_offs, reloc, num_relocs);

	return &rrp->rop.fobj;
}
629c6744caaSJens Wiklander
to_rrp(struct fobj * fobj)630c6744caaSJens Wiklander static struct fobj_ro_reloc_paged *to_rrp(struct fobj *fobj)
631c6744caaSJens Wiklander {
632c6744caaSJens Wiklander assert(fobj->ops == &ops_ro_reloc_paged);
633c6744caaSJens Wiklander
634c6744caaSJens Wiklander return container_of(fobj, struct fobj_ro_reloc_paged, rop.fobj);
635c6744caaSJens Wiklander }
636c6744caaSJens Wiklander
rrp_free(struct fobj * fobj)637c6744caaSJens Wiklander static void rrp_free(struct fobj *fobj)
638c6744caaSJens Wiklander {
639c6744caaSJens Wiklander struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);
640c6744caaSJens Wiklander
641c6744caaSJens Wiklander rop_uninit(&rrp->rop);
642c6744caaSJens Wiklander free(rrp);
643c6744caaSJens Wiklander }
644c6744caaSJens Wiklander
/*
 * rrp_load_page() - Load, authenticate and relocate one page.
 * @fobj:     Fobj to load from
 * @page_idx: Index of the page within the fobj
 * @va:       Address to load the page at
 *
 * The page is loaded and verified with the common read-only paged helper,
 * then each relocation recorded for this page is applied by adding the
 * boot-time map offset to the targeted word.
 *
 * Returns TEE_SUCCESS or the error from rop_load_page_helper().
 */
static TEE_Result rrp_load_page(struct fobj *fobj, unsigned int page_idx,
				void *va)
{
	struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);
	unsigned int end_rel = rrp->num_relocs;
	TEE_Result res = TEE_SUCCESS;
	unsigned long *where = NULL;
	unsigned int n = 0;

	res = rop_load_page_helper(&rrp->rop, page_idx, va);
	if (res)
		return res;

	/* Find the reloc index of the next page to tell when we're done */
	for (n = page_idx + 1; n < fobj->num_pages; n++) {
		if (rrp->page_reloc_idx[n] != UINT16_MAX) {
			end_rel = rrp->page_reloc_idx[n];
			break;
		}
	}

	/*
	 * If this page has no relocations, page_reloc_idx[page_idx] is
	 * UINT16_MAX which exceeds end_rel (assumes num_relocs fits the
	 * uint16_t index arrays), so the loop body is skipped.
	 */
	for (n = rrp->page_reloc_idx[page_idx]; n < end_rel; n++) {
		where = (void *)((vaddr_t)va + rrp->relocs[n]);
		*where += boot_mmu_config.map_offset;
	}

	return TEE_SUCCESS;
}
DECLARE_KEEP_PAGER(rrp_load_page);
674c6744caaSJens Wiklander
/*
 * Pager callbacks for read-only paged fobjs with relocations.
 *
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_ro_reloc_paged
__weak __relrodata_unpaged("ops_ro_reloc_paged") = {
	.free = rrp_free,
	.load_page = rrp_load_page,
	.save_page = rop_save_page, /* Direct reuse */
};
685c6744caaSJens Wiklander #endif /*CFG_CORE_ASLR*/
686c6744caaSJens Wiklander
68700361c18SJens Wiklander const struct fobj_ops ops_locked_paged;
688ee546289SJens Wiklander
fobj_locked_paged_alloc(unsigned int num_pages)689ee546289SJens Wiklander struct fobj *fobj_locked_paged_alloc(unsigned int num_pages)
690ee546289SJens Wiklander {
691ee546289SJens Wiklander struct fobj *f = NULL;
692ee546289SJens Wiklander
693ee546289SJens Wiklander assert(num_pages);
694ee546289SJens Wiklander
695ee546289SJens Wiklander f = calloc(1, sizeof(*f));
696ee546289SJens Wiklander if (!f)
697ee546289SJens Wiklander return NULL;
698ee546289SJens Wiklander
699ee546289SJens Wiklander fobj_init(f, &ops_locked_paged, num_pages);
700ee546289SJens Wiklander
701ee546289SJens Wiklander return f;
702ee546289SJens Wiklander }
703ee546289SJens Wiklander
lop_free(struct fobj * fobj)704ee546289SJens Wiklander static void lop_free(struct fobj *fobj)
705ee546289SJens Wiklander {
706ee546289SJens Wiklander assert(fobj->ops == &ops_locked_paged);
707ee546289SJens Wiklander fobj_uninit(fobj);
708ee546289SJens Wiklander free(fobj);
709ee546289SJens Wiklander }
710ee546289SJens Wiklander
lop_load_page(struct fobj * fobj __maybe_unused,unsigned int page_idx __maybe_unused,void * va)711ee546289SJens Wiklander static TEE_Result lop_load_page(struct fobj *fobj __maybe_unused,
712ee546289SJens Wiklander unsigned int page_idx __maybe_unused,
713ee546289SJens Wiklander void *va)
714ee546289SJens Wiklander {
715ee546289SJens Wiklander assert(fobj->ops == &ops_locked_paged);
716ee546289SJens Wiklander assert(refcount_val(&fobj->refc));
717ee546289SJens Wiklander assert(page_idx < fobj->num_pages);
718ee546289SJens Wiklander
719ee546289SJens Wiklander memset(va, 0, SMALL_PAGE_SIZE);
720ee546289SJens Wiklander
721ee546289SJens Wiklander return TEE_SUCCESS;
722ee546289SJens Wiklander }
7233639b55fSJerome Forissier DECLARE_KEEP_PAGER(lop_load_page);
724ee546289SJens Wiklander
lop_save_page(struct fobj * fobj __unused,unsigned int page_idx __unused,const void * va __unused)725ee546289SJens Wiklander static TEE_Result lop_save_page(struct fobj *fobj __unused,
726ee546289SJens Wiklander unsigned int page_idx __unused,
727ee546289SJens Wiklander const void *va __unused)
728ee546289SJens Wiklander {
729ee546289SJens Wiklander return TEE_ERROR_GENERIC;
730ee546289SJens Wiklander }
7313639b55fSJerome Forissier DECLARE_KEEP_PAGER(lop_save_page);
732ee546289SJens Wiklander
/*
 * Pager callbacks for locked paged fobjs (zero-on-load, never saved).
 *
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_locked_paged
__weak __relrodata_unpaged("ops_locked_paged") = {
	.free = lop_free,
	.load_page = lop_load_page,
	.save_page = lop_save_page,
};
743ee546289SJens Wiklander #endif /*CFG_WITH_PAGER*/
744fbcaa411SJens Wiklander
745fbcaa411SJens Wiklander #ifndef CFG_PAGED_USER_TA
746fbcaa411SJens Wiklander
/*
 * Fobj backed by physically contiguous secure memory, used when user TA
 * paging is disabled.
 */
struct fobj_sec_mem {
	tee_mm_entry_t *mm;	/* Physical memory allocation for the pages */
	struct fobj fobj;
};
751fbcaa411SJens Wiklander
75200361c18SJens Wiklander const struct fobj_ops ops_sec_mem;
753fbcaa411SJens Wiklander
fobj_sec_mem_alloc(unsigned int num_pages)754fbcaa411SJens Wiklander struct fobj *fobj_sec_mem_alloc(unsigned int num_pages)
755fbcaa411SJens Wiklander {
756fbcaa411SJens Wiklander struct fobj_sec_mem *f = calloc(1, sizeof(*f));
757fbcaa411SJens Wiklander size_t size = 0;
758fbcaa411SJens Wiklander void *va = NULL;
759fbcaa411SJens Wiklander
760fbcaa411SJens Wiklander if (!f)
761fbcaa411SJens Wiklander return NULL;
762fbcaa411SJens Wiklander
763fbcaa411SJens Wiklander if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
764fbcaa411SJens Wiklander goto err;
765fbcaa411SJens Wiklander
766de19cacbSJens Wiklander f->mm = phys_mem_ta_alloc(size);
767fbcaa411SJens Wiklander if (!f->mm)
768fbcaa411SJens Wiklander goto err;
769fbcaa411SJens Wiklander
7702f2f69dfSJens Wiklander va = phys_to_virt(tee_mm_get_smem(f->mm), MEM_AREA_SEC_RAM_OVERALL,
7712f2f69dfSJens Wiklander size);
772fbcaa411SJens Wiklander if (!va)
773fbcaa411SJens Wiklander goto err;
774fbcaa411SJens Wiklander
775a0e8ffe9SJens Wiklander memtag_clear_mem(va, size);
776fbcaa411SJens Wiklander f->fobj.ops = &ops_sec_mem;
777fbcaa411SJens Wiklander f->fobj.num_pages = num_pages;
778fbcaa411SJens Wiklander refcount_set(&f->fobj.refc, 1);
779fbcaa411SJens Wiklander
780fbcaa411SJens Wiklander return &f->fobj;
781fbcaa411SJens Wiklander err:
782fbcaa411SJens Wiklander tee_mm_free(f->mm);
783fbcaa411SJens Wiklander free(f);
784fbcaa411SJens Wiklander
785fbcaa411SJens Wiklander return NULL;
786fbcaa411SJens Wiklander }
787fbcaa411SJens Wiklander
to_sec_mem(struct fobj * fobj)788fbcaa411SJens Wiklander static struct fobj_sec_mem *to_sec_mem(struct fobj *fobj)
789fbcaa411SJens Wiklander {
790fbcaa411SJens Wiklander assert(fobj->ops == &ops_sec_mem);
791fbcaa411SJens Wiklander
792fbcaa411SJens Wiklander return container_of(fobj, struct fobj_sec_mem, fobj);
793fbcaa411SJens Wiklander }
794fbcaa411SJens Wiklander
sec_mem_free(struct fobj * fobj)795fbcaa411SJens Wiklander static void sec_mem_free(struct fobj *fobj)
796fbcaa411SJens Wiklander {
797fbcaa411SJens Wiklander struct fobj_sec_mem *f = to_sec_mem(fobj);
798fbcaa411SJens Wiklander
799fbcaa411SJens Wiklander assert(!refcount_val(&fobj->refc));
800fbcaa411SJens Wiklander tee_mm_free(f->mm);
801fbcaa411SJens Wiklander free(f);
802fbcaa411SJens Wiklander }
803fbcaa411SJens Wiklander
sec_mem_get_pa(struct fobj * fobj,unsigned int page_idx)804fbcaa411SJens Wiklander static paddr_t sec_mem_get_pa(struct fobj *fobj, unsigned int page_idx)
805fbcaa411SJens Wiklander {
806fbcaa411SJens Wiklander struct fobj_sec_mem *f = to_sec_mem(fobj);
807fbcaa411SJens Wiklander
808fbcaa411SJens Wiklander assert(refcount_val(&fobj->refc));
809fbcaa411SJens Wiklander assert(page_idx < fobj->num_pages);
810fbcaa411SJens Wiklander
811fbcaa411SJens Wiklander return tee_mm_get_smem(f->mm) + page_idx * SMALL_PAGE_SIZE;
812fbcaa411SJens Wiklander }
813fbcaa411SJens Wiklander
/*
 * Callbacks for secure-memory fobjs; no load/save since the pages are
 * always resident.
 *
 * Note: this variable is weak just to ease breaking its dependency chain
 * when added to the unpaged area.
 */
const struct fobj_ops ops_sec_mem __weak __relrodata_unpaged("ops_sec_mem") = {
	.free = sec_mem_free,
	.get_pa = sec_mem_get_pa,
};
822fbcaa411SJens Wiklander
823fbcaa411SJens Wiklander #endif /*PAGED_USER_TA*/
824