// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2019, Linaro Limited
 */

#include <crypto/crypto.h>
#include <crypto/internal_aes-gcm.h>
#include <kernel/panic.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <mm/fobj.h>
#include <mm/tee_mm.h>
#include <stdlib.h>
#include <string.h>
#include <tee_api_types.h>
#include <types_ext.h>
#include <util.h>

#ifdef CFG_WITH_PAGER

#define RWP_AE_KEY_BITS		256

struct rwp_aes_gcm_iv {
	uint32_t iv[3];
};

#define RWP_AES_GCM_TAG_LEN	16

struct rwp_state {
	uint64_t iv;
	uint8_t tag[RWP_AES_GCM_TAG_LEN];
};

struct fobj_rwp {
	uint8_t *store;
	struct rwp_state *state;
	struct fobj fobj;
};

static struct fobj_ops ops_rw_paged;

static struct internal_aes_gcm_key rwp_ae_key;

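/*
 * Generate the key used to encrypt and authenticate pages written out to
 * the read/write backing store below. A 256-bit key is read from the RNG
 * and expanded into rwp_ae_key; failure is fatal since paging cannot
 * proceed safely without a valid key.
 */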
void fobj_generate_authenc_key(void)
{
	uint8_t key[RWP_AE_KEY_BITS / 8] = { 0 };

	if (crypto_rng_read(key, sizeof(key)) != TEE_SUCCESS)
		panic("failed to generate random");
	if (internal_aes_gcm_expand_enc_key(key, sizeof(key), &rwp_ae_key))
		panic("failed to expand key");
}

static void fobj_init(struct fobj *fobj, const struct fobj_ops *ops,
		      unsigned int num_pages)
{
	fobj->ops = ops;
	fobj->num_pages = num_pages;
	refcount_set(&fobj->refc, 1);
}

static void fobj_uninit(struct fobj *fobj)
{
	assert(!refcount_val(&fobj->refc));
}

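/*
 * Allocate a read/write paged fobj: one rwp_state (IV counter and GCM tag)
 * per page on the heap, plus a backing store carved out of tee_mm_sec_ddr
 * where encrypted page contents are kept while the page is unmapped.
 */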
struct fobj *fobj_rw_paged_alloc(unsigned int num_pages)
{
	tee_mm_entry_t *mm = NULL;
	struct fobj_rwp *rwp = NULL;
	size_t size = 0;

	assert(num_pages);

	rwp = calloc(1, sizeof(*rwp));
	if (!rwp)
		return NULL;

	rwp->state = calloc(num_pages, sizeof(*rwp->state));
	if (!rwp->state)
		goto err;

	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
		goto err;
	mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
	if (!mm)
		goto err;
	rwp->store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM);
	assert(rwp->store); /* to assist debugging if it would ever happen */
	if (!rwp->store)
		goto err;

	fobj_init(&rwp->fobj, &ops_rw_paged, num_pages);

	return &rwp->fobj;

err:
	tee_mm_free(mm);
	free(rwp->state);
	free(rwp);

	return NULL;
}

static struct fobj_rwp *to_rwp(struct fobj *fobj)
{
	assert(fobj->ops == &ops_rw_paged);

	return container_of(fobj, struct fobj_rwp, fobj);
}

static void rwp_free(struct fobj *fobj)
{
	struct fobj_rwp *rwp = to_rwp(fobj);

	fobj_uninit(fobj);
	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rwp->store)));
	free(rwp->state);
	free(rwp);
}

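/*
 * Decrypt and authenticate one page from the backing store into @va.
 * A page whose IV counter is still zero has never been saved, so it is
 * returned zero-filled instead of being decrypted.
 */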
static TEE_Result rwp_load_page(struct fobj *fobj, unsigned int page_idx,
				void *va)
{
	struct fobj_rwp *rwp = to_rwp(fobj);
	struct rwp_state *state = rwp->state + page_idx;
	uint8_t *src = rwp->store + page_idx * SMALL_PAGE_SIZE;
	struct rwp_aes_gcm_iv iv = {
		.iv = { (vaddr_t)state, state->iv >> 32, state->iv }
	};

	assert(refcount_val(&fobj->refc));
	assert(page_idx < fobj->num_pages);

	if (!state->iv) {
		/*
		 * The IV is still zero, which means this is a previously
		 * unused page: supply a zero-filled page instead of
		 * decrypting.
		 */
		memset(va, 0, SMALL_PAGE_SIZE);
		return TEE_SUCCESS;
	}

	return internal_aes_gcm_dec(&rwp_ae_key, &iv, sizeof(iv),
				    NULL, 0, src, SMALL_PAGE_SIZE, va,
				    state->tag, sizeof(state->tag));
}
KEEP_PAGER(rwp_load_page);

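/*
 * Encrypt one page into the backing store. The per-page IV counter is
 * incremented first so every save uses a fresh nonce, and the resulting
 * GCM tag is stored in rwp_state for verification on the next load.
 */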
static TEE_Result rwp_save_page(struct fobj *fobj, unsigned int page_idx,
				const void *va)
{
	struct fobj_rwp *rwp = to_rwp(fobj);
	struct rwp_state *state = rwp->state + page_idx;
	size_t tag_len = sizeof(state->tag);
	uint8_t *dst = rwp->store + page_idx * SMALL_PAGE_SIZE;
	struct rwp_aes_gcm_iv iv;

	memset(&iv, 0, sizeof(iv));
	assert(refcount_val(&fobj->refc));
	assert(page_idx < fobj->num_pages);
	assert(state->iv + 1 > state->iv);

	state->iv++;
	/*
	 * IV is constructed as recommended in section "8.2.1 Deterministic
	 * Construction" of "Recommendation for Block Cipher Modes of
	 * Operation: Galois/Counter Mode (GCM) and GMAC",
	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
	 */

	iv.iv[0] = (vaddr_t)state;
	iv.iv[1] = state->iv >> 32;
	iv.iv[2] = state->iv;

	return internal_aes_gcm_enc(&rwp_ae_key, &iv, sizeof(iv),
				    NULL, 0, va, SMALL_PAGE_SIZE, dst,
				    state->tag, &tag_len);
}
KEEP_PAGER(rwp_save_page);

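/*
 * The ops table is placed in unpaged read-only data so it remains
 * accessible while the pager is servicing a fault.
 */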
static struct fobj_ops ops_rw_paged __rodata_unpaged = {
	.free = rwp_free,
	.load_page = rwp_load_page,
	.save_page = rwp_save_page,
};

struct fobj_rop {
	uint8_t *hashes;
	uint8_t *store;
	struct fobj fobj;
};

static struct fobj_ops ops_ro_paged;

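/*
 * Allocate a read-only paged fobj. The caller provides the backing store
 * holding the page contents and an array of per-page SHA-256 hashes used
 * to verify each page when it is loaded; ownership of both is transferred,
 * the hashes are freed and the store released back to tee_mm_sec_ddr when
 * the fobj is freed.
 */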
struct fobj *fobj_ro_paged_alloc(unsigned int num_pages, void *hashes,
				 void *store)
{
	struct fobj_rop *rop = NULL;

	assert(num_pages && hashes && store);

	rop = calloc(1, sizeof(*rop));
	if (!rop)
		return NULL;

	rop->hashes = hashes;
	rop->store = store;
	fobj_init(&rop->fobj, &ops_ro_paged, num_pages);

	return &rop->fobj;
}

static struct fobj_rop *to_rop(struct fobj *fobj)
{
	assert(fobj->ops == &ops_ro_paged);

	return container_of(fobj, struct fobj_rop, fobj);
}

static void rop_free(struct fobj *fobj)
{
	struct fobj_rop *rop = to_rop(fobj);

	fobj_uninit(fobj);
	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rop->store)));
	free(rop->hashes);
	free(rop);
}

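/*
 * Load one page by copying it from the read-only store and checking it
 * against the expected SHA-256 hash; a mismatch makes the load fail.
 */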
static TEE_Result rop_load_page(struct fobj *fobj, unsigned int page_idx,
				void *va)
{
	struct fobj_rop *rop = to_rop(fobj);
	const uint8_t *hash = rop->hashes + page_idx * TEE_SHA256_HASH_SIZE;
	const uint8_t *src = rop->store + page_idx * SMALL_PAGE_SIZE;

	assert(refcount_val(&fobj->refc));
	assert(page_idx < fobj->num_pages);
	memcpy(va, src, SMALL_PAGE_SIZE);

	return hash_sha256_check(hash, va, SMALL_PAGE_SIZE);
}
KEEP_PAGER(rop_load_page);

static TEE_Result rop_save_page(struct fobj *fobj __unused,
				unsigned int page_idx __unused,
				const void *va __unused)
{
	return TEE_ERROR_GENERIC;
}
KEEP_PAGER(rop_save_page);

static struct fobj_ops ops_ro_paged __rodata_unpaged = {
	.free = rop_free,
	.load_page = rop_load_page,
	.save_page = rop_save_page,
};

static struct fobj_ops ops_locked_paged;

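/*
 * Allocate a locked paged fobj. There is no backing store: pages are
 * zero-initialized when first mapped and can never be written back
 * (save_page always fails), so they are expected to stay resident once
 * populated.
 */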
struct fobj *fobj_locked_paged_alloc(unsigned int num_pages)
{
	struct fobj *f = NULL;

	assert(num_pages);

	f = calloc(1, sizeof(*f));
	if (!f)
		return NULL;

	fobj_init(f, &ops_locked_paged, num_pages);

	return f;
}

static void lop_free(struct fobj *fobj)
{
	assert(fobj->ops == &ops_locked_paged);
	fobj_uninit(fobj);
	free(fobj);
}

static TEE_Result lop_load_page(struct fobj *fobj __maybe_unused,
				unsigned int page_idx __maybe_unused,
				void *va)
{
	assert(fobj->ops == &ops_locked_paged);
	assert(refcount_val(&fobj->refc));
	assert(page_idx < fobj->num_pages);

	memset(va, 0, SMALL_PAGE_SIZE);

	return TEE_SUCCESS;
}
KEEP_PAGER(lop_load_page);

static TEE_Result lop_save_page(struct fobj *fobj __unused,
				unsigned int page_idx __unused,
				const void *va __unused)
{
	return TEE_ERROR_GENERIC;
}
KEEP_PAGER(lop_save_page);

static struct fobj_ops ops_locked_paged __rodata_unpaged = {
	.free = lop_free,
	.load_page = lop_load_page,
	.save_page = lop_save_page,
};
#endif /*CFG_WITH_PAGER*/

#ifndef CFG_PAGED_USER_TA

struct fobj_sec_mem {
	tee_mm_entry_t *mm;
	struct fobj fobj;
};

static struct fobj_ops ops_sec_mem;

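/*
 * Allocate a non-paged fobj backed by physically contiguous secure memory
 * from tee_mm_sec_ddr. Pages are cleared at allocation and their physical
 * address is reported through the get_pa hook.
 */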
struct fobj *fobj_sec_mem_alloc(unsigned int num_pages)
{
	struct fobj_sec_mem *f = calloc(1, sizeof(*f));
	size_t size = 0;
	void *va = NULL;

	if (!f)
		return NULL;

	if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
		goto err;

	f->mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
	if (!f->mm)
		goto err;

	va = phys_to_virt(tee_mm_get_smem(f->mm), MEM_AREA_TA_RAM);
	if (!va)
		goto err;

	memset(va, 0, size);
	f->fobj.ops = &ops_sec_mem;
	f->fobj.num_pages = num_pages;
	refcount_set(&f->fobj.refc, 1);

	return &f->fobj;
err:
	tee_mm_free(f->mm);
	free(f);

	return NULL;
}

static struct fobj_sec_mem *to_sec_mem(struct fobj *fobj)
{
	assert(fobj->ops == &ops_sec_mem);

	return container_of(fobj, struct fobj_sec_mem, fobj);
}

static void sec_mem_free(struct fobj *fobj)
{
	struct fobj_sec_mem *f = to_sec_mem(fobj);

	assert(!refcount_val(&fobj->refc));
	tee_mm_free(f->mm);
	free(f);
}

static paddr_t sec_mem_get_pa(struct fobj *fobj, unsigned int page_idx)
{
	struct fobj_sec_mem *f = to_sec_mem(fobj);

	assert(refcount_val(&fobj->refc));
	assert(page_idx < fobj->num_pages);

	return tee_mm_get_smem(f->mm) + page_idx * SMALL_PAGE_SIZE;
}

static struct fobj_ops ops_sec_mem __rodata_unpaged = {
	.free = sec_mem_free,
	.get_pa = sec_mem_get_pa,
};

#endif /*CFG_PAGED_USER_TA*/