xref: /optee_os/core/arch/arm/mm/tee_pager.c (revision 26ed70ec3afa4776dff3df058723e794e5263a6b)
1 /*
2  * Copyright (c) 2016, Linaro Limited
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  * this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  * this list of conditions and the following disclaimer in the documentation
14  * and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <arm.h>
30 #include <assert.h>
31 #include <keep.h>
32 #include <sys/queue.h>
33 #include <kernel/abort.h>
34 #include <kernel/panic.h>
35 #include <kernel/tee_misc.h>
36 #include <kernel/tee_ta_manager.h>
37 #include <kernel/thread.h>
38 #include <kernel/tz_proc.h>
39 #include <mm/core_memprot.h>
40 #include <mm/tee_mm.h>
41 #include <mm/tee_mmu_defs.h>
42 #include <mm/tee_pager.h>
43 #include <types_ext.h>
44 #include <stdlib.h>
45 #include <tee_api_defines.h>
46 #include <tee/tee_cryp_provider.h>
47 #include <trace.h>
48 #include <utee_defines.h>
49 #include <util.h>
50 
51 #include "pager_private.h"
52 
53 #define PAGER_AE_KEY_BITS	256
54 
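/*
 * struct pager_rw_pstate - per-page state of an encrypted read/write page
 *
 * @iv	invocation counter, increased by encrypt_page() each time the page
 *	is saved to its backing store. A value of 0 means the page has never
 *	been saved and is zero-initialized when loaded.
 * @tag	AES-GCM authentication tag produced by the latest encryption and
 *	checked again when the page is decrypted.
 */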
55 struct pager_rw_pstate {
56 	uint64_t iv;
57 	uint8_t tag[PAGER_AES_GCM_TAG_LEN];
58 };
59 
60 enum area_type {
61 	AREA_TYPE_RO,
62 	AREA_TYPE_RW,
63 	AREA_TYPE_LOCK,
64 };
65 
66 struct tee_pager_area {
67 	union {
68 		const uint8_t *hashes;
69 		struct pager_rw_pstate *rwp;
70 	} u;
71 	uint8_t *store;
72 	enum area_type type;
73 	uint32_t flags;
74 	vaddr_t base;
75 	size_t size;
76 	struct pgt *pgt;
77 	TAILQ_ENTRY(tee_pager_area) link;
78 };
79 
80 TAILQ_HEAD(tee_pager_area_head, tee_pager_area);
81 
82 static struct tee_pager_area_head tee_pager_area_head =
83 	TAILQ_HEAD_INITIALIZER(tee_pager_area_head);
84 
85 #define INVALID_PGIDX	UINT_MAX
86 
87 /*
88  * struct tee_pager_pmem - Represents a physical page used for paging.
89  *
90  * @pgidx	an index of the entry in the area's translation table (area->pgt).
91  * @va_alias	Virtual address where the physical page is always aliased.
92  *		Used during remapping of the page when the content needs to
93  *		be updated before it's available at the new location.
94  * @area	a pointer to the pager area
95  */
96 struct tee_pager_pmem {
97 	unsigned pgidx;
98 	void *va_alias;
99 	struct tee_pager_area *area;
100 	TAILQ_ENTRY(tee_pager_pmem) link;
101 };
102 
103 /* The list of physical pages. The first page in the list is the oldest */
104 TAILQ_HEAD(tee_pager_pmem_head, tee_pager_pmem);
105 
106 static struct tee_pager_pmem_head tee_pager_pmem_head =
107 	TAILQ_HEAD_INITIALIZER(tee_pager_pmem_head);
108 
109 static struct tee_pager_pmem_head tee_pager_lock_pmem_head =
110 	TAILQ_HEAD_INITIALIZER(tee_pager_lock_pmem_head);
111 
112 static uint8_t pager_ae_key[PAGER_AE_KEY_BITS / 8];
113 
114 /* Maximum number of pages hidden at a time */
115 #define TEE_PAGER_NHIDE (tee_pager_npages / 3)
116 
117 /* Number of registered physical pages, used when hiding pages. */
118 static size_t tee_pager_npages;
119 
120 #ifdef CFG_WITH_STATS
121 static struct tee_pager_stats pager_stats;
122 
123 static inline void incr_ro_hits(void)
124 {
125 	pager_stats.ro_hits++;
126 }
127 
128 static inline void incr_rw_hits(void)
129 {
130 	pager_stats.rw_hits++;
131 }
132 
133 static inline void incr_hidden_hits(void)
134 {
135 	pager_stats.hidden_hits++;
136 }
137 
138 static inline void incr_zi_released(void)
139 {
140 	pager_stats.zi_released++;
141 }
142 
143 static inline void incr_npages_all(void)
144 {
145 	pager_stats.npages_all++;
146 }
147 
148 static inline void set_npages(void)
149 {
150 	pager_stats.npages = tee_pager_npages;
151 }
152 
153 void tee_pager_get_stats(struct tee_pager_stats *stats)
154 {
155 	*stats = pager_stats;
156 
157 	pager_stats.hidden_hits = 0;
158 	pager_stats.ro_hits = 0;
159 	pager_stats.rw_hits = 0;
160 	pager_stats.zi_released = 0;
161 }
162 
163 #else /* CFG_WITH_STATS */
164 static inline void incr_ro_hits(void) { }
165 static inline void incr_rw_hits(void) { }
166 static inline void incr_hidden_hits(void) { }
167 static inline void incr_zi_released(void) { }
168 static inline void incr_npages_all(void) { }
169 static inline void set_npages(void) { }
170 
171 void tee_pager_get_stats(struct tee_pager_stats *stats)
172 {
173 	memset(stats, 0, sizeof(struct tee_pager_stats));
174 }
175 #endif /* CFG_WITH_STATS */
176 
177 static struct pgt pager_core_pgt;
178 struct core_mmu_table_info tee_pager_tbl_info;
179 static struct core_mmu_table_info pager_alias_tbl_info;
180 
181 static unsigned pager_lock = SPINLOCK_UNLOCK;
182 
183 /* Defines the range of the alias area */
184 static tee_mm_entry_t *pager_alias_area;
185 /*
186  * Physical pages are added in a stack-like fashion to the alias area,
187  * @pager_alias_next_free gives the address of the next free entry as
188  * long as it is != 0
189  */
190 static uintptr_t pager_alias_next_free;
191 
192 static void set_alias_area(tee_mm_entry_t *mm)
193 {
194 	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
195 	size_t tbl_va_size;
196 	unsigned idx;
197 	unsigned last_idx;
198 	vaddr_t smem = tee_mm_get_smem(mm);
199 	size_t nbytes = tee_mm_get_bytes(mm);
200 
201 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA, smem, smem + nbytes);
202 
203 	if (pager_alias_area)
204 		panic("null pager_alias_area");
205 
206 	if (!ti->num_entries && !core_mmu_find_table(smem, UINT_MAX, ti))
207 		panic("Can't find translation table");
208 
209 	if ((1 << ti->shift) != SMALL_PAGE_SIZE)
210 		panic("Unsupported page size in translation table");
211 
212 	tbl_va_size = (1 << ti->shift) * ti->num_entries;
213 	if (!core_is_buffer_inside(smem, nbytes,
214 				   ti->va_base, tbl_va_size)) {
215 		EMSG("area 0x%" PRIxVA " len 0x%zx doesn't fit it translation table 0x%" PRIxVA " len 0x%zx",
216 		     smem, nbytes, ti->va_base, tbl_va_size);
217 		panic();
218 	}
219 
220 	if (smem & SMALL_PAGE_MASK || nbytes & SMALL_PAGE_MASK)
221 		panic("invalid area alignment");
222 
223 	pager_alias_area = mm;
224 	pager_alias_next_free = smem;
225 
226 	/* Clear all mapping in the alias area */
227 	idx = core_mmu_va2idx(ti, smem);
228 	last_idx = core_mmu_va2idx(ti, smem + nbytes);
229 	for (; idx < last_idx; idx++)
230 		core_mmu_set_entry(ti, idx, 0, 0);
231 
232 	/* TODO only invalidate entries touched above */
233 	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
234 }
235 
236 static void generate_ae_key(void)
237 {
238 	if (rng_generate(pager_ae_key, sizeof(pager_ae_key)) != TEE_SUCCESS)
239 		panic("failed to generate random");
240 }
241 
242 void tee_pager_init(tee_mm_entry_t *mm_alias)
243 {
244 	set_alias_area(mm_alias);
245 	generate_ae_key();
246 }
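
/*
 * Note on ordering: tee_pager_init() must be called before
 * tee_pager_add_pages(), since pager_add_alias_page() panics if the alias
 * area hasn't been set up, and before any page is saved, since
 * encrypt_page() needs the generated key.
 */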
247 
248 static void *pager_add_alias_page(paddr_t pa)
249 {
250 	unsigned idx;
251 	struct core_mmu_table_info *ti = &pager_alias_tbl_info;
252 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_GLOBAL |
253 			(TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT) |
254 			TEE_MATTR_SECURE | TEE_MATTR_PRW;
255 
256 	DMSG("0x%" PRIxPA, pa);
257 
258 	if (!pager_alias_next_free || !ti->num_entries)
259 		panic("invalid alias entry");
260 
261 	idx = core_mmu_va2idx(ti, pager_alias_next_free);
262 	core_mmu_set_entry(ti, idx, pa, attr);
263 	pgt_inc_used_entries(&pager_core_pgt);
264 	pager_alias_next_free += SMALL_PAGE_SIZE;
265 	if (pager_alias_next_free >= (tee_mm_get_smem(pager_alias_area) +
266 				      tee_mm_get_bytes(pager_alias_area)))
267 		pager_alias_next_free = 0;
268 	return (void *)core_mmu_idx2va(ti, idx);
269 }
270 
271 static struct tee_pager_area *alloc_area(struct pgt *pgt,
272 					 vaddr_t base, size_t size,
273 					 uint32_t flags, const void *store,
274 					 const void *hashes)
275 {
276 	struct tee_pager_area *area = calloc(1, sizeof(*area));
277 	enum area_type at;
278 	tee_mm_entry_t *mm_store = NULL;
279 
280 	if (!area)
281 		return NULL;
282 
283 	if (flags & (TEE_MATTR_PW | TEE_MATTR_UW)) {
284 		if (flags & TEE_MATTR_LOCKED) {
285 			at = AREA_TYPE_LOCK;
286 			goto out;
287 		}
288 		mm_store = tee_mm_alloc(&tee_mm_sec_ddr, size);
289 		if (!mm_store)
290 			goto bad;
291 		area->store = phys_to_virt(tee_mm_get_smem(mm_store),
292 					   MEM_AREA_TA_RAM);
293 		if (!area->store)
294 			goto bad;
295 		area->u.rwp = calloc(size / SMALL_PAGE_SIZE,
296 				     sizeof(struct pager_rw_pstate));
297 		if (!area->u.rwp)
298 			goto bad;
299 		at = AREA_TYPE_RW;
300 	} else {
301 		area->store = (void *)store;
302 		area->u.hashes = hashes;
303 		at = AREA_TYPE_RO;
304 	}
305 out:
306 	area->pgt = pgt;
307 	area->base = base;
308 	area->size = size;
309 	area->flags = flags;
310 	area->type = at;
311 	return area;
312 bad:
313 	tee_mm_free(mm_store);
314 	free(area->u.rwp);
315 	free(area);
316 	return NULL;
317 }
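
/*
 * Ownership in alloc_area(): writable, non-locked areas get a private
 * backing store carved out of tee_mm_sec_ddr plus one struct
 * pager_rw_pstate per page; read-only areas just reference the caller
 * supplied store and SHA-256 hashes; locked areas get neither since their
 * pages are always zero-initialized on demand.
 */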
318 
319 static void area_insert_tail(struct tee_pager_area *area)
320 {
321 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
322 
323 	cpu_spin_lock(&pager_lock);
324 
325 	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
326 
327 	cpu_spin_unlock(&pager_lock);
328 	thread_set_exceptions(exceptions);
329 }
330 KEEP_PAGER(area_insert_tail);
331 
332 static size_t tbl_usage_count(struct pgt *pgt)
333 {
334 	size_t n;
335 	paddr_t pa;
336 	size_t usage = 0;
337 
338 	for (n = 0; n < tee_pager_tbl_info.num_entries; n++) {
339 		core_mmu_get_entry_primitive(pgt->tbl, tee_pager_tbl_info.level,
340 					     n, &pa, NULL);
341 		if (pa)
342 			usage++;
343 	}
344 	return usage;
345 }
346 
347 bool tee_pager_add_core_area(vaddr_t base, size_t size, uint32_t flags,
348 			const void *store, const void *hashes)
349 {
350 	struct tee_pager_area *area;
351 	size_t tbl_va_size;
352 	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
353 
354 	DMSG("0x%" PRIxPTR " - 0x%" PRIxPTR " : flags 0x%x, store %p, hashes %p",
355 		base, base + size, flags, store, hashes);
356 
357 	if (base & SMALL_PAGE_MASK || size & SMALL_PAGE_MASK || !size) {
358 		EMSG("invalid pager area [%" PRIxVA " +0x%zx]", base, size);
359 		panic();
360 	}
361 
362 	if (!(flags & TEE_MATTR_PW) && (!store || !hashes))
363 		panic("write pages cannot provide store or hashes");
364 
365 	if ((flags & TEE_MATTR_PW) && (store || hashes))
366 		panic("non-write pages must provide store and hashes");
367 
368 	if (!pager_core_pgt.tbl) {
369 		pager_core_pgt.tbl = ti->table;
370 		pgt_set_used_entries(&pager_core_pgt,
371 				     tbl_usage_count(&pager_core_pgt));
372 	}
373 
374 	tbl_va_size = (1 << ti->shift) * ti->num_entries;
375 	if (!core_is_buffer_inside(base, size, ti->va_base, tbl_va_size)) {
376 		DMSG("area 0x%" PRIxPTR " len 0x%zx doesn't fit it translation table 0x%" PRIxVA " len 0x%zx",
377 			base, size, ti->va_base, tbl_va_size);
378 		return false;
379 	}
380 
381 	area = alloc_area(&pager_core_pgt, base, size, flags, store, hashes);
382 	if (!area)
383 		return false;
384 
385 	area_insert_tail(area);
386 	return true;
387 }
388 
389 #ifdef CFG_PAGED_USER_TA
390 bool tee_pager_add_uta_area(struct user_ta_ctx *utc, vaddr_t base, size_t size)
391 {
392 	struct tee_pager_area *area;
393 	uint32_t flags;
394 	vaddr_t b = base;
395 	size_t s = size;
396 
397 	if (!utc->areas) {
398 		utc->areas = malloc(sizeof(*utc->areas));
399 		if (!utc->areas)
400 			return false;
401 		TAILQ_INIT(utc->areas);
402 	}
403 
404 	flags = TEE_MATTR_PRW | TEE_MATTR_URWX;
405 
406 	while (s) {
407 		size_t s2;
408 
409 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
410 
411 		/* Table info will be set when the context is activated. */
412 		area = alloc_area(NULL, b, s2, flags, NULL, NULL);
413 		if (!area)
414 			return false;
415 		TAILQ_INSERT_TAIL(utc->areas, area, link);
416 		b += s2;
417 		s -= s2;
418 	}
419 
420 	return true;
421 }
422 
423 void tee_pager_rem_uta_areas(struct user_ta_ctx *utc)
424 {
425 	struct tee_pager_area *area;
426 
427 	if (!utc->areas)
428 		return;
429 
430 	while (true) {
431 		area = TAILQ_FIRST(utc->areas);
432 		if (!area)
433 			break;
434 		TAILQ_REMOVE(utc->areas, area, link);
435 		tee_mm_free(tee_mm_find(&tee_mm_sec_ddr,
436 					virt_to_phys(area->store)));
437 		if (area->type == AREA_TYPE_RW)
438 			free(area->u.rwp);
439 		free(area);
440 	}
441 
442 	free(utc->areas);
443 }
444 #endif /*CFG_PAGED_USER_TA*/
445 
446 static struct tee_pager_area *find_area(struct tee_pager_area_head *areas,
447 					vaddr_t va)
448 {
449 	struct tee_pager_area *area;
450 
451 	if (!areas)
452 		return NULL;
453 
454 	TAILQ_FOREACH(area, areas, link) {
455 		if (core_is_buffer_inside(va, 1, area->base, area->size))
456 			return area;
457 	}
458 	return NULL;
459 }
460 
461 #ifdef CFG_PAGED_USER_TA
462 static struct tee_pager_area *find_uta_area(vaddr_t va)
463 {
464 	struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
465 
466 	if (!ctx || !is_user_ta_ctx(ctx))
467 		return NULL;
468 	return find_area(to_user_ta_ctx(ctx)->areas, va);
469 }
470 #else
471 static struct tee_pager_area *find_uta_area(vaddr_t va __unused)
472 {
473 	return NULL;
474 }
475 #endif /*CFG_PAGED_USER_TA*/
476 
477 
478 static uint32_t get_area_mattr(uint32_t area_flags)
479 {
480 	uint32_t attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_SECURE |
481 			TEE_MATTR_CACHE_CACHED << TEE_MATTR_CACHE_SHIFT |
482 			(area_flags & (TEE_MATTR_PRWX | TEE_MATTR_URWX));
483 
484 	if (!(area_flags & (TEE_MATTR_UR | TEE_MATTR_UX | TEE_MATTR_UW)))
485 		attr |= TEE_MATTR_GLOBAL;
486 
487 	return attr;
488 }
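
/*
 * Pages without any user permissions are mapped TEE_MATTR_GLOBAL so the
 * mapping is shared by all contexts, while user accessible pages are left
 * non-global and thus only apply to the user TA context mapping them.
 */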
489 
490 static paddr_t get_pmem_pa(struct tee_pager_pmem *pmem)
491 {
492 	paddr_t pa;
493 	unsigned idx;
494 
495 	idx = core_mmu_va2idx(&pager_alias_tbl_info, (vaddr_t)pmem->va_alias);
496 	core_mmu_get_entry(&pager_alias_tbl_info, idx, &pa, NULL);
497 	return pa;
498 }
499 
500 static bool decrypt_page(struct pager_rw_pstate *rwp, const void *src,
501 			void *dst)
502 {
503 	struct pager_aes_gcm_iv iv = {
504 		{ (vaddr_t)rwp, rwp->iv >> 32, rwp->iv }
505 	};
506 
507 	return pager_aes_gcm_decrypt(pager_ae_key, sizeof(pager_ae_key),
508 				     &iv, rwp->tag, src, dst, SMALL_PAGE_SIZE);
509 }
510 
511 static void encrypt_page(struct pager_rw_pstate *rwp, void *src, void *dst)
512 {
513 	struct pager_aes_gcm_iv iv;
514 
515 	assert((rwp->iv + 1) > rwp->iv);
516 	rwp->iv++;
517 	/*
518 	 * IV is constructed as recommended in section "8.2.1 Deterministic
519 	 * Construction" of "Recommendation for Block Cipher Modes of
520 	 * Operation: Galois/Counter Mode (GCM) and GMAC",
521 	 * http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf
522 	 */
523 	iv.iv[0] = (vaddr_t)rwp;
524 	iv.iv[1] = rwp->iv >> 32;
525 	iv.iv[2] = rwp->iv;
526 
527 	if (!pager_aes_gcm_encrypt(pager_ae_key, sizeof(pager_ae_key),
528 				   &iv, rwp->tag,
529 				   src, dst, SMALL_PAGE_SIZE))
530 		panic("gcm failed");
531 }
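
/*
 * The deterministic IV used by encrypt_page() and decrypt_page() is laid
 * out as:
 *
 *	iv[0] = (vaddr_t)rwp	fixed field, derived from this page's pstate
 *	iv[1] = rwp->iv >> 32	upper half of the invocation counter
 *	iv[2] = rwp->iv		lower half of the invocation counter
 *
 * Since the counter only increases (see the assert in encrypt_page()) the
 * same IV is never used twice with the pager key for a given page.
 */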
532 
533 static void tee_pager_load_page(struct tee_pager_area *area, vaddr_t page_va,
534 			void *va_alias)
535 {
536 	size_t idx = (page_va - area->base) >> SMALL_PAGE_SHIFT;
537 	const void *stored_page = area->store + idx * SMALL_PAGE_SIZE;
538 
539 	switch (area->type) {
540 	case AREA_TYPE_RO:
541 		{
542 			const void *hash = area->u.hashes +
543 					   idx * TEE_SHA256_HASH_SIZE;
544 
545 			memcpy(va_alias, stored_page, SMALL_PAGE_SIZE);
546 			incr_ro_hits();
547 
548 			if (hash_sha256_check(hash, va_alias,
549 					      SMALL_PAGE_SIZE) != TEE_SUCCESS) {
550 				EMSG("PH 0x%" PRIxVA " failed", page_va);
551 				panic();
552 			}
553 		}
554 		break;
555 	case AREA_TYPE_RW:
556 		FMSG("Restore %p %#" PRIxVA " iv %#" PRIx64,
557 			va_alias, page_va, area->u.rwp[idx].iv);
558 		if (!area->u.rwp[idx].iv)
559 			memset(va_alias, 0, SMALL_PAGE_SIZE);
560 		else if (!decrypt_page(&area->u.rwp[idx], stored_page,
561 				       va_alias)) {
562 			EMSG("PH 0x%" PRIxVA " failed", page_va);
563 			panic();
564 		}
565 		incr_rw_hits();
566 		break;
567 	case AREA_TYPE_LOCK:
568 		FMSG("Zero init %p %#" PRIxVA, va_alias, page_va);
569 		memset(va_alias, 0, SMALL_PAGE_SIZE);
570 		break;
571 	default:
572 		panic();
573 	}
574 }
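
/*
 * In short, tee_pager_load_page() handles three cases:
 * - AREA_TYPE_RO: copy from the read-only store and verify the SHA-256
 *   hash; a mismatch is fatal as it indicates corrupted backing data.
 * - AREA_TYPE_RW: zero-fill on first use (iv == 0), otherwise decrypt and
 *   authenticate the saved page with AES-GCM.
 * - AREA_TYPE_LOCK: always zero-initialized, never backed by a store.
 */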
575 
576 static void tee_pager_save_page(struct tee_pager_pmem *pmem, uint32_t attr)
577 {
578 	const uint32_t dirty_bits = TEE_MATTR_PW | TEE_MATTR_UW |
579 				    TEE_MATTR_HIDDEN_DIRTY_BLOCK;
580 
581 	if (pmem->area->type == AREA_TYPE_RW && (attr & dirty_bits)) {
582 		size_t offs = pmem->area->base & CORE_MMU_PGDIR_MASK;
583 		size_t idx = pmem->pgidx - (offs >> SMALL_PAGE_SHIFT);
584 		void *stored_page = pmem->area->store + idx * SMALL_PAGE_SIZE;
585 
586 		assert(pmem->area->flags & (TEE_MATTR_PW | TEE_MATTR_UW));
587 		encrypt_page(&pmem->area->u.rwp[idx], pmem->va_alias,
588 			     stored_page);
589 		FMSG("Saved %#" PRIxVA " iv %#" PRIx64,
590 			pmem->area->base + idx * SMALL_PAGE_SIZE,
591 			pmem->area->u.rwp[idx].iv);
592 	}
593 }
594 
595 static void area_get_entry(struct tee_pager_area *area, size_t idx,
596 			   paddr_t *pa, uint32_t *attr)
597 {
598 	assert(area->pgt);
599 	assert(idx < tee_pager_tbl_info.num_entries);
600 	core_mmu_get_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,
601 				     idx, pa, attr);
602 }
603 
604 static void area_set_entry(struct tee_pager_area *area, size_t idx,
605 			   paddr_t pa, uint32_t attr)
606 {
607 	assert(area->pgt);
608 	assert(idx < tee_pager_tbl_info.num_entries);
609 	core_mmu_set_entry_primitive(area->pgt->tbl, tee_pager_tbl_info.level,
610 				     idx, pa, attr);
611 }
612 
613 static size_t area_va2idx(struct tee_pager_area *area, vaddr_t va)
614 {
615 	return (va - (area->base & ~CORE_MMU_PGDIR_MASK)) >> SMALL_PAGE_SHIFT;
616 }
617 
618 static vaddr_t __maybe_unused area_idx2va(struct tee_pager_area *area,
619 					  size_t idx)
620 {
621 	return (idx << SMALL_PAGE_SHIFT) + (area->base & ~CORE_MMU_PGDIR_MASK);
622 }
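
/*
 * Note that pgidx values are counted from the start of the page directory
 * covering the area (area->base & ~CORE_MMU_PGDIR_MASK), not from
 * area->base. This is why tee_pager_save_page() subtracts the area's
 * offset within the page directory before indexing area->store and
 * area->u.rwp.
 */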
623 
624 #ifdef CFG_PAGED_USER_TA
625 bool tee_pager_set_uta_area(struct user_ta_ctx *utc, vaddr_t base, size_t size,
626 			    uint32_t flags)
627 {
628 	bool ret;
629 	vaddr_t b = base;
630 	size_t s = size;
631 	size_t s2;
632 	struct tee_pager_area *area = find_area(utc->areas, b);
633 	uint32_t exceptions;
634 	struct tee_pager_pmem *pmem;
635 	paddr_t pa;
636 	uint32_t a;
637 	uint32_t f;
638 
639 	f = (flags & TEE_MATTR_URWX) | TEE_MATTR_UR | TEE_MATTR_PR;
640 	if (f & TEE_MATTR_UW)
641 		f |= TEE_MATTR_PW;
642 	f = get_area_mattr(f);
643 
644 	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
645 	cpu_spin_lock(&pager_lock);
646 
647 	while (s) {
648 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
649 		if (!area || area->base != b || area->size != s2) {
650 			ret = false;
651 			goto out;
652 		}
653 		b += s2;
654 		s -= s2;
655 
656 		TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
657 			if (pmem->area != area)
658 				continue;
659 			area_get_entry(pmem->area, pmem->pgidx, &pa, &a);
660 			if (a & TEE_MATTR_VALID_BLOCK)
661 				assert(pa == get_pmem_pa(pmem));
662 			else
663 				pa = get_pmem_pa(pmem);
664 			if (a == f)
665 				continue;
666 			area_set_entry(pmem->area, pmem->pgidx, 0, 0);
667 			/* TODO only invalidate entries touched above */
668 			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
669 			if (!(flags & TEE_MATTR_UW))
670 				tee_pager_save_page(pmem, a);
671 			area_set_entry(pmem->area, pmem->pgidx, pa, f);
672 		}
673 
674 		area->flags = f;
675 		area = TAILQ_NEXT(area, link);
676 	}
677 
678 	ret = true;
679 out:
680 	cpu_spin_unlock(&pager_lock);
681 	thread_set_exceptions(exceptions);
682 	return ret;
683 }
684 KEEP_PAGER(tee_pager_set_uta_area);
685 #endif /*CFG_PAGED_USER_TA*/
686 
687 static bool tee_pager_unhide_page(vaddr_t page_va)
688 {
689 	struct tee_pager_pmem *pmem;
690 
691 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
692 		paddr_t pa;
693 		uint32_t attr;
694 
695 		if (pmem->pgidx == INVALID_PGIDX)
696 			continue;
697 
698 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
699 
700 		if (!(attr &
701 		     (TEE_MATTR_HIDDEN_BLOCK | TEE_MATTR_HIDDEN_DIRTY_BLOCK)))
702 			continue;
703 
704 		if (area_va2idx(pmem->area, page_va) == pmem->pgidx) {
705 			uint32_t a = get_area_mattr(pmem->area->flags);
706 
707 			/* page is hidden, show and move to back */
708 			if (pa != get_pmem_pa(pmem))
709 				panic("unexpected pa");
710 
711 			/*
712 			 * If it's not a dirty block, then it should be
713 			 * read only.
714 			 */
715 			if (!(attr & TEE_MATTR_HIDDEN_DIRTY_BLOCK))
716 				a &= ~(TEE_MATTR_PW | TEE_MATTR_UW);
717 			else
718 				FMSG("Unhide %#" PRIxVA, page_va);
719 
723 			area_set_entry(pmem->area, pmem->pgidx, pa, a);
724 
725 			TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
726 			TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
727 
728 			/* TODO only invalidate entry touched above */
729 			core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
730 
731 			incr_hidden_hits();
732 			return true;
733 		}
734 	}
735 
736 	return false;
737 }
738 
739 static void tee_pager_hide_pages(void)
740 {
741 	struct tee_pager_pmem *pmem;
742 	size_t n = 0;
743 
744 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
745 		paddr_t pa;
746 		uint32_t attr;
747 		uint32_t a;
748 
749 		if (n >= TEE_PAGER_NHIDE)
750 			break;
751 		n++;
752 
753 		/* we cannot hide pages when pmem->area is not defined. */
754 		if (!pmem->area)
755 			continue;
756 
757 		area_get_entry(pmem->area, pmem->pgidx, &pa, &attr);
758 		if (!(attr & TEE_MATTR_VALID_BLOCK))
759 			continue;
760 
761 		assert(pa == get_pmem_pa(pmem));
762 		if (attr & (TEE_MATTR_PW | TEE_MATTR_UW)){
763 			a = TEE_MATTR_HIDDEN_DIRTY_BLOCK;
764 			FMSG("Hide %#" PRIxVA,
765 			     area_idx2va(pmem->area, pmem->pgidx));
766 		} else
767 			a = TEE_MATTR_HIDDEN_BLOCK;
768 		area_set_entry(pmem->area, pmem->pgidx, pa, a);
769 	}
770 
771 	/* TODO only invalidate entries touched above */
772 	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
773 }
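
/*
 * Hiding pages gives a rough working set approximation: up to
 * TEE_PAGER_NHIDE of the oldest pages are made inaccessible so the next
 * access to any of them faults. tee_pager_unhide_page() then remaps the
 * page and moves it to the tail of the list, which keeps recently used
 * pages at the back and leaves cold pages at the head where
 * tee_pager_get_page() picks its eviction victim.
 */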
774 
775 /*
776  * Find mapped pmem, unmap it and move it back to the pool of pageable pmem.
777  * Return false if the page was not mapped, and true if it was.
778  */
779 static bool tee_pager_release_one_phys(struct tee_pager_area *area,
780 				       vaddr_t page_va)
781 {
782 	struct tee_pager_pmem *pmem;
783 	unsigned pgidx;
784 	paddr_t pa;
785 	uint32_t attr;
786 
787 	pgidx = area_va2idx(area, page_va);
788 	area_get_entry(area, pgidx, &pa, &attr);
789 
790 	FMSG("%" PRIxVA " : %" PRIxPA "|%x", page_va, pa, attr);
791 
792 	TAILQ_FOREACH(pmem, &tee_pager_lock_pmem_head, link) {
793 		if (pmem->area != area || pmem->pgidx != pgidx)
794 			continue;
795 
796 		assert(pa == get_pmem_pa(pmem));
797 		area_set_entry(area, pgidx, 0, 0);
798 		pgt_dec_used_entries(area->pgt);
799 		TAILQ_REMOVE(&tee_pager_lock_pmem_head, pmem, link);
800 		pmem->area = NULL;
801 		pmem->pgidx = INVALID_PGIDX;
802 		tee_pager_npages++;
803 		set_npages();
804 		TAILQ_INSERT_HEAD(&tee_pager_pmem_head, pmem, link);
805 		incr_zi_released();
806 		return true;
807 	}
808 
809 	return false;
810 }
811 
812 /* Finds the oldest page and unmaps it from its old virtual address */
813 static struct tee_pager_pmem *tee_pager_get_page(struct tee_pager_area *area)
814 {
815 	struct tee_pager_pmem *pmem;
816 
817 	pmem = TAILQ_FIRST(&tee_pager_pmem_head);
818 	if (!pmem) {
819 		EMSG("No pmem entries");
820 		return NULL;
821 	}
822 	if (pmem->pgidx != INVALID_PGIDX) {
823 		uint32_t a;
824 
825 		assert(pmem->area && pmem->area->pgt);
826 		area_get_entry(pmem->area, pmem->pgidx, NULL, &a);
827 		area_set_entry(pmem->area, pmem->pgidx, 0, 0);
828 		pgt_dec_used_entries(pmem->area->pgt);
829 		/* TODO only invalidate entries touched above */
830 		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
831 		tee_pager_save_page(pmem, a);
832 	}
833 
834 	TAILQ_REMOVE(&tee_pager_pmem_head, pmem, link);
835 	pmem->pgidx = INVALID_PGIDX;
836 	pmem->area = NULL;
837 	if (area->type == AREA_TYPE_LOCK) {
838 		/* Move page to lock list */
839 		if (tee_pager_npages <= 0)
840 			panic("running out of page");
841 		tee_pager_npages--;
842 		set_npages();
843 		TAILQ_INSERT_TAIL(&tee_pager_lock_pmem_head, pmem, link);
844 	} else {
845 		/* move page to back */
846 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
847 	}
848 
849 	return pmem;
850 }
851 
852 static bool pager_update_permissions(struct tee_pager_area *area,
853 			struct abort_info *ai, bool *handled)
854 {
855 	unsigned int pgidx = area_va2idx(area, ai->va);
856 	uint32_t attr;
857 	paddr_t pa;
858 
859 	*handled = false;
860 
861 	area_get_entry(area, pgidx, &pa, &attr);
862 
863 	/* Not mapped */
864 	if (!(attr & TEE_MATTR_VALID_BLOCK))
865 		return false;
866 
867 	/* Not readable, should not happen */
868 	if (abort_is_user_exception(ai)) {
869 		if (!(attr & TEE_MATTR_UR))
870 			return true;
871 	} else {
872 		if (!(attr & TEE_MATTR_PR)) {
873 			abort_print_error(ai);
874 			panic();
875 		}
876 	}
877 
878 	switch (core_mmu_get_fault_type(ai->fault_descr)) {
879 	case CORE_MMU_FAULT_TRANSLATION:
880 	case CORE_MMU_FAULT_READ_PERMISSION:
881 		if (ai->abort_type == ABORT_TYPE_PREFETCH) {
882 			/* Check for an attempt to execute from an NOX page */
883 			if (abort_is_user_exception(ai)) {
884 				if (!(attr & TEE_MATTR_UX))
885 					return true;
886 			} else {
887 				if (!(attr & TEE_MATTR_PX)) {
888 					abort_print_error(ai);
889 					panic();
890 				}
891 			}
892 		}
893 		/* Since the page is mapped now it's OK */
894 		break;
895 	case CORE_MMU_FAULT_WRITE_PERMISSION:
896 		/* Check for an attempt to write to an RO page */
897 		if (abort_is_user_exception(ai)) {
898 			if (!(area->flags & TEE_MATTR_UW))
899 				return true;
900 			if (!(attr & TEE_MATTR_UW)) {
901 				FMSG("Dirty %p",
902 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
903 				area_set_entry(area, pgidx, pa,
904 					       get_area_mattr(area->flags));
905 				/* TODO only invalidate entry above */
906 				core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
907 			}
908 
909 		} else {
910 			if (!(area->flags & TEE_MATTR_PW)) {
911 				abort_print_error(ai);
912 				panic();
913 			}
914 			if (!(attr & TEE_MATTR_PW)) {
915 				FMSG("Dirty %p",
916 				     (void *)(ai->va & ~SMALL_PAGE_MASK));
917 				area_set_entry(area, pgidx, pa,
918 					       get_area_mattr(area->flags));
919 				/* TODO only invalidate entry above */
920 				core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
921 			}
922 		}
923 		/* Since the permissions have been updated it's now OK */
924 		break;
925 	default:
926 		/* Some fault we can't deal with */
927 		if (abort_is_user_exception(ai))
928 			return true;
929 		abort_print_error(ai);
930 		panic();
931 	}
932 	*handled = true;
933 	return true;
934 }
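
/*
 * Write permission faults double as dirty tracking: tee_pager_handle_fault()
 * maps pages without TEE_MATTR_PW/TEE_MATTR_UW even for writable areas, so
 * the first write to a page traps here. The entry is then upgraded to the
 * full area permissions, which is what makes tee_pager_save_page()
 * re-encrypt the page when it's evicted (see dirty_bits there).
 */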
935 
936 #ifdef CFG_TEE_CORE_DEBUG
937 static void stat_handle_fault(void)
938 {
939 	static size_t num_faults;
940 	static size_t min_npages = SIZE_MAX;
941 	static size_t total_min_npages = SIZE_MAX;
942 
943 	num_faults++;
944 	if ((num_faults % 1024) == 0 || tee_pager_npages < total_min_npages) {
945 		DMSG("nfaults %zu npages %zu (min %zu)",
946 		     num_faults, tee_pager_npages, min_npages);
947 		min_npages = tee_pager_npages; /* reset */
948 	}
949 	if (tee_pager_npages < min_npages)
950 		min_npages = tee_pager_npages;
951 	if (tee_pager_npages < total_min_npages)
952 		total_min_npages = tee_pager_npages;
953 }
954 #else
955 static void stat_handle_fault(void)
956 {
957 }
958 #endif
959 
960 bool tee_pager_handle_fault(struct abort_info *ai)
961 {
962 	struct tee_pager_area *area;
963 	vaddr_t page_va = ai->va & ~SMALL_PAGE_MASK;
964 	uint32_t exceptions;
965 	bool ret;
966 
967 #ifdef TEE_PAGER_DEBUG_PRINT
968 	abort_print(ai);
969 #endif
970 
971 	/*
972 	 * We're updating pages that can affect several active CPUs at a
973 	 * time below. We end up here because a thread tries to access some
974 	 * memory that isn't available. We have to be careful when making
975 	 * that memory available as other threads may succeed in accessing
976 	 * that address the moment after we've made it available.
977 	 *
978 	 * That means that we can't just map the memory and populate the
979 	 * page, instead we use the aliased mapping to populate the page
980 	 * and once everything is ready we map it.
981 	 */
982 	exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
983 	cpu_spin_lock(&pager_lock);
984 
985 	stat_handle_fault();
986 
987 	/* check if the access is valid */
988 	if (abort_is_user_exception(ai)) {
989 		area = find_uta_area(ai->va);
990 
991 	} else {
992 		area = find_area(&tee_pager_area_head, ai->va);
993 		if (!area)
994 			area = find_uta_area(ai->va);
995 	}
996 	if (!area) {
997 		ret = false;
998 		goto out;
999 	}
1000 
1001 	if (!tee_pager_unhide_page(page_va)) {
1002 		struct tee_pager_pmem *pmem = NULL;
1003 		uint32_t attr;
1004 
1005 		/*
1006 		 * The page wasn't hidden, but some other core may have
1007 		 * updated the table entry before we got here or we need
1008 		 * to make a read-only page read-write (dirty).
1009 		 */
1010 		if (pager_update_permissions(area, ai, &ret)) {
1011 			/*
1012 			 * Nothing more to do with the abort. The problem
1013 			 * could already have been dealt with from another
1014 			 * core or if ret is false the TA will be paniced.
1015 			 */
1016 			goto out;
1017 		}
1018 
1019 		pmem = tee_pager_get_page(area);
1020 		if (!pmem) {
1021 			abort_print(ai);
1022 			panic();
1023 		}
1024 
1025 		/* load page code & data */
1026 		tee_pager_load_page(area, page_va, pmem->va_alias);
1027 
1028 		/*
1029 		 * We've updated the page using the aliased mapping and
1030 		 * some cache maintenance is now needed if it's an
1031 		 * executable page.
1032 		 *
1033 		 * Since the d-cache is a Physically-indexed,
1034 		 * physically-tagged (PIPT) cache we can clean the aliased
1035 		 * address instead of the real virtual address.
1036 		 *
1037 		 * The i-cache can also be PIPT, but may be something else
1038 		 * too; to keep it simple we invalidate the entire i-cache.
1039 		 * As a future optimization we may invalidate only the
1040 		 * aliased area if it is a PIPT cache, else the entire cache.
1041 		 */
1042 		if (area->flags & (TEE_MATTR_PX | TEE_MATTR_UX)) {
1043 			/*
1044 			 * Doing these operations to LoUIS (Level of
1045 			 * unification, Inner Shareable) would be enough
1046 			 */
1047 			cache_maintenance_l1(DCACHE_AREA_CLEAN,
1048 				pmem->va_alias, SMALL_PAGE_SIZE);
1049 
1050 			cache_maintenance_l1(ICACHE_INVALIDATE, NULL, 0);
1051 		}
1052 
1053 		pmem->area = area;
1054 		pmem->pgidx = area_va2idx(area, ai->va);
1055 		attr = get_area_mattr(area->flags) &
1056 			~(TEE_MATTR_PW | TEE_MATTR_UW);
1057 		area_set_entry(area, pmem->pgidx, get_pmem_pa(pmem), attr);
1058 		pgt_inc_used_entries(area->pgt);
1059 
1060 		FMSG("Mapped 0x%" PRIxVA " -> 0x%" PRIxPA,
1061 		     area_idx2va(area, pmem->pgidx), get_pmem_pa(pmem));
1062 
1063 	}
1064 
1065 	tee_pager_hide_pages();
1066 	ret = true;
1067 out:
1068 	cpu_spin_unlock(&pager_lock);
1069 	thread_unmask_exceptions(exceptions);
1070 	return ret;
1071 }
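
/*
 * Fault handling summary:
 * 1. Find the area covering the faulting address (core or user TA).
 * 2. If the page was merely hidden, unhide it and return.
 * 3. Otherwise either fix up permissions (permission faults, dirty
 *    marking) or pick a victim page, populate it through the alias
 *    mapping, do the needed cache maintenance and map it read-only.
 * 4. Hide a batch of old pages to keep the working set estimate fresh.
 */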
1072 
1073 void tee_pager_add_pages(vaddr_t vaddr, size_t npages, bool unmap)
1074 {
1075 	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
1076 	size_t n;
1077 
1078 	DMSG("0x%" PRIxVA " - 0x%" PRIxVA " : %d",
1079 	     vaddr, vaddr + npages * SMALL_PAGE_SIZE, (int)unmap);
1080 
1081 	/* setup memory */
1082 	for (n = 0; n < npages; n++) {
1083 		struct tee_pager_pmem *pmem;
1084 		vaddr_t va = vaddr + n * SMALL_PAGE_SIZE;
1085 		unsigned pgidx = core_mmu_va2idx(ti, va);
1086 		paddr_t pa;
1087 		uint32_t attr;
1088 
1089 		/*
1090 		 * Note that we can only support adding pages in the
1091 		 * valid range of this table info; currently this is not a problem.
1092 		 */
1093 		core_mmu_get_entry(ti, pgidx, &pa, &attr);
1094 
1095 		/* Ignore unmapped pages/blocks */
1096 		if (!(attr & TEE_MATTR_VALID_BLOCK))
1097 			continue;
1098 
1099 		pmem = malloc(sizeof(struct tee_pager_pmem));
1100 		if (!pmem)
1101 			panic("out of mem");
1102 
1103 		pmem->va_alias = pager_add_alias_page(pa);
1104 
1105 		if (unmap) {
1106 			pmem->area = NULL;
1107 			pmem->pgidx = INVALID_PGIDX;
1108 			core_mmu_set_entry(ti, pgidx, 0, 0);
1109 			pgt_dec_used_entries(&pager_core_pgt);
1110 		} else {
1111 			/*
1112 			 * The page is still mapped, let's assign the area
1113 			 * and update the protection bits accordingly.
1114 			 */
1115 			pmem->area = find_area(&tee_pager_area_head, va);
1116 			assert(pmem->area->pgt == &pager_core_pgt);
1117 			pmem->pgidx = pgidx;
1118 			assert(pa == get_pmem_pa(pmem));
1119 			area_set_entry(pmem->area, pgidx, pa,
1120 				       get_area_mattr(pmem->area->flags));
1121 		}
1122 
1123 		tee_pager_npages++;
1124 		incr_npages_all();
1125 		set_npages();
1126 		TAILQ_INSERT_TAIL(&tee_pager_pmem_head, pmem, link);
1127 	}
1128 
1129 	/* Invalidate secure TLB */
1130 	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
1131 }
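
/*
 * The unmap flag separates two kinds of donated pages: pages whose current
 * contents can be discarded (unmap == true, they go straight to the free
 * pool) and pages that back already mapped pager virtual memory
 * (unmap == false, they stay mapped and are only put under pager control).
 */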
1132 
1133 #ifdef CFG_PAGED_USER_TA
1134 static struct pgt *find_pgt(struct pgt *pgt, vaddr_t va)
1135 {
1136 	struct pgt *p = pgt;
1137 
1138 	while (p && (va & ~CORE_MMU_PGDIR_MASK) != p->vabase)
1139 		p = SLIST_NEXT(p, link);
1140 	return p;
1141 }
1142 
1143 void tee_pager_assign_uta_tables(struct user_ta_ctx *utc)
1144 {
1145 	struct tee_pager_area *area;
1146 	struct pgt *pgt = SLIST_FIRST(&thread_get_tsd()->pgt_cache);
1147 
1148 	TAILQ_FOREACH(area, utc->areas, link) {
1149 		if (!area->pgt)
1150 			area->pgt = find_pgt(pgt, area->base);
1151 		else
1152 			assert(area->pgt == find_pgt(pgt, area->base));
1153 		if (!area->pgt)
1154 			panic();
1155 	}
1156 }
1157 
1158 void tee_pager_pgt_save_and_release_entries(struct pgt *pgt)
1159 {
1160 	struct tee_pager_pmem *pmem;
1161 	struct tee_pager_area *area;
1162 	uint32_t exceptions;
1163 	uint32_t attr;
1164 
1165 	exceptions = thread_mask_exceptions(THREAD_EXCP_IRQ);
1166 	cpu_spin_lock(&pager_lock);
1167 
1168 	if (!pgt->num_used_entries)
1169 		goto out;
1170 
1171 	TAILQ_FOREACH(pmem, &tee_pager_pmem_head, link) {
1172 		if (!pmem->area || pmem->pgidx == INVALID_PGIDX)
1173 			continue;
1174 		if (pmem->area->pgt == pgt) {
1175 			area_get_entry(pmem->area, pmem->pgidx, NULL, &attr);
1176 			area_set_entry(pmem->area, pmem->pgidx, 0, 0);
1177 			tee_pager_save_page(pmem, attr);
1178 			pmem->pgidx = INVALID_PGIDX;
1179 			pmem->area = NULL;
1180 			pgt->num_used_entries--;
1181 		}
1182 	}
1183 	assert(!pgt->num_used_entries);
1184 
1185 out:
1186 	if (is_user_ta_ctx(pgt->ctx)) {
1187 		TAILQ_FOREACH(area, to_user_ta_ctx(pgt->ctx)->areas, link) {
1188 			if (area->pgt == pgt)
1189 				area->pgt = NULL;
1190 		}
1191 	}
1192 
1193 	cpu_spin_unlock(&pager_lock);
1194 	thread_unmask_exceptions(exceptions);
1195 }
1196 KEEP_PAGER(tee_pager_pgt_save_and_release_entries);
1197 #endif /*CFG_PAGED_USER_TA*/
1198 
1199 void tee_pager_release_phys(void *addr, size_t size)
1200 {
1201 	bool unmapped = false;
1202 	vaddr_t va = (vaddr_t)addr;
1203 	vaddr_t begin = ROUNDUP(va, SMALL_PAGE_SIZE);
1204 	vaddr_t end = ROUNDDOWN(va + size, SMALL_PAGE_SIZE);
1205 	struct tee_pager_area *area;
1206 	uint32_t exceptions;
1207 
1208 	if (!size)
1209 		return;
1210 
1211 	area = find_area(&tee_pager_area_head, begin);
1212 	if (!area ||
1213 	    area != find_area(&tee_pager_area_head, end - SMALL_PAGE_SIZE))
1214 		panic();
1215 
1216 	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
1217 	cpu_spin_lock(&pager_lock);
1218 
1219 	for (va = begin; va < end; va += SMALL_PAGE_SIZE)
1220 		unmapped |= tee_pager_release_one_phys(area, va);
1221 
1222 	/* Invalidate secure TLB */
1223 	if (unmapped)
1224 		core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
1225 
1226 	cpu_spin_unlock(&pager_lock);
1227 	thread_set_exceptions(exceptions);
1228 }
1229 KEEP_PAGER(tee_pager_release_phys);
1230 
1231 void *tee_pager_alloc(size_t size, uint32_t flags)
1232 {
1233 	tee_mm_entry_t *mm;
1234 	uint32_t f = TEE_MATTR_PW | TEE_MATTR_PR | (flags & TEE_MATTR_LOCKED);
1235 
1236 	if (!size)
1237 		return NULL;
1238 
1239 	mm = tee_mm_alloc(&tee_mm_vcore, ROUNDUP(size, SMALL_PAGE_SIZE));
1240 	if (!mm)
1241 		return NULL;
1242 
1243 	tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
1244 				f, NULL, NULL);
1245 
1246 	return (void *)tee_mm_get_smem(mm);
1247 }
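
/*
 * Usage sketch (hypothetical caller, not part of this file): allocate
 * pageable, lockable core memory and release the physical pages when done.
 *
 *	void *buf = tee_pager_alloc(2 * SMALL_PAGE_SIZE, TEE_MATTR_LOCKED);
 *
 *	if (buf) {
 *		... use buf; pages are faulted in and pinned on first access ...
 *		tee_pager_release_phys(buf, 2 * SMALL_PAGE_SIZE);
 *	}
 *
 * tee_pager_release_phys() only finds pages on the lock list, so pairing
 * it with a TEE_MATTR_LOCKED allocation is assumed here.
 */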
1248