xref: /optee_os/core/arch/arm/mm/core_mmu_lpae.c (revision 6a2e17e924cd09bd2584f3113989232f6ee55ef5)
1 // SPDX-License-Identifier: (BSD-2-Clause AND BSD-3-Clause)
2 /*
3  * Copyright (c) 2015-2016, 2022 Linaro Limited
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  * this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  * this list of conditions and the following disclaimer in the documentation
14  * and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /*
30  * Copyright (c) 2014, 2022, ARM Limited and Contributors. All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions are met:
34  *
35  * Redistributions of source code must retain the above copyright notice, this
36  * list of conditions and the following disclaimer.
37  *
38  * Redistributions in binary form must reproduce the above copyright notice,
39  * this list of conditions and the following disclaimer in the documentation
40  * and/or other materials provided with the distribution.
41  *
42  * Neither the name of ARM nor the names of its contributors may be used
43  * to endorse or promote products derived from this software without specific
44  * prior written permission.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
50  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
56  * POSSIBILITY OF SUCH DAMAGE.
57  */
58 #include <platform_config.h>
59 
60 #include <arm.h>
61 #include <assert.h>
62 #include <compiler.h>
63 #include <config.h>
64 #include <inttypes.h>
65 #include <keep.h>
66 #include <kernel/boot.h>
67 #include <kernel/cache_helpers.h>
68 #include <kernel/linker.h>
69 #include <kernel/misc.h>
70 #include <kernel/panic.h>
71 #include <kernel/thread.h>
72 #include <kernel/tlb_helpers.h>
73 #include <memtag.h>
74 #include <mm/core_memprot.h>
75 #include <mm/pgt_cache.h>
76 #include <mm/phys_mem.h>
77 #include <stdalign.h>
78 #include <string.h>
79 #include <trace.h>
80 #include <types_ext.h>
81 #include <util.h>
82 
83 #ifndef DEBUG_XLAT_TABLE
84 #define DEBUG_XLAT_TABLE 0
85 #endif
86 
87 #if DEBUG_XLAT_TABLE
88 #define debug_print(...) DMSG_RAW(__VA_ARGS__)
89 #else
90 #define debug_print(...) ((void)0)
91 #endif
92 
93 
94 /*
95  * Miscellaneous MMU related constants
96  */
97 
98 #define INVALID_DESC		0x0
99 #define BLOCK_DESC		0x1
100 #define L3_BLOCK_DESC		0x3
101 #define TABLE_DESC		0x3
102 #define DESC_ENTRY_TYPE_MASK	0x3
103 
104 #define XN			(1ull << 2)
105 #define PXN			(1ull << 1)
106 #define CONT_HINT		(1ull << 0)
107 
108 #define UPPER_ATTRS(x)		(((x) & 0x7) << 52)
109 #define GP                      BIT64(50)   /* Guarded Page, Aarch64 FEAT_BTI */
110 #define NON_GLOBAL		(1ull << 9)
111 #define ACCESS_FLAG		(1ull << 8)
112 #define NSH			(0x0 << 6)
113 #define OSH			(0x2 << 6)
114 #define ISH			(0x3 << 6)
115 
116 #define AP_RO			(0x1 << 5)
117 #define AP_RW			(0x0 << 5)
118 #define AP_UNPRIV		(0x1 << 4)
119 
120 #define NS				(0x1 << 3)
121 #define LOWER_ATTRS_SHIFT		2
122 #define LOWER_ATTRS(x)			(((x) & 0xfff) << LOWER_ATTRS_SHIFT)
123 
124 #define ATTR_DEVICE_nGnRE_INDEX		0x0
125 #define ATTR_IWBWA_OWBWA_NTR_INDEX	0x1
126 #define ATTR_DEVICE_nGnRnE_INDEX	0x2
127 #define ATTR_TAGGED_NORMAL_MEM_INDEX	0x3
128 #define ATTR_INDEX_MASK			0x7
129 
130 #define ATTR_DEVICE_nGnRnE		(0x0)
131 #define ATTR_DEVICE_nGnRE		(0x4)
132 #define ATTR_IWBWA_OWBWA_NTR		(0xff)
133 /* Same as ATTR_IWBWA_OWBWA_NTR but with memory tagging.  */
134 #define ATTR_TAGGED_NORMAL_MEM		(0xf0)
135 
136 #define MAIR_ATTR_SET(attr, index)	(((uint64_t)attr) << ((index) << 3))
137 
138 #define OUTPUT_ADDRESS_MASK	(0x0000FFFFFFFFF000ULL)
139 
140 /* (internal) physical address size bits in EL3/EL1 */
141 #define TCR_PS_BITS_4GB		(0x0)
142 #define TCR_PS_BITS_64GB	(0x1)
143 #define TCR_PS_BITS_1TB		(0x2)
144 #define TCR_PS_BITS_4TB		(0x3)
145 #define TCR_PS_BITS_16TB	(0x4)
146 #define TCR_PS_BITS_256TB	(0x5)
147 #define TCR_PS_BITS_4PB		(0x6)
148 
149 #define UNSET_DESC		((uint64_t)-1)
150 
151 #define FOUR_KB_SHIFT		12
152 #define PAGE_SIZE_SHIFT		FOUR_KB_SHIFT
153 #define PAGE_SIZE		(1 << PAGE_SIZE_SHIFT)
154 #define PAGE_SIZE_MASK		(PAGE_SIZE - 1)
155 #define IS_PAGE_ALIGNED(addr)	(((addr) & PAGE_SIZE_MASK) == 0)
156 
157 #define XLAT_ENTRY_SIZE_SHIFT	3 /* Each MMU table entry is 8 bytes (1 << 3) */
158 #define XLAT_ENTRY_SIZE		(1 << XLAT_ENTRY_SIZE_SHIFT)
159 
160 #define XLAT_TABLE_SIZE_SHIFT	PAGE_SIZE_SHIFT
161 #define XLAT_TABLE_SIZE		(1 << XLAT_TABLE_SIZE_SHIFT)
162 
163 #define XLAT_TABLE_LEVEL_MAX	U(3)
164 
165 /* Values for number of entries in each MMU translation table */
166 #define XLAT_TABLE_ENTRIES_SHIFT (XLAT_TABLE_SIZE_SHIFT - XLAT_ENTRY_SIZE_SHIFT)
167 #define XLAT_TABLE_ENTRIES	(1 << XLAT_TABLE_ENTRIES_SHIFT)
168 #define XLAT_TABLE_ENTRIES_MASK	(XLAT_TABLE_ENTRIES - 1)
169 
170 /* Values to convert a memory address to an index into a translation table */
171 #define L3_XLAT_ADDRESS_SHIFT	PAGE_SIZE_SHIFT
172 #define L2_XLAT_ADDRESS_SHIFT	(L3_XLAT_ADDRESS_SHIFT + \
173 				 XLAT_TABLE_ENTRIES_SHIFT)
174 #define L1_XLAT_ADDRESS_SHIFT	(L2_XLAT_ADDRESS_SHIFT + \
175 				 XLAT_TABLE_ENTRIES_SHIFT)
176 #define L0_XLAT_ADDRESS_SHIFT	(L1_XLAT_ADDRESS_SHIFT + \
177 				 XLAT_TABLE_ENTRIES_SHIFT)
178 #define XLAT_ADDR_SHIFT(level)	(PAGE_SIZE_SHIFT + \
179 				 ((XLAT_TABLE_LEVEL_MAX - (level)) * \
180 				 XLAT_TABLE_ENTRIES_SHIFT))
181 
182 #define XLAT_BLOCK_SIZE(level)	(UL(1) << XLAT_ADDR_SHIFT(level))
183 
184 /* Base table */
185 #define BASE_XLAT_ADDRESS_SHIFT	XLAT_ADDR_SHIFT(CORE_MMU_BASE_TABLE_LEVEL)
186 #define BASE_XLAT_BLOCK_SIZE	XLAT_BLOCK_SIZE(CORE_MMU_BASE_TABLE_LEVEL)
187 
188 #define NUM_BASE_LEVEL_ENTRIES	\
189 	BIT(CFG_LPAE_ADDR_SPACE_BITS - BASE_XLAT_ADDRESS_SHIFT)
190 
191 /*
192  * MMU L1 table, one for each core
193  *
194  * With CFG_CORE_UNMAP_CORE_AT_EL0, each core has one table to be used
195  * while in kernel mode and one to be used while in user mode.
196  */
197 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0
198 #define NUM_BASE_TABLES	2
199 #else
200 #define NUM_BASE_TABLES	1
201 #endif
202 
203 #ifndef MAX_XLAT_TABLES
204 #ifdef CFG_NS_VIRTUALIZATION
205 #	define XLAT_TABLE_VIRTUALIZATION_EXTRA 3
206 #else
207 #	define XLAT_TABLE_VIRTUALIZATION_EXTRA 0
208 #endif
209 #ifdef CFG_CORE_ASLR
210 #	define XLAT_TABLE_ASLR_EXTRA 3
211 #else
212 #	define XLAT_TABLE_ASLR_EXTRA 0
213 #endif
214 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
215 #	define XLAT_TABLE_TEE_EXTRA 8
216 #	define XLAT_TABLE_USER_EXTRA (NUM_BASE_TABLES * CFG_TEE_CORE_NB_CORE)
217 #else
218 #	define XLAT_TABLE_TEE_EXTRA 5
219 #	define XLAT_TABLE_USER_EXTRA 0
220 #endif
221 #define MAX_XLAT_TABLES		(XLAT_TABLE_TEE_EXTRA + \
222 				 XLAT_TABLE_VIRTUALIZATION_EXTRA + \
223 				 XLAT_TABLE_ASLR_EXTRA + \
224 				 XLAT_TABLE_USER_EXTRA + \
225 				 IS_ENABLED(CFG_DYN_CONFIG))
226 #endif /*!MAX_XLAT_TABLES*/
227 
228 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
229 #if (MAX_XLAT_TABLES <= UINT8_MAX)
230 typedef uint8_t l1_idx_t;
231 #elif (MAX_XLAT_TABLES <= UINT16_MAX)
232 typedef uint16_t l1_idx_t;
233 #else
234 #error MAX_XLAT_TABLES is suspiciously large, please check
235 #endif
236 #endif
237 
238 /*
239  * The global base translation table is a three dimensional array (array of
240  * array of array), but it's easier to visualize if broken down into
241  * components.
242  *
243  * TTBR is assigned a base translation table of NUM_BASE_LEVEL_ENTRIES
244  * entries. NUM_BASE_LEVEL_ENTRIES is determined based on
245  * CFG_LPAE_ADDR_SPACE_BITS.  CFG_LPAE_ADDR_SPACE_BITS is by default 32
246  * which results in NUM_BASE_LEVEL_ENTRIES defined to 4 where one entry is
247  * a uint64_t, 8 bytes.
248  *
249  * If CFG_CORE_UNMAP_CORE_AT_EL0=y there are two base translation tables,
250  * one for OP-TEE core with full mapping of both EL1 and EL0, and one for
251  * EL0 where EL1 is unmapped except for a minimal trampoline needed to
252  * restore EL1 mappings on exception from EL0.
253  *
254  * Each CPU core is assigned a unique set of base translation tables as:
255  * core0: table0: entry0 (table0 maps both EL1 and EL0)
256  *                entry1
257  *                entry2
258  *                entry3
259  * core0: table1: entry0 (table1 maps only EL0)
260  *                entry1
261  *                entry2
262  *                entry3
263  * core1: ...
264  *
265  * The base translation table is by default a level 1 table. It can also be
266  * configured as a level 0 table with CFG_LPAE_ADDR_SPACE_BITS >= 40 and <=
267  * 48.
268  */
269 
270 /* The size of base tables for one core */
271 #define BASE_TABLE_SIZE		(NUM_BASE_LEVEL_ENTRIES * NUM_BASE_TABLES * \
272 				 XLAT_ENTRY_SIZE)
273 #ifndef CFG_DYN_CONFIG
274 static uint64_t base_xlation_table[BASE_TABLE_SIZE * CFG_TEE_CORE_NB_CORE /
275 				   XLAT_ENTRY_SIZE]
276 	__aligned(NUM_BASE_LEVEL_ENTRIES * XLAT_ENTRY_SIZE)
277 	__section(".nozi.mmu.base_table");
278 
279 static uint64_t xlat_tables[XLAT_TABLE_SIZE * MAX_XLAT_TABLES /
280 			    XLAT_ENTRY_SIZE]
281 	__aligned(XLAT_TABLE_SIZE) __section(".nozi.mmu.l2");
282 
283 /* MMU L2 table for TAs, one for each thread */
284 static uint64_t xlat_tables_ul1[XLAT_TABLE_SIZE * CFG_NUM_THREADS /
285 				XLAT_ENTRY_SIZE]
286 	__aligned(XLAT_TABLE_SIZE) __section(".nozi.mmu.l2");
287 
288 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
289 static l1_idx_t user_l1_table_idx[NUM_BASE_TABLES * CFG_TEE_CORE_NB_CORE];
290 #endif
291 #endif
292 
293 /*
294  * TAs page table entry inside a level 1 page table.
295  *
296  * TAs mapping is expected to start from level 2.
297  *
298  * If base level is 1 then this is the index of a level 1 entry,
299  * that will point directly into TA mapping table.
300  *
301  * If base level is 0 then entry 0 in base table is always used, and then
302  * we fallback to "base level == 1" like scenario.
303  */
304 static int user_va_idx __nex_data = -1;
305 
306 /*
307  * struct mmu_partition - virtual memory of a partition
308  * @base_tables:       The global base translation table described above
309  * @xlat_tables:       Preallocated array of translation tables
310  * @l2_ta_tables:      The level 2 table used to map TAs at EL0
311  * @xlat_tables_used:  The number of used translation tables from @xlat_tables
312  * @asid:              Address space ID used for core mappings
313  * @user_l1_table_idx: Index into @xlat_tables for the entry used to map the
314  *                     level 2 table @l2_ta_tables
315  *
316  * With CORE_MMU_BASE_TABLE_LEVEL = 1 translation tables are ordered as:
317  * @base_tables is a level 1 table where @user_va_idx above is used as
318  * base_tables[user_va_idx] to identify the entry used by @l2_ta_tables.
319  *
320  * With CORE_MMU_BASE_TABLE_LEVEL = 0 translation tables are ordered as:
321  * @base_tables is a level 0 table where base_tables[0] identifies the level 1
322  * table indexed with
323  * xlat_tables[user_l1_table_idx[0/1][core_id]][user_va_idx] to find the
324  * entry used by @l2_ta_tables.
325  *
326  * With CFG_NS_VIRTUALIZATION disabled there is only one @default_partition
327  * (below) describing virtual memory mappings.
328  *
329  * With CFG_NS_VIRTUALIZATION enabled there's one struct mmu_partition
330  * allocated for each partition.
331  */
332 struct mmu_partition {
333 	uint64_t *base_tables;
334 	uint64_t *xlat_tables;
335 	uint64_t *l2_ta_tables;
336 	unsigned int xlat_tables_used;
337 	unsigned int asid;
338 
339 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
340 	/*
341 	 * Indexes of the L1 table from 'xlat_tables'
342 	 * that points to the user mappings.
343 	 */
344 	l1_idx_t *user_l1_table_idx;
345 #endif
346 };
347 
348 #ifdef CFG_DYN_CONFIG
349 static struct mmu_partition default_partition __nex_bss;
350 #else
351 static struct mmu_partition default_partition __nex_data = {
352 	.base_tables = base_xlation_table,
353 	.xlat_tables = xlat_tables,
354 	.l2_ta_tables = xlat_tables_ul1,
355 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
356 	.user_l1_table_idx = user_l1_table_idx,
357 #endif
358 	.xlat_tables_used = 0,
359 	.asid = 0
360 };
361 #endif
362 
363 #ifdef CFG_NS_VIRTUALIZATION
364 static struct mmu_partition *current_prtn[CFG_TEE_CORE_NB_CORE] __nex_bss;
365 #endif
366 
get_prtn(void)367 static struct mmu_partition *get_prtn(void)
368 {
369 #ifdef CFG_NS_VIRTUALIZATION
370 	struct mmu_partition *ret;
371 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
372 
373 	ret = current_prtn[get_core_pos()];
374 
375 	thread_unmask_exceptions(exceptions);
376 	return ret;
377 #else
378 	return &default_partition;
379 #endif
380 }
381 
desc_to_mattr(unsigned level,uint64_t desc)382 static uint32_t desc_to_mattr(unsigned level, uint64_t desc)
383 {
384 	uint32_t a;
385 
386 	if (!(desc & 1))
387 		return 0;
388 
389 	if (level == XLAT_TABLE_LEVEL_MAX) {
390 		if ((desc & DESC_ENTRY_TYPE_MASK) != L3_BLOCK_DESC)
391 			return 0;
392 	} else {
393 		if ((desc & DESC_ENTRY_TYPE_MASK) == TABLE_DESC)
394 			return TEE_MATTR_TABLE;
395 	}
396 
397 	a = TEE_MATTR_VALID_BLOCK;
398 
399 	if (desc & LOWER_ATTRS(ACCESS_FLAG))
400 		a |= TEE_MATTR_PRX | TEE_MATTR_URX;
401 
402 	if (!(desc & LOWER_ATTRS(AP_RO)))
403 		a |= TEE_MATTR_PW | TEE_MATTR_UW;
404 
405 	if (!(desc & LOWER_ATTRS(AP_UNPRIV)))
406 		a &= ~TEE_MATTR_URWX;
407 
408 	if (desc & UPPER_ATTRS(XN))
409 		a &= ~(TEE_MATTR_PX | TEE_MATTR_UX);
410 
411 	if (desc & UPPER_ATTRS(PXN))
412 		a &= ~TEE_MATTR_PX;
413 
414 	COMPILE_TIME_ASSERT(ATTR_DEVICE_nGnRnE_INDEX ==
415 			    TEE_MATTR_MEM_TYPE_STRONGLY_O);
416 	COMPILE_TIME_ASSERT(ATTR_DEVICE_nGnRE_INDEX == TEE_MATTR_MEM_TYPE_DEV);
417 	COMPILE_TIME_ASSERT(ATTR_IWBWA_OWBWA_NTR_INDEX ==
418 			    TEE_MATTR_MEM_TYPE_CACHED);
419 	COMPILE_TIME_ASSERT(ATTR_TAGGED_NORMAL_MEM_INDEX ==
420 			    TEE_MATTR_MEM_TYPE_TAGGED);
421 
422 	a |= ((desc & LOWER_ATTRS(ATTR_INDEX_MASK)) >> LOWER_ATTRS_SHIFT) <<
423 	     TEE_MATTR_MEM_TYPE_SHIFT;
424 
425 	if (!(desc & LOWER_ATTRS(NON_GLOBAL)))
426 		a |= TEE_MATTR_GLOBAL;
427 
428 	if (!(desc & LOWER_ATTRS(NS)))
429 		a |= TEE_MATTR_SECURE;
430 
431 	if (desc & GP)
432 		a |= TEE_MATTR_GUARDED;
433 
434 	return a;
435 }
436 
mattr_to_desc(unsigned level,uint32_t attr)437 static uint64_t mattr_to_desc(unsigned level, uint32_t attr)
438 {
439 	uint64_t desc;
440 	uint32_t a = attr;
441 
442 	if (a & TEE_MATTR_TABLE)
443 		return TABLE_DESC;
444 
445 	if (!(a & TEE_MATTR_VALID_BLOCK))
446 		return 0;
447 
448 	if (a & (TEE_MATTR_PX | TEE_MATTR_PW))
449 		a |= TEE_MATTR_PR;
450 	if (a & (TEE_MATTR_UX | TEE_MATTR_UW))
451 		a |= TEE_MATTR_UR;
452 	if (a & TEE_MATTR_UR)
453 		a |= TEE_MATTR_PR;
454 	if (a & TEE_MATTR_UW)
455 		a |= TEE_MATTR_PW;
456 
457 	if (IS_ENABLED(CFG_CORE_BTI) && (a & TEE_MATTR_PX))
458 		a |= TEE_MATTR_GUARDED;
459 
460 	if (level == XLAT_TABLE_LEVEL_MAX)
461 		desc = L3_BLOCK_DESC;
462 	else
463 		desc = BLOCK_DESC;
464 
465 	if (!(a & (TEE_MATTR_PX | TEE_MATTR_UX)))
466 		desc |= UPPER_ATTRS(XN);
467 	if (!(a & TEE_MATTR_PX))
468 		desc |= UPPER_ATTRS(PXN);
469 
470 	if (a & TEE_MATTR_UR)
471 		desc |= LOWER_ATTRS(AP_UNPRIV);
472 
473 	if (!(a & TEE_MATTR_PW))
474 		desc |= LOWER_ATTRS(AP_RO);
475 
476 	if (feat_bti_is_implemented() && (a & TEE_MATTR_GUARDED))
477 		desc |= GP;
478 
479 	/* Keep in sync with core_mmu.c:core_mmu_mattr_is_ok */
480 	switch ((a >> TEE_MATTR_MEM_TYPE_SHIFT) & TEE_MATTR_MEM_TYPE_MASK) {
481 	case TEE_MATTR_MEM_TYPE_STRONGLY_O:
482 		desc |= LOWER_ATTRS(ATTR_DEVICE_nGnRnE_INDEX | OSH);
483 		break;
484 	case TEE_MATTR_MEM_TYPE_DEV:
485 		desc |= LOWER_ATTRS(ATTR_DEVICE_nGnRE_INDEX | OSH);
486 		break;
487 	case TEE_MATTR_MEM_TYPE_CACHED:
488 		desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
489 		break;
490 	case TEE_MATTR_MEM_TYPE_TAGGED:
491 		desc |= LOWER_ATTRS(ATTR_TAGGED_NORMAL_MEM_INDEX | ISH);
492 		break;
493 	default:
494 		/*
495 		 * "Can't happen" the attribute is supposed to be checked
496 		 * with core_mmu_mattr_is_ok() before.
497 		 */
498 		panic();
499 	}
500 
501 	if (a & (TEE_MATTR_UR | TEE_MATTR_PR))
502 		desc |= LOWER_ATTRS(ACCESS_FLAG);
503 
504 	if (!(a & TEE_MATTR_GLOBAL))
505 		desc |= LOWER_ATTRS(NON_GLOBAL);
506 
507 	desc |= a & TEE_MATTR_SECURE ? 0 : LOWER_ATTRS(NS);
508 
509 	return desc;
510 }
511 
get_base_table(struct mmu_partition * prtn,size_t tbl_idx,size_t core_pos)512 static uint64_t *get_base_table(struct mmu_partition *prtn, size_t tbl_idx,
513 				size_t core_pos)
514 {
515 	assert(tbl_idx < NUM_BASE_TABLES);
516 	assert(core_pos < CFG_TEE_CORE_NB_CORE);
517 
518 	return  prtn->base_tables + (core_pos * NUM_BASE_TABLES + tbl_idx) *
519 				    NUM_BASE_LEVEL_ENTRIES;
520 }
521 
get_l2_ta_tables(struct mmu_partition * prtn,size_t thread_id)522 static uint64_t *get_l2_ta_tables(struct mmu_partition *prtn, size_t thread_id)
523 {
524 	assert(thread_id < CFG_NUM_THREADS);
525 
526 	return prtn->l2_ta_tables + XLAT_TABLE_ENTRIES * thread_id;
527 }
528 
529 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
get_l1_ta_table(struct mmu_partition * prtn,size_t base_idx,size_t core_pos)530 static uint64_t *get_l1_ta_table(struct mmu_partition *prtn, size_t base_idx,
531 				 size_t core_pos)
532 {
533 	size_t idx = 0;
534 	uint64_t *tbl = NULL;
535 
536 	idx = prtn->user_l1_table_idx[core_pos * NUM_BASE_TABLES + base_idx];
537 	tbl = (void *)((vaddr_t)prtn->xlat_tables + idx * XLAT_TABLE_SIZE);
538 	return tbl;
539 }
540 
set_l1_ta_table(struct mmu_partition * prtn,size_t base_idx,size_t core_pos,uint64_t * tbl)541 static void set_l1_ta_table(struct mmu_partition *prtn, size_t base_idx,
542 			    size_t core_pos, uint64_t *tbl)
543 {
544 	size_t idx = 0;
545 
546 	idx = ((vaddr_t)tbl - (vaddr_t)prtn->xlat_tables) / XLAT_TABLE_SIZE;
547 	assert(idx < prtn->xlat_tables_used);
548 	prtn->user_l1_table_idx[core_pos * NUM_BASE_TABLES + base_idx] = idx;
549 }
550 #endif
551 
552 #ifdef CFG_NS_VIRTUALIZATION
core_mmu_get_total_pages_size(void)553 size_t core_mmu_get_total_pages_size(void)
554 {
555 	size_t sz = ROUNDUP(BASE_TABLE_SIZE * CFG_TEE_CORE_NB_CORE,
556 			    SMALL_PAGE_SIZE);
557 
558 	sz += XLAT_TABLE_SIZE * CFG_NUM_THREADS;
559 	if (!IS_ENABLED(CFG_DYN_CONFIG))
560 		sz += XLAT_TABLE_SIZE * MAX_XLAT_TABLES;
561 
562 	return sz;
563 }
564 
core_alloc_mmu_prtn(void * tables)565 struct mmu_partition *core_alloc_mmu_prtn(void *tables)
566 {
567 	struct mmu_partition *prtn;
568 	uint8_t *tbl = tables;
569 	unsigned int asid = asid_alloc();
570 
571 	assert(((vaddr_t)tbl) % SMALL_PAGE_SIZE == 0);
572 
573 	if (!asid)
574 		return NULL;
575 
576 	prtn = nex_malloc(sizeof(*prtn));
577 	if (!prtn)
578 		goto err;
579 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
580 	prtn->user_l1_table_idx = nex_calloc(NUM_BASE_TABLES *
581 					     CFG_TEE_CORE_NB_CORE,
582 					     sizeof(l1_idx_t));
583 	if (!prtn->user_l1_table_idx)
584 		goto err;
585 #endif
586 
587 	memset(tables, 0, core_mmu_get_total_pages_size());
588 	prtn->base_tables = (void *)tbl;
589 	tbl += ROUNDUP(BASE_TABLE_SIZE * CFG_TEE_CORE_NB_CORE, SMALL_PAGE_SIZE);
590 
591 	if (!IS_ENABLED(CFG_DYN_CONFIG)) {
592 		prtn->xlat_tables = (void *)tbl;
593 		tbl += XLAT_TABLE_SIZE * MAX_XLAT_TABLES;
594 		assert(((vaddr_t)tbl) % SMALL_PAGE_SIZE == 0);
595 	}
596 
597 	prtn->l2_ta_tables = (void *)tbl;
598 	prtn->xlat_tables_used = 0;
599 	prtn->asid = asid;
600 
601 	return prtn;
602 err:
603 	nex_free(prtn);
604 	asid_free(asid);
605 	return NULL;
606 }
607 
core_free_mmu_prtn(struct mmu_partition * prtn)608 void core_free_mmu_prtn(struct mmu_partition *prtn)
609 {
610 	asid_free(prtn->asid);
611 	nex_free(prtn);
612 }
613 
core_mmu_set_prtn(struct mmu_partition * prtn)614 void core_mmu_set_prtn(struct mmu_partition *prtn)
615 {
616 	uint64_t ttbr;
617 	/*
618 	 * We are changing mappings for current CPU,
619 	 * so make sure that we will not be rescheduled
620 	 */
621 	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
622 
623 	current_prtn[get_core_pos()] = prtn;
624 
625 	ttbr = virt_to_phys(get_base_table(prtn, 0, get_core_pos()));
626 
627 	write_ttbr0_el1(ttbr | ((paddr_t)prtn->asid << TTBR_ASID_SHIFT));
628 	isb();
629 	tlbi_all();
630 }
631 
core_mmu_set_default_prtn(void)632 void core_mmu_set_default_prtn(void)
633 {
634 	core_mmu_set_prtn(&default_partition);
635 }
636 
core_mmu_set_default_prtn_tbl(void)637 void core_mmu_set_default_prtn_tbl(void)
638 {
639 	size_t n = 0;
640 
641 	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
642 		current_prtn[n] = &default_partition;
643 }
644 #endif
645 
core_mmu_xlat_table_alloc(struct mmu_partition * prtn)646 static uint64_t *core_mmu_xlat_table_alloc(struct mmu_partition *prtn)
647 {
648 	uint64_t *new_table = NULL;
649 
650 	if (IS_ENABLED(CFG_DYN_CONFIG)) {
651 		if (cpu_mmu_enabled()) {
652 			tee_mm_entry_t *mm = NULL;
653 			paddr_t pa = 0;
654 
655 			/*
656 			 * The default_partition only has a physical memory
657 			 * pool for the nexus when virtualization is
658 			 * enabled. We should use the nexus physical memory
659 			 * pool if we're allocating memory for another
660 			 * partition than our own.
661 			 */
662 			if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
663 			    (prtn == &default_partition ||
664 			     prtn != get_prtn())) {
665 				mm = nex_phys_mem_core_alloc(XLAT_TABLE_SIZE);
666 				if (!mm)
667 					EMSG("Phys nex mem exhausted");
668 			} else {
669 				mm = phys_mem_core_alloc(XLAT_TABLE_SIZE);
670 				if (!mm)
671 					EMSG("Phys mem exhausted");
672 			}
673 			if (!mm)
674 				return NULL;
675 			pa = tee_mm_get_smem(mm);
676 
677 			new_table = phys_to_virt(pa, MEM_AREA_SEC_RAM_OVERALL,
678 						 XLAT_TABLE_SIZE);
679 			assert(new_table);
680 		} else {
681 			new_table = boot_mem_alloc(XLAT_TABLE_SIZE,
682 						   XLAT_TABLE_SIZE);
683 			if (prtn->xlat_tables) {
684 				/*
685 				 * user_l1_table_idx[] is used to index
686 				 * xlat_tables so we depend on the
687 				 * xlat_tables are linearly allocated or
688 				 * l1_idx_t might need a wider type.
689 				 */
690 				assert((vaddr_t)prtn->xlat_tables +
691 				       prtn->xlat_tables_used *
692 				       XLAT_TABLE_SIZE == (vaddr_t)new_table);
693 			} else {
694 				boot_mem_add_reloc(&prtn->xlat_tables);
695 				prtn->xlat_tables = new_table;
696 			}
697 		}
698 		prtn->xlat_tables_used++;
699 		DMSG("xlat tables used %u", prtn->xlat_tables_used);
700 	} else {
701 		if (prtn->xlat_tables_used >= MAX_XLAT_TABLES) {
702 			EMSG("%u xlat tables exhausted", MAX_XLAT_TABLES);
703 
704 			return NULL;
705 		}
706 
707 		new_table = prtn->xlat_tables +
708 			    prtn->xlat_tables_used * XLAT_TABLE_ENTRIES;
709 		prtn->xlat_tables_used++;
710 
711 		DMSG("xlat tables used %u / %u",
712 		     prtn->xlat_tables_used, MAX_XLAT_TABLES);
713 	}
714 
715 	return new_table;
716 }
717 
718 /*
719  * Given an entry that points to a table returns the virtual address
720  * of the pointed table. NULL otherwise.
721  */
core_mmu_xlat_table_entry_pa2va(struct mmu_partition * prtn,unsigned int level,uint64_t entry)722 static void *core_mmu_xlat_table_entry_pa2va(struct mmu_partition *prtn,
723 					     unsigned int level,
724 					     uint64_t entry)
725 {
726 	paddr_t pa = 0;
727 	void *va = NULL;
728 
729 	if ((entry & DESC_ENTRY_TYPE_MASK) != TABLE_DESC ||
730 	    level >= XLAT_TABLE_LEVEL_MAX)
731 		return NULL;
732 
733 	pa = entry & OUTPUT_ADDRESS_MASK;
734 
735 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION) || prtn == &default_partition)
736 		va = phys_to_virt(pa, MEM_AREA_TEE_RAM_RW_DATA,
737 				  XLAT_TABLE_SIZE);
738 	if (!va)
739 		va = phys_to_virt(pa, MEM_AREA_SEC_RAM_OVERALL,
740 				  XLAT_TABLE_SIZE);
741 
742 	return va;
743 }
744 
745 /*
746  * For a table entry that points to a table - allocate and copy to
747  * a new pointed table. This is done for the requested entry,
748  * without going deeper into the pointed table entries.
749  *
750  * A success is returned for non-table entries, as nothing to do there.
751  */
752 __maybe_unused
core_mmu_entry_copy(struct core_mmu_table_info * tbl_info,unsigned int idx)753 static bool core_mmu_entry_copy(struct core_mmu_table_info *tbl_info,
754 				unsigned int idx)
755 {
756 	uint64_t *orig_table = NULL;
757 	uint64_t *new_table = NULL;
758 	uint64_t *entry = NULL;
759 	struct mmu_partition *prtn = NULL;
760 
761 #ifdef CFG_NS_VIRTUALIZATION
762 	prtn = tbl_info->prtn;
763 #else
764 	prtn = &default_partition;
765 #endif
766 	assert(prtn);
767 
768 	if (idx >= tbl_info->num_entries)
769 		return false;
770 
771 	entry = (uint64_t *)tbl_info->table + idx;
772 
773 	/* Nothing to do for non-table entries */
774 	if ((*entry & DESC_ENTRY_TYPE_MASK) != TABLE_DESC ||
775 	    tbl_info->level >= XLAT_TABLE_LEVEL_MAX)
776 		return true;
777 
778 	new_table = core_mmu_xlat_table_alloc(prtn);
779 	if (!new_table)
780 		return false;
781 
782 	orig_table = core_mmu_xlat_table_entry_pa2va(prtn, tbl_info->level,
783 						     *entry);
784 	if (!orig_table)
785 		return false;
786 
787 	/* Copy original table content to new table */
788 	memcpy(new_table, orig_table, XLAT_TABLE_ENTRIES * XLAT_ENTRY_SIZE);
789 
790 	/* Point to the new table */
791 	*entry = virt_to_phys(new_table) | (*entry & ~OUTPUT_ADDRESS_MASK);
792 
793 	return true;
794 }
795 
share_region(struct mmu_partition * dst_prtn,struct mmu_partition * src_prtn,struct tee_mmap_region * mm)796 static void share_region(struct mmu_partition *dst_prtn,
797 			 struct mmu_partition *src_prtn,
798 			 struct tee_mmap_region *mm)
799 {
800 	unsigned int level = CORE_MMU_PGDIR_LEVEL - 1;
801 	struct core_mmu_table_info dst_ti = { };
802 	struct core_mmu_table_info src_ti = { };
803 	struct tee_mmap_region dummy_mm = *mm;
804 	ssize_t size_left = 0;
805 	unsigned int idx = 0;
806 	uint32_t attr = 0;
807 	paddr_t pa = 0;
808 	vaddr_t va = 0;
809 
810 	assert(!(mm->size % CORE_MMU_PGDIR_SIZE));
811 
812 	dummy_mm.region_size = CORE_MMU_PGDIR_SIZE;
813 	core_mmu_map_region(dst_prtn, &dummy_mm);
814 
815 	/*
816 	 * Assign the PGDIR translation tables used in the src_prtn for the
817 	 * memory region into the same virtual address in the dst_prtn.
818 	 * This is used to share dynamic nexus mappings between partitions.
819 	 */
820 	va = mm->va;
821 	size_left = mm->size;
822 	while (size_left > 0) {
823 		/*
824 		 * The loop is typically only one iteration so there's no
825 		 * need to try to be clever with the table lookup.
826 		 */
827 		if (!core_mmu_find_table(src_prtn, va, level, &src_ti))
828 			panic("can't find src table for mapping");
829 		if (!core_mmu_find_table(dst_prtn, va, level, &dst_ti))
830 			panic("can't find dst table for mapping");
831 
832 		/*
833 		 * If two mmap regions share the same table we'll overwrite
834 		 * the value with the same value. This doesn't happen often
835 		 * enough that it's worth trying to be clever about when to
836 		 * write.
837 		 */
838 		idx = core_mmu_va2idx(&src_ti, va);
839 		core_mmu_get_entry(&src_ti, idx, &pa, &attr);
840 		core_mmu_set_entry(&dst_ti, idx, pa, attr);
841 
842 		va += CORE_MMU_PGDIR_SIZE;
843 		size_left -= CORE_MMU_PGDIR_SIZE;
844 	}
845 }
846 
core_init_mmu_prtn_tee(struct mmu_partition * prtn,struct memory_map * mem_map)847 static void core_init_mmu_prtn_tee(struct mmu_partition *prtn,
848 				   struct memory_map *mem_map)
849 {
850 	size_t n = 0;
851 
852 	assert(prtn && mem_map);
853 
854 	for (n = 0; n < mem_map->count; n++) {
855 		struct tee_mmap_region *mm = mem_map->map + n;
856 		debug_print(" %010" PRIxVA " %010" PRIxPA " %10zx %x",
857 			    mm->va, mm->pa, mm->size, mm->attr);
858 
859 		if (!IS_PAGE_ALIGNED(mm->pa) || !IS_PAGE_ALIGNED(mm->size))
860 			panic("unaligned region");
861 	}
862 
863 	/* Clear table before use */
864 	memset(prtn->base_tables, 0, BASE_TABLE_SIZE * CFG_TEE_CORE_NB_CORE);
865 
866 	for (n = 0; n < mem_map->count; n++) {
867 		if (core_mmu_type_is_nex_shared(mem_map->map[n].type) &&
868 		    prtn != &default_partition)
869 			share_region(prtn, &default_partition,
870 				     mem_map->map + n);
871 		else
872 			core_mmu_map_region(prtn, mem_map->map + n);
873 	}
874 
875 	/*
876 	 * Primary mapping table is ready at index `get_core_pos()`
877 	 * whose value may not be ZERO. Take this index as copy source.
878 	 */
879 	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
880 		if (n == get_core_pos())
881 			continue;
882 
883 		memcpy(get_base_table(prtn, 0, n),
884 		       get_base_table(prtn, 0, get_core_pos()),
885 		       XLAT_ENTRY_SIZE * NUM_BASE_LEVEL_ENTRIES);
886 	}
887 }
888 
889 /*
890  * In order to support 32-bit TAs we will have to find
891  * a user VA base in the region [1GB, 4GB[.
892  * Due to OP-TEE design limitation, TAs page table should be an entry
893  * inside a level 1 page table.
894  *
895  * Available options are only these:
896  * - base level 0 entry 0 - [0GB, 512GB[
897  *   - level 1 entry 0 - [0GB, 1GB[
898  *   - level 1 entry 1 - [1GB, 2GB[           <----
899  *   - level 1 entry 2 - [2GB, 3GB[           <----
900  *   - level 1 entry 3 - [3GB, 4GB[           <----
901  *   - level 1 entry 4 - [4GB, 5GB[
902  *   - ...
903  * - ...
904  *
905  * - base level 1 entry 0 - [0GB, 1GB[
906  * - base level 1 entry 1 - [1GB, 2GB[        <----
907  * - base level 1 entry 2 - [2GB, 3GB[        <----
908  * - base level 1 entry 3 - [3GB, 4GB[        <----
909  * - base level 1 entry 4 - [4GB, 5GB[
910  * - ...
911  */
set_user_va_idx(struct mmu_partition * prtn)912 static void set_user_va_idx(struct mmu_partition *prtn)
913 {
914 	uint64_t *tbl = NULL;
915 	unsigned int n = 0;
916 
917 	assert(prtn);
918 
919 	tbl = get_base_table(prtn, 0, get_core_pos());
920 
921 	/*
922 	 * If base level is 0, then we must use its entry 0.
923 	 */
924 	if (CORE_MMU_BASE_TABLE_LEVEL == 0) {
925 		/*
926 		 * If base level 0 entry 0 is not used then
927 		 * it's clear that we can use level 1 entry 1 inside it.
928 		 * (will be allocated later).
929 		 */
930 		if ((tbl[0] & DESC_ENTRY_TYPE_MASK) == INVALID_DESC) {
931 			user_va_idx = 1;
932 
933 			return;
934 		}
935 
936 		assert((tbl[0] & DESC_ENTRY_TYPE_MASK) == TABLE_DESC);
937 
938 		tbl = core_mmu_xlat_table_entry_pa2va(prtn, 0, tbl[0]);
939 		assert(tbl);
940 	}
941 
942 	/*
943 	 * Search level 1 table (i.e. 1GB mapping per entry) for
944 	 * an empty entry in the range [1GB, 4GB[.
945 	 */
946 	for (n = 1; n < 4; n++) {
947 		if ((tbl[n] & DESC_ENTRY_TYPE_MASK) == INVALID_DESC) {
948 			user_va_idx = n;
949 			break;
950 		}
951 	}
952 
953 	assert(user_va_idx != -1);
954 }
955 
956 /*
957  * Setup an entry inside a core level 1 page table for TAs memory mapping
958  *
959  * If base table level is 1 - user_va_idx is already the index,
960  *                            so nothing to do.
961  * If base table level is 0 - we might need to allocate entry 0 of base table,
962  *                            as TAs page table is an entry inside a level 1
963  *                            page table.
964  */
core_init_mmu_prtn_ta_core(struct mmu_partition * prtn __maybe_unused,unsigned int base_idx __maybe_unused,unsigned int core __maybe_unused)965 static void core_init_mmu_prtn_ta_core(struct mmu_partition *prtn
966 				       __maybe_unused,
967 				       unsigned int base_idx __maybe_unused,
968 				       unsigned int core __maybe_unused)
969 {
970 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
971 	struct core_mmu_table_info tbl_info = { };
972 	uint64_t *tbl = NULL;
973 
974 	assert(user_va_idx != -1);
975 	COMPILE_TIME_ASSERT(MAX_XLAT_TABLES < (1 << (8 * sizeof(l1_idx_t))));
976 
977 	tbl = get_base_table(prtn, base_idx, core);
978 
979 	/*
980 	 * If base level is 0, then user_va_idx refers to
981 	 * level 1 page table that's in base level 0 entry 0.
982 	 */
983 	core_mmu_set_info_table(&tbl_info, 0, 0, tbl);
984 #ifdef CFG_NS_VIRTUALIZATION
985 	tbl_info.prtn = prtn;
986 #endif
987 
988 	/*
989 	 * If this isn't the core that created the initial tables
990 	 * mappings, then the level 1 table must be copied,
991 	 * as it will hold pointer to the user mapping table
992 	 * that changes per core.
993 	 */
994 	if (core != get_core_pos()) {
995 		if (!core_mmu_entry_copy(&tbl_info, 0))
996 			panic();
997 	}
998 
999 	if (!core_mmu_entry_to_finer_grained(&tbl_info, 0, true))
1000 		panic();
1001 
1002 	/*
1003 	 * Now base level table should be ready with a table descriptor
1004 	 */
1005 	assert((tbl[0] & DESC_ENTRY_TYPE_MASK) == TABLE_DESC);
1006 
1007 	tbl = core_mmu_xlat_table_entry_pa2va(prtn, 0, tbl[0]);
1008 	assert(tbl);
1009 
1010 	set_l1_ta_table(prtn, base_idx, core, tbl);
1011 #endif
1012 }
1013 
core_init_mmu_prtn_ta(struct mmu_partition * prtn)1014 static void core_init_mmu_prtn_ta(struct mmu_partition *prtn)
1015 {
1016 	unsigned int base_idx = 0;
1017 	unsigned int core = 0;
1018 
1019 	assert(user_va_idx != -1);
1020 
1021 	for (base_idx = 0; base_idx < NUM_BASE_TABLES; base_idx++)
1022 		for (core = 0; core < CFG_TEE_CORE_NB_CORE; core++)
1023 			core_init_mmu_prtn_ta_core(prtn, base_idx, core);
1024 }
1025 
core_init_mmu_prtn(struct mmu_partition * prtn,struct memory_map * mem_map)1026 void core_init_mmu_prtn(struct mmu_partition *prtn, struct memory_map *mem_map)
1027 {
1028 	core_init_mmu_prtn_tee(prtn, mem_map);
1029 	core_init_mmu_prtn_ta(prtn);
1030 }
1031 
core_init_mmu(struct memory_map * mem_map)1032 void core_init_mmu(struct memory_map *mem_map)
1033 {
1034 	struct mmu_partition *prtn = &default_partition;
1035 	uint64_t max_va = 0;
1036 	size_t n;
1037 
1038 	COMPILE_TIME_ASSERT(CORE_MMU_BASE_TABLE_SHIFT ==
1039 			    XLAT_ADDR_SHIFT(CORE_MMU_BASE_TABLE_LEVEL));
1040 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0
1041 	COMPILE_TIME_ASSERT(CORE_MMU_BASE_TABLE_OFFSET ==
1042 			    BASE_TABLE_SIZE / NUM_BASE_TABLES);
1043 #endif
1044 
1045 	if (IS_ENABLED(CFG_DYN_CONFIG)) {
1046 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
1047 		prtn->user_l1_table_idx = boot_mem_alloc(NUM_BASE_TABLES *
1048 							 CFG_TEE_CORE_NB_CORE *
1049 							 sizeof(l1_idx_t),
1050 							 alignof(l1_idx_t));
1051 		boot_mem_add_reloc(&prtn->user_l1_table_idx);
1052 #endif
1053 		prtn->base_tables = boot_mem_alloc(BASE_TABLE_SIZE *
1054 						   CFG_TEE_CORE_NB_CORE,
1055 						   NUM_BASE_LEVEL_ENTRIES *
1056 						   XLAT_ENTRY_SIZE);
1057 		boot_mem_add_reloc(&prtn->base_tables);
1058 
1059 		prtn->l2_ta_tables = boot_mem_alloc(XLAT_TABLE_SIZE *
1060 						    CFG_NUM_THREADS,
1061 						    XLAT_TABLE_SIZE);
1062 		boot_mem_add_reloc(&prtn->l2_ta_tables);
1063 	}
1064 
1065 	/* Initialize default pagetables */
1066 	core_init_mmu_prtn_tee(&default_partition, mem_map);
1067 
1068 	for (n = 0; n < mem_map->count; n++) {
1069 		vaddr_t va_end = mem_map->map[n].va + mem_map->map[n].size - 1;
1070 
1071 		if (va_end > max_va)
1072 			max_va = va_end;
1073 	}
1074 
1075 	set_user_va_idx(&default_partition);
1076 
1077 	core_init_mmu_prtn_ta(&default_partition);
1078 
1079 	COMPILE_TIME_ASSERT(CFG_LPAE_ADDR_SPACE_BITS > L1_XLAT_ADDRESS_SHIFT);
1080 	assert(max_va < BIT64(CFG_LPAE_ADDR_SPACE_BITS));
1081 }
1082 
1083 #ifdef CFG_WITH_PAGER
1084 /* Prefer to consume only 1 base xlat table for the whole mapping */
core_mmu_prefer_tee_ram_at_top(paddr_t paddr)1085 bool core_mmu_prefer_tee_ram_at_top(paddr_t paddr)
1086 {
1087 	size_t base_level_size = BASE_XLAT_BLOCK_SIZE;
1088 	paddr_t base_level_mask = base_level_size - 1;
1089 
1090 	return (paddr & base_level_mask) > (base_level_size / 2);
1091 }
1092 #endif
1093 
1094 #ifdef ARM32
core_init_mmu_regs(struct core_mmu_config * cfg)1095 void core_init_mmu_regs(struct core_mmu_config *cfg)
1096 {
1097 	struct mmu_partition *prtn = &default_partition;
1098 	uint32_t ttbcr = 0;
1099 	uint32_t mair = 0;
1100 
1101 	cfg->ttbr0_base = virt_to_phys(get_base_table(prtn, 0, 0));
1102 	cfg->ttbr0_core_offset = BASE_TABLE_SIZE;
1103 
1104 	mair  = MAIR_ATTR_SET(ATTR_DEVICE_nGnRE, ATTR_DEVICE_nGnRE_INDEX);
1105 	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
1106 	mair |= MAIR_ATTR_SET(ATTR_DEVICE_nGnRnE, ATTR_DEVICE_nGnRnE_INDEX);
1107 	/*
1108 	 * Tagged memory isn't supported in 32-bit mode, map tagged memory
1109 	 * as normal memory instead.
1110 	 */
1111 	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
1112 			      ATTR_TAGGED_NORMAL_MEM_INDEX);
1113 	cfg->mair0 = mair;
1114 
1115 	ttbcr = TTBCR_EAE;
1116 	ttbcr |= TTBCR_XRGNX_WBWA << TTBCR_IRGN0_SHIFT;
1117 	ttbcr |= TTBCR_XRGNX_WBWA << TTBCR_ORGN0_SHIFT;
1118 	ttbcr |= TTBCR_SHX_ISH << TTBCR_SH0_SHIFT;
1119 	ttbcr |= TTBCR_EPD1;	/* Disable the use of TTBR1 */
1120 
1121 	/* TTBCR.A1 = 0 => ASID is stored in TTBR0 */
1122 	cfg->ttbcr = ttbcr;
1123 }
1124 #endif /*ARM32*/
1125 
1126 #ifdef ARM64
get_hard_coded_pa_size_bits(void)1127 static unsigned int get_hard_coded_pa_size_bits(void)
1128 {
1129 	/*
1130 	 * Intermediate Physical Address Size.
1131 	 * 0b000      32 bits, 4GB.
1132 	 * 0b001      36 bits, 64GB.
1133 	 * 0b010      40 bits, 1TB.
1134 	 * 0b011      42 bits, 4TB.
1135 	 * 0b100      44 bits, 16TB.
1136 	 * 0b101      48 bits, 256TB.
1137 	 * 0b110      52 bits, 4PB
1138 	 */
1139 	static_assert(CFG_CORE_ARM64_PA_BITS >= 32);
1140 	static_assert(CFG_CORE_ARM64_PA_BITS <= 52);
1141 
1142 	if (CFG_CORE_ARM64_PA_BITS <= 32)
1143 		return TCR_PS_BITS_4GB;
1144 
1145 	if (CFG_CORE_ARM64_PA_BITS <= 36)
1146 		return TCR_PS_BITS_64GB;
1147 
1148 	if (CFG_CORE_ARM64_PA_BITS <= 40)
1149 		return TCR_PS_BITS_1TB;
1150 
1151 	if (CFG_CORE_ARM64_PA_BITS <= 42)
1152 		return TCR_PS_BITS_4TB;
1153 
1154 	if (CFG_CORE_ARM64_PA_BITS <= 44)
1155 		return TCR_PS_BITS_16TB;
1156 
1157 	if (CFG_CORE_ARM64_PA_BITS <= 48)
1158 		return TCR_PS_BITS_256TB;
1159 
1160 	/* CFG_CORE_ARM64_PA_BITS <= 48 */
1161 	return TCR_PS_BITS_4PB;
1162 }
1163 
get_physical_addr_size_bits(void)1164 static unsigned int get_physical_addr_size_bits(void)
1165 {
1166 	const unsigned int size_bits = read_id_aa64mmfr0_el1() &
1167 				       ID_AA64MMFR0_EL1_PARANGE_MASK;
1168 	unsigned int b = 0;
1169 
1170 	if (IS_ENABLED(CFG_AUTO_MAX_PA_BITS))
1171 		return size_bits;
1172 
1173 	b = get_hard_coded_pa_size_bits();
1174 	assert(b <= size_bits);
1175 	return b;
1176 }
1177 
core_mmu_arm64_get_pa_width(void)1178 unsigned int core_mmu_arm64_get_pa_width(void)
1179 {
1180 	const uint8_t map[] = { 32, 36, 40, 42, 44, 48, 52, };
1181 	unsigned int size_bits = get_physical_addr_size_bits();
1182 
1183 	size_bits = MIN(size_bits, ARRAY_SIZE(map) - 1);
1184 	return map[size_bits];
1185 }
1186 
core_init_mmu_regs(struct core_mmu_config * cfg)1187 void core_init_mmu_regs(struct core_mmu_config *cfg)
1188 {
1189 	struct mmu_partition *prtn = &default_partition;
1190 	uint64_t ips = get_physical_addr_size_bits();
1191 	uint64_t mair = 0;
1192 	uint64_t tcr = 0;
1193 
1194 	cfg->ttbr0_el1_base = virt_to_phys(get_base_table(prtn, 0, 0));
1195 	cfg->ttbr0_core_offset = BASE_TABLE_SIZE;
1196 
1197 	mair  = MAIR_ATTR_SET(ATTR_DEVICE_nGnRE, ATTR_DEVICE_nGnRE_INDEX);
1198 	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
1199 	mair |= MAIR_ATTR_SET(ATTR_DEVICE_nGnRnE, ATTR_DEVICE_nGnRnE_INDEX);
1200 	/*
1201 	 * If MEMTAG isn't enabled, map tagged memory as normal memory
1202 	 * instead.
1203 	 */
1204 	if (memtag_is_enabled())
1205 		mair |= MAIR_ATTR_SET(ATTR_TAGGED_NORMAL_MEM,
1206 				      ATTR_TAGGED_NORMAL_MEM_INDEX);
1207 	else
1208 		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
1209 				      ATTR_TAGGED_NORMAL_MEM_INDEX);
1210 	cfg->mair_el1 = mair;
1211 
1212 	tcr = TCR_RES1;
1213 	tcr |= TCR_XRGNX_WBWA << TCR_IRGN0_SHIFT;
1214 	tcr |= TCR_XRGNX_WBWA << TCR_ORGN0_SHIFT;
1215 	tcr |= TCR_SHX_ISH << TCR_SH0_SHIFT;
1216 	tcr |= ips << TCR_EL1_IPS_SHIFT;
1217 	tcr |= 64 - CFG_LPAE_ADDR_SPACE_BITS;
1218 
1219 	/* Disable the use of TTBR1 */
1220 	tcr |= TCR_EPD1;
1221 
1222 	/*
1223 	 * TCR.A1 = 0 => ASID is stored in TTBR0
1224 	 * TCR.AS = 0 => Same ASID size as in Aarch32/ARMv7
1225 	 */
1226 	cfg->tcr_el1 = tcr;
1227 }
1228 #endif /*ARM64*/
1229 
core_mmu_set_info_table(struct core_mmu_table_info * tbl_info,unsigned level,vaddr_t va_base,void * table)1230 void core_mmu_set_info_table(struct core_mmu_table_info *tbl_info,
1231 		unsigned level, vaddr_t va_base, void *table)
1232 {
1233 	tbl_info->level = level;
1234 	tbl_info->next_level = level + 1;
1235 	tbl_info->table = table;
1236 	tbl_info->va_base = va_base;
1237 	tbl_info->shift = XLAT_ADDR_SHIFT(level);
1238 
1239 #if (CORE_MMU_BASE_TABLE_LEVEL > 0)
1240 	assert(level >= CORE_MMU_BASE_TABLE_LEVEL);
1241 #endif
1242 	assert(level <= XLAT_TABLE_LEVEL_MAX);
1243 
1244 	if (level == CORE_MMU_BASE_TABLE_LEVEL)
1245 		tbl_info->num_entries = NUM_BASE_LEVEL_ENTRIES;
1246 	else
1247 		tbl_info->num_entries = XLAT_TABLE_ENTRIES;
1248 }
1249 
core_mmu_get_user_pgdir(struct core_mmu_table_info * pgd_info)1250 void core_mmu_get_user_pgdir(struct core_mmu_table_info *pgd_info)
1251 {
1252 	vaddr_t va_range_base;
1253 	void *tbl = get_l2_ta_tables(get_prtn(), thread_get_id());
1254 
1255 	core_mmu_get_user_va_range(&va_range_base, NULL);
1256 	core_mmu_set_info_table(pgd_info, 2, va_range_base, tbl);
1257 }
1258 
core_mmu_create_user_map(struct user_mode_ctx * uctx,struct core_mmu_user_map * map)1259 void core_mmu_create_user_map(struct user_mode_ctx *uctx,
1260 			      struct core_mmu_user_map *map)
1261 {
1262 	struct core_mmu_table_info dir_info;
1263 
1264 	COMPILE_TIME_ASSERT(sizeof(uint64_t) * XLAT_TABLE_ENTRIES == PGT_SIZE);
1265 
1266 	core_mmu_get_user_pgdir(&dir_info);
1267 	memset(dir_info.table, 0, PGT_SIZE);
1268 	core_mmu_populate_user_map(&dir_info, uctx);
1269 	map->user_map = virt_to_phys(dir_info.table) | TABLE_DESC;
1270 	map->asid = uctx->vm_info.asid;
1271 }
1272 
core_mmu_find_table(struct mmu_partition * prtn,vaddr_t va,unsigned max_level,struct core_mmu_table_info * tbl_info)1273 bool core_mmu_find_table(struct mmu_partition *prtn, vaddr_t va,
1274 			 unsigned max_level,
1275 			 struct core_mmu_table_info *tbl_info)
1276 {
1277 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
1278 	unsigned int num_entries = NUM_BASE_LEVEL_ENTRIES;
1279 	unsigned int level = CORE_MMU_BASE_TABLE_LEVEL;
1280 	vaddr_t va_base = 0;
1281 	bool ret = false;
1282 	uint64_t *tbl;
1283 
1284 	if (!prtn)
1285 		prtn = get_prtn();
1286 	tbl = get_base_table(prtn, 0, get_core_pos());
1287 
1288 	while (true) {
1289 		unsigned int level_size_shift = XLAT_ADDR_SHIFT(level);
1290 		unsigned int n = (va - va_base) >> level_size_shift;
1291 
1292 		if (n >= num_entries)
1293 			goto out;
1294 
1295 		if (level == max_level || level == XLAT_TABLE_LEVEL_MAX ||
1296 		    (tbl[n] & TABLE_DESC) != TABLE_DESC) {
1297 			/*
1298 			 * We've either reached max_level, a block
1299 			 * mapping entry or an "invalid" mapping entry.
1300 			 */
1301 
1302 			/*
1303 			 * Base level is the CPU specific translation table.
1304 			 * It doesn't make sense to return anything based
1305 			 * on that unless foreign interrupts already are
1306 			 * masked.
1307 			 */
1308 			if (level == CORE_MMU_BASE_TABLE_LEVEL &&
1309 			    !(exceptions & THREAD_EXCP_FOREIGN_INTR))
1310 				goto out;
1311 
1312 			tbl_info->table = tbl;
1313 			tbl_info->va_base = va_base;
1314 			tbl_info->level = level;
1315 			tbl_info->next_level = level + 1;
1316 			tbl_info->shift = level_size_shift;
1317 			tbl_info->num_entries = num_entries;
1318 #ifdef CFG_NS_VIRTUALIZATION
1319 			tbl_info->prtn = prtn;
1320 #endif
1321 			ret = true;
1322 			goto out;
1323 		}
1324 
1325 		tbl = core_mmu_xlat_table_entry_pa2va(prtn, level, tbl[n]);
1326 
1327 		if (!tbl)
1328 			goto out;
1329 
1330 		va_base += (vaddr_t)n << level_size_shift;
1331 		level++;
1332 		num_entries = XLAT_TABLE_ENTRIES;
1333 	}
1334 out:
1335 	thread_unmask_exceptions(exceptions);
1336 	return ret;
1337 }
1338 
core_mmu_entry_to_finer_grained(struct core_mmu_table_info * tbl_info,unsigned int idx,bool secure __unused)1339 bool core_mmu_entry_to_finer_grained(struct core_mmu_table_info *tbl_info,
1340 				     unsigned int idx, bool secure __unused)
1341 {
1342 	uint64_t *new_table;
1343 	uint64_t *entry;
1344 	int i;
1345 	paddr_t pa;
1346 	uint64_t attr;
1347 	paddr_t block_size_on_next_lvl = XLAT_BLOCK_SIZE(tbl_info->level + 1);
1348 	struct mmu_partition *prtn;
1349 
1350 #ifdef CFG_NS_VIRTUALIZATION
1351 	prtn = tbl_info->prtn;
1352 #else
1353 	prtn = &default_partition;
1354 #endif
1355 	assert(prtn);
1356 
1357 	if (tbl_info->level >= XLAT_TABLE_LEVEL_MAX ||
1358 	    idx >= tbl_info->num_entries)
1359 		return false;
1360 
1361 	entry = (uint64_t *)tbl_info->table + idx;
1362 
1363 	if ((*entry & DESC_ENTRY_TYPE_MASK) == TABLE_DESC)
1364 		return true;
1365 
1366 	new_table = core_mmu_xlat_table_alloc(prtn);
1367 	if (!new_table)
1368 		return false;
1369 
1370 	if (*entry) {
1371 		pa = *entry & OUTPUT_ADDRESS_MASK;
1372 		attr = *entry & ~(OUTPUT_ADDRESS_MASK | DESC_ENTRY_TYPE_MASK);
1373 		for (i = 0; i < XLAT_TABLE_ENTRIES; i++) {
1374 			new_table[i] = pa | attr | BLOCK_DESC;
1375 			pa += block_size_on_next_lvl;
1376 		}
1377 	} else {
1378 		memset(new_table, 0, XLAT_TABLE_ENTRIES * XLAT_ENTRY_SIZE);
1379 	}
1380 
1381 	*entry = virt_to_phys(new_table) | TABLE_DESC;
1382 
1383 	return true;
1384 }
1385 
core_mmu_set_entry_primitive(void * table,size_t level,size_t idx,paddr_t pa,uint32_t attr)1386 void core_mmu_set_entry_primitive(void *table, size_t level, size_t idx,
1387 				  paddr_t pa, uint32_t attr)
1388 {
1389 	uint64_t *tbl = table;
1390 	uint64_t desc = mattr_to_desc(level, attr);
1391 
1392 	tbl[idx] = desc | pa;
1393 }
1394 
core_mmu_get_entry_primitive(const void * table,size_t level,size_t idx,paddr_t * pa,uint32_t * attr)1395 void core_mmu_get_entry_primitive(const void *table, size_t level,
1396 				  size_t idx, paddr_t *pa, uint32_t *attr)
1397 {
1398 	const uint64_t *tbl = table;
1399 
1400 	if (pa)
1401 		*pa = tbl[idx] & GENMASK_64(47, 12);
1402 
1403 	if (attr)
1404 		*attr = desc_to_mattr(level, tbl[idx]);
1405 }
1406 
core_mmu_user_va_range_is_defined(void)1407 bool core_mmu_user_va_range_is_defined(void)
1408 {
1409 	return user_va_idx != -1;
1410 }
1411 
core_mmu_get_user_va_range(vaddr_t * base,size_t * size)1412 void core_mmu_get_user_va_range(vaddr_t *base, size_t *size)
1413 {
1414 	assert(user_va_idx != -1);
1415 
1416 	if (base)
1417 		*base = (vaddr_t)user_va_idx << L1_XLAT_ADDRESS_SHIFT;
1418 	if (size)
1419 		*size = BIT64(L1_XLAT_ADDRESS_SHIFT);
1420 }
1421 
core_mmu_get_user_mapping_entry(struct mmu_partition * prtn,unsigned int base_idx)1422 static uint64_t *core_mmu_get_user_mapping_entry(struct mmu_partition *prtn,
1423 						 unsigned int base_idx)
1424 {
1425 	uint64_t *tbl = NULL;
1426 
1427 	assert(user_va_idx != -1);
1428 
1429 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
1430 	tbl = get_l1_ta_table(prtn, base_idx, get_core_pos());
1431 #else
1432 	tbl =  get_base_table(prtn, base_idx, get_core_pos());
1433 #endif
1434 
1435 	return tbl + user_va_idx;
1436 }
1437 
core_mmu_user_mapping_is_active(void)1438 bool core_mmu_user_mapping_is_active(void)
1439 {
1440 	bool ret = false;
1441 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
1442 	uint64_t *entry = NULL;
1443 
1444 	entry = core_mmu_get_user_mapping_entry(get_prtn(), 0);
1445 	ret = (*entry != 0);
1446 
1447 	thread_unmask_exceptions(exceptions);
1448 
1449 	return ret;
1450 }
1451 
1452 #ifdef ARM32
core_mmu_get_user_map(struct core_mmu_user_map * map)1453 void core_mmu_get_user_map(struct core_mmu_user_map *map)
1454 {
1455 	struct mmu_partition *prtn = get_prtn();
1456 	uint64_t *entry = NULL;
1457 
1458 	entry = core_mmu_get_user_mapping_entry(prtn, 0);
1459 
1460 	map->user_map = *entry;
1461 	if (map->user_map) {
1462 		map->asid = (read_ttbr0_64bit() >> TTBR_ASID_SHIFT) &
1463 			    TTBR_ASID_MASK;
1464 	} else {
1465 		map->asid = 0;
1466 	}
1467 }
1468 
core_mmu_set_user_map(struct core_mmu_user_map * map)1469 void core_mmu_set_user_map(struct core_mmu_user_map *map)
1470 {
1471 	uint64_t ttbr = 0;
1472 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
1473 	struct mmu_partition *prtn = get_prtn();
1474 	uint64_t *entries[NUM_BASE_TABLES] = { };
1475 	unsigned int i = 0;
1476 
1477 	ttbr = read_ttbr0_64bit();
1478 	/* Clear ASID */
1479 	ttbr &= ~((uint64_t)TTBR_ASID_MASK << TTBR_ASID_SHIFT);
1480 	write_ttbr0_64bit(ttbr);
1481 	isb();
1482 
1483 	for (i = 0; i < NUM_BASE_TABLES; i++)
1484 		entries[i] = core_mmu_get_user_mapping_entry(prtn, i);
1485 
1486 	/* Set the new map */
1487 	if (map && map->user_map) {
1488 		for (i = 0; i < NUM_BASE_TABLES; i++)
1489 			*entries[i] = map->user_map;
1490 
1491 		dsb();	/* Make sure the write above is visible */
1492 		ttbr |= ((uint64_t)map->asid << TTBR_ASID_SHIFT);
1493 		write_ttbr0_64bit(ttbr);
1494 		isb();
1495 	} else {
1496 		for (i = 0; i < NUM_BASE_TABLES; i++)
1497 			*entries[i] = INVALID_DESC;
1498 
1499 		dsb();	/* Make sure the write above is visible */
1500 	}
1501 
1502 	tlbi_all();
1503 	icache_inv_all();
1504 
1505 	thread_unmask_exceptions(exceptions);
1506 }
1507 
core_mmu_get_fault_type(uint32_t fault_descr)1508 enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr)
1509 {
1510 	assert(fault_descr & FSR_LPAE);
1511 
1512 	switch (fault_descr & FSR_STATUS_MASK) {
1513 	case 0x10: /* b010000 Synchronous extern abort, not on table walk */
1514 	case 0x15: /* b010101 Synchronous extern abort, on table walk L1 */
1515 	case 0x16: /* b010110 Synchronous extern abort, on table walk L2 */
1516 	case 0x17: /* b010111 Synchronous extern abort, on table walk L3 */
1517 		return CORE_MMU_FAULT_SYNC_EXTERNAL;
1518 	case 0x11: /* b010001 Asynchronous extern abort (DFSR only) */
1519 		return CORE_MMU_FAULT_ASYNC_EXTERNAL;
1520 	case 0x21: /* b100001 Alignment fault */
1521 		return CORE_MMU_FAULT_ALIGNMENT;
1522 	case 0x22: /* b100010 Debug event */
1523 		return CORE_MMU_FAULT_DEBUG_EVENT;
1524 	default:
1525 		break;
1526 	}
1527 
1528 	switch ((fault_descr & FSR_STATUS_MASK) >> 2) {
1529 	case 0x1: /* b0001LL Translation fault */
1530 		return CORE_MMU_FAULT_TRANSLATION;
1531 	case 0x2: /* b0010LL Access flag fault */
1532 	case 0x3: /* b0011LL Permission fault */
1533 		if (fault_descr & FSR_WNR)
1534 			return CORE_MMU_FAULT_WRITE_PERMISSION;
1535 		else
1536 			return CORE_MMU_FAULT_READ_PERMISSION;
1537 	default:
1538 		return CORE_MMU_FAULT_OTHER;
1539 	}
1540 }
1541 #endif /*ARM32*/
1542 
1543 #ifdef ARM64
core_mmu_get_user_map(struct core_mmu_user_map * map)1544 void core_mmu_get_user_map(struct core_mmu_user_map *map)
1545 {
1546 	struct mmu_partition *prtn = get_prtn();
1547 	uint64_t *entry = NULL;
1548 
1549 	entry = core_mmu_get_user_mapping_entry(prtn, 0);
1550 
1551 	map->user_map = *entry;
1552 	if (map->user_map) {
1553 		map->asid = (read_ttbr0_el1() >> TTBR_ASID_SHIFT) &
1554 			    TTBR_ASID_MASK;
1555 	} else {
1556 		map->asid = 0;
1557 	}
1558 }
1559 
core_mmu_set_user_map(struct core_mmu_user_map * map)1560 void core_mmu_set_user_map(struct core_mmu_user_map *map)
1561 {
1562 	uint64_t ttbr = 0;
1563 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
1564 	struct mmu_partition *prtn = get_prtn();
1565 	uint64_t *entries[NUM_BASE_TABLES] = { };
1566 	unsigned int i = 0;
1567 
1568 	ttbr = read_ttbr0_el1();
1569 	/* Clear ASID */
1570 	ttbr &= ~((uint64_t)TTBR_ASID_MASK << TTBR_ASID_SHIFT);
1571 	write_ttbr0_el1(ttbr);
1572 	isb();
1573 
1574 	for (i = 0; i < NUM_BASE_TABLES; i++)
1575 		entries[i] = core_mmu_get_user_mapping_entry(prtn, i);
1576 
1577 	/* Set the new map */
1578 	if (map && map->user_map) {
1579 		for (i = 0; i < NUM_BASE_TABLES; i++)
1580 			*entries[i] = map->user_map;
1581 
1582 		dsb();	/* Make sure the write above is visible */
1583 		ttbr |= ((uint64_t)map->asid << TTBR_ASID_SHIFT);
1584 		write_ttbr0_el1(ttbr);
1585 		isb();
1586 	} else {
1587 		for (i = 0; i < NUM_BASE_TABLES; i++)
1588 			*entries[i] = INVALID_DESC;
1589 
1590 		dsb();	/* Make sure the write above is visible */
1591 	}
1592 
1593 	tlbi_all();
1594 	icache_inv_all();
1595 
1596 	thread_unmask_exceptions(exceptions);
1597 }
1598 
core_mmu_get_fault_type(uint32_t fault_descr)1599 enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr)
1600 {
1601 	switch ((fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
1602 	case ESR_EC_SP_ALIGN:
1603 	case ESR_EC_PC_ALIGN:
1604 		return CORE_MMU_FAULT_ALIGNMENT;
1605 	case ESR_EC_IABT_EL0:
1606 	case ESR_EC_DABT_EL0:
1607 	case ESR_EC_IABT_EL1:
1608 	case ESR_EC_DABT_EL1:
1609 		switch (fault_descr & ESR_FSC_MASK) {
1610 		case ESR_FSC_SIZE_L0:
1611 		case ESR_FSC_SIZE_L1:
1612 		case ESR_FSC_SIZE_L2:
1613 		case ESR_FSC_SIZE_L3:
1614 		case ESR_FSC_TRANS_L0:
1615 		case ESR_FSC_TRANS_L1:
1616 		case ESR_FSC_TRANS_L2:
1617 		case ESR_FSC_TRANS_L3:
1618 			return CORE_MMU_FAULT_TRANSLATION;
1619 		case ESR_FSC_ACCF_L1:
1620 		case ESR_FSC_ACCF_L2:
1621 		case ESR_FSC_ACCF_L3:
1622 		case ESR_FSC_PERMF_L1:
1623 		case ESR_FSC_PERMF_L2:
1624 		case ESR_FSC_PERMF_L3:
1625 			if (fault_descr & ESR_ABT_WNR)
1626 				return CORE_MMU_FAULT_WRITE_PERMISSION;
1627 			else
1628 				return CORE_MMU_FAULT_READ_PERMISSION;
1629 		case ESR_FSC_ALIGN:
1630 			return CORE_MMU_FAULT_ALIGNMENT;
1631 		case ESR_FSC_TAG_CHECK:
1632 			return CORE_MMU_FAULT_TAG_CHECK;
1633 		case ESR_FSC_SEA_NTT:
1634 		case ESR_FSC_SEA_TT_SUB_L2:
1635 		case ESR_FSC_SEA_TT_SUB_L1:
1636 		case ESR_FSC_SEA_TT_L0:
1637 		case ESR_FSC_SEA_TT_L1:
1638 		case ESR_FSC_SEA_TT_L2:
1639 		case ESR_FSC_SEA_TT_L3:
1640 			return CORE_MMU_FAULT_SYNC_EXTERNAL;
1641 		default:
1642 			return CORE_MMU_FAULT_OTHER;
1643 		}
1644 	default:
1645 		return CORE_MMU_FAULT_OTHER;
1646 	}
1647 }
1648 #endif /*ARM64*/
1649