xref: /optee_os/core/arch/arm/mm/core_mmu_lpae.c (revision 4219abe1e6287069a13a5f552f12ddbd230bbaff)
1 // SPDX-License-Identifier: (BSD-2-Clause AND BSD-3-Clause)
2 /*
3  * Copyright (c) 2015-2016, 2022 Linaro Limited
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  * this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  * this list of conditions and the following disclaimer in the documentation
14  * and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /*
30  * Copyright (c) 2014, 2022, ARM Limited and Contributors. All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions are met:
34  *
35  * Redistributions of source code must retain the above copyright notice, this
36  * list of conditions and the following disclaimer.
37  *
38  * Redistributions in binary form must reproduce the above copyright notice,
39  * this list of conditions and the following disclaimer in the documentation
40  * and/or other materials provided with the distribution.
41  *
42  * Neither the name of ARM nor the names of its contributors may be used
43  * to endorse or promote products derived from this software without specific
44  * prior written permission.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
50  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
56  * POSSIBILITY OF SUCH DAMAGE.
57  */
58 #include <platform_config.h>
59 
60 #include <arm.h>
61 #include <assert.h>
62 #include <compiler.h>
63 #include <config.h>
64 #include <inttypes.h>
65 #include <keep.h>
66 #include <kernel/boot.h>
67 #include <kernel/cache_helpers.h>
68 #include <kernel/linker.h>
69 #include <kernel/misc.h>
70 #include <kernel/panic.h>
71 #include <kernel/thread.h>
72 #include <kernel/tlb_helpers.h>
73 #include <memtag.h>
74 #include <mm/core_memprot.h>
75 #include <mm/pgt_cache.h>
76 #include <mm/phys_mem.h>
77 #include <stdalign.h>
78 #include <string.h>
79 #include <trace.h>
80 #include <types_ext.h>
81 #include <util.h>
82 
83 #ifndef DEBUG_XLAT_TABLE
84 #define DEBUG_XLAT_TABLE 0
85 #endif
86 
87 #if DEBUG_XLAT_TABLE
88 #define debug_print(...) DMSG_RAW(__VA_ARGS__)
89 #else
90 #define debug_print(...) ((void)0)
91 #endif
92 
93 
94 /*
95  * Miscellaneous MMU related constants
96  */
97 
98 #define INVALID_DESC		0x0
99 #define BLOCK_DESC		0x1
100 #define L3_BLOCK_DESC		0x3
101 #define TABLE_DESC		0x3
102 #define DESC_ENTRY_TYPE_MASK	0x3
103 
104 #define XN			(1ull << 2)
105 #define PXN			(1ull << 1)
106 #define CONT_HINT		(1ull << 0)
107 
108 #define UPPER_ATTRS(x)		(((x) & 0x7) << 52)
109 #define GP                      BIT64(50)   /* Guarded Page, AArch64 FEAT_BTI */
110 #define NON_GLOBAL		(1ull << 9)
111 #define ACCESS_FLAG		(1ull << 8)
112 #define NSH			(0x0 << 6)
113 #define OSH			(0x2 << 6)
114 #define ISH			(0x3 << 6)
115 
116 #define AP_RO			(0x1 << 5)
117 #define AP_RW			(0x0 << 5)
118 #define AP_UNPRIV		(0x1 << 4)
119 
120 #define NS				(0x1 << 3)
121 #define LOWER_ATTRS_SHIFT		2
122 #define LOWER_ATTRS(x)			(((x) & 0xfff) << LOWER_ATTRS_SHIFT)
123 
124 #define ATTR_DEVICE_nGnRE_INDEX		0x0
125 #define ATTR_IWBWA_OWBWA_NTR_INDEX	0x1
126 #define ATTR_DEVICE_nGnRnE_INDEX	0x2
127 #define ATTR_TAGGED_NORMAL_MEM_INDEX	0x3
128 #define ATTR_INDEX_MASK			0x7
129 
130 #define ATTR_DEVICE_nGnRnE		(0x0)
131 #define ATTR_DEVICE_nGnRE		(0x4)
132 #define ATTR_IWBWA_OWBWA_NTR		(0xff)
133 /* Same as ATTR_IWBWA_OWBWA_NTR but with memory tagging.  */
134 #define ATTR_TAGGED_NORMAL_MEM		(0xf0)
135 
136 #define MAIR_ATTR_SET(attr, index)	(((uint64_t)attr) << ((index) << 3))
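/*
 * Each MAIR index selects one byte of the MAIR register, e.g.
 * MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX)
 * yields 0xff << 8 = 0xff00.
 */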
137 
138 #define OUTPUT_ADDRESS_MASK	(0x0000FFFFFFFFF000ULL)
139 
140 /* (internal) physical address size bits in EL3/EL1 */
141 #define TCR_PS_BITS_4GB		(0x0)
142 #define TCR_PS_BITS_64GB	(0x1)
143 #define TCR_PS_BITS_1TB		(0x2)
144 #define TCR_PS_BITS_4TB		(0x3)
145 #define TCR_PS_BITS_16TB	(0x4)
146 #define TCR_PS_BITS_256TB	(0x5)
147 #define TCR_PS_BITS_4PB		(0x6)
148 
149 #define UNSET_DESC		((uint64_t)-1)
150 
151 #define FOUR_KB_SHIFT		12
152 #define PAGE_SIZE_SHIFT		FOUR_KB_SHIFT
153 #define PAGE_SIZE		(1 << PAGE_SIZE_SHIFT)
154 #define PAGE_SIZE_MASK		(PAGE_SIZE - 1)
155 #define IS_PAGE_ALIGNED(addr)	(((addr) & PAGE_SIZE_MASK) == 0)
156 
157 #define XLAT_ENTRY_SIZE_SHIFT	3 /* Each MMU table entry is 8 bytes (1 << 3) */
158 #define XLAT_ENTRY_SIZE		(1 << XLAT_ENTRY_SIZE_SHIFT)
159 
160 #define XLAT_TABLE_SIZE_SHIFT	PAGE_SIZE_SHIFT
161 #define XLAT_TABLE_SIZE		(1 << XLAT_TABLE_SIZE_SHIFT)
162 
163 #define XLAT_TABLE_LEVEL_MAX	U(3)
164 
165 /* Values for number of entries in each MMU translation table */
166 #define XLAT_TABLE_ENTRIES_SHIFT (XLAT_TABLE_SIZE_SHIFT - XLAT_ENTRY_SIZE_SHIFT)
167 #define XLAT_TABLE_ENTRIES	(1 << XLAT_TABLE_ENTRIES_SHIFT)
168 #define XLAT_TABLE_ENTRIES_MASK	(XLAT_TABLE_ENTRIES - 1)
169 
170 /* Values to convert a memory address to an index into a translation table */
171 #define L3_XLAT_ADDRESS_SHIFT	PAGE_SIZE_SHIFT
172 #define L2_XLAT_ADDRESS_SHIFT	(L3_XLAT_ADDRESS_SHIFT + \
173 				 XLAT_TABLE_ENTRIES_SHIFT)
174 #define L1_XLAT_ADDRESS_SHIFT	(L2_XLAT_ADDRESS_SHIFT + \
175 				 XLAT_TABLE_ENTRIES_SHIFT)
176 #define L0_XLAT_ADDRESS_SHIFT	(L1_XLAT_ADDRESS_SHIFT + \
177 				 XLAT_TABLE_ENTRIES_SHIFT)
178 #define XLAT_ADDR_SHIFT(level)	(PAGE_SIZE_SHIFT + \
179 				 ((XLAT_TABLE_LEVEL_MAX - (level)) * \
180 				 XLAT_TABLE_ENTRIES_SHIFT))
181 
182 #define XLAT_BLOCK_SIZE(level)	(UL(1) << XLAT_ADDR_SHIFT(level))
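/*
 * With the 4 KB granule used here (PAGE_SIZE_SHIFT = 12,
 * XLAT_TABLE_ENTRIES_SHIFT = 9) this gives:
 *   XLAT_ADDR_SHIFT(3) = 12, XLAT_BLOCK_SIZE(3) = 4 KB
 *   XLAT_ADDR_SHIFT(2) = 21, XLAT_BLOCK_SIZE(2) = 2 MB
 *   XLAT_ADDR_SHIFT(1) = 30, XLAT_BLOCK_SIZE(1) = 1 GB
 *   XLAT_ADDR_SHIFT(0) = 39, XLAT_BLOCK_SIZE(0) = 512 GB
 */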
183 
184 /* Base table */
185 #define BASE_XLAT_ADDRESS_SHIFT	XLAT_ADDR_SHIFT(CORE_MMU_BASE_TABLE_LEVEL)
186 #define BASE_XLAT_BLOCK_SIZE	XLAT_BLOCK_SIZE(CORE_MMU_BASE_TABLE_LEVEL)
187 
188 #define NUM_BASE_LEVEL_ENTRIES	\
189 	BIT(CFG_LPAE_ADDR_SPACE_BITS - BASE_XLAT_ADDRESS_SHIFT)
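/*
 * E.g. with the default CFG_LPAE_ADDR_SPACE_BITS = 32 and a level 1 base
 * table (BASE_XLAT_ADDRESS_SHIFT = 30) this is BIT(32 - 30) = 4 entries.
 */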
190 
191 /*
192  * MMU L1 table, one for each core
193  *
194  * With CFG_CORE_UNMAP_CORE_AT_EL0, each core has one table to be used
195  * while in kernel mode and one to be used while in user mode.
196  */
197 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0
198 #define NUM_BASE_TABLES	2
199 #else
200 #define NUM_BASE_TABLES	1
201 #endif
202 
203 #ifndef MAX_XLAT_TABLES
204 #ifdef CFG_NS_VIRTUALIZATION
205 #	define XLAT_TABLE_VIRTUALIZATION_EXTRA 3
206 #else
207 #	define XLAT_TABLE_VIRTUALIZATION_EXTRA 0
208 #endif
209 #ifdef CFG_CORE_ASLR
210 #	define XLAT_TABLE_ASLR_EXTRA 3
211 #else
212 #	define XLAT_TABLE_ASLR_EXTRA 0
213 #endif
214 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
215 #	define XLAT_TABLE_TEE_EXTRA 8
216 #	define XLAT_TABLE_USER_EXTRA (NUM_BASE_TABLES * CFG_TEE_CORE_NB_CORE)
217 #else
218 #	define XLAT_TABLE_TEE_EXTRA 5
219 #	define XLAT_TABLE_USER_EXTRA 0
220 #endif
221 #define MAX_XLAT_TABLES		(XLAT_TABLE_TEE_EXTRA + \
222 				 XLAT_TABLE_VIRTUALIZATION_EXTRA + \
223 				 XLAT_TABLE_ASLR_EXTRA + \
224 				 XLAT_TABLE_USER_EXTRA + \
225 				 IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS) + \
226 				 IS_ENABLED(CFG_DYN_CONFIG))
227 #endif /*!MAX_XLAT_TABLES*/
228 
229 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
230 #if (MAX_XLAT_TABLES <= UINT8_MAX)
231 typedef uint8_t l1_idx_t;
232 #elif (MAX_XLAT_TABLES <= UINT16_MAX)
233 typedef uint16_t l1_idx_t;
234 #else
235 #error MAX_XLAT_TABLES is suspiciously large, please check
236 #endif
237 #endif
238 
239 /*
240  * The global base translation table is a three dimensional array (array of
241  * array of array), but it's easier to visualize if broken down into
242  * components.
243  *
244  * TTBR is assigned a base translation table of NUM_BASE_LEVEL_ENTRIES
245  * entries. NUM_BASE_LEVEL_ENTRIES is determined based on
246  * CFG_LPAE_ADDR_SPACE_BITS.  CFG_LPAE_ADDR_SPACE_BITS is by default 32
247  * which results in NUM_BASE_LEVEL_ENTRIES defined to 4 where one entry is
248  * a uint64_t, 8 bytes.
249  *
250  * If CFG_CORE_UNMAP_CORE_AT_EL0=y there are two base translation tables,
251  * one for OP-TEE core with full mapping of both EL1 and EL0, and one for
252  * EL0 where EL1 is unmapped except for a minimal trampoline needed to
253  * restore EL1 mappings on exception from EL0.
254  *
255  * Each CPU core is assigned a unique set of base translation tables as:
256  * core0: table0: entry0 (table0 maps both EL1 and EL0)
257  *                entry1
258  *                entry2
259  *                entry3
260  * core0: table1: entry0 (table1 maps only EL0)
261  *                entry1
262  *                entry2
263  *                entry3
264  * core1: ...
265  *
266  * The base translation table is by default a level 1 table. It can also be
267  * configured as a level 0 table with CFG_LPAE_ADDR_SPACE_BITS >= 40 and <=
268  * 48.
269  */
270 
271 /* The size of base tables for one core */
272 #define BASE_TABLE_SIZE		(NUM_BASE_LEVEL_ENTRIES * NUM_BASE_TABLES * \
273 				 XLAT_ENTRY_SIZE)
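/*
 * The base tables of all cores are stored back to back, so the table for
 * (core_pos, tbl_idx) starts at entry
 * (core_pos * NUM_BASE_TABLES + tbl_idx) * NUM_BASE_LEVEL_ENTRIES,
 * see get_base_table() below. With the defaults (4 entries, 1 table per
 * core) BASE_TABLE_SIZE is 4 * 1 * 8 = 32 bytes.
 */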
274 #ifndef CFG_DYN_CONFIG
275 static uint64_t base_xlation_table[BASE_TABLE_SIZE * CFG_TEE_CORE_NB_CORE /
276 				   XLAT_ENTRY_SIZE]
277 	__aligned(NUM_BASE_LEVEL_ENTRIES * XLAT_ENTRY_SIZE)
278 	__section(".nozi.mmu.base_table");
279 
280 static uint64_t xlat_tables[XLAT_TABLE_SIZE * MAX_XLAT_TABLES /
281 			    XLAT_ENTRY_SIZE]
282 	__aligned(XLAT_TABLE_SIZE) __section(".nozi.mmu.l2");
283 
284 /* MMU L2 table for TAs, one for each thread */
285 static uint64_t xlat_tables_ul1[XLAT_TABLE_SIZE * CFG_NUM_THREADS /
286 				XLAT_ENTRY_SIZE]
287 	__aligned(XLAT_TABLE_SIZE) __section(".nozi.mmu.l2");
288 
289 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
290 static l1_idx_t user_l1_table_idx[NUM_BASE_TABLES * CFG_TEE_CORE_NB_CORE];
291 #endif
292 #endif
293 
294 /*
295  * Index of the TA page table entry inside a level 1 page table.
296  *
297  * The TA mapping is expected to start at level 2.
298  *
299  * If the base level is 1 then this is the index of a level 1 entry
300  * that points directly into the TA mapping table.
301  *
302  * If the base level is 0 then entry 0 of the base table is always used,
303  * and we fall back to the "base level == 1" scenario from there.
304  */
305 static int user_va_idx __nex_data = -1;
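/*
 * Example: user_va_idx = 1 places the TA mappings in [1GB, 2GB[, see
 * core_mmu_get_user_va_range() which derives the user VA base as
 * user_va_idx << L1_XLAT_ADDRESS_SHIFT with a 1 GB range.
 */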
306 
307 /*
308  * struct mmu_partition - virtual memory of a partition
309  * @base_tables:       The global base translation table described above
310  * @xlat_tables:       Preallocated array of translation tables
311  * @l2_ta_tables:      The level 2 table used to map TAs at EL0
312  * @xlat_tables_used:  The number of used translation tables from @xlat_tables
313  * @asid:              Address space ID used for core mappings
314  * @user_l1_table_idx: Index into @xlat_tables for the entry used to map the
315  *                     level 2 table @l2_ta_tables
316  *
317  * With CORE_MMU_BASE_TABLE_LEVEL = 1 translation tables are ordered as:
318  * @base_tables is a level 1 table where @user_va_idx above is used as
319  * base_tables[user_va_idx] to identify the entry used by @l2_ta_tables.
320  *
321  * With CORE_MMU_BASE_TABLE_LEVEL = 0 translation tables are ordered as:
322  * @base_tables is a level 0 table where base_tables[0] identifies the level 1
323  * table indexed with
324  * xlat_tables[user_l1_table_idx[0/1][core_id]][user_va_idx] to find the
325  * entry used by @l2_ta_tables.
326  *
327  * With CFG_NS_VIRTUALIZATION disabled there is only one @default_partition
328  * (below) describing virtual memory mappings.
329  *
330  * With CFG_NS_VIRTUALIZATION enabled there's one struct mmu_partition
331  * allocated for each partition.
332  */
333 struct mmu_partition {
334 	uint64_t *base_tables;
335 	uint64_t *xlat_tables;
336 	uint64_t *l2_ta_tables;
337 	unsigned int xlat_tables_used;
338 	unsigned int asid;
339 
340 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
341 	/*
342 	 * Indexes of the L1 tables in 'xlat_tables'
343 	 * that point to the user mappings.
344 	 */
345 	l1_idx_t *user_l1_table_idx;
346 #endif
347 };
348 
349 #ifdef CFG_DYN_CONFIG
350 static struct mmu_partition default_partition __nex_bss;
351 #else
352 static struct mmu_partition default_partition __nex_data = {
353 	.base_tables = base_xlation_table,
354 	.xlat_tables = xlat_tables,
355 	.l2_ta_tables = xlat_tables_ul1,
356 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
357 	.user_l1_table_idx = user_l1_table_idx,
358 #endif
359 	.xlat_tables_used = 0,
360 	.asid = 0
361 };
362 #endif
363 
364 #ifdef CFG_NS_VIRTUALIZATION
365 static struct mmu_partition *current_prtn[CFG_TEE_CORE_NB_CORE] __nex_bss;
366 #endif
367 
368 static struct mmu_partition *get_prtn(void)
369 {
370 #ifdef CFG_NS_VIRTUALIZATION
371 	struct mmu_partition *ret;
372 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
373 
374 	ret = current_prtn[get_core_pos()];
375 
376 	thread_unmask_exceptions(exceptions);
377 	return ret;
378 #else
379 	return &default_partition;
380 #endif
381 }
382 
383 static uint32_t desc_to_mattr(unsigned level, uint64_t desc)
384 {
385 	uint32_t a;
386 
387 	if (!(desc & 1))
388 		return 0;
389 
390 	if (level == XLAT_TABLE_LEVEL_MAX) {
391 		if ((desc & DESC_ENTRY_TYPE_MASK) != L3_BLOCK_DESC)
392 			return 0;
393 	} else {
394 		if ((desc & DESC_ENTRY_TYPE_MASK) == TABLE_DESC)
395 			return TEE_MATTR_TABLE;
396 	}
397 
398 	a = TEE_MATTR_VALID_BLOCK;
399 
400 	if (desc & LOWER_ATTRS(ACCESS_FLAG))
401 		a |= TEE_MATTR_PRX | TEE_MATTR_URX;
402 
403 	if (!(desc & LOWER_ATTRS(AP_RO)))
404 		a |= TEE_MATTR_PW | TEE_MATTR_UW;
405 
406 	if (!(desc & LOWER_ATTRS(AP_UNPRIV)))
407 		a &= ~TEE_MATTR_URWX;
408 
409 	if (desc & UPPER_ATTRS(XN))
410 		a &= ~(TEE_MATTR_PX | TEE_MATTR_UX);
411 
412 	if (desc & UPPER_ATTRS(PXN))
413 		a &= ~TEE_MATTR_PX;
414 
415 	COMPILE_TIME_ASSERT(ATTR_DEVICE_nGnRnE_INDEX ==
416 			    TEE_MATTR_MEM_TYPE_STRONGLY_O);
417 	COMPILE_TIME_ASSERT(ATTR_DEVICE_nGnRE_INDEX == TEE_MATTR_MEM_TYPE_DEV);
418 	COMPILE_TIME_ASSERT(ATTR_IWBWA_OWBWA_NTR_INDEX ==
419 			    TEE_MATTR_MEM_TYPE_CACHED);
420 	COMPILE_TIME_ASSERT(ATTR_TAGGED_NORMAL_MEM_INDEX ==
421 			    TEE_MATTR_MEM_TYPE_TAGGED);
422 
423 	a |= ((desc & LOWER_ATTRS(ATTR_INDEX_MASK)) >> LOWER_ATTRS_SHIFT) <<
424 	     TEE_MATTR_MEM_TYPE_SHIFT;
425 
426 	if (!(desc & LOWER_ATTRS(NON_GLOBAL)))
427 		a |= TEE_MATTR_GLOBAL;
428 
429 	if (!(desc & LOWER_ATTRS(NS)))
430 		a |= TEE_MATTR_SECURE;
431 
432 	if (desc & GP)
433 		a |= TEE_MATTR_GUARDED;
434 
435 	return a;
436 }
437 
438 static uint64_t mattr_to_desc(unsigned level, uint32_t attr)
439 {
440 	uint64_t desc;
441 	uint32_t a = attr;
442 
443 	if (a & TEE_MATTR_TABLE)
444 		return TABLE_DESC;
445 
446 	if (!(a & TEE_MATTR_VALID_BLOCK))
447 		return 0;
448 
449 	if (a & (TEE_MATTR_PX | TEE_MATTR_PW))
450 		a |= TEE_MATTR_PR;
451 	if (a & (TEE_MATTR_UX | TEE_MATTR_UW))
452 		a |= TEE_MATTR_UR;
453 	if (a & TEE_MATTR_UR)
454 		a |= TEE_MATTR_PR;
455 	if (a & TEE_MATTR_UW)
456 		a |= TEE_MATTR_PW;
457 
458 	if (IS_ENABLED(CFG_CORE_BTI) && (a & TEE_MATTR_PX))
459 		a |= TEE_MATTR_GUARDED;
460 
461 	if (level == XLAT_TABLE_LEVEL_MAX)
462 		desc = L3_BLOCK_DESC;
463 	else
464 		desc = BLOCK_DESC;
465 
466 	if (!(a & (TEE_MATTR_PX | TEE_MATTR_UX)))
467 		desc |= UPPER_ATTRS(XN);
468 	if (!(a & TEE_MATTR_PX))
469 		desc |= UPPER_ATTRS(PXN);
470 
471 	if (a & TEE_MATTR_UR)
472 		desc |= LOWER_ATTRS(AP_UNPRIV);
473 
474 	if (!(a & TEE_MATTR_PW))
475 		desc |= LOWER_ATTRS(AP_RO);
476 
477 	if (feat_bti_is_implemented() && (a & TEE_MATTR_GUARDED))
478 		desc |= GP;
479 
480 	/* Keep in sync with core_mmu.c:core_mmu_mattr_is_ok */
481 	switch ((a >> TEE_MATTR_MEM_TYPE_SHIFT) & TEE_MATTR_MEM_TYPE_MASK) {
482 	case TEE_MATTR_MEM_TYPE_STRONGLY_O:
483 		desc |= LOWER_ATTRS(ATTR_DEVICE_nGnRnE_INDEX | OSH);
484 		break;
485 	case TEE_MATTR_MEM_TYPE_DEV:
486 		desc |= LOWER_ATTRS(ATTR_DEVICE_nGnRE_INDEX | OSH);
487 		break;
488 	case TEE_MATTR_MEM_TYPE_CACHED:
489 		desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
490 		break;
491 	case TEE_MATTR_MEM_TYPE_TAGGED:
492 		desc |= LOWER_ATTRS(ATTR_TAGGED_NORMAL_MEM_INDEX | ISH);
493 		break;
494 	default:
495 		/*
496 		 * "Can't happen" the attribute is supposed to be checked
497 		 * with core_mmu_mattr_is_ok() before.
498 		 */
499 		panic();
500 	}
501 
502 	if (a & (TEE_MATTR_UR | TEE_MATTR_PR))
503 		desc |= LOWER_ATTRS(ACCESS_FLAG);
504 
505 	if (!(a & TEE_MATTR_GLOBAL))
506 		desc |= LOWER_ATTRS(NON_GLOBAL);
507 
508 	desc |= a & TEE_MATTR_SECURE ? 0 : LOWER_ATTRS(NS);
509 
510 	return desc;
511 }
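/*
 * Example (a sketch, assuming CFG_CORE_BTI is disabled): a secure, global,
 * cached block with TEE_MATTR_PRW and no execute permissions at a level
 * below 3 is encoded as
 *   BLOCK_DESC | UPPER_ATTRS(XN | PXN) |
 *   LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH | ACCESS_FLAG)
 * desc_to_mattr() above performs the reverse translation.
 */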
512 
513 static uint64_t *get_base_table(struct mmu_partition *prtn, size_t tbl_idx,
514 				size_t core_pos)
515 {
516 	assert(tbl_idx < NUM_BASE_TABLES);
517 	assert(core_pos < CFG_TEE_CORE_NB_CORE);
518 
519 	return  prtn->base_tables + (core_pos * NUM_BASE_TABLES + tbl_idx) *
520 				    NUM_BASE_LEVEL_ENTRIES;
521 }
522 
523 static uint64_t *get_l2_ta_tables(struct mmu_partition *prtn, size_t thread_id)
524 {
525 	assert(thread_id < CFG_NUM_THREADS);
526 
527 	return prtn->l2_ta_tables + XLAT_TABLE_ENTRIES * thread_id;
528 }
529 
530 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
531 static uint64_t *get_l1_ta_table(struct mmu_partition *prtn, size_t base_idx,
532 				 size_t core_pos)
533 {
534 	size_t idx = 0;
535 	uint64_t *tbl = NULL;
536 
537 	idx = prtn->user_l1_table_idx[core_pos * NUM_BASE_TABLES + base_idx];
538 	tbl = (void *)((vaddr_t)prtn->xlat_tables + idx * XLAT_TABLE_SIZE);
539 	return tbl;
540 }
541 
542 static void set_l1_ta_table(struct mmu_partition *prtn, size_t base_idx,
543 			    size_t core_pos, uint64_t *tbl)
544 {
545 	size_t idx = 0;
546 
547 	idx = ((vaddr_t)tbl - (vaddr_t)prtn->xlat_tables) / XLAT_TABLE_SIZE;
548 	assert(idx < prtn->xlat_tables_used);
549 	prtn->user_l1_table_idx[core_pos * NUM_BASE_TABLES + base_idx] = idx;
550 }
551 #endif
552 
553 #ifdef CFG_NS_VIRTUALIZATION
554 size_t core_mmu_get_total_pages_size(void)
555 {
556 	size_t sz = ROUNDUP(BASE_TABLE_SIZE * CFG_TEE_CORE_NB_CORE,
557 			    SMALL_PAGE_SIZE);
558 
559 	sz += XLAT_TABLE_SIZE * CFG_NUM_THREADS;
560 	if (!IS_ENABLED(CFG_DYN_CONFIG))
561 		sz += XLAT_TABLE_SIZE * MAX_XLAT_TABLES;
562 
563 	return sz;
564 }
565 
566 struct mmu_partition *core_alloc_mmu_prtn(void *tables)
567 {
568 	struct mmu_partition *prtn;
569 	uint8_t *tbl = tables;
570 	unsigned int asid = asid_alloc();
571 
572 	assert(((vaddr_t)tbl) % SMALL_PAGE_SIZE == 0);
573 
574 	if (!asid)
575 		return NULL;
576 
577 	prtn = nex_malloc(sizeof(*prtn));
578 	if (!prtn)
579 		goto err;
580 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
581 	prtn->user_l1_table_idx = nex_calloc(NUM_BASE_TABLES *
582 					     CFG_TEE_CORE_NB_CORE,
583 					     sizeof(l1_idx_t));
584 	if (!prtn->user_l1_table_idx)
585 		goto err;
586 #endif
587 
588 	memset(tables, 0, core_mmu_get_total_pages_size());
589 	prtn->base_tables = (void *)tbl;
590 	tbl += ROUNDUP(BASE_TABLE_SIZE * CFG_TEE_CORE_NB_CORE, SMALL_PAGE_SIZE);
591 
592 	if (!IS_ENABLED(CFG_DYN_CONFIG)) {
593 		prtn->xlat_tables = (void *)tbl;
594 		tbl += XLAT_TABLE_SIZE * MAX_XLAT_TABLES;
595 		assert(((vaddr_t)tbl) % SMALL_PAGE_SIZE == 0);
596 	}
597 
598 	prtn->l2_ta_tables = (void *)tbl;
599 	prtn->xlat_tables_used = 0;
600 	prtn->asid = asid;
601 
602 	return prtn;
603 err:
604 	nex_free(prtn);
605 	asid_free(asid);
606 	return NULL;
607 }
608 
609 void core_free_mmu_prtn(struct mmu_partition *prtn)
610 {
611 	asid_free(prtn->asid);
612 	nex_free(prtn);
613 }
614 
615 void core_mmu_set_prtn(struct mmu_partition *prtn)
616 {
617 	uint64_t ttbr;
618 	/*
619 	 * We are changing mappings for the current CPU,
620 	 * so make sure that we will not be rescheduled.
621 	 */
622 	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
623 
624 	current_prtn[get_core_pos()] = prtn;
625 
626 	ttbr = virt_to_phys(get_base_table(prtn, 0, get_core_pos()));
627 
628 	write_ttbr0_el1(ttbr | ((paddr_t)prtn->asid << TTBR_ASID_SHIFT));
629 	isb();
630 	tlbi_all();
631 }
632 
633 void core_mmu_set_default_prtn(void)
634 {
635 	core_mmu_set_prtn(&default_partition);
636 }
637 
638 void core_mmu_set_default_prtn_tbl(void)
639 {
640 	size_t n = 0;
641 
642 	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
643 		current_prtn[n] = &default_partition;
644 }
645 #endif
646 
647 static uint64_t *core_mmu_xlat_table_alloc(struct mmu_partition *prtn)
648 {
649 	uint64_t *new_table = NULL;
650 
651 	if (IS_ENABLED(CFG_DYN_CONFIG)) {
652 		if (cpu_mmu_enabled()) {
653 			tee_mm_entry_t *mm = NULL;
654 			paddr_t pa = 0;
655 
656 			/*
657 			 * The default_partition only has a physical memory
658 			 * pool for the nexus when virtualization is
659 			 * enabled. We should use the nexus physical memory
660 			 * pool if we're allocating memory for a partition
661 			 * other than our own.
662 			 */
663 			if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
664 			    (prtn == &default_partition ||
665 			     prtn != get_prtn())) {
666 				mm = nex_phys_mem_core_alloc(XLAT_TABLE_SIZE);
667 				if (!mm)
668 					EMSG("Phys nex mem exhausted");
669 			} else {
670 				mm = phys_mem_core_alloc(XLAT_TABLE_SIZE);
671 				if (!mm)
672 					EMSG("Phys mem exhausted");
673 			}
674 			if (!mm)
675 				return NULL;
676 			pa = tee_mm_get_smem(mm);
677 
678 			new_table = phys_to_virt(pa, MEM_AREA_SEC_RAM_OVERALL,
679 						 XLAT_TABLE_SIZE);
680 			assert(new_table);
681 		} else {
682 			new_table = boot_mem_alloc(XLAT_TABLE_SIZE,
683 						   XLAT_TABLE_SIZE);
684 			if (prtn->xlat_tables) {
685 				/*
686 				 * user_l1_table_idx[] is used to index
687 				 * xlat_tables so we depend on the
688 				 * xlat_tables being allocated linearly,
689 				 * or l1_idx_t might need a wider type.
690 				 */
691 				assert((vaddr_t)prtn->xlat_tables +
692 				       prtn->xlat_tables_used *
693 				       XLAT_TABLE_SIZE == (vaddr_t)new_table);
694 			} else {
695 				boot_mem_add_reloc(&prtn->xlat_tables);
696 				prtn->xlat_tables = new_table;
697 			}
698 		}
699 		prtn->xlat_tables_used++;
700 		DMSG("xlat tables used %u", prtn->xlat_tables_used);
701 	} else {
702 		if (prtn->xlat_tables_used >= MAX_XLAT_TABLES) {
703 			EMSG("%u xlat tables exhausted", MAX_XLAT_TABLES);
704 
705 			return NULL;
706 		}
707 
708 		new_table = prtn->xlat_tables +
709 			    prtn->xlat_tables_used * XLAT_TABLE_ENTRIES;
710 		prtn->xlat_tables_used++;
711 
712 		DMSG("xlat tables used %u / %u",
713 		     prtn->xlat_tables_used, MAX_XLAT_TABLES);
714 	}
715 
716 	return new_table;
717 }
718 
719 /*
720  * Given an entry that points to a table, return the virtual address
721  * of the pointed-to table, or NULL otherwise.
722  */
723 static void *core_mmu_xlat_table_entry_pa2va(struct mmu_partition *prtn,
724 					     unsigned int level,
725 					     uint64_t entry)
726 {
727 	paddr_t pa = 0;
728 	void *va = NULL;
729 
730 	if ((entry & DESC_ENTRY_TYPE_MASK) != TABLE_DESC ||
731 	    level >= XLAT_TABLE_LEVEL_MAX)
732 		return NULL;
733 
734 	pa = entry & OUTPUT_ADDRESS_MASK;
735 
736 	if (!IS_ENABLED(CFG_NS_VIRTUALIZATION) || prtn == &default_partition)
737 		va = phys_to_virt(pa, MEM_AREA_TEE_RAM_RW_DATA,
738 				  XLAT_TABLE_SIZE);
739 	if (!va)
740 		va = phys_to_virt(pa, MEM_AREA_SEC_RAM_OVERALL,
741 				  XLAT_TABLE_SIZE);
742 
743 	return va;
744 }
745 
746 /*
747  * For a table entry that points to a table: allocate a new table and
748  * copy the pointed-to table into it. This is done for the requested
749  * entry only, without going deeper into the pointed-to table's entries.
750  *
751  * Success is returned for non-table entries, as there is nothing to do there.
752  */
753 __maybe_unused
754 static bool core_mmu_entry_copy(struct core_mmu_table_info *tbl_info,
755 				unsigned int idx)
756 {
757 	uint64_t *orig_table = NULL;
758 	uint64_t *new_table = NULL;
759 	uint64_t *entry = NULL;
760 	struct mmu_partition *prtn = NULL;
761 
762 #ifdef CFG_NS_VIRTUALIZATION
763 	prtn = tbl_info->prtn;
764 #else
765 	prtn = &default_partition;
766 #endif
767 	assert(prtn);
768 
769 	if (idx >= tbl_info->num_entries)
770 		return false;
771 
772 	entry = (uint64_t *)tbl_info->table + idx;
773 
774 	/* Nothing to do for non-table entries */
775 	if ((*entry & DESC_ENTRY_TYPE_MASK) != TABLE_DESC ||
776 	    tbl_info->level >= XLAT_TABLE_LEVEL_MAX)
777 		return true;
778 
779 	new_table = core_mmu_xlat_table_alloc(prtn);
780 	if (!new_table)
781 		return false;
782 
783 	orig_table = core_mmu_xlat_table_entry_pa2va(prtn, tbl_info->level,
784 						     *entry);
785 	if (!orig_table)
786 		return false;
787 
788 	/* Copy original table content to new table */
789 	memcpy(new_table, orig_table, XLAT_TABLE_ENTRIES * XLAT_ENTRY_SIZE);
790 
791 	/* Point to the new table */
792 	*entry = virt_to_phys(new_table) | (*entry & ~OUTPUT_ADDRESS_MASK);
793 
794 	return true;
795 }
796 
797 static void share_region(struct mmu_partition *dst_prtn,
798 			 struct mmu_partition *src_prtn,
799 			 struct tee_mmap_region *mm)
800 {
801 	unsigned int level = CORE_MMU_PGDIR_LEVEL - 1;
802 	struct core_mmu_table_info dst_ti = { };
803 	struct core_mmu_table_info src_ti = { };
804 	struct tee_mmap_region dummy_mm = *mm;
805 	ssize_t size_left = 0;
806 	unsigned int idx = 0;
807 	uint32_t attr = 0;
808 	paddr_t pa = 0;
809 	vaddr_t va = 0;
810 
811 	assert(!(mm->size % CORE_MMU_PGDIR_SIZE));
812 
813 	dummy_mm.region_size = CORE_MMU_PGDIR_SIZE;
814 	core_mmu_map_region(dst_prtn, &dummy_mm);
815 
816 	/*
817 	 * Assign the PGDIR translation tables used in the src_prtn for the
818 	 * memory region into the same virtual address in the dst_prtn.
819 	 * This is used to share dynamic nexus mappings between partitions.
820 	 */
821 	va = mm->va;
822 	size_left = mm->size;
823 	while (size_left > 0) {
824 		/*
825 		 * The loop is typically only one iteration so there's no
826 		 * need to try to be clever with the table lookup.
827 		 */
828 		if (!core_mmu_find_table(src_prtn, va, level, &src_ti))
829 			panic("can't find src table for mapping");
830 		if (!core_mmu_find_table(dst_prtn, va, level, &dst_ti))
831 			panic("can't find dst table for mapping");
832 
833 		/*
834 		 * If two mmap regions share the same table we'll overwrite
835 		 * the value with the same value. This doesn't happen often
836 		 * enough that it's worth trying to be clever about when to
837 		 * write.
838 		 */
839 		idx = core_mmu_va2idx(&src_ti, va);
840 		core_mmu_get_entry(&src_ti, idx, &pa, &attr);
841 		core_mmu_set_entry(&dst_ti, idx, pa, attr);
842 
843 		va += CORE_MMU_PGDIR_SIZE;
844 		size_left -= CORE_MMU_PGDIR_SIZE;
845 	}
846 }
847 
848 static void core_init_mmu_prtn_tee(struct mmu_partition *prtn,
849 				   struct memory_map *mem_map)
850 {
851 	size_t n = 0;
852 
853 	assert(prtn && mem_map);
854 
855 	for (n = 0; n < mem_map->count; n++) {
856 		struct tee_mmap_region *mm = mem_map->map + n;
857 		debug_print(" %010" PRIxVA " %010" PRIxPA " %10zx %x",
858 			    mm->va, mm->pa, mm->size, mm->attr);
859 
860 		if (!IS_PAGE_ALIGNED(mm->pa) || !IS_PAGE_ALIGNED(mm->size))
861 			panic("unaligned region");
862 	}
863 
864 	/* Clear table before use */
865 	memset(prtn->base_tables, 0, BASE_TABLE_SIZE * CFG_TEE_CORE_NB_CORE);
866 
867 	for (n = 0; n < mem_map->count; n++) {
868 		if (core_mmu_type_is_nex_shared(mem_map->map[n].type) &&
869 		    prtn != &default_partition)
870 			share_region(prtn, &default_partition,
871 				     mem_map->map + n);
872 		else
873 			core_mmu_map_region(prtn, mem_map->map + n);
874 	}
875 
876 	/*
877 	 * The primary mapping table is ready at index `get_core_pos()`,
878 	 * whose value may not be zero. Use this index as the copy source.
879 	 */
880 	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
881 		if (n == get_core_pos())
882 			continue;
883 
884 		memcpy(get_base_table(prtn, 0, n),
885 		       get_base_table(prtn, 0, get_core_pos()),
886 		       XLAT_ENTRY_SIZE * NUM_BASE_LEVEL_ENTRIES);
887 	}
888 }
889 
890 /*
891  * In order to support 32-bit TAs we will have to find
892  * a user VA base in the region [1GB, 4GB[.
893  * Due to an OP-TEE design limitation, the TA page table must be an entry
894  * inside a level 1 page table.
895  *
896  * Available options are only these:
897  * - base level 0 entry 0 - [0GB, 512GB[
898  *   - level 1 entry 0 - [0GB, 1GB[
899  *   - level 1 entry 1 - [1GB, 2GB[           <----
900  *   - level 1 entry 2 - [2GB, 3GB[           <----
901  *   - level 1 entry 3 - [3GB, 4GB[           <----
902  *   - level 1 entry 4 - [4GB, 5GB[
903  *   - ...
904  * - ...
905  *
906  * - base level 1 entry 0 - [0GB, 1GB[
907  * - base level 1 entry 1 - [1GB, 2GB[        <----
908  * - base level 1 entry 2 - [2GB, 3GB[        <----
909  * - base level 1 entry 3 - [3GB, 4GB[        <----
910  * - base level 1 entry 4 - [4GB, 5GB[
911  * - ...
912  */
913 static void set_user_va_idx(struct mmu_partition *prtn)
914 {
915 	uint64_t *tbl = NULL;
916 	unsigned int n = 0;
917 
918 	assert(prtn);
919 
920 	tbl = get_base_table(prtn, 0, get_core_pos());
921 
922 	/*
923 	 * If base level is 0, then we must use its entry 0.
924 	 */
925 	if (CORE_MMU_BASE_TABLE_LEVEL == 0) {
926 		/*
927 		 * If base level 0 entry 0 is not used, then we can simply
928 		 * use level 1 entry 1 inside it
929 		 * (the level 1 table will be allocated later).
930 		 */
931 		if ((tbl[0] & DESC_ENTRY_TYPE_MASK) == INVALID_DESC) {
932 			user_va_idx = 1;
933 
934 			return;
935 		}
936 
937 		assert((tbl[0] & DESC_ENTRY_TYPE_MASK) == TABLE_DESC);
938 
939 		tbl = core_mmu_xlat_table_entry_pa2va(prtn, 0, tbl[0]);
940 		assert(tbl);
941 	}
942 
943 	/*
944 	 * Search level 1 table (i.e. 1GB mapping per entry) for
945 	 * an empty entry in the range [1GB, 4GB[.
946 	 */
947 	for (n = 1; n < 4; n++) {
948 		if ((tbl[n] & DESC_ENTRY_TYPE_MASK) == INVALID_DESC) {
949 			user_va_idx = n;
950 			break;
951 		}
952 	}
953 
954 	assert(user_va_idx != -1);
955 }
956 
957 /*
958  * Setup an entry inside a core level 1 page table for TAs memory mapping
959  *
960  * If base table level is 1 - user_va_idx is already the index,
961  *                            so nothing to do.
962  * If base table level is 0 - we might need to allocate entry 0 of base table,
963  *                            as TAs page table is an entry inside a level 1
964  *                            page table.
965  */
966 static void core_init_mmu_prtn_ta_core(struct mmu_partition *prtn
967 				       __maybe_unused,
968 				       unsigned int base_idx __maybe_unused,
969 				       unsigned int core __maybe_unused)
970 {
971 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
972 	struct core_mmu_table_info tbl_info = { };
973 	uint64_t *tbl = NULL;
974 
975 	assert(user_va_idx != -1);
976 	COMPILE_TIME_ASSERT(MAX_XLAT_TABLES < (1 << (8 * sizeof(l1_idx_t))));
977 
978 	tbl = get_base_table(prtn, base_idx, core);
979 
980 	/*
981 	 * If the base level is 0, then user_va_idx refers to the
982 	 * level 1 page table that's in base level 0 entry 0.
983 	 */
984 	core_mmu_set_info_table(&tbl_info, 0, 0, tbl);
985 #ifdef CFG_NS_VIRTUALIZATION
986 	tbl_info.prtn = prtn;
987 #endif
988 
989 	/*
990 	 * If this isn't the core that created the initial table
991 	 * mappings, then the level 1 table must be copied,
992 	 * as it will hold a pointer to the user mapping table
993 	 * that changes per core.
994 	 */
995 	if (core != get_core_pos()) {
996 		if (!core_mmu_entry_copy(&tbl_info, 0))
997 			panic();
998 	}
999 
1000 	if (!core_mmu_entry_to_finer_grained(&tbl_info, 0, true))
1001 		panic();
1002 
1003 	/*
1004 	 * Now the base level table should be ready with a table descriptor.
1005 	 */
1006 	assert((tbl[0] & DESC_ENTRY_TYPE_MASK) == TABLE_DESC);
1007 
1008 	tbl = core_mmu_xlat_table_entry_pa2va(prtn, 0, tbl[0]);
1009 	assert(tbl);
1010 
1011 	set_l1_ta_table(prtn, base_idx, core, tbl);
1012 #endif
1013 }
1014 
1015 static void core_init_mmu_prtn_ta(struct mmu_partition *prtn)
1016 {
1017 	unsigned int base_idx = 0;
1018 	unsigned int core = 0;
1019 
1020 	assert(user_va_idx != -1);
1021 
1022 	for (base_idx = 0; base_idx < NUM_BASE_TABLES; base_idx++)
1023 		for (core = 0; core < CFG_TEE_CORE_NB_CORE; core++)
1024 			core_init_mmu_prtn_ta_core(prtn, base_idx, core);
1025 }
1026 
1027 void core_init_mmu_prtn(struct mmu_partition *prtn, struct memory_map *mem_map)
1028 {
1029 	core_init_mmu_prtn_tee(prtn, mem_map);
1030 	core_init_mmu_prtn_ta(prtn);
1031 }
1032 
1033 void core_init_mmu(struct memory_map *mem_map)
1034 {
1035 	struct mmu_partition *prtn = &default_partition;
1036 	uint64_t max_va = 0;
1037 	size_t n;
1038 
1039 	COMPILE_TIME_ASSERT(CORE_MMU_BASE_TABLE_SHIFT ==
1040 			    XLAT_ADDR_SHIFT(CORE_MMU_BASE_TABLE_LEVEL));
1041 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0
1042 	COMPILE_TIME_ASSERT(CORE_MMU_BASE_TABLE_OFFSET ==
1043 			    BASE_TABLE_SIZE / NUM_BASE_TABLES);
1044 #endif
1045 
1046 	if (IS_ENABLED(CFG_DYN_CONFIG)) {
1047 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
1048 		prtn->user_l1_table_idx = boot_mem_alloc(NUM_BASE_TABLES *
1049 							 CFG_TEE_CORE_NB_CORE *
1050 							 sizeof(l1_idx_t),
1051 							 alignof(l1_idx_t));
1052 		boot_mem_add_reloc(&prtn->user_l1_table_idx);
1053 #endif
1054 		prtn->base_tables = boot_mem_alloc(BASE_TABLE_SIZE *
1055 						   CFG_TEE_CORE_NB_CORE,
1056 						   NUM_BASE_LEVEL_ENTRIES *
1057 						   XLAT_ENTRY_SIZE);
1058 		boot_mem_add_reloc(&prtn->base_tables);
1059 
1060 		prtn->l2_ta_tables = boot_mem_alloc(XLAT_TABLE_SIZE *
1061 						    CFG_NUM_THREADS,
1062 						    XLAT_TABLE_SIZE);
1063 		boot_mem_add_reloc(&prtn->l2_ta_tables);
1064 	}
1065 
1066 	/* Initialize default pagetables */
1067 	core_init_mmu_prtn_tee(&default_partition, mem_map);
1068 
1069 	for (n = 0; n < mem_map->count; n++) {
1070 		vaddr_t va_end = mem_map->map[n].va + mem_map->map[n].size - 1;
1071 
1072 		if (va_end > max_va)
1073 			max_va = va_end;
1074 	}
1075 
1076 	set_user_va_idx(&default_partition);
1077 
1078 	core_init_mmu_prtn_ta(&default_partition);
1079 
1080 	COMPILE_TIME_ASSERT(CFG_LPAE_ADDR_SPACE_BITS > L1_XLAT_ADDRESS_SHIFT);
1081 	assert(max_va < BIT64(CFG_LPAE_ADDR_SPACE_BITS));
1082 }
1083 
1084 #ifdef CFG_WITH_PAGER
1085 /* Prefer to consume only 1 base xlat table for the whole mapping */
1086 bool core_mmu_prefer_tee_ram_at_top(paddr_t paddr)
1087 {
1088 	size_t base_level_size = BASE_XLAT_BLOCK_SIZE;
1089 	paddr_t base_level_mask = base_level_size - 1;
1090 
1091 	return (paddr & base_level_mask) > (base_level_size / 2);
1092 }
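/*
 * E.g. with a level 1 base table, BASE_XLAT_BLOCK_SIZE is 1 GB, so TEE RAM
 * is preferred at the top when the address lies in the upper half of its
 * 1 GB block.
 */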
1093 #endif
1094 
1095 #ifdef ARM32
1096 void core_init_mmu_regs(struct core_mmu_config *cfg)
1097 {
1098 	struct mmu_partition *prtn = &default_partition;
1099 	uint32_t ttbcr = 0;
1100 	uint32_t mair = 0;
1101 
1102 	cfg->ttbr0_base = virt_to_phys(get_base_table(prtn, 0, 0));
1103 	cfg->ttbr0_core_offset = BASE_TABLE_SIZE;
1104 
1105 	mair  = MAIR_ATTR_SET(ATTR_DEVICE_nGnRE, ATTR_DEVICE_nGnRE_INDEX);
1106 	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
1107 	mair |= MAIR_ATTR_SET(ATTR_DEVICE_nGnRnE, ATTR_DEVICE_nGnRnE_INDEX);
1108 	/*
1109 	 * Tagged memory isn't supported in 32-bit mode, so map tagged memory
1110 	 * as normal memory instead.
1111 	 */
1112 	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
1113 			      ATTR_TAGGED_NORMAL_MEM_INDEX);
1114 	cfg->mair0 = mair;
1115 
1116 	ttbcr = TTBCR_EAE;
1117 	ttbcr |= TTBCR_XRGNX_WBWA << TTBCR_IRGN0_SHIFT;
1118 	ttbcr |= TTBCR_XRGNX_WBWA << TTBCR_ORGN0_SHIFT;
1119 	ttbcr |= TTBCR_SHX_ISH << TTBCR_SH0_SHIFT;
1120 	ttbcr |= TTBCR_EPD1;	/* Disable the use of TTBR1 */
1121 
1122 	/* TTBCR.A1 = 0 => ASID is stored in TTBR0 */
1123 	cfg->ttbcr = ttbcr;
1124 }
1125 #endif /*ARM32*/
1126 
1127 #ifdef ARM64
1128 static unsigned int get_hard_coded_pa_size_bits(void)
1129 {
1130 	/*
1131 	 * Intermediate Physical Address Size.
1132 	 * 0b000      32 bits, 4GB.
1133 	 * 0b001      36 bits, 64GB.
1134 	 * 0b010      40 bits, 1TB.
1135 	 * 0b011      42 bits, 4TB.
1136 	 * 0b100      44 bits, 16TB.
1137 	 * 0b101      48 bits, 256TB.
1138 	 * 0b110      52 bits, 4PB
1139 	 */
1140 	static_assert(CFG_CORE_ARM64_PA_BITS >= 32);
1141 	static_assert(CFG_CORE_ARM64_PA_BITS <= 52);
1142 
1143 	if (CFG_CORE_ARM64_PA_BITS <= 32)
1144 		return TCR_PS_BITS_4GB;
1145 
1146 	if (CFG_CORE_ARM64_PA_BITS <= 36)
1147 		return TCR_PS_BITS_64GB;
1148 
1149 	if (CFG_CORE_ARM64_PA_BITS <= 40)
1150 		return TCR_PS_BITS_1TB;
1151 
1152 	if (CFG_CORE_ARM64_PA_BITS <= 42)
1153 		return TCR_PS_BITS_4TB;
1154 
1155 	if (CFG_CORE_ARM64_PA_BITS <= 44)
1156 		return TCR_PS_BITS_16TB;
1157 
1158 	if (CFG_CORE_ARM64_PA_BITS <= 48)
1159 		return TCR_PS_BITS_256TB;
1160 
1161 	/* 48 < CFG_CORE_ARM64_PA_BITS <= 52 */
1162 	return TCR_PS_BITS_4PB;
1163 }
1164 
1165 static unsigned int get_physical_addr_size_bits(void)
1166 {
1167 	const unsigned int size_bits = read_id_aa64mmfr0_el1() &
1168 				       ID_AA64MMFR0_EL1_PARANGE_MASK;
1169 	unsigned int b = 0;
1170 
1171 	if (IS_ENABLED(CFG_AUTO_MAX_PA_BITS))
1172 		return size_bits;
1173 
1174 	b = get_hard_coded_pa_size_bits();
1175 	assert(b <= size_bits);
1176 	return b;
1177 }
1178 
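/*
 * Translate the 3-bit PARANGE/IPS encoding returned above (0..6) into a
 * physical address width in bits; encodings beyond the table are clamped
 * to the last entry, 52 bits.
 */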
1179 unsigned int core_mmu_arm64_get_pa_width(void)
1180 {
1181 	const uint8_t map[] = { 32, 36, 40, 42, 44, 48, 52, };
1182 	unsigned int size_bits = get_physical_addr_size_bits();
1183 
1184 	size_bits = MIN(size_bits, ARRAY_SIZE(map) - 1);
1185 	return map[size_bits];
1186 }
1187 
1188 void core_init_mmu_regs(struct core_mmu_config *cfg)
1189 {
1190 	struct mmu_partition *prtn = &default_partition;
1191 	uint64_t ips = get_physical_addr_size_bits();
1192 	uint64_t mair = 0;
1193 	uint64_t tcr = 0;
1194 
1195 	cfg->ttbr0_el1_base = virt_to_phys(get_base_table(prtn, 0, 0));
1196 	cfg->ttbr0_core_offset = BASE_TABLE_SIZE;
1197 
1198 	mair  = MAIR_ATTR_SET(ATTR_DEVICE_nGnRE, ATTR_DEVICE_nGnRE_INDEX);
1199 	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
1200 	mair |= MAIR_ATTR_SET(ATTR_DEVICE_nGnRnE, ATTR_DEVICE_nGnRnE_INDEX);
1201 	/*
1202 	 * If MEMTAG isn't enabled, map tagged memory as normal memory
1203 	 * instead.
1204 	 */
1205 	if (memtag_is_enabled())
1206 		mair |= MAIR_ATTR_SET(ATTR_TAGGED_NORMAL_MEM,
1207 				      ATTR_TAGGED_NORMAL_MEM_INDEX);
1208 	else
1209 		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
1210 				      ATTR_TAGGED_NORMAL_MEM_INDEX);
1211 	cfg->mair_el1 = mair;
1212 
1213 	tcr = TCR_RES1;
1214 	tcr |= TCR_XRGNX_WBWA << TCR_IRGN0_SHIFT;
1215 	tcr |= TCR_XRGNX_WBWA << TCR_ORGN0_SHIFT;
1216 	tcr |= TCR_SHX_ISH << TCR_SH0_SHIFT;
1217 	tcr |= ips << TCR_EL1_IPS_SHIFT;
1218 	tcr |= 64 - CFG_LPAE_ADDR_SPACE_BITS;
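	/*
	 * T0SZ = 64 - CFG_LPAE_ADDR_SPACE_BITS, e.g. 32 with the default
	 * 32-bit address space, giving a 4 GB VA range for TTBR0.
	 */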
1219 
1220 	/* Disable the use of TTBR1 */
1221 	tcr |= TCR_EPD1;
1222 
1223 	/*
1224 	 * TCR.A1 = 0 => ASID is stored in TTBR0
1225 	 * TCR.AS = 0 => Same ASID size as in Aarch32/ARMv7
1226 	 */
1227 	cfg->tcr_el1 = tcr;
1228 }
1229 #endif /*ARM64*/
1230 
1231 void core_mmu_set_info_table(struct core_mmu_table_info *tbl_info,
1232 		unsigned level, vaddr_t va_base, void *table)
1233 {
1234 	tbl_info->level = level;
1235 	tbl_info->next_level = level + 1;
1236 	tbl_info->table = table;
1237 	tbl_info->va_base = va_base;
1238 	tbl_info->shift = XLAT_ADDR_SHIFT(level);
1239 
1240 #if (CORE_MMU_BASE_TABLE_LEVEL > 0)
1241 	assert(level >= CORE_MMU_BASE_TABLE_LEVEL);
1242 #endif
1243 	assert(level <= XLAT_TABLE_LEVEL_MAX);
1244 
1245 	if (level == CORE_MMU_BASE_TABLE_LEVEL)
1246 		tbl_info->num_entries = NUM_BASE_LEVEL_ENTRIES;
1247 	else
1248 		tbl_info->num_entries = XLAT_TABLE_ENTRIES;
1249 }
1250 
1251 void core_mmu_get_user_pgdir(struct core_mmu_table_info *pgd_info)
1252 {
1253 	vaddr_t va_range_base;
1254 	void *tbl = get_l2_ta_tables(get_prtn(), thread_get_id());
1255 
1256 	core_mmu_get_user_va_range(&va_range_base, NULL);
1257 	core_mmu_set_info_table(pgd_info, 2, va_range_base, tbl);
1258 }
1259 
1260 void core_mmu_create_user_map(struct user_mode_ctx *uctx,
1261 			      struct core_mmu_user_map *map)
1262 {
1263 	struct core_mmu_table_info dir_info;
1264 
1265 	COMPILE_TIME_ASSERT(sizeof(uint64_t) * XLAT_TABLE_ENTRIES == PGT_SIZE);
1266 
1267 	core_mmu_get_user_pgdir(&dir_info);
1268 	memset(dir_info.table, 0, PGT_SIZE);
1269 	core_mmu_populate_user_map(&dir_info, uctx);
1270 	map->user_map = virt_to_phys(dir_info.table) | TABLE_DESC;
1271 	map->asid = uctx->vm_info.asid;
1272 }
1273 
1274 bool core_mmu_find_table(struct mmu_partition *prtn, vaddr_t va,
1275 			 unsigned max_level,
1276 			 struct core_mmu_table_info *tbl_info)
1277 {
1278 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
1279 	unsigned int num_entries = NUM_BASE_LEVEL_ENTRIES;
1280 	unsigned int level = CORE_MMU_BASE_TABLE_LEVEL;
1281 	vaddr_t va_base = 0;
1282 	bool ret = false;
1283 	uint64_t *tbl;
1284 
1285 	if (!prtn)
1286 		prtn = get_prtn();
1287 	tbl = get_base_table(prtn, 0, get_core_pos());
1288 
1289 	while (true) {
1290 		unsigned int level_size_shift = XLAT_ADDR_SHIFT(level);
1291 		unsigned int n = (va - va_base) >> level_size_shift;
1292 
1293 		if (n >= num_entries)
1294 			goto out;
1295 
1296 		if (level == max_level || level == XLAT_TABLE_LEVEL_MAX ||
1297 		    (tbl[n] & TABLE_DESC) != TABLE_DESC) {
1298 			/*
1299 			 * We've either reached max_level, a block
1300 			 * mapping entry or an "invalid" mapping entry.
1301 			 */
1302 
1303 			/*
1304 			 * Base level is the CPU specific translation table.
1305 			 * It doesn't make sense to return anything based
1306 			 * on that unless foreign interrupts already are
1307 			 * masked.
1308 			 */
1309 			if (level == CORE_MMU_BASE_TABLE_LEVEL &&
1310 			    !(exceptions & THREAD_EXCP_FOREIGN_INTR))
1311 				goto out;
1312 
1313 			tbl_info->table = tbl;
1314 			tbl_info->va_base = va_base;
1315 			tbl_info->level = level;
1316 			tbl_info->next_level = level + 1;
1317 			tbl_info->shift = level_size_shift;
1318 			tbl_info->num_entries = num_entries;
1319 #ifdef CFG_NS_VIRTUALIZATION
1320 			tbl_info->prtn = prtn;
1321 #endif
1322 			ret = true;
1323 			goto out;
1324 		}
1325 
1326 		tbl = core_mmu_xlat_table_entry_pa2va(prtn, level, tbl[n]);
1327 
1328 		if (!tbl)
1329 			goto out;
1330 
1331 		va_base += (vaddr_t)n << level_size_shift;
1332 		level++;
1333 		num_entries = XLAT_TABLE_ENTRIES;
1334 	}
1335 out:
1336 	thread_unmask_exceptions(exceptions);
1337 	return ret;
1338 }
1339 
1340 bool core_mmu_entry_to_finer_grained(struct core_mmu_table_info *tbl_info,
1341 				     unsigned int idx, bool secure __unused)
1342 {
1343 	uint64_t *new_table;
1344 	uint64_t *entry;
1345 	int i;
1346 	paddr_t pa;
1347 	uint64_t attr;
1348 	paddr_t block_size_on_next_lvl = XLAT_BLOCK_SIZE(tbl_info->level + 1);
1349 	struct mmu_partition *prtn;
1350 
1351 #ifdef CFG_NS_VIRTUALIZATION
1352 	prtn = tbl_info->prtn;
1353 #else
1354 	prtn = &default_partition;
1355 #endif
1356 	assert(prtn);
1357 
1358 	if (tbl_info->level >= XLAT_TABLE_LEVEL_MAX ||
1359 	    idx >= tbl_info->num_entries)
1360 		return false;
1361 
1362 	entry = (uint64_t *)tbl_info->table + idx;
1363 
1364 	if ((*entry & DESC_ENTRY_TYPE_MASK) == TABLE_DESC)
1365 		return true;
1366 
1367 	new_table = core_mmu_xlat_table_alloc(prtn);
1368 	if (!new_table)
1369 		return false;
1370 
1371 	if (*entry) {
1372 		pa = *entry & OUTPUT_ADDRESS_MASK;
1373 		attr = *entry & ~(OUTPUT_ADDRESS_MASK | DESC_ENTRY_TYPE_MASK);
1374 		for (i = 0; i < XLAT_TABLE_ENTRIES; i++) {
1375 			new_table[i] = pa | attr | BLOCK_DESC;
1376 			pa += block_size_on_next_lvl;
1377 		}
1378 	} else {
1379 		memset(new_table, 0, XLAT_TABLE_ENTRIES * XLAT_ENTRY_SIZE);
1380 	}
1381 
1382 	*entry = virt_to_phys(new_table) | TABLE_DESC;
1383 
1384 	return true;
1385 }
1386 
1387 void core_mmu_set_entry_primitive(void *table, size_t level, size_t idx,
1388 				  paddr_t pa, uint32_t attr)
1389 {
1390 	uint64_t *tbl = table;
1391 	uint64_t desc = mattr_to_desc(level, attr);
1392 
1393 	tbl[idx] = desc | pa;
1394 }
1395 
1396 void core_mmu_get_entry_primitive(const void *table, size_t level,
1397 				  size_t idx, paddr_t *pa, uint32_t *attr)
1398 {
1399 	const uint64_t *tbl = table;
1400 
1401 	if (pa)
1402 		*pa = tbl[idx] & GENMASK_64(47, 12);
1403 
1404 	if (attr)
1405 		*attr = desc_to_mattr(level, tbl[idx]);
1406 }
1407 
1408 bool core_mmu_user_va_range_is_defined(void)
1409 {
1410 	return user_va_idx != -1;
1411 }
1412 
1413 void core_mmu_get_user_va_range(vaddr_t *base, size_t *size)
1414 {
1415 	assert(user_va_idx != -1);
1416 
1417 	if (base)
1418 		*base = (vaddr_t)user_va_idx << L1_XLAT_ADDRESS_SHIFT;
1419 	if (size)
1420 		*size = BIT64(L1_XLAT_ADDRESS_SHIFT);
1421 }
1422 
1423 static uint64_t *core_mmu_get_user_mapping_entry(struct mmu_partition *prtn,
1424 						 unsigned int base_idx)
1425 {
1426 	uint64_t *tbl = NULL;
1427 
1428 	assert(user_va_idx != -1);
1429 
1430 #if (CORE_MMU_BASE_TABLE_LEVEL == 0)
1431 	tbl = get_l1_ta_table(prtn, base_idx, get_core_pos());
1432 #else
1433 	tbl =  get_base_table(prtn, base_idx, get_core_pos());
1434 #endif
1435 
1436 	return tbl + user_va_idx;
1437 }
1438 
1439 bool core_mmu_user_mapping_is_active(void)
1440 {
1441 	bool ret = false;
1442 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
1443 	uint64_t *entry = NULL;
1444 
1445 	entry = core_mmu_get_user_mapping_entry(get_prtn(), 0);
1446 	ret = (*entry != 0);
1447 
1448 	thread_unmask_exceptions(exceptions);
1449 
1450 	return ret;
1451 }
1452 
1453 #ifdef ARM32
1454 void core_mmu_get_user_map(struct core_mmu_user_map *map)
1455 {
1456 	struct mmu_partition *prtn = get_prtn();
1457 	uint64_t *entry = NULL;
1458 
1459 	entry = core_mmu_get_user_mapping_entry(prtn, 0);
1460 
1461 	map->user_map = *entry;
1462 	if (map->user_map) {
1463 		map->asid = (read_ttbr0_64bit() >> TTBR_ASID_SHIFT) &
1464 			    TTBR_ASID_MASK;
1465 	} else {
1466 		map->asid = 0;
1467 	}
1468 }
1469 
1470 void core_mmu_set_user_map(struct core_mmu_user_map *map)
1471 {
1472 	uint64_t ttbr = 0;
1473 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
1474 	struct mmu_partition *prtn = get_prtn();
1475 	uint64_t *entries[NUM_BASE_TABLES] = { };
1476 	unsigned int i = 0;
1477 
1478 	ttbr = read_ttbr0_64bit();
1479 	/* Clear ASID */
1480 	ttbr &= ~((uint64_t)TTBR_ASID_MASK << TTBR_ASID_SHIFT);
1481 	write_ttbr0_64bit(ttbr);
1482 	isb();
1483 
1484 	for (i = 0; i < NUM_BASE_TABLES; i++)
1485 		entries[i] = core_mmu_get_user_mapping_entry(prtn, i);
1486 
1487 	/* Set the new map */
1488 	if (map && map->user_map) {
1489 		for (i = 0; i < NUM_BASE_TABLES; i++)
1490 			*entries[i] = map->user_map;
1491 
1492 		dsb();	/* Make sure the write above is visible */
1493 		ttbr |= ((uint64_t)map->asid << TTBR_ASID_SHIFT);
1494 		write_ttbr0_64bit(ttbr);
1495 		isb();
1496 	} else {
1497 		for (i = 0; i < NUM_BASE_TABLES; i++)
1498 			*entries[i] = INVALID_DESC;
1499 
1500 		dsb();	/* Make sure the write above is visible */
1501 	}
1502 
1503 	tlbi_all();
1504 	icache_inv_all();
1505 
1506 	thread_unmask_exceptions(exceptions);
1507 }
1508 
1509 enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr)
1510 {
1511 	assert(fault_descr & FSR_LPAE);
1512 
1513 	switch (fault_descr & FSR_STATUS_MASK) {
1514 	case 0x10: /* b010000 Synchronous external abort, not on table walk */
1515 	case 0x15: /* b010101 Synchronous external abort, on table walk L1 */
1516 	case 0x16: /* b010110 Synchronous external abort, on table walk L2 */
1517 	case 0x17: /* b010111 Synchronous external abort, on table walk L3 */
1518 		return CORE_MMU_FAULT_SYNC_EXTERNAL;
1519 	case 0x11: /* b010001 Asynchronous external abort (DFSR only) */
1520 		return CORE_MMU_FAULT_ASYNC_EXTERNAL;
1521 	case 0x21: /* b100001 Alignment fault */
1522 		return CORE_MMU_FAULT_ALIGNMENT;
1523 	case 0x22: /* b100010 Debug event */
1524 		return CORE_MMU_FAULT_DEBUG_EVENT;
1525 	default:
1526 		break;
1527 	}
1528 
1529 	switch ((fault_descr & FSR_STATUS_MASK) >> 2) {
1530 	case 0x1: /* b0001LL Translation fault */
1531 		return CORE_MMU_FAULT_TRANSLATION;
1532 	case 0x2: /* b0010LL Access flag fault */
1533 	case 0x3: /* b0011LL Permission fault */
1534 		if (fault_descr & FSR_WNR)
1535 			return CORE_MMU_FAULT_WRITE_PERMISSION;
1536 		else
1537 			return CORE_MMU_FAULT_READ_PERMISSION;
1538 	default:
1539 		return CORE_MMU_FAULT_OTHER;
1540 	}
1541 }
1542 #endif /*ARM32*/
1543 
1544 #ifdef ARM64
1545 void core_mmu_get_user_map(struct core_mmu_user_map *map)
1546 {
1547 	struct mmu_partition *prtn = get_prtn();
1548 	uint64_t *entry = NULL;
1549 
1550 	entry = core_mmu_get_user_mapping_entry(prtn, 0);
1551 
1552 	map->user_map = *entry;
1553 	if (map->user_map) {
1554 		map->asid = (read_ttbr0_el1() >> TTBR_ASID_SHIFT) &
1555 			    TTBR_ASID_MASK;
1556 	} else {
1557 		map->asid = 0;
1558 	}
1559 }
1560 
1561 void core_mmu_set_user_map(struct core_mmu_user_map *map)
1562 {
1563 	uint64_t ttbr = 0;
1564 	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
1565 	struct mmu_partition *prtn = get_prtn();
1566 	uint64_t *entries[NUM_BASE_TABLES] = { };
1567 	unsigned int i = 0;
1568 
1569 	ttbr = read_ttbr0_el1();
1570 	/* Clear ASID */
1571 	ttbr &= ~((uint64_t)TTBR_ASID_MASK << TTBR_ASID_SHIFT);
1572 	write_ttbr0_el1(ttbr);
1573 	isb();
1574 
1575 	for (i = 0; i < NUM_BASE_TABLES; i++)
1576 		entries[i] = core_mmu_get_user_mapping_entry(prtn, i);
1577 
1578 	/* Set the new map */
1579 	if (map && map->user_map) {
1580 		for (i = 0; i < NUM_BASE_TABLES; i++)
1581 			*entries[i] = map->user_map;
1582 
1583 		dsb();	/* Make sure the write above is visible */
1584 		ttbr |= ((uint64_t)map->asid << TTBR_ASID_SHIFT);
1585 		write_ttbr0_el1(ttbr);
1586 		isb();
1587 	} else {
1588 		for (i = 0; i < NUM_BASE_TABLES; i++)
1589 			*entries[i] = INVALID_DESC;
1590 
1591 		dsb();	/* Make sure the write above is visible */
1592 	}
1593 
1594 	tlbi_all();
1595 	icache_inv_all();
1596 
1597 	thread_unmask_exceptions(exceptions);
1598 }
1599 
1600 enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr)
1601 {
1602 	switch ((fault_descr >> ESR_EC_SHIFT) & ESR_EC_MASK) {
1603 	case ESR_EC_SP_ALIGN:
1604 	case ESR_EC_PC_ALIGN:
1605 		return CORE_MMU_FAULT_ALIGNMENT;
1606 	case ESR_EC_IABT_EL0:
1607 	case ESR_EC_DABT_EL0:
1608 	case ESR_EC_IABT_EL1:
1609 	case ESR_EC_DABT_EL1:
1610 		switch (fault_descr & ESR_FSC_MASK) {
1611 		case ESR_FSC_SIZE_L0:
1612 		case ESR_FSC_SIZE_L1:
1613 		case ESR_FSC_SIZE_L2:
1614 		case ESR_FSC_SIZE_L3:
1615 		case ESR_FSC_TRANS_L0:
1616 		case ESR_FSC_TRANS_L1:
1617 		case ESR_FSC_TRANS_L2:
1618 		case ESR_FSC_TRANS_L3:
1619 			return CORE_MMU_FAULT_TRANSLATION;
1620 		case ESR_FSC_ACCF_L1:
1621 		case ESR_FSC_ACCF_L2:
1622 		case ESR_FSC_ACCF_L3:
1623 		case ESR_FSC_PERMF_L1:
1624 		case ESR_FSC_PERMF_L2:
1625 		case ESR_FSC_PERMF_L3:
1626 			if (fault_descr & ESR_ABT_WNR)
1627 				return CORE_MMU_FAULT_WRITE_PERMISSION;
1628 			else
1629 				return CORE_MMU_FAULT_READ_PERMISSION;
1630 		case ESR_FSC_ALIGN:
1631 			return CORE_MMU_FAULT_ALIGNMENT;
1632 		case ESR_FSC_TAG_CHECK:
1633 			return CORE_MMU_FAULT_TAG_CHECK;
1634 		case ESR_FSC_SEA_NTT:
1635 		case ESR_FSC_SEA_TT_SUB_L2:
1636 		case ESR_FSC_SEA_TT_SUB_L1:
1637 		case ESR_FSC_SEA_TT_L0:
1638 		case ESR_FSC_SEA_TT_L1:
1639 		case ESR_FSC_SEA_TT_L2:
1640 		case ESR_FSC_SEA_TT_L3:
1641 			return CORE_MMU_FAULT_SYNC_EXTERNAL;
1642 		default:
1643 			return CORE_MMU_FAULT_OTHER;
1644 		}
1645 	default:
1646 		return CORE_MMU_FAULT_OTHER;
1647 	}
1648 }
1649 #endif /*ARM64*/
1650