xref: /rk3399_ARM-atf/lib/xlat_tables/aarch32/xlat_tables.c (revision d70a7d0ce02c0b73891cc1e26fc2c568d7120b84)
/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <cassert.h>
#include <platform_def.h>
#include <utils.h>
#include <xlat_tables.h>
#include "../xlat_tables_private.h"

/*
 * Each platform can define the size of its virtual address space via
 * PLAT_VIRT_ADDR_SPACE_SIZE. TTBCR.TxSZ is calculated as 32 minus the width
 * of that address space. The value of TTBCR.TxSZ must be in the range 0 to
 * 7 [1], which means that the virtual address space width must be in the
 * range 32 to 25 bits.
 *
 * Here we calculate the initial lookup level from the value of
 * PLAT_VIRT_ADDR_SPACE_SIZE. For a 4 KB page size, level 1 supports virtual
 * address spaces of widths 32 to 31 bits, and level 2 supports widths of 30
 * down to 25 bits. Wider or narrower address spaces are not supported. As a
 * result, level 3 cannot be used as the initial lookup level with 4 KB
 * granularity [1].
 *
 * For example, for a 31-bit address space (i.e. PLAT_VIRT_ADDR_SPACE_SIZE ==
 * 1 << 31), TTBCR.TxSZ will be programmed to (32 - 31) = 1. According to
 * Table G4-5 in the ARM ARM, the initial lookup level for such an address
 * space is 1.
 *
 * See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
 * information:
 * [1] Section G4.6.5
 */

#if PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << (32 - TTBCR_TxSZ_MIN))

# error "PLAT_VIRT_ADDR_SPACE_SIZE is too big."

#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)

# define XLAT_TABLE_LEVEL_BASE	1
# define NUM_BASE_LEVEL_ENTRIES	\
		(PLAT_VIRT_ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)

#elif PLAT_VIRT_ADDR_SPACE_SIZE >= (1 << (32 - TTBCR_TxSZ_MAX))

# define XLAT_TABLE_LEVEL_BASE	2
# define NUM_BASE_LEVEL_ENTRIES	\
		(PLAT_VIRT_ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)

#else

# error "PLAT_VIRT_ADDR_SPACE_SIZE is too small."

#endif
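
/*
 * For example, a 2 GB virtual address space (PLAT_VIRT_ADDR_SPACE_SIZE ==
 * 1 << 31) selects XLAT_TABLE_LEVEL_BASE == 1 and NUM_BASE_LEVEL_ENTRIES ==
 * (1 << 31) >> L1_XLAT_ADDRESS_SHIFT == 2 (the shift being 30 for the 4 KB
 * granule): a base table of two level 1 entries, each covering 1 GB.
 */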

static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));
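
/*
 * In the long-descriptor translation table format, the base table that
 * TTBR0 points to must be aligned to its own size, which the __aligned
 * attribute above enforces (NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t)
 * bytes).
 */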

#if ENABLE_ASSERTIONS
static unsigned long long get_max_supported_pa(void)
{
	/* Physical address space size for long descriptor format. */
	return (1ULL << 40) - 1ULL;
}
#endif /* ENABLE_ASSERTIONS */

int xlat_arch_current_el(void)
{
	/*
	 * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
	 * SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
	 */
	return 3;
}

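/*
 * Mapping a region as execute-never is done through the XN bit in the upper
 * attributes of its block or page descriptor; the exception level argument
 * is irrelevant here since this library always runs at EL3.
 */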
uint64_t xlat_arch_get_xn_desc(int el __unused)
{
	return UPPER_ATTRS(XN);
}

void init_xlat_tables(void)
{
	unsigned long long max_pa;
	uintptr_t max_va;

	print_mmap();
	init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
						&max_va, &max_pa);

	assert(max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
	assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa());
}
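
/*
 * Sketch of how a platform port typically drives this library; the region
 * bases and sizes are illustrative placeholders, not definitions from this
 * file:
 *
 *	mmap_add_region(DEVICE_BASE, DEVICE_BASE, DEVICE_SIZE,
 *			MT_DEVICE | MT_RW | MT_SECURE);
 *	mmap_add_region(SRAM_BASE, SRAM_BASE, SRAM_SIZE,
 *			MT_MEMORY | MT_RW | MT_SECURE);
 *	init_xlat_tables();
 *	enable_mmu_secure(0);
 */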

/*******************************************************************************
 * Function for enabling the MMU in Secure PL1, assuming that the
 * translation tables have already been created.
 ******************************************************************************/
void enable_mmu_secure(unsigned int flags)
{
	unsigned int mair0, ttbcr, sctlr;
	uint64_t ttbr0;

	assert(IS_IN_SECURE());
	assert((read_sctlr() & SCTLR_M_BIT) == 0);

	/* Set attributes in the right indices of the MAIR */
	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
			ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
			ATTR_NON_CACHEABLE_INDEX);
	write_mair0(mair0);
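	/*
	 * Each MAIR0_ATTR_SET() above places an 8-bit attribute encoding in
	 * the byte selected by its index. Assuming the encodings from the
	 * xlat_tables headers (0xff inner/outer WBWA at index 0, 0x04 device
	 * at index 1, 0x44 non-cacheable at index 2), mair0 works out to
	 * 0x004404ff.
	 */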

	/* Invalidate TLBs at the current exception level */
	tlbiall();

	/*
	 * Configure TTBCR: enable the long-descriptor translation table
	 * format, set the cacheability/shareability attributes and T0SZ for
	 * TTBR0 walks, and disable walks through TTBR1.
	 */
	if (flags & XLAT_TABLE_NC) {
		/* Inner & outer non-cacheable non-shareable. */
		ttbcr = TTBCR_EAE_BIT |
			TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
			TTBCR_RGN0_INNER_NC |
			(32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));
	} else {
		/* Inner & outer WBWA & shareable. */
		ttbcr = TTBCR_EAE_BIT |
			TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
			TTBCR_RGN0_INNER_WBA |
			(32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE));
	}
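	/*
	 * The last term in each assignment above programs T0SZ:
	 * PLAT_VIRT_ADDR_SPACE_SIZE is a power of two, so 32 minus its log2
	 * (computed with the 64-bit count-trailing-zeros builtin, which also
	 * copes with a full 4 GB space without truncation) gives the number
	 * of unused top address bits, e.g. T0SZ = 1 for a 2 GB space.
	 */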
	ttbcr |= TTBCR_EPD1_BIT;
	write_ttbcr(ttbcr);

	/* Set TTBR0 bits as well */
	ttbr0 = (uintptr_t) base_xlation_table;
	write64_ttbr0(ttbr0);
	write64_ttbr1(0);
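	/*
	 * TTBR1 is never used for walks here, since TTBCR.EPD1 is set above;
	 * clearing it is defensive rather than required.
	 */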

	/*
	 * Ensure all translation table writes have drained into memory, the
	 * TLB invalidation is complete, and translation register writes are
	 * committed before enabling the MMU.
	 */
	dsbish();
	isb();

	sctlr = read_sctlr();
	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;
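	/*
	 * SCTLR.M enables the MMU; SCTLR.WXN additionally makes any region
	 * that is writable in this translation regime behave as
	 * execute-never, regardless of its XN attribute.
	 */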

	if (flags & DISABLE_DCACHE)
		sctlr &= ~SCTLR_C_BIT;
	else
		sctlr |= SCTLR_C_BIT;

	write_sctlr(sctlr);

	/* Ensure the MMU enable takes effect immediately */
	isb();
}