xref: /rk3399_ARM-atf/lib/xlat_tables/aarch32/xlat_tables.c (revision 51faada71a219a8b94cd8d8e423f0f22e9da4d8f)
1 /*
2  * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * Redistributions of source code must retain the above copyright notice, this
8  * list of conditions and the following disclaimer.
9  *
10  * Redistributions in binary form must reproduce the above copyright notice,
11  * this list of conditions and the following disclaimer in the documentation
12  * and/or other materials provided with the distribution.
13  *
14  * Neither the name of ARM nor the names of its contributors may be used
15  * to endorse or promote products derived from this software without specific
16  * prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28  * POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <arch.h>
32 #include <arch_helpers.h>
33 #include <assert.h>
34 #include <cassert.h>
35 #include <platform_def.h>
36 #include <utils.h>
37 #include <xlat_tables.h>
38 #include "../xlat_tables_private.h"
39 
40 /*
41  * Each platform can define the size of the virtual address space, which is
42  * defined in PLAT_VIRT_ADDR_SPACE_SIZE. TTBCR.TxSZ is calculated as 32 minus
43  * the width of said address space. The value of TTBCR.TxSZ must be in the
44  * range 0 to 7 [1], which means that the virtual address space width must be
45  * in the range 32 to 25 bits.
46  *
47  * Here we calculate the initial lookup level from the value of
48  * PLAT_VIRT_ADDR_SPACE_SIZE. For a 4 KB page size, level 1 supports virtual
49  * address spaces of widths 32 to 31 bits, and level 2 from 30 to 25. Wider or
50  * narrower address spaces are not supported. As a result, level 3 cannot be
51  * used as initial lookup level with 4 KB granularity [1].
52  *
53  * For example, for a 31-bit address space (i.e. PLAT_VIRT_ADDR_SPACE_SIZE ==
54  * 1 << 31), TTBCR.TxSZ will be programmed to (32 - 31) = 1. According to Table
55  * G4-5 in the ARM ARM, the initial lookup level for an address space like that
56  * is 1.
57  *
58  * See the ARMv8-A Architecture Reference Manual (DDI 0487A.j) for more
59  * information:
60  * [1] Section G4.6.5
61  */
62 
#if PLAT_VIRT_ADDR_SPACE_SIZE > (1ULL << (32 - TTBCR_TxSZ_MIN))

# error "PLAT_VIRT_ADDR_SPACE_SIZE is too big."

#elif PLAT_VIRT_ADDR_SPACE_SIZE > (1 << L1_XLAT_ADDRESS_SHIFT)

/* VA space wider than 31 bits: the initial lookup level is 1. */
# define XLAT_TABLE_LEVEL_BASE	1
# define NUM_BASE_LEVEL_ENTRIES	\
		(PLAT_VIRT_ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT)

#elif PLAT_VIRT_ADDR_SPACE_SIZE >= (1 << (32 - TTBCR_TxSZ_MAX))

/* VA space of 25 to 30 bits: the initial lookup level is 2. */
# define XLAT_TABLE_LEVEL_BASE	2
# define NUM_BASE_LEVEL_ENTRIES	\
		(PLAT_VIRT_ADDR_SPACE_SIZE >> L2_XLAT_ADDRESS_SHIFT)

#else

# error "PLAT_VIRT_ADDR_SPACE_SIZE is too small."

#endif
84 
/*
 * Base-level translation table, later programmed into TTBR0 by
 * enable_mmu_secure(). It is aligned to its own size in bytes, which
 * satisfies the alignment the architecture requires for the base table
 * address (the table base must be aligned to the size of the table).
 */
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));
87 
88 #if DEBUG
/*
 * Return the highest physical address supported by the translation
 * regime, i.e. the all-ones address of the 40-bit PA space used by the
 * long-descriptor translation table format.
 */
static unsigned long long get_max_supported_pa(void)
{
	const unsigned int pa_width = 40U;

	return (1ULL << pa_width) - 1ULL;
}
94 #endif
95 
96 void init_xlat_tables(void)
97 {
98 	unsigned long long max_pa;
99 	uintptr_t max_va;
100 	print_mmap();
101 	init_xlation_table(0, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
102 						&max_va, &max_pa);
103 
104 	assert(max_va <= PLAT_VIRT_ADDR_SPACE_SIZE - 1);
105 	assert(max_pa <= PLAT_PHY_ADDR_SPACE_SIZE - 1);
106 	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1) <= get_max_supported_pa());
107 }
108 
109 /*******************************************************************************
110  * Function for enabling the MMU in Secure PL1, assuming that the
111  * page-tables have already been created.
112  ******************************************************************************/
113 void enable_mmu_secure(unsigned int flags)
114 {
115 	unsigned int mair0, ttbcr, sctlr;
116 	uint64_t ttbr0;
117 
118 	assert(IS_IN_SECURE());
119 	assert((read_sctlr() & SCTLR_M_BIT) == 0);
120 
121 	/* Set attributes in the right indices of the MAIR */
122 	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
123 	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
124 			ATTR_IWBWA_OWBWA_NTR_INDEX);
125 	mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
126 			ATTR_NON_CACHEABLE_INDEX);
127 	write_mair0(mair0);
128 
129 	/* Invalidate TLBs at the current exception level */
130 	tlbiall();
131 
132 	/*
133 	 * Set TTBCR bits as well. Set TTBR0 table properties. Disable TTBR1.
134 	 */
135 	if (flags & XLAT_TABLE_NC) {
136 		/* Inner & outer non-cacheable non-shareable. */
137 		ttbcr = TTBCR_EAE_BIT |
138 			TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
139 			TTBCR_RGN0_INNER_NC |
140 			(32 - __builtin_ctzl((uintptr_t)PLAT_VIRT_ADDR_SPACE_SIZE));
141 	} else {
142 		/* Inner & outer WBWA & shareable. */
143 		ttbcr = TTBCR_EAE_BIT |
144 			TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
145 			TTBCR_RGN0_INNER_WBA |
146 			(32 - __builtin_ctzl((uintptr_t)PLAT_VIRT_ADDR_SPACE_SIZE));
147 	}
148 	ttbcr |= TTBCR_EPD1_BIT;
149 	write_ttbcr(ttbcr);
150 
151 	/* Set TTBR0 bits as well */
152 	ttbr0 = (uintptr_t) base_xlation_table;
153 	write64_ttbr0(ttbr0);
154 	write64_ttbr1(0);
155 
156 	/*
157 	 * Ensure all translation table writes have drained
158 	 * into memory, the TLB invalidation is complete,
159 	 * and translation register writes are committed
160 	 * before enabling the MMU
161 	 */
162 	dsb();
163 	isb();
164 
165 	sctlr = read_sctlr();
166 	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;
167 
168 	if (flags & DISABLE_DCACHE)
169 		sctlr &= ~SCTLR_C_BIT;
170 	else
171 		sctlr |= SCTLR_C_BIT;
172 
173 	write_sctlr(sctlr);
174 
175 	/* Ensure the MMU enable takes effect immediately */
176 	isb();
177 }
178