xref: /rk3399_ARM-atf/lib/xlat_tables/aarch64/xlat_tables.c (revision c3cf06f1a3a9b9ee8ac7a0ae505f95c45f7dca84)
1 /*
2  * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <arch.h>
8 #include <arch_helpers.h>
9 #include <assert.h>
10 #include <bl_common.h>
11 #include <common_def.h>
12 #include <platform_def.h>
13 #include <stdint.h>
14 #include <utils.h>
15 #include <xlat_tables.h>
16 #include <xlat_tables_arch.h>
17 #include "../xlat_tables_private.h"
18 
19 #define XLAT_TABLE_LEVEL_BASE	\
20        GET_XLAT_TABLE_LEVEL_BASE(PLAT_VIRT_ADDR_SPACE_SIZE)
21 
22 #define NUM_BASE_LEVEL_ENTRIES	\
23        GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)
24 
/*
 * Base-level translation table. A base table must be aligned to its own
 * size so it can be programmed into TTBR0 directly.
 */
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));

/* TCR.PS/IPS field value computed by init_xlat_tables(), consumed by the
 * enable_mmu_elX() functions generated below. */
static unsigned long long tcr_ps_bits;
29 
30 static unsigned long long calc_physical_addr_size_bits(
31 					unsigned long long max_addr)
32 {
33 	/* Physical address can't exceed 48 bits */
34 	assert((max_addr & ADDR_MASK_48_TO_63) == 0U);
35 
36 	/* 48 bits address */
37 	if ((max_addr & ADDR_MASK_44_TO_47) != 0U)
38 		return TCR_PS_BITS_256TB;
39 
40 	/* 44 bits address */
41 	if ((max_addr & ADDR_MASK_42_TO_43) != 0U)
42 		return TCR_PS_BITS_16TB;
43 
44 	/* 42 bits address */
45 	if ((max_addr & ADDR_MASK_40_TO_41) != 0U)
46 		return TCR_PS_BITS_4TB;
47 
48 	/* 40 bits address */
49 	if ((max_addr & ADDR_MASK_36_TO_39) != 0U)
50 		return TCR_PS_BITS_1TB;
51 
52 	/* 36 bits address */
53 	if ((max_addr & ADDR_MASK_32_TO_35) != 0U)
54 		return TCR_PS_BITS_64GB;
55 
56 	return TCR_PS_BITS_4GB;
57 }
58 
59 #if ENABLE_ASSERTIONS
60 /*
61  * Physical Address ranges supported in the AArch64 Memory Model. Value 0b110 is
62  * supported in ARMv8.2 onwards.
63  */
/* Indexed by the raw ID_AA64MMFR0_EL1.PARange field value; each entry is
 * the corresponding physical address width in bits. */
static const unsigned int pa_range_bits_arr[] = {
	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
	PARANGE_0101, PARANGE_0110
};
68 
69 static unsigned long long get_max_supported_pa(void)
70 {
71 	u_register_t pa_range = read_id_aa64mmfr0_el1() &
72 						ID_AA64MMFR0_EL1_PARANGE_MASK;
73 
74 	/* All other values are reserved */
75 	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));
76 
77 	return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
78 }
79 #endif /* ENABLE_ASSERTIONS */
80 
/*
 * Return the exception level this code is running at, read from
 * CurrentEL. EL0 is not a valid caller of this library.
 */
unsigned int xlat_arch_current_el(void)
{
	unsigned int current_el = (unsigned int)GET_EL(read_CurrentEl());

	/* The translation tables are only managed from EL1 or above. */
	assert(current_el > 0U);

	return current_el;
}
89 
90 uint64_t xlat_arch_get_xn_desc(unsigned int el)
91 {
92 	if (el == 3U) {
93 		return UPPER_ATTRS(XN);
94 	} else {
95 		assert(el == 1U);
96 		return UPPER_ATTRS(PXN);
97 	}
98 }
99 
100 void init_xlat_tables(void)
101 {
102 	unsigned long long max_pa;
103 	uintptr_t max_va;
104 	print_mmap();
105 	init_xlation_table(0U, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
106 			   &max_va, &max_pa);
107 
108 	assert(max_va <= (PLAT_VIRT_ADDR_SPACE_SIZE - 1U));
109 	assert(max_pa <= (PLAT_PHY_ADDR_SPACE_SIZE - 1U));
110 	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1U) <= get_max_supported_pa());
111 
112 	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
113 }
114 
115 /*******************************************************************************
116  * Macro generating the code for the function enabling the MMU in the given
117  * exception level, assuming that the pagetables have already been created.
118  *
119  *   _el:		Exception level at which the function will run
120  *   _tcr_extra:	Extra bits to set in the TCR register. This mask will
121  *			be OR'ed with the default TCR value.
122  *   _tlbi_fct:		Function to invalidate the TLBs at the current
123  *			exception level
124  ******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
	void enable_mmu_el##_el(unsigned int flags)			\
	{								\
		uint64_t mair, tcr, ttbr;				\
		uint32_t sctlr;						\
									\
		assert(IS_IN_EL(_el));					\
		/* The MMU must still be disabled on entry. */		\
		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0U);	\
									\
		/* Set attributes in the right indices of the MAIR */	\
		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
		mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,		\
				ATTR_NON_CACHEABLE_INDEX);		\
		write_mair_el##_el(mair);				\
									\
		/* Invalidate TLBs at the current exception level */	\
		_tlbi_fct();						\
									\
		/* Set TCR bits as well. */				\
		/* Set T0SZ to (64 - width of virtual address space) */	\
		int t0sz = 64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE);\
									\
		if ((flags & XLAT_TABLE_NC) != 0U) {			\
			/* Inner & outer non-cacheable non-shareable. */\
			tcr = TCR_SH_NON_SHAREABLE |			\
				TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC |	\
				(uint64_t) t0sz;			\
		} else {						\
			/* Inner & outer WBWA & shareable. */		\
			tcr = TCR_SH_INNER_SHAREABLE |			\
				TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA |	\
				(uint64_t) t0sz;			\
		}							\
		/* _tcr_extra carries per-EL bits, incl. tcr_ps_bits. */\
		tcr |= _tcr_extra;					\
		write_tcr_el##_el(tcr);					\
									\
		/* Set TTBR bits as well */				\
		ttbr = (uint64_t) base_xlation_table;			\
		write_ttbr0_el##_el(ttbr);				\
									\
		/* Ensure all translation table writes have drained */	\
		/* into memory, the TLB invalidation is complete, */	\
		/* and translation register writes are committed */	\
		/* before enabling the MMU */				\
		dsbish();						\
		isb();							\
									\
		/* Enable the MMU (M) and write-implies-XN (WXN). */	\
		sctlr = read_sctlr_el##_el();				\
		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
									\
		if ((flags & DISABLE_DCACHE) != 0U)			\
			sctlr &= ~SCTLR_C_BIT;				\
		else							\
			sctlr |= SCTLR_C_BIT;				\
									\
		write_sctlr_el##_el(sctlr);				\
									\
		/* Ensure the MMU enable takes effect immediately */	\
		isb();							\
	}								\
									\
	/* Thin wrapper: same behaviour as enable_mmu_el##_el(). */	\
	void enable_mmu_direct_el##_el(unsigned int flags)		\
	{								\
		enable_mmu_el##_el(flags);				\
	}
192 
/* Define EL1 and EL3 variants of the function enabling the MMU */
DEFINE_ENABLE_MMU_EL(1,
		/*
		 * TCR_EL1.EPD1: Disable translation table walk for addresses
		 * that are translated using TTBR1_EL1.
		 */
		TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT),
		tlbivmalle1)
DEFINE_ENABLE_MMU_EL(3,
		/* TCR_EL3 reserved-1 bits, plus the PA size in the PS field
		 * (tcr_ps_bits is set up by init_xlat_tables()). */
		TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
		tlbialle3)
204