xref: /rk3399_ARM-atf/lib/xlat_tables/aarch32/xlat_tables.c (revision 091f39675a98ee9e22ed78f52e239880bedf8911)
1 /*
2  * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <arch.h>
8 #include <arch_helpers.h>
9 #include <assert.h>
10 #include <platform_def.h>
11 #include <utils.h>
12 #include <xlat_tables_arch.h>
13 #include <xlat_tables.h>
14 #include "../xlat_tables_private.h"
15 
/*
 * This library uses the long-descriptor (LPAE) translation table format,
 * which plain ARMv7 cores without the Large Page Addressing extension
 * cannot use. Fail the build early in that case.
 */
#if (ARM_ARCH_MAJOR == 7) && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
#error ARMv7 target does not support LPAE MMU descriptors
#endif

/* Initial lookup level implied by the platform's virtual address space size. */
#define XLAT_TABLE_LEVEL_BASE	\
       GET_XLAT_TABLE_LEVEL_BASE(PLAT_VIRT_ADDR_SPACE_SIZE)

/* Number of entries needed in the base-level translation table. */
#define NUM_BASE_LEVEL_ENTRIES	\
       GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)

/*
 * Statically allocated base-level translation table, aligned to its own
 * total size (NUM_BASE_LEVEL_ENTRIES 64-bit descriptors) so its address
 * can be programmed directly into TTBR0 below.
 */
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));
28 
#if ENABLE_ASSERTIONS
/*
 * Return the highest physical address representable with the long
 * descriptor translation table format on AArch32 (40-bit PA space).
 * Only needed by the sanity assertions in init_xlat_tables().
 */
static unsigned long long get_max_supported_pa(void)
{
	const unsigned int pa_width_bits = 40U;

	return (1ULL << pa_width_bits) - 1ULL;
}
#endif /* ENABLE_ASSERTIONS */
36 
/*
 * Return the exception level this library runs at. When EL3 is in
 * AArch32 mode, all secure PL1 modes (Monitor, System, SVC, Abort,
 * UND, IRQ and FIQ) execute at EL3, so the answer is always 3.
 */
unsigned int xlat_arch_current_el(void)
{
	const unsigned int current_el = 3U;

	return current_el;
}
45 
46 uint64_t xlat_arch_get_xn_desc(unsigned int el __unused)
47 {
48 	return UPPER_ATTRS(XN);
49 }
50 
51 void init_xlat_tables(void)
52 {
53 	unsigned long long max_pa;
54 	uintptr_t max_va;
55 	print_mmap();
56 	init_xlation_table(0U, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
57 						&max_va, &max_pa);
58 
59 	assert(max_va <= (PLAT_VIRT_ADDR_SPACE_SIZE - 1U));
60 	assert(max_pa <= (PLAT_PHY_ADDR_SPACE_SIZE - 1U));
61 	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1U) <= get_max_supported_pa());
62 }
63 
/*******************************************************************************
 * Function for enabling the MMU in Secure PL1, assuming that the
 * page-tables have already been created.
 ******************************************************************************/
#if !ERROR_DEPRECATED
/*
 * Deprecated name kept for backward compatibility; simply forwards to
 * enable_mmu_svc_mon(). Compiled out when ERROR_DEPRECATED is set.
 */
void enable_mmu_secure(unsigned int flags)
{
	enable_mmu_svc_mon(flags);
}

/* Deprecated alias of enable_mmu_direct_svc_mon(); same compatibility shim. */
void enable_mmu_direct(unsigned int flags)
{
	enable_mmu_direct_svc_mon(flags);
}
#endif
79 
/*
 * Enable the MMU for the Secure PL1&0 translation regime using the
 * long-descriptor format and the statically allocated base_xlation_table.
 * The tables must already have been populated via init_xlat_tables().
 *
 * 'flags' bits honoured here:
 *  - XLAT_TABLE_NC:   program TTBR0 walk attributes as non-cacheable,
 *                     non-shareable instead of WBWA inner-shareable.
 *  - DISABLE_DCACHE:  leave the data cache disabled when the MMU comes up.
 *
 * NOTE(review): the statement order below (MAIR -> TLBI -> TTBCR/TTBR ->
 * barriers -> SCTLR) is deliberate; do not reorder.
 */
void enable_mmu_svc_mon(unsigned int flags)
{
	unsigned int mair0, ttbcr, sctlr;
	uint64_t ttbr0;

	/* Must be called from a secure state, with the MMU still disabled. */
	assert(IS_IN_SECURE());
	assert((read_sctlr() & SCTLR_M_BIT) == 0U);

	/* Set attributes in the right indices of the MAIR */
	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
			ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
			ATTR_NON_CACHEABLE_INDEX);
	write_mair0(mair0);

	/* Invalidate TLBs at the current exception level */
	tlbiall();

	/*
	 * Set TTBCR bits as well. Set TTBR0 table properties. Disable TTBR1.
	 */
	/* TTBCR.T0SZ encodes the VA width as 32 - log2(VA space size). */
	int t0sz = 32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE);

	if ((flags & XLAT_TABLE_NC) != 0U) {
		/* Inner & outer non-cacheable non-shareable. */
		ttbcr = TTBCR_EAE_BIT |
			TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
			TTBCR_RGN0_INNER_NC | (uint32_t) t0sz;
	} else {
		/* Inner & outer WBWA & shareable. */
		ttbcr = TTBCR_EAE_BIT |
			TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
			TTBCR_RGN0_INNER_WBA | (uint32_t) t0sz;
	}
	/* Disable translation table walks using TTBR1. */
	ttbcr |= TTBCR_EPD1_BIT;
	write_ttbcr(ttbcr);

	/* Set TTBR0 bits as well */
	ttbr0 = (uintptr_t) base_xlation_table;
	write64_ttbr0(ttbr0);
	write64_ttbr1(0U);

	/*
	 * Ensure all translation table writes have drained
	 * into memory, the TLB invalidation is complete,
	 * and translation register writes are committed
	 * before enabling the MMU
	 */
	dsbish();
	isb();

	sctlr = read_sctlr();
	/* Enable the MMU and forbid writable-executable mappings (WXN). */
	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;

	if ((flags & DISABLE_DCACHE) != 0U)
		sctlr &= ~SCTLR_C_BIT;
	else
		sctlr |= SCTLR_C_BIT;

	write_sctlr(sctlr);

	/* Ensure the MMU enable takes effect immediately */
	isb();
}
145 
/*
 * "Direct" MMU-enable entry point. This library keeps its translation
 * table in a static buffer, so there is no extra context to pass and
 * this simply forwards to enable_mmu_svc_mon().
 */
void enable_mmu_direct_svc_mon(unsigned int flags)
{
	enable_mmu_svc_mon(flags);
}
150