xref: /rk3399_ARM-atf/lib/xlat_tables_v2/xlat_tables_context.c (revision aa1d5f60474ae0508b2953c72148c176c08d9cfe)
1 /*
2  * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <debug.h>
9 #include <platform_def.h>
10 #include <xlat_tables_defs.h>
11 #include <xlat_tables_v2.h>
12 
13 #include "xlat_tables_private.h"
14 
/*
 * Each platform can define the size of its physical and virtual address spaces.
 * If the platform hasn't defined one or both of them, default to
 * ADDR_SPACE_SIZE. The latter is deprecated, though.
 */
#if ERROR_DEPRECATED
/* Strict builds refuse the legacy macro outright. */
# ifdef ADDR_SPACE_SIZE
#  error "ADDR_SPACE_SIZE is deprecated. Use PLAT_xxx_ADDR_SPACE_SIZE instead."
# endif
#elif defined(ADDR_SPACE_SIZE)
/*
 * Compatibility path: reuse the single legacy value for whichever of the
 * two per-space macros the platform has not provided.
 */
# ifndef PLAT_PHY_ADDR_SPACE_SIZE
#  define PLAT_PHY_ADDR_SPACE_SIZE	ADDR_SPACE_SIZE
# endif
# ifndef PLAT_VIRT_ADDR_SPACE_SIZE
#  define PLAT_VIRT_ADDR_SPACE_SIZE	ADDR_SPACE_SIZE
# endif
#endif
32 
/*
 * Allocate and initialise the default translation context for the BL image
 * currently executing.
 *
 * The "tf" prefix makes the macro instantiate the context as "tf_xlat_ctx",
 * which all the wrapper functions below operate on.
 */
REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
		PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
39 
40 void mmap_add_region(unsigned long long base_pa, uintptr_t base_va, size_t size,
41 		     unsigned int attr)
42 {
43 	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
44 
45 	mmap_add_region_ctx(&tf_xlat_ctx, &mm);
46 }
47 
48 void mmap_add(const mmap_region_t *mm)
49 {
50 	mmap_add_ctx(&tf_xlat_ctx, mm);
51 }
52 
53 #if PLAT_XLAT_TABLES_DYNAMIC
54 
55 int mmap_add_dynamic_region(unsigned long long base_pa, uintptr_t base_va,
56 			    size_t size, unsigned int attr)
57 {
58 	mmap_region_t mm = MAP_REGION(base_pa, base_va, size, attr);
59 
60 	return mmap_add_dynamic_region_ctx(&tf_xlat_ctx, &mm);
61 }
62 
63 int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
64 {
65 	return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx,
66 					base_va, size);
67 }
68 
69 #endif /* PLAT_XLAT_TABLES_DYNAMIC */
70 
71 void init_xlat_tables(void)
72 {
73 	assert(tf_xlat_ctx.xlat_regime == EL_REGIME_INVALID);
74 
75 	int current_el = xlat_arch_current_el();
76 
77 	if (current_el == 1) {
78 		tf_xlat_ctx.xlat_regime = EL1_EL0_REGIME;
79 	} else {
80 		assert(current_el == 3);
81 		tf_xlat_ctx.xlat_regime = EL3_REGIME;
82 	}
83 
84 	init_xlat_tables_ctx(&tf_xlat_ctx);
85 }
86 
/*
 * If dynamic allocation of new regions is disabled then by the time we call the
 * function enabling the MMU, we'll have registered all the memory regions to
 * map for the system's lifetime. Therefore, at this point we know the maximum
 * physical address that will ever be mapped.
 *
 * If dynamic allocation is enabled then we can't make any such assumption
 * because the maximum physical address could get pushed while adding a new
 * region. Therefore, in this case we have to assume that the whole address
 * space size might be mapped.
 *
 * Use a value test (#if) rather than a definedness test (#ifdef) so this
 * conditional agrees with the "#if PLAT_XLAT_TABLES_DYNAMIC" guard around the
 * dynamic-mapping API above: a platform that defines the macro as 0 gets the
 * static behaviour in both places.
 */
#if PLAT_XLAT_TABLES_DYNAMIC
#define MAX_PHYS_ADDR	tf_xlat_ctx.pa_max_address
#else
#define MAX_PHYS_ADDR	tf_xlat_ctx.max_pa
#endif
103 
104 #ifdef AARCH32
105 
106 void enable_mmu_secure(unsigned int flags)
107 {
108 	setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
109 		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
110 	enable_mmu_direct(flags);
111 }
112 
113 #else
114 
115 void enable_mmu_el1(unsigned int flags)
116 {
117 	setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
118 		      tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
119 	enable_mmu_direct_el1(flags);
120 }
121 
122 void enable_mmu_el3(unsigned int flags)
123 {
124 	setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
125 		      tf_xlat_ctx.va_max_address, EL3_REGIME);
126 	enable_mmu_direct_el3(flags);
127 }
128 
129 #endif /* AARCH32 */
130