// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm.h>
#include <assert.h>
#include <bitstring.h>
#include <config.h>
#include <kernel/cache_helpers.h>
#include <kernel/spinlock.h>
#include <kernel/tee_l2cc_mutex.h>
#include <kernel/tlb_helpers.h>
#include <kernel/tz_ssvce_pl310.h>
#include <mm/core_memprot.h>
#include <mm/core_mmu.h>
#include <platform_config.h>
#include <trace.h>
#include <util.h>

/*
 * Two ASIDs per context, one for kernel mode and one for user mode. ASID 0
 * and 1 are reserved and not used. This means a maximum of 126 loaded user
 * mode contexts. This value can be increased, but not beyond the maximum
 * ASID, which is architecture dependent (at most 255 for ARMv7-A and
 * ARMv8-A AArch32). This constant defines the number of ASID pairs.
 */
#define MMU_NUM_ASID_PAIRS 64

static bitstr_t bit_decl(g_asid, MMU_NUM_ASID_PAIRS) __nex_bss;
static unsigned int g_asid_spinlock __nex_bss = SPINLOCK_UNLOCK;

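/*
 * Invalidate, for all ASIDs, the TLB entries covering the range
 * [va, va + len). Both va and len must be aligned to @granule, which is
 * either the small page size or the translation table directory size.
 * The barriers ensure the invalidation has completed before returning.
 */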
void tlbi_va_range(vaddr_t va, size_t len, size_t granule)
{
        assert(granule == CORE_MMU_PGDIR_SIZE || granule == SMALL_PAGE_SIZE);
        assert(!(va & (granule - 1)) && !(len & (granule - 1)));

        dsb_ishst();
        while (len) {
                tlbi_va_allasid_nosync(va);
                len -= granule;
                va += granule;
        }
        dsb_ish();
        isb();
}

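/*
 * Like tlbi_va_range(), but only invalidate TLB entries tagged with the
 * supplied ASID rather than entries for all ASIDs.
 */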
void tlbi_va_range_asid(vaddr_t va, size_t len, size_t granule, uint32_t asid)
{
        assert(granule == CORE_MMU_PGDIR_SIZE || granule == SMALL_PAGE_SIZE);
        assert(!(va & (granule - 1)) && !(len & (granule - 1)));

        dsb_ishst();
        while (len) {
                tlbi_va_asid_nosync(va, asid);
                len -= granule;
                va += granule;
        }
        dsb_ish();
        isb();
}

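/*
 * Perform an inner cache maintenance operation, either on the whole cache
 * or on the virtual address range [va, va + len), depending on the
 * requested operation.
 */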
TEE_Result cache_op_inner(enum cache_op op, void *va, size_t len)
{
        switch (op) {
        case DCACHE_CLEAN:
                dcache_op_all(DCACHE_OP_CLEAN);
                break;
        case DCACHE_AREA_CLEAN:
                dcache_clean_range(va, len);
                break;
        case DCACHE_INVALIDATE:
                dcache_op_all(DCACHE_OP_INV);
                break;
        case DCACHE_AREA_INVALIDATE:
                dcache_inv_range(va, len);
                break;
        case ICACHE_INVALIDATE:
                icache_inv_all();
                break;
        case ICACHE_AREA_INVALIDATE:
                icache_inv_range(va, len);
                break;
        case DCACHE_CLEAN_INV:
                dcache_op_all(DCACHE_OP_CLEAN_INV);
                break;
        case DCACHE_AREA_CLEAN_INV:
                dcache_cleaninv_range(va, len);
                break;
        default:
                return TEE_ERROR_NOT_IMPLEMENTED;
        }
        return TEE_SUCCESS;
}

#ifdef CFG_PL310
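/*
 * Perform an outer cache (PL310 L2 cache controller) maintenance operation,
 * either on the whole cache or on the physical address range
 * [pa, pa + len - 1]. Foreign interrupts are masked and the L2CC mutex is
 * held while programming the controller, which is assumed to serialize
 * PL310 maintenance with the other CPUs and with the normal world.
 */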
TEE_Result cache_op_outer(enum cache_op op, paddr_t pa, size_t len)
{
        TEE_Result ret = TEE_SUCCESS;
        uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
        vaddr_t pl310_base_pa_op = 0;

        /*
         * According to the ARM PL310 documentation, when an operation is
         * specific to a PA, the behavior is as follows:
         * - Secure access: the data in the cache is only affected by the
         *   operation if it is secure.
         * - Non-secure access: the data in the cache is only affected by the
         *   operation if it is non-secure.
         *
         * https://developer.arm.com/documentation/ddi0246/a/programmer-s-model/register-descriptions/register-7--cache-maintenance-operations
         *
         * Depending on the buffer location, use the secure or non-secure
         * PL310 base address to do the physical address based cache
         * operation on the buffer.
         */
        if (tee_pbuf_is_sec(pa, len))
                pl310_base_pa_op = pl310_base();
        else
                pl310_base_pa_op = pl310_nsbase();

        tee_l2cc_mutex_lock();
        switch (op) {
        case DCACHE_INVALIDATE:
                arm_cl2_invbyway(pl310_base());
                break;
        case DCACHE_AREA_INVALIDATE:
                if (len)
                        arm_cl2_invbypa(pl310_base_pa_op, pa, pa + len - 1);
                break;
        case DCACHE_CLEAN:
                arm_cl2_cleanbyway(pl310_base());
                break;
        case DCACHE_AREA_CLEAN:
                if (len)
                        arm_cl2_cleanbypa(pl310_base_pa_op, pa, pa + len - 1);
                break;
        case DCACHE_CLEAN_INV:
                arm_cl2_cleaninvbyway(pl310_base());
                break;
        case DCACHE_AREA_CLEAN_INV:
                if (len)
                        arm_cl2_cleaninvbypa(pl310_base_pa_op, pa,
                                             pa + len - 1);
                break;
        default:
                ret = TEE_ERROR_NOT_IMPLEMENTED;
        }

        tee_l2cc_mutex_unlock();
        thread_unmask_exceptions(exceptions);
        return ret;
}
#endif /*CFG_PL310*/

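/*
 * Allocate an ASID pair and return the even (kernel mode) ASID of the pair,
 * or 0 if all pairs are in use. ASIDs 0 and 1 are reserved, so the first
 * value that can be returned is 2.
 *
 * A minimal usage sketch, assuming the caller derives the user mode ASID of
 * the pair as "asid | 1" (the error path below is only illustrative):
 *
 *	unsigned int asid = asid_alloc();
 *
 *	if (!asid)
 *		return TEE_ERROR_OUT_OF_MEMORY;	// no free ASID pair
 *	// use asid for kernel mode and (asid | 1) for user mode
 *	...
 *	asid_free(asid);
 */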
unsigned int asid_alloc(void)
{
        uint32_t exceptions = cpu_spin_lock_xsave(&g_asid_spinlock);
        unsigned int r;
        int i;

        bit_ffc(g_asid, MMU_NUM_ASID_PAIRS, &i);
        if (i == -1) {
                r = 0;
        } else {
                bit_set(g_asid, i);
                r = (i + 1) * 2;
        }

        cpu_spin_unlock_xrestore(&g_asid_spinlock, exceptions);
        return r;
}

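/*
 * Release an ASID pair previously obtained from asid_alloc(). Passing 0
 * (the reserved value used to signal allocation failure) is a no-op.
 */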
void asid_free(unsigned int asid)
{
        uint32_t exceptions = cpu_spin_lock_xsave(&g_asid_spinlock);

        /* Only even ASIDs are supposed to be allocated */
        assert(!(asid & 1));

        if (asid) {
                int i = (asid - 1) / 2;

                assert(i < MMU_NUM_ASID_PAIRS && bit_test(g_asid, i));
                bit_clear(g_asid, i);
        }

        cpu_spin_unlock_xrestore(&g_asid_spinlock, exceptions);
}

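/*
 * Translate a virtual address into a physical address using the address
 * translation instructions (ATS1CPR on AArch32, AT S1E1R on AArch64) and
 * the resulting PAR register. Returns false if the translation aborted
 * (PAR.F set), otherwise true with the physical address stored in *pa.
 */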
bool arch_va2pa_helper(void *va, paddr_t *pa)
{
        uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
        paddr_t par = 0;
        paddr_t par_pa_mask = 0;
        bool ret = false;

#ifdef ARM32
        write_ats1cpr((vaddr_t)va);
        isb();
#ifdef CFG_WITH_LPAE
        par = read_par64();
        par_pa_mask = PAR64_PA_MASK;
#else
        par = read_par32();
        par_pa_mask = PAR32_PA_MASK;
#endif
#endif /*ARM32*/

#ifdef ARM64
        write_at_s1e1r((vaddr_t)va);
        isb();
        par = read_par_el1();
        par_pa_mask = PAR_PA_MASK;
#endif
        if (par & PAR_F)
                goto out;
        *pa = (par & (par_pa_mask << PAR_PA_SHIFT)) |
              ((vaddr_t)va & (BIT64(PAR_PA_SHIFT) - 1));

        ret = true;
out:
        thread_unmask_exceptions(exceptions);
        return ret;
}

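/*
 * Derive a randomized, page-aligned load address from @start_addr and the
 * ASLR @seed. For each retry (@iteration_count > 0) one of the upper VA
 * bits is flipped to produce a different candidate, and the result is
 * masked so it fits within the translatable VA width.
 */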
vaddr_t arch_aslr_base_addr(vaddr_t start_addr, uint64_t seed,
                            unsigned int iteration_count)
{
        vaddr_t base_addr = start_addr + seed;
        const unsigned int va_width = core_mmu_get_va_width();
        const vaddr_t va_mask = GENMASK_64(va_width - 1, SMALL_PAGE_SHIFT);

        if (iteration_count)
                base_addr ^= BIT64(va_width - iteration_count);

        return base_addr & va_mask;
}

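/* Return true if the MMU is enabled on this CPU (SCTLR/SCTLR_EL1 M bit). */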
bool cpu_mmu_enabled(void)
{
        uint32_t sctlr;

#ifdef ARM32
        sctlr = read_sctlr();
#else
        sctlr = read_sctlr_el1();
#endif

        return sctlr & SCTLR_M ? true : false;
}