xref: /optee_os/core/mm/phys_mem.c (revision 55a4d839310ce46aca79a12015ab8e1da9f110e5)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2024, Linaro Limited
 */

#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <mm/core_mmu.h>
#include <mm/phys_mem.h>
#include <mm/tee_mm.h>
#include <string.h>
#include <types_ext.h>

static tee_mm_pool_t *nex_core_pool __nex_bss;
static tee_mm_pool_t *nex_ta_pool __nex_bss;

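/*
 * Create a tee_mm pool covering the physical range [b, b + sz). Returns
 * NULL when both @b and @sz are zero (the pool is simply not configured)
 * and panics on a half-specified or misaligned range.
 */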
static tee_mm_pool_t *init_pool(paddr_t b, paddr_size_t sz, uint32_t flags)
{
	tee_mm_pool_t *pool = NULL;

	if (!b && !sz)
		return NULL;

	if (!b || (b & CORE_MMU_USER_CODE_MASK) ||
	    !sz || (sz & CORE_MMU_USER_CODE_MASK))
		panic("invalid phys mem");

	if (flags & TEE_MM_POOL_NEX_MALLOC)
		pool = nex_malloc(sizeof(*pool));
	else
		pool = malloc(sizeof(*pool));
	if (!pool)
		panic();

	tee_mm_init(pool, b, sz, CORE_MMU_USER_CODE_SHIFT, flags);
	return pool;
}

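/*
 * Initialize the nexus core and TA physical memory pools. The assert
 * ensures this is done only once; the pools are allocated with
 * nex_malloc() so that, with CFG_NS_VIRTUALIZATION, they are shared by
 * all partitions.
 */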
void nex_phys_mem_init(paddr_t core_base, paddr_size_t core_size,
		       paddr_t ta_base, paddr_size_t ta_size)
{
	uint32_t flags = TEE_MM_POOL_NEX_MALLOC;

	assert(!nex_core_pool && !nex_ta_pool);

	nex_core_pool = init_pool(core_base, core_size, flags);
	nex_ta_pool = init_pool(ta_base, ta_size, flags);
}

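/*
 * Report the TA memory area: the dedicated TA pool when one was
 * configured, otherwise the core pool with TEE_RAM_VA_SIZE subtracted
 * from its size.
 */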
paddr_size_t nex_phys_mem_get_ta_size(void)
{
	if (nex_ta_pool)
		return nex_ta_pool->size;
	assert(nex_core_pool);
	return nex_core_pool->size - TEE_RAM_VA_SIZE;
}

paddr_t nex_phys_mem_get_ta_base(void)
{
	if (nex_ta_pool)
		return nex_ta_pool->lo;
	assert(nex_core_pool);
	return nex_core_pool->lo;
}

static bool is_in_pool_range(tee_mm_pool_t *pool, paddr_t addr)
{
	return pool && core_is_buffer_inside(addr, 1, pool->lo, pool->size);
}

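/*
 * Find the tee_mm entry covering @addr in whichever of the two pools
 * contains the address, or NULL if neither does.
 */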
static tee_mm_entry_t *mm_find(tee_mm_pool_t *p0, tee_mm_pool_t *p1,
			       paddr_t addr)
{
	if (is_in_pool_range(p0, addr))
		return tee_mm_find(p0, addr);
	if (is_in_pool_range(p1, addr))
		return tee_mm_find(p1, addr);
	return NULL;
}

tee_mm_entry_t *nex_phys_mem_mm_find(paddr_t addr)
{
	return mm_find(nex_core_pool, nex_ta_pool, addr);
}

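/*
 * Allocate @size bytes from pool @p0 if possible, falling back to pool
 * @p1 when @p0 is missing or exhausted.
 */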
static tee_mm_entry_t *mm_alloc(tee_mm_pool_t *p0, tee_mm_pool_t *p1,
				size_t size)
{
	tee_mm_entry_t *mm = NULL;

	if (p0)
		mm = tee_mm_alloc(p0, size);
	if (!mm && p1)
		mm = tee_mm_alloc(p1, size);

	return mm;
}

tee_mm_entry_t *nex_phys_mem_core_alloc(size_t size)
{
	return mm_alloc(nex_core_pool, NULL, size);
}

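/*
 * Allocate TA physical memory, preferring the dedicated TA pool and
 * falling back to the core pool. Illustrative use (not taken from this
 * file, size chosen arbitrarily):
 *
 *	tee_mm_entry_t *mm = nex_phys_mem_ta_alloc(SMALL_PAGE_SIZE);
 *
 *	if (mm)
 *		pa = tee_mm_get_smem(mm);	// physical base of the range
 *	...
 *	tee_mm_free(mm);
 */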
tee_mm_entry_t *nex_phys_mem_ta_alloc(size_t size)
{
	return mm_alloc(nex_ta_pool, nex_core_pool, size);
}

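/*
 * Allocate the fixed physical range [base, base + size) from whichever
 * pool contains @base.
 */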
static tee_mm_entry_t *mm_alloc2(tee_mm_pool_t *p0, tee_mm_pool_t *p1,
				 paddr_t base, size_t size)
{
	if (is_in_pool_range(p0, base))
		return tee_mm_alloc2(p0, base, size);
	if (is_in_pool_range(p1, base))
		return tee_mm_alloc2(p1, base, size);
	return NULL;
}

tee_mm_entry_t *nex_phys_mem_alloc2(paddr_t base, size_t size)
{
	return mm_alloc2(nex_core_pool, nex_ta_pool, base, size);
}

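/*
 * Reserve the part of [base, base + size) that overlaps @pool so the
 * allocator can never hand it out. The range is clamped to the pool
 * boundaries before the allocation is made.
 */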
static void partial_carve_out(tee_mm_pool_t *pool, paddr_t base, size_t size)
{
	if (pool &&
	    core_is_buffer_intersect(base, size, pool->lo, pool->size)) {
		tee_mm_entry_t *mm __maybe_unused = NULL;
		paddr_t end_pa = 0;
		paddr_t pa = 0;
		size_t sz = 0;

		pa = MAX(base, pool->lo);
		end_pa = MIN(base + size - 1, pool->lo + pool->size - 1);
		sz = end_pa - pa + 1;

		mm = tee_mm_alloc2(pool, pa, sz);
		assert(mm);
	}
}

void nex_phys_mem_partial_carve_out(paddr_t base, size_t size)
{
	partial_carve_out(nex_core_pool, base, size);
	partial_carve_out(nex_ta_pool, base, size);
}

#ifdef CFG_WITH_STATS
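/*
 * Accumulate the allocation statistics of @pool into @stats, optionally
 * resetting the pool's counters.
 */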
static void add_pool_stats(tee_mm_pool_t *pool, struct pta_stats_alloc *stats,
			   bool reset)
{
	if (pool) {
		struct pta_stats_alloc s = { };

		tee_mm_get_pool_stats(pool, &s, reset);
		stats->size += s.size;
		if (s.max_allocated > stats->max_allocated)
			stats->max_allocated = s.max_allocated;
		stats->allocated += s.allocated;
	}
}

void nex_phys_mem_stats(struct pta_stats_alloc *stats, bool reset)
{
	memset(stats, 0, sizeof(*stats));

	add_pool_stats(nex_core_pool, stats, reset);
	add_pool_stats(nex_ta_pool, stats, reset);
}
#endif /*CFG_WITH_STATS*/

#if defined(CFG_NS_VIRTUALIZATION)

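/*
 * With CFG_NS_VIRTUALIZATION these pools live in per-partition .bss
 * (no __nex_bss), so each guest gets its own instances. The functions
 * below mirror their nex_ counterparts but operate on these pools.
 */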
static tee_mm_pool_t *core_pool;
static tee_mm_pool_t *ta_pool;

void phys_mem_init(paddr_t core_base, paddr_size_t core_size,
		   paddr_t ta_base, paddr_size_t ta_size)
{
	uint32_t flags = TEE_MM_POOL_NO_FLAGS;

	assert(!core_pool && !ta_pool);

	core_pool = init_pool(core_base, core_size, flags);
	ta_pool = init_pool(ta_base, ta_size, flags);
}

tee_mm_entry_t *phys_mem_mm_find(paddr_t addr)
{
	return mm_find(core_pool, ta_pool, addr);
}

tee_mm_entry_t *phys_mem_core_alloc(size_t size)
{
	return mm_alloc(core_pool, NULL, size);
}

tee_mm_entry_t *phys_mem_ta_alloc(size_t size)
{
	return mm_alloc(ta_pool, core_pool, size);
}

tee_mm_entry_t *phys_mem_alloc2(paddr_t base, size_t size)
{
	return mm_alloc2(core_pool, ta_pool, base, size);
}

#ifdef CFG_WITH_STATS
void phys_mem_stats(struct pta_stats_alloc *stats, bool reset)
{
	memset(stats, 0, sizeof(*stats));

	add_pool_stats(core_pool, stats, reset);
	add_pool_stats(ta_pool, stats, reset);
}
#endif /*CFG_WITH_STATS*/
#endif /*CFG_NS_VIRTUALIZATION*/