// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2024, Linaro Limited
 */

#include <assert.h>
#include <compiler.h>
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <mm/core_mmu.h>
#include <mm/phys_mem.h>
#include <mm/tee_mm.h>
#include <string.h>
#include <types_ext.h>
#include <util.h>

/*
 * Nexus-wide memory pools. With CFG_NS_VIRTUALIZATION enabled these live
 * in the nexus .bss and are shared by all partitions; without it,
 * __nex_bss expands to nothing and they are ordinary globals.
 */
static tee_mm_pool_t *nex_core_pool __nex_bss;
static tee_mm_pool_t *nex_ta_pool __nex_bss;

static tee_mm_pool_t *init_pool(paddr_t b, paddr_size_t sz, uint32_t flags)
{
	tee_mm_pool_t *pool = NULL;

	/* An entirely empty range simply means "no pool". */
	if (!b && !sz)
		return NULL;

	/* A partially defined or misaligned range is a configuration bug. */
	if (!b || (b & CORE_MMU_USER_CODE_MASK) ||
	    !sz || (sz & CORE_MMU_USER_CODE_MASK))
		panic("invalid phys mem");

	if (flags & TEE_MM_POOL_NEX_MALLOC)
		pool = nex_malloc(sizeof(*pool));
	else
		pool = malloc(sizeof(*pool));
	if (!pool)
		panic();

	tee_mm_init(pool, b, sz, CORE_MMU_USER_CODE_SHIFT, flags);
	return pool;
}
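
/*
 * Illustration (not from the original file): assuming the common case
 * where CORE_MMU_USER_CODE_SHIFT is the small-page shift (12), a pool
 * at base 0x80000000 with size 0x100000 passes the checks above, while
 * base 0x80000800 or size 0x1800 would trip the alignment test and
 * panic.
 */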

void nex_phys_mem_init(paddr_t core_base, paddr_size_t core_size,
		       paddr_t ta_base, paddr_size_t ta_size)
{
	uint32_t flags = TEE_MM_POOL_NEX_MALLOC;

	nex_core_pool = init_pool(core_base, core_size, flags);
	nex_ta_pool = init_pool(ta_base, ta_size, flags);
}

paddr_size_t nex_phys_mem_get_ta_size(void)
{
	if (nex_ta_pool)
		return nex_ta_pool->size;
	/*
	 * No separate TA pool: TAs are carved out of the core pool,
	 * except for the part reserved for the TEE core itself.
	 */
	assert(nex_core_pool);
	return nex_core_pool->size - TEE_RAM_VA_SIZE;
}

paddr_t nex_phys_mem_get_ta_base(void)
{
	if (nex_ta_pool)
		return nex_ta_pool->lo;
	assert(nex_core_pool);
	return nex_core_pool->lo;
}
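
/*
 * Illustrative numbers (assumed, not from this file): with a 16 MiB
 * core pool, no separate TA pool and TEE_RAM_VA_SIZE of 2 MiB, the two
 * getters above report a 14 MiB TA area starting at the core pool base.
 */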

static bool is_in_pool_range(tee_mm_pool_t *pool, paddr_t addr)
{
	return pool && core_is_buffer_inside(addr, 1, pool->lo, pool->size);
}

/* Look up the entry covering addr in whichever pool contains it. */
static tee_mm_entry_t *mm_find(tee_mm_pool_t *p0, tee_mm_pool_t *p1,
			       paddr_t addr)
{
	if (is_in_pool_range(p0, addr))
		return tee_mm_find(p0, addr);
	if (is_in_pool_range(p1, addr))
		return tee_mm_find(p1, addr);
	return NULL;
}

tee_mm_entry_t *nex_phys_mem_mm_find(paddr_t addr)
{
	return mm_find(nex_core_pool, nex_ta_pool, addr);
}

/* Allocate size bytes from p0, falling back to p1 if p0 is full. */
static tee_mm_entry_t *mm_alloc(tee_mm_pool_t *p0, tee_mm_pool_t *p1,
				size_t size)
{
	tee_mm_entry_t *mm = NULL;

	if (p0)
		mm = tee_mm_alloc(p0, size);
	if (!mm && p1)
		mm = tee_mm_alloc(p1, size);

	return mm;
}

tee_mm_entry_t *nex_phys_mem_core_alloc(size_t size)
{
	return mm_alloc(nex_core_pool, NULL, size);
}

/* TA allocations spill over into the core pool if the TA pool is absent or full. */
tee_mm_entry_t *nex_phys_mem_ta_alloc(size_t size)
{
	return mm_alloc(nex_ta_pool, nex_core_pool, size);
}
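
/*
 * Usage sketch (illustrative only, not part of this file): a caller
 * typically turns the returned entry into a physical address with
 * tee_mm_get_smem() and releases it with tee_mm_free(), e.g.
 *
 *	tee_mm_entry_t *mm = nex_phys_mem_ta_alloc(SMALL_PAGE_SIZE);
 *
 *	if (!mm)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *	pa = tee_mm_get_smem(mm);
 *	... map and use the page ...
 *	tee_mm_free(mm);
 */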

/* Allocate the fixed range [base, base + size) from the pool containing it. */
static tee_mm_entry_t *mm_alloc2(tee_mm_pool_t *p0, tee_mm_pool_t *p1,
				 paddr_t base, size_t size)
{
	if (is_in_pool_range(p0, base))
		return tee_mm_alloc2(p0, base, size);
	if (is_in_pool_range(p1, base))
		return tee_mm_alloc2(p1, base, size);
	return NULL;
}

tee_mm_entry_t *nex_phys_mem_alloc2(paddr_t base, size_t size)
{
	return mm_alloc2(nex_core_pool, nex_ta_pool, base, size);
}
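
/*
 * Illustrative use (assumed, not from this file): fixed-address
 * allocation suits cases where a platform or firmware interface
 * dictates the exact physical range, e.g.
 *
 *	if (!nex_phys_mem_alloc2(fw_base, fw_size))
 *		panic("reserved range unavailable");
 *
 * where fw_base and fw_size are hypothetical platform-provided values.
 */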

static void partial_carve_out(tee_mm_pool_t *pool, paddr_t base, size_t size)
{
	if (pool &&
	    core_is_buffer_intersect(base, size, pool->lo, pool->size)) {
		tee_mm_entry_t *mm __maybe_unused = NULL;
		paddr_t end_pa = 0;
		paddr_t pa = 0;
		size_t sz = 0;

		/* Clamp the carve-out to the part overlapping the pool. */
		pa = MAX(base, pool->lo);
		end_pa = MIN(base + size - 1, pool->lo + pool->size - 1);
		sz = end_pa - pa + 1;

		mm = tee_mm_alloc2(pool, pa, sz);
		assert(mm);
	}
}

void nex_phys_mem_partial_carve_out(paddr_t base, size_t size)
{
	partial_carve_out(nex_core_pool, base, size);
	partial_carve_out(nex_ta_pool, base, size);
}
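
/*
 * Worked example (illustrative numbers): for a pool covering
 * [0x80000000, 0x80100000) and a carve-out of base 0x800f0000, size
 * 0x20000, the clamping above yields pa = 0x800f0000,
 * end_pa = 0x800fffff and sz = 0x10000, i.e. only the overlapping
 * 64 KiB is reserved from that pool.
 */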

#ifdef CFG_WITH_STATS
static void add_pool_stats(tee_mm_pool_t *pool, struct pta_stats_alloc *stats,
			   bool reset)
{
	if (pool) {
		struct pta_stats_alloc s = { };

		tee_mm_get_pool_stats(pool, &s, reset);
		/*
		 * Sizes and current allocations sum across pools, while
		 * max_allocated only keeps the largest per-pool peak, so
		 * it is not a high-water mark of combined usage.
		 */
		stats->size += s.size;
		if (s.max_allocated > stats->max_allocated)
			stats->max_allocated = s.max_allocated;
		stats->allocated += s.allocated;
	}
}

void nex_phys_mem_stats(struct pta_stats_alloc *stats, bool reset)
{
	memset(stats, 0, sizeof(*stats));

	add_pool_stats(nex_core_pool, stats, reset);
	add_pool_stats(nex_ta_pool, stats, reset);
}
#endif /*CFG_WITH_STATS*/

#if defined(CFG_NS_VIRTUALIZATION)

/*
 * Per-partition pools, one instance for each guest, unlike the shared
 * nexus pools above. They are allocated with plain malloc() since
 * TEE_MM_POOL_NO_FLAGS is used.
 */
static tee_mm_pool_t *core_pool;
static tee_mm_pool_t *ta_pool;

void phys_mem_init(paddr_t core_base, paddr_size_t core_size,
		   paddr_t ta_base, paddr_size_t ta_size)
{
	uint32_t flags = TEE_MM_POOL_NO_FLAGS;

	core_pool = init_pool(core_base, core_size, flags);
	ta_pool = init_pool(ta_base, ta_size, flags);
}

tee_mm_entry_t *phys_mem_mm_find(paddr_t addr)
{
	return mm_find(core_pool, ta_pool, addr);
}

tee_mm_entry_t *phys_mem_core_alloc(size_t size)
{
	return mm_alloc(core_pool, NULL, size);
}

tee_mm_entry_t *phys_mem_ta_alloc(size_t size)
{
	return mm_alloc(ta_pool, core_pool, size);
}

tee_mm_entry_t *phys_mem_alloc2(paddr_t base, size_t size)
{
	return mm_alloc2(core_pool, ta_pool, base, size);
}

#ifdef CFG_WITH_STATS
void phys_mem_stats(struct pta_stats_alloc *stats, bool reset)
{
	memset(stats, 0, sizeof(*stats));

	add_pool_stats(core_pool, stats, reset);
	add_pool_stats(ta_pool, stats, reset);
}
#endif /*CFG_WITH_STATS*/
#endif /*CFG_NS_VIRTUALIZATION*/