/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016-2024 Linaro Limited
 */

#ifndef __MM_MOBJ_H
#define __MM_MOBJ_H

#include <compiler.h>
#include <mm/core_memprot.h>
#include <mm/file.h>
#include <mm/fobj.h>
#include <string_ext.h>
#include <sys/queue.h>
#include <tee_api_types.h>
#include <types_ext.h>

#include <optee_msg.h>

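/*
 * enum mobj_use_case - intended use of the memory described by a mobj
 * @MOBJ_USE_CASE_NS_SHM:         non-secure shared memory
 * @MOBJ_USE_CASE_SEC_VIDEO_PLAY: protected memory for secure video playback
 * @MOBJ_USE_CASE_TRUSTED_UI:     protected memory for a trusted user interface
 */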
enum mobj_use_case {
	MOBJ_USE_CASE_NS_SHM,
	MOBJ_USE_CASE_SEC_VIDEO_PLAY,
	MOBJ_USE_CASE_TRUSTED_UI,
};

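/*
 * struct mobj - memory object describing a range of memory
 * @ops:          operations on the memory object
 * @size:         size in bytes of the memory described
 * @phys_granule: size of each physically contiguous chunk of the object,
 *                0 if the entire object is physically contiguous
 * @refc:         reference counter, see mobj_get() and mobj_put()
 */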
struct mobj {
	const struct mobj_ops *ops;
	size_t size;
	size_t phys_granule;
	struct refcount refc;
};

struct mobj_ops {
	void *(*get_va)(struct mobj *mobj, size_t offs, size_t len);
	TEE_Result (*get_pa)(struct mobj *mobj, size_t offs, size_t granule,
			     paddr_t *pa);
	size_t (*get_phys_offs)(struct mobj *mobj, size_t granule);
	TEE_Result (*get_mem_type)(struct mobj *mobj, uint32_t *mt);
	bool (*matches)(struct mobj *mobj, enum buf_is_attr attr);
	void (*free)(struct mobj *mobj);
	uint64_t (*get_cookie)(struct mobj *mobj);
	struct fobj *(*get_fobj)(struct mobj *mobj);
	TEE_Result (*inc_map)(struct mobj *mobj);
	TEE_Result (*dec_map)(struct mobj *mobj);
};

extern struct mobj mobj_virt;
extern struct mobj *mobj_tee_ram_rx;
extern struct mobj *mobj_tee_ram_rw;

/*
 * mobj_get_va() - get virtual address of a mapped mobj
 * @mobj:   memory object
 * @offset: find the va of this offset into @mobj
 * @len:    number of bytes after @offset that must be valid; can be 1 if
 *          the caller knows by other means that the expected buffer is
 *          available.
 *
 * Returns a virtual address on success or NULL on error
 */
static inline void *mobj_get_va(struct mobj *mobj, size_t offset, size_t len)
{
	if (mobj && mobj->ops && mobj->ops->get_va)
		return mobj->ops->get_va(mobj, offset, len);
	return NULL;
}

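/*
 * Example (illustrative sketch only): read a 32-bit word at @offs and
 * let mobj_get_va() validate that the whole word lies inside the mobj:
 *
 *	uint32_t *p = mobj_get_va(mobj, offs, sizeof(*p));
 *
 *	if (!p)
 *		return TEE_ERROR_BAD_PARAMETERS;
 *	val = *p;
 */
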
static inline TEE_Result mobj_get_pa(struct mobj *mobj, size_t offs,
				     size_t granule, paddr_t *pa)
{
	if (mobj && mobj->ops && mobj->ops->get_pa)
		return mobj->ops->get_pa(mobj, offs, granule, pa);
	return TEE_ERROR_GENERIC;
}

static inline size_t mobj_get_phys_offs(struct mobj *mobj, size_t granule)
{
	if (mobj && mobj->ops && mobj->ops->get_phys_offs)
		return mobj->ops->get_phys_offs(mobj, granule);
	return 0;
}

static inline TEE_Result mobj_get_mem_type(struct mobj *mobj, uint32_t *mt)
{
	if (mobj && mobj->ops && mobj->ops->get_mem_type)
		return mobj->ops->get_mem_type(mobj, mt);
	return TEE_ERROR_GENERIC;
}

static inline bool mobj_matches(struct mobj *mobj, enum buf_is_attr attr)
{
	if (mobj && mobj->ops && mobj->ops->matches)
		return mobj->ops->matches(mobj, attr);
	return false;
}

/**
 * mobj_inc_map() - increase map count
 * @mobj: pointer to a MOBJ
 *
 * Maps the MOBJ if it isn't mapped already and increases the map count.
 * Each call to mobj_inc_map() is supposed to be matched by a call to
 * mobj_dec_map().
 *
 * Returns TEE_SUCCESS on success or an error code on failure
 */
static inline TEE_Result mobj_inc_map(struct mobj *mobj)
{
	if (mobj && mobj->ops) {
		if (mobj->ops->inc_map)
			return mobj->ops->inc_map(mobj);
		return TEE_SUCCESS;
	}
	return TEE_ERROR_GENERIC;
}

/**
 * mobj_dec_map() - decrease map count
 * @mobj: pointer to a MOBJ
 *
 * Decreases the map count and also unmaps the MOBJ if the map count
 * reaches 0. Each call to mobj_inc_map() is supposed to be matched by a
 * call to mobj_dec_map().
 *
 * Returns TEE_SUCCESS on success or an error code on failure
 */
static inline TEE_Result mobj_dec_map(struct mobj *mobj)
{
	if (mobj && mobj->ops) {
		if (mobj->ops->dec_map)
			return mobj->ops->dec_map(mobj);
		return TEE_SUCCESS;
	}
	return TEE_ERROR_GENERIC;
}

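/*
 * Illustrative sketch of the expected pairing: map, access, unmap.
 * Error handling of mobj_dec_map() is omitted for brevity.
 *
 *	res = mobj_inc_map(mobj);
 *	if (res)
 *		return res;
 *	va = mobj_get_va(mobj, 0, mobj->size);
 *	...
 *	mobj_dec_map(mobj);
 */
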
/**
 * mobj_get() - get a MOBJ
 * @mobj: Pointer to a MOBJ or NULL
 *
 * Increases reference counter of the @mobj
 *
 * Returns @mobj with reference counter increased or NULL if @mobj was NULL
 */
static inline struct mobj *mobj_get(struct mobj *mobj)
{
	if (mobj && !refcount_inc(&mobj->refc))
		panic();

	return mobj;
}

/**
 * mobj_put() - put a MOBJ
 * @mobj: Pointer to a MOBJ or NULL
 *
 * Decreases reference counter of the @mobj and frees it if the counter
 * reaches 0.
 */
static inline void mobj_put(struct mobj *mobj)
{
	if (mobj && refcount_dec(&mobj->refc))
		mobj->ops->free(mobj);
}

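/*
 * Illustrative sketch of reference ownership, assuming a hypothetical
 * consumer struct @ctx that keeps the mobj across calls:
 *
 *	ctx->mobj = mobj_get(mobj);
 *	...
 *	mobj_put(ctx->mobj);
 *	ctx->mobj = NULL;
 */
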
/**
 * mobj_put_wipe() - wipe and put a MOBJ
 * @mobj: Pointer to a MOBJ or NULL
 *
 * Clears the memory represented by the mobj and then puts it.
 */
static inline void mobj_put_wipe(struct mobj *mobj)
{
	if (mobj) {
		void *buf = mobj_get_va(mobj, 0, mobj->size);

		if (buf)
			memzero_explicit(buf, mobj->size);
		mobj_put(mobj);
	}
}

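/*
 * mobj_get_cookie() - get the cookie of a mobj
 * @mobj: memory object
 *
 * Returns the cookie identifying the memory to normal world, or, when
 * the mobj has no cookie, OPTEE_MSG_FMEM_INVALID_GLOBAL_ID with
 * CFG_CORE_FFA and 0 otherwise.
 */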
static inline uint64_t mobj_get_cookie(struct mobj *mobj)
{
	if (mobj && mobj->ops && mobj->ops->get_cookie)
		return mobj->ops->get_cookie(mobj);

#if defined(CFG_CORE_FFA)
	return OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
#else
	return 0;
#endif
}

static inline struct fobj *mobj_get_fobj(struct mobj *mobj)
{
	if (mobj && mobj->ops && mobj->ops->get_fobj)
		return mobj->ops->get_fobj(mobj);

	return NULL;
}

static inline bool mobj_is_nonsec(struct mobj *mobj)
{
	return mobj_matches(mobj, CORE_MEM_NON_SEC);
}

static inline bool mobj_is_secure(struct mobj *mobj)
{
	return mobj_matches(mobj, CORE_MEM_SEC);
}

static inline bool mobj_is_sdp_mem(struct mobj *mobj)
{
	return mobj_matches(mobj, CORE_MEM_SDP_MEM);
}

static inline size_t mobj_get_phys_granule(struct mobj *mobj)
{
	if (mobj->phys_granule)
		return mobj->phys_granule;
	return mobj->size;
}

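/*
 * mobj_check_offset_and_len() - check that a range is covered by a mobj
 * @mobj:   memory object
 * @offset: offset into @mobj
 * @len:    length of the range, must be non-zero
 *
 * The end offset is computed with overflow checking, so the range
 * [@offset, @offset + @len - 1] is accepted only when it lies entirely
 * inside @mobj.
 *
 * Returns true if the range is valid, false otherwise.
 */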
static inline bool mobj_check_offset_and_len(struct mobj *mobj, size_t offset,
					     size_t len)
{
	size_t end_offs = 0;

	return len && !ADD_OVERFLOW(offset, len - 1, &end_offs) &&
	       end_offs < mobj->size;
}

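/*
 * mobj_phys_alloc() - allocate a mobj for physically contiguous memory
 * @pa:    physical address of the memory
 * @size:  size of the memory
 * @cattr: cache attributes of the memory
 * @battr: buffer attribute, tested with mobj_matches()
 *
 * Returns a valid pointer on success or NULL on failure.
 */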
struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t cattr,
			     enum buf_is_attr battr);

#if defined(CFG_CORE_FFA)
struct mobj *mobj_ffa_get_by_cookie(uint64_t cookie,
				    unsigned int internal_offs);

TEE_Result mobj_ffa_unregister_by_cookie(uint64_t cookie);

/* Functions for SPMC */
#ifdef CFG_CORE_SEL1_SPMC
struct mobj_ffa *mobj_ffa_sel1_spmc_new(uint64_t cookie,
					unsigned int num_pages,
					enum mobj_use_case use_case);
void mobj_ffa_sel1_spmc_delete(struct mobj_ffa *mobj);
TEE_Result mobj_ffa_sel1_spmc_reclaim(uint64_t cookie);
#else
struct mobj_ffa *mobj_ffa_spmc_new(uint64_t cookie, unsigned int num_pages,
				   enum mobj_use_case use_case);
void mobj_ffa_spmc_delete(struct mobj_ffa *mobj);
#endif

uint64_t mobj_ffa_get_cookie(struct mobj_ffa *mobj);
TEE_Result mobj_ffa_add_pages_at(struct mobj_ffa *mobj, unsigned int *idx,
				 paddr_t pa, unsigned int num_pages);
TEE_Result mobj_ffa_push_to_inactive(struct mobj_ffa *mobj);

#ifdef CFG_CORE_DYN_PROTMEM
TEE_Result mobj_ffa_assign_protmem(uint64_t cookie,
				   enum mobj_use_case use_case);
struct mobj *mobj_ffa_protmem_get_by_pa(paddr_t pa, paddr_size_t size);
#endif

#elif defined(CFG_CORE_DYN_SHM)
/* reg_shm represents TEE shared memory */
struct mobj *mobj_reg_shm_alloc(paddr_t *pages, size_t num_pages,
				paddr_t page_offset, uint64_t cookie);

/**
 * mobj_reg_shm_get_by_cookie() - get a MOBJ based on cookie
 * @cookie: Cookie used by normal world when supplying the shared memory
 *
 * Searches for a registered shared memory MOBJ and if one with a matching
 * @cookie is found its reference counter is increased before returning
 * the MOBJ.
 *
 * Returns a valid pointer on success or NULL on failure.
 */
struct mobj *mobj_reg_shm_get_by_cookie(uint64_t cookie);

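/*
 * Typical usage sketch (illustrative only), with @cookie supplied by
 * normal world:
 *
 *	struct mobj *mobj = mobj_reg_shm_get_by_cookie(cookie);
 *
 *	if (!mobj)
 *		return TEE_ERROR_BAD_PARAMETERS;
 *	...
 *	mobj_put(mobj);
 */
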
TEE_Result mobj_reg_shm_release_by_cookie(uint64_t cookie);

/**
 * mobj_reg_shm_unguard() - unguards a reg_shm
 * @mobj: pointer to a registered shared memory mobj
 *
 * A registered shared memory mobj is normally guarded against being
 * released with mobj_reg_shm_release_by_cookie(). After this function
 * has returned the mobj can be released by a call to
 * mobj_reg_shm_release_by_cookie() if the reference counter allows it.
 */
void mobj_reg_shm_unguard(struct mobj *mobj);

/*
 * mapped_shm represents a registered shared buffer
 * which is mapped into the OP-TEE VA space
 */
struct mobj *mobj_mapped_shm_alloc(paddr_t *pages, size_t num_pages,
				   paddr_t page_offset, uint64_t cookie);

#if defined(CFG_CORE_DYN_PROTMEM)
struct mobj *mobj_protmem_alloc(paddr_t pa, paddr_size_t size, uint64_t cookie,
				enum mobj_use_case use_case);
TEE_Result mobj_protmem_release_by_cookie(uint64_t cookie);
struct mobj *mobj_protmem_get_by_pa(paddr_t pa, paddr_size_t size);
#endif /*CFG_CORE_DYN_PROTMEM*/

#endif /*CFG_CORE_DYN_SHM*/

#if !defined(CFG_CORE_DYN_SHM)
static inline struct mobj *mobj_mapped_shm_alloc(paddr_t *pages __unused,
						 size_t num_pages __unused,
						 paddr_t page_offset __unused,
						 uint64_t cookie __unused)
{
	return NULL;
}

static inline struct mobj *mobj_reg_shm_get_by_cookie(uint64_t cookie __unused)
{
	return NULL;
}
#endif

#if !defined(CFG_CORE_DYN_PROTMEM) || defined(CFG_CORE_FFA)
static inline struct mobj *
mobj_protmem_alloc(paddr_t pa __unused, paddr_size_t size __unused,
		   uint64_t cookie __unused,
		   enum mobj_use_case use_case __unused)
{
	return NULL;
}

static inline TEE_Result
mobj_protmem_release_by_cookie(uint64_t cookie __unused)
{
	return TEE_ERROR_NOT_IMPLEMENTED;
}

static inline struct mobj *mobj_protmem_get_by_pa(paddr_t pa __unused,
						  paddr_size_t size __unused)
{
	return NULL;
}
#endif

#if !defined(CFG_CORE_DYN_PROTMEM) || !defined(CFG_CORE_FFA)
static inline struct mobj *
mobj_ffa_protmem_get_by_pa(paddr_t pa __unused, paddr_size_t size __unused)
{
	return NULL;
}

static inline TEE_Result
mobj_ffa_assign_protmem(uint64_t cookie __unused,
			enum mobj_use_case use_case __unused)
{
	return TEE_ERROR_NOT_IMPLEMENTED;
}
#endif

#if !defined(CFG_CORE_FFA)
static inline struct mobj *
mobj_ffa_get_by_cookie(uint64_t cookie __unused,
		       unsigned int internal_offs __unused)
{
	return NULL;
}
#endif

struct mobj *mobj_shm_alloc(paddr_t pa, size_t size, uint64_t cookie);

#ifdef CFG_PAGED_USER_TA
bool mobj_is_paged(struct mobj *mobj);
#else
static inline bool mobj_is_paged(struct mobj *mobj __unused)
{
	return false;
}
#endif

struct mobj *mobj_with_fobj_alloc(struct fobj *fobj, struct file *file,
				  uint32_t mem_type);

#ifdef CFG_CORE_DYN_PROTMEM
/*
 * plat_get_protmem_config() - Platform specific config for a protected
 *			       memory use-case
 * @use_case:      Identifies the protected memory use-case
 * @min_mem_sz:    out value for minimum memory size
 * @min_mem_align: out value for minimum alignment
 *
 * The function is not supposed to be called with MOBJ_USE_CASE_NS_SHM as
 * @use_case; how any other defined enum value is handled is up to the
 * platform.
 *
 * Returns a TEE_Result value
 */
TEE_Result plat_get_protmem_config(enum mobj_use_case use_case,
				   size_t *min_mem_sz, size_t *min_mem_align);

/*
 * plat_set_protmem_range() - Platform specific change of memory protection
 * @use_case: Identifies the protected memory use-case
 * @pa:       Start physical address
 * @sz:       Size of the memory range
 *
 * The @use_case defines how the supplied memory range should be protected.
 * The function can be called with MOBJ_USE_CASE_NS_SHM as @use_case to
 * restore the non-protected state.
 *
 * Returns a TEE_Result value
 */
TEE_Result plat_set_protmem_range(enum mobj_use_case use_case, paddr_t pa,
				  paddr_size_t sz);
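
/*
 * Minimal sketch of a platform implementation (illustrative only, the
 * my_firewall_protect() call stands in for a hypothetical platform
 * firewall driver):
 *
 *	TEE_Result plat_set_protmem_range(enum mobj_use_case use_case,
 *					  paddr_t pa, paddr_size_t sz)
 *	{
 *		if (use_case == MOBJ_USE_CASE_NS_SHM)
 *			return my_firewall_protect(pa, sz, false);
 *		return my_firewall_protect(pa, sz, true);
 *	}
 */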
#else
static inline TEE_Result
plat_get_protmem_config(enum mobj_use_case use_case __unused,
			size_t *min_mem_sz __unused,
			size_t *min_mem_align __unused)
{
	return TEE_ERROR_BAD_PARAMETERS;
}

static inline TEE_Result
plat_set_protmem_range(enum mobj_use_case use_case __unused,
		       paddr_t pa __unused, paddr_size_t sz __unused)
{
	return TEE_ERROR_BAD_PARAMETERS;
}
#endif

#endif /*__MM_MOBJ_H*/