xref: /optee_os/core/include/mm/mobj.h (revision ef3bc69c72b8d46493eab724eab6e018423088e1)
1 /* SPDX-License-Identifier: BSD-2-Clause */
2 /*
3  * Copyright (c) 2016-2017, 2022 Linaro Limited
4  */
5 
6 #ifndef __MM_MOBJ_H
7 #define __MM_MOBJ_H
8 
9 #include <compiler.h>
10 #include <mm/core_memprot.h>
11 #include <mm/file.h>
12 #include <mm/fobj.h>
13 #include <string_ext.h>
14 #include <sys/queue.h>
15 #include <tee_api_types.h>
16 #include <types_ext.h>
17 
18 #include <optee_msg.h>
19 
/*
 * struct mobj - abstract memory object
 * @ops:	operation table implementing this kind of mobj, may be
 *		partially populated (optional ops are NULL)
 * @size:	size in bytes of the memory covered by this object
 * @phys_granule: physical granularity of the object, 0 means the
 *		granule equals @size (see mobj_get_phys_granule())
 * @refc:	reference counter, manipulated with mobj_get()/mobj_put()
 */
struct mobj {
	const struct mobj_ops *ops;
	size_t size;
	size_t phys_granule;
	struct refcount refc;
};
26 
/*
 * struct mobj_ops - operation table backing a struct mobj
 *
 * Any pointer may be NULL; the mobj_*() inline wrappers below check for
 * that and return a suitable default (NULL, 0, false, TEE_ERROR_GENERIC,
 * or TEE_SUCCESS for inc_map/dec_map) instead of calling through.
 */
struct mobj_ops {
	void *(*get_va)(struct mobj *mobj, size_t offs, size_t len);
	TEE_Result (*get_pa)(struct mobj *mobj, size_t offs, size_t granule,
			     paddr_t *pa);
	size_t (*get_phys_offs)(struct mobj *mobj, size_t granule);
	TEE_Result (*get_mem_type)(struct mobj *mobj, uint32_t *mt);
	bool (*matches)(struct mobj *mobj, enum buf_is_attr attr);
	void (*free)(struct mobj *mobj);
	uint64_t (*get_cookie)(struct mobj *mobj);
	struct fobj *(*get_fobj)(struct mobj *mobj);
	TEE_Result (*inc_map)(struct mobj *mobj);
	TEE_Result (*dec_map)(struct mobj *mobj);
};
40 
/*
 * Globally available memory objects, defined in the mm implementation.
 * NOTE(review): presumably mobj_virt covers the whole core virtual address
 * space and the tee_ram objects cover core RX/RW memory — confirm against
 * the defining translation unit.
 */
extern struct mobj mobj_virt;
extern struct mobj *mobj_tee_ram_rx;
extern struct mobj *mobj_tee_ram_rw;
44 
45 /*
46  * mobj_get_va() - get virtual address of a mapped mobj
47  * @mobj:	memory object
48  * @offset:	find the va of this offset into @mobj
49  * @len:	how many bytes after @offset that must be valid, can be 1 if
50  *		the caller knows by other means that the expected buffer is
51  *		available.
52  *
53  * return a virtual address on success or NULL on error
54  */
55 static inline void *mobj_get_va(struct mobj *mobj, size_t offset, size_t len)
56 {
57 	if (mobj && mobj->ops && mobj->ops->get_va)
58 		return mobj->ops->get_va(mobj, offset, len);
59 	return NULL;
60 }
61 
62 static inline TEE_Result mobj_get_pa(struct mobj *mobj, size_t offs,
63 				     size_t granule, paddr_t *pa)
64 {
65 	if (mobj && mobj->ops && mobj->ops->get_pa)
66 		return mobj->ops->get_pa(mobj, offs, granule, pa);
67 	return TEE_ERROR_GENERIC;
68 }
69 
70 static inline size_t mobj_get_phys_offs(struct mobj *mobj, size_t granule)
71 {
72 	if (mobj && mobj->ops && mobj->ops->get_phys_offs)
73 		return mobj->ops->get_phys_offs(mobj, granule);
74 	return 0;
75 }
76 
77 static inline TEE_Result mobj_get_mem_type(struct mobj *mobj, uint32_t *mt)
78 {
79 	if (mobj && mobj->ops && mobj->ops->get_mem_type)
80 		return mobj->ops->get_mem_type(mobj, mt);
81 	return TEE_ERROR_GENERIC;
82 }
83 
84 static inline bool mobj_matches(struct mobj *mobj, enum buf_is_attr attr)
85 {
86 	if (mobj && mobj->ops && mobj->ops->matches)
87 		return mobj->ops->matches(mobj, attr);
88 	return false;
89 }
90 
91 /**
92  * mobj_inc_map() - increase map count
93  * @mobj:	pointer to a MOBJ
94  *
95  * Maps the MOBJ if it isn't mapped already and increases the map count
96  * Each call to mobj_inc_map() is supposed to be matches by a call to
97  * mobj_dec_map().
98  *
99  * Returns TEE_SUCCESS on success or an error code on failure
100  */
101 static inline TEE_Result mobj_inc_map(struct mobj *mobj)
102 {
103 	if (mobj && mobj->ops) {
104 		if (mobj->ops->inc_map)
105 			return mobj->ops->inc_map(mobj);
106 		return TEE_SUCCESS;
107 	}
108 	return TEE_ERROR_GENERIC;
109 }
110 
111 /**
112  * mobj_dec_map() - decrease map count
113  * @mobj:	pointer to a MOBJ
114  *
115  * Decreases the map count and also unmaps the MOBJ if the map count
116  * reaches 0.  Each call to mobj_inc_map() is supposed to be matched by a
117  * call to mobj_dec_map().
118  *
119  * Returns TEE_SUCCESS on success or an error code on failure
120  */
121 static inline TEE_Result mobj_dec_map(struct mobj *mobj)
122 {
123 	if (mobj && mobj->ops) {
124 		if (mobj->ops->dec_map)
125 			return mobj->ops->dec_map(mobj);
126 		return TEE_SUCCESS;
127 	}
128 	return TEE_ERROR_GENERIC;
129 }
130 
131 /**
132  * mobj_get() - get a MOBJ
133  * @mobj:	Pointer to a MOBJ or NULL
134  *
135  * Increases reference counter of the @mobj
136  *
137  * Returns @mobj with reference counter increased or NULL if @mobj was NULL
138  */
139 static inline struct mobj *mobj_get(struct mobj *mobj)
140 {
141 	if (mobj && !refcount_inc(&mobj->refc))
142 		panic();
143 
144 	return mobj;
145 }
146 
147 /**
148  * mobj_put() - put a MOBJ
149  * @mobj:	Pointer to a MOBJ or NULL
150  *
151  * Decreases reference counter of the @mobj and frees it if the counter
152  * reaches 0.
153  */
154 static inline void mobj_put(struct mobj *mobj)
155 {
156 	if (mobj && refcount_dec(&mobj->refc))
157 		mobj->ops->free(mobj);
158 }
159 
160 /**
161  * mobj_put_wipe() - wipe and put a MOBJ
162  * @mobj:	Pointer to a MOBJ or NULL
163  *
164  * Clears the memory represented by the mobj and then puts it.
165  */
166 static inline void mobj_put_wipe(struct mobj *mobj)
167 {
168 	if (mobj) {
169 		void *buf = mobj_get_va(mobj, 0, mobj->size);
170 
171 		if (buf)
172 			memzero_explicit(buf, mobj->size);
173 		mobj_put(mobj);
174 	}
175 }
176 
/*
 * mobj_get_cookie() - get the shared-memory cookie of a mobj
 * @mobj:	memory object
 *
 * Returns the cookie identifying this mobj to normal world, or an
 * "invalid cookie" value if @mobj has no get_cookie callback:
 * OPTEE_MSG_FMEM_INVALID_GLOBAL_ID with CFG_CORE_FFA, 0 otherwise.
 */
static inline uint64_t mobj_get_cookie(struct mobj *mobj)
{
	if (mobj && mobj->ops && mobj->ops->get_cookie)
		return mobj->ops->get_cookie(mobj);

#if defined(CFG_CORE_FFA)
	return OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
#else
	return 0;
#endif
}
188 
189 static inline struct fobj *mobj_get_fobj(struct mobj *mobj)
190 {
191 	if (mobj && mobj->ops && mobj->ops->get_fobj)
192 		return mobj->ops->get_fobj(mobj);
193 
194 	return NULL;
195 }
196 
/* mobj_is_nonsec() - true if @mobj matches non-secure memory */
static inline bool mobj_is_nonsec(struct mobj *mobj)
{
	return mobj_matches(mobj, CORE_MEM_NON_SEC);
}
201 
/* mobj_is_secure() - true if @mobj matches secure memory */
static inline bool mobj_is_secure(struct mobj *mobj)
{
	return mobj_matches(mobj, CORE_MEM_SEC);
}
206 
/* mobj_is_sdp_mem() - true if @mobj matches secure data path memory */
static inline bool mobj_is_sdp_mem(struct mobj *mobj)
{
	return mobj_matches(mobj, CORE_MEM_SDP_MEM);
}
211 
/*
 * mobj_get_phys_granule() - get the physical granularity of a mobj
 * @mobj:	memory object, must be non-NULL (unlike most other accessors
 *		here, @mobj is dereferenced without a NULL check)
 *
 * Returns @mobj->phys_granule if set, else the whole object size.
 */
static inline size_t mobj_get_phys_granule(struct mobj *mobj)
{
	if (mobj->phys_granule)
		return mobj->phys_granule;
	return mobj->size;
}
218 
/*
 * mobj_check_offset_and_len() - check that a range lies within a mobj
 * @mobj:	memory object, must be non-NULL
 * @offset:	start offset of the range into @mobj
 * @len:	length in bytes of the range, must be non-zero
 *
 * Computes the inclusive end offset (offset + len - 1) with explicit
 * overflow detection, so a huge @len can't wrap around and pass the
 * bounds check.
 *
 * Returns true if [offset, offset + len) fits within @mobj->size.
 */
static inline bool mobj_check_offset_and_len(struct mobj *mobj, size_t offset,
					     size_t len)
{
	size_t end_offs = 0;

	return len && !ADD_OVERFLOW(offset, len - 1, &end_offs) &&
	       end_offs < mobj->size;
}
227 
/*
 * mobj_phys_alloc() - allocate a mobj covering contiguous physical memory
 * NOTE(review): presumably @cattr is the cache attribute and @battr the
 * buf_is_attr classification of the region — confirm in the implementation.
 */
struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t cattr,
			     enum buf_is_attr battr);
230 
#if defined(CFG_CORE_FFA)
/*
 * FF-A shared memory objects, identified by a 64-bit cookie assigned
 * when the memory is shared via the FF-A memory sharing interface.
 */
struct mobj *mobj_ffa_get_by_cookie(uint64_t cookie,
				    unsigned int internal_offs);

TEE_Result mobj_ffa_unregister_by_cookie(uint64_t cookie);

/* Functions for SPMC */
#ifdef CFG_CORE_SEL1_SPMC
struct mobj_ffa *mobj_ffa_sel1_spmc_new(uint64_t cookie,
					unsigned int num_pages);
void mobj_ffa_sel1_spmc_delete(struct mobj_ffa *mobj);
TEE_Result mobj_ffa_sel1_spmc_reclaim(uint64_t cookie);
#else
struct mobj_ffa *mobj_ffa_spmc_new(uint64_t cookie, unsigned int num_pages);
void mobj_ffa_spmc_delete(struct mobj_ffa *mobj);
#endif

uint64_t mobj_ffa_get_cookie(struct mobj_ffa *mobj);
/* Populates page entries of @mobj starting at index *@idx, advancing *@idx */
TEE_Result mobj_ffa_add_pages_at(struct mobj_ffa *mobj, unsigned int *idx,
				 paddr_t pa, unsigned int num_pages);
uint64_t mobj_ffa_push_to_inactive(struct mobj_ffa *mobj);

#elif defined(CFG_CORE_DYN_SHM)
/* reg_shm represents TEE shared memory (registered, not yet mapped) */
struct mobj *mobj_reg_shm_alloc(paddr_t *pages, size_t num_pages,
				paddr_t page_offset, uint64_t cookie);

/**
 * mobj_reg_shm_get_by_cookie() - get a MOBJ based on cookie
 * @cookie:	Cookie used by normal world when supplying the shared memory
 *
 * Searches for a registered shared memory MOBJ and if one with a matching
 * @cookie is found its reference counter is increased before returning
 * the MOBJ.
 *
 * Returns a valid pointer on success or NULL on failure.
 */
struct mobj *mobj_reg_shm_get_by_cookie(uint64_t cookie);

/* Releases the registered shared memory identified by @cookie */
TEE_Result mobj_reg_shm_release_by_cookie(uint64_t cookie);

/**
 * mobj_reg_shm_unguard() - unguards a reg_shm
 * @mobj:	pointer to a registered shared memory mobj
 *
 * A registered shared memory mobj is normally guarded against being
 * released with mobj_reg_shm_try_release_by_cookie(). After this function
 * has returned the mobj can be released by a call to
 * mobj_reg_shm_try_release_by_cookie() if the reference counter allows it.
 */
void mobj_reg_shm_unguard(struct mobj *mobj);

/*
 * mapped_shm represents registered shared buffer
 * which is mapped into OPTEE va space
 */
struct mobj *mobj_mapped_shm_alloc(paddr_t *pages, size_t num_pages,
				   paddr_t page_offset, uint64_t cookie);
#endif /*CFG_CORE_DYN_SHM*/
290 
#if !defined(CFG_CORE_DYN_SHM)
/*
 * Stubs used when dynamic shared memory isn't configured: both lookups
 * and allocations of registered shared memory simply fail with NULL.
 */
static inline struct mobj *mobj_mapped_shm_alloc(paddr_t *pages __unused,
						 size_t num_pages __unused,
						 paddr_t page_offset __unused,
						 uint64_t cookie __unused)
{
	return NULL;
}

static inline struct mobj *mobj_reg_shm_get_by_cookie(uint64_t cookie __unused)
{
	return NULL;
}
#endif
305 
/* Allocates a mobj covering static (non-registered) shared memory at @pa */
struct mobj *mobj_shm_alloc(paddr_t pa, size_t size, uint64_t cookie);
307 
#ifdef CFG_PAGED_USER_TA
/* mobj_is_paged() - true if @mobj is backed by paged memory */
bool mobj_is_paged(struct mobj *mobj);
#else
/* Without paged user TA support no mobj is ever paged */
static inline bool mobj_is_paged(struct mobj *mobj __unused)
{
	return false;
}
#endif
316 
/*
 * mobj_with_fobj_alloc() - allocate a mobj backed by @fobj, optionally
 * associated with @file, mapped with memory type @mem_type
 */
struct mobj *mobj_with_fobj_alloc(struct fobj *fobj, struct file *file,
				  uint32_t mem_type);
319 
320 #endif /*__MM_MOBJ_H*/
321