// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2015-2020, 2022 Linaro Limited
 */

#include <assert.h>
#include <initcall.h>
#include <kernel/linker.h>
#include <kernel/user_access.h>
#include <kernel/user_mode_ctx.h>
#include <memtag.h>
#include <mm/vm.h>
#include <string.h>
#include <tee_api_types.h>
#include <types_ext.h>

#define BB_ALIGNMENT	(sizeof(long) * 2)

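/*
 * Return the user_mode_ctx of the session being served, or NULL. If the
 * current session is a PTA session (which has no user_mode_ctx of its
 * own), fall back to the next session on the thread's session stack,
 * that is, the user mode session that invoked the PTA.
 */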
static struct user_mode_ctx *get_current_uctx(void)
{
	struct ts_session *s = ts_get_current_session();

	if (!is_user_mode_ctx(s->ctx)) {
		/*
		 * We may be called within a PTA session, which doesn't
		 * have a user_mode_ctx. Here, try to retrieve the
		 * user_mode_ctx associated with the calling session.
		 */
		s = TAILQ_NEXT(s, link_tsd);
		if (!s || !is_user_mode_ctx(s->ctx))
			return NULL;
	}

	return to_user_mode_ctx(s->ctx);
}

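/*
 * Check that user space may access [uaddr, uaddr + len) with the access
 * rights given in @flags (TEE_MEMORY_ACCESS_* bits). Fails with
 * TEE_ERROR_GENERIC if no user mode context can be resolved.
 */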
TEE_Result check_user_access(uint32_t flags, const void *uaddr, size_t len)
{
	struct user_mode_ctx *uctx = get_current_uctx();

	if (!uctx)
		return TEE_ERROR_GENERIC;

	return vm_check_access_rights(uctx, flags, (vaddr_t)uaddr, len);
}

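/*
 * Copy @len bytes from user space @uaddr into kernel memory @kaddr. The
 * source may be memory shared with another owner, hence
 * TEE_MEMORY_ACCESS_ANY_OWNER. The user access window is only kept open
 * for the duration of the memcpy().
 */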
TEE_Result copy_from_user(void *kaddr, const void *uaddr, size_t len)
{
	uint32_t flags = TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER;
	TEE_Result res = TEE_SUCCESS;

	uaddr = memtag_strip_tag_const(uaddr);
	res = check_user_access(flags, uaddr, len);
	if (!res && kaddr && uaddr) {
		enter_user_access();
		memcpy(kaddr, uaddr, len);
		exit_user_access();
	}

	return res;
}

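/*
 * Copy @len bytes from kernel memory @kaddr out to user space @uaddr,
 * memory shared with another owner included. Any memory tag bits in
 * @uaddr are stripped before the access check.
 */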
TEE_Result copy_to_user(void *uaddr, const void *kaddr, size_t len)
{
	uint32_t flags = TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER;
	TEE_Result res = TEE_SUCCESS;

	uaddr = memtag_strip_tag(uaddr);
	res = check_user_access(flags, uaddr, len);
	if (!res && kaddr && uaddr) {
		enter_user_access();
		memcpy(uaddr, kaddr, len);
		exit_user_access();
	}

	return res;
}

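/*
 * Like copy_from_user(), but without TEE_MEMORY_ACCESS_ANY_OWNER: the
 * source must be private to the user mode context, not shared memory.
 */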
TEE_Result copy_from_user_private(void *kaddr, const void *uaddr, size_t len)
{
	uint32_t flags = TEE_MEMORY_ACCESS_READ;
	TEE_Result res = TEE_SUCCESS;

	uaddr = memtag_strip_tag_const(uaddr);
	res = check_user_access(flags, uaddr, len);
	if (!res && kaddr && uaddr) {
		enter_user_access();
		memcpy(kaddr, uaddr, len);
		exit_user_access();
	}

	return res;
}

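/*
 * Like copy_to_user(), but the destination must be private to the user
 * mode context, not shared memory.
 */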
TEE_Result copy_to_user_private(void *uaddr, const void *kaddr, size_t len)
{
	uint32_t flags = TEE_MEMORY_ACCESS_WRITE;
	TEE_Result res = TEE_SUCCESS;

	uaddr = memtag_strip_tag(uaddr);
	res = check_user_access(flags, uaddr, len);
	if (!res && kaddr && uaddr) {
		enter_user_access();
		memcpy(uaddr, kaddr, len);
		exit_user_access();
	}

	return res;
}

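/*
 * When memory tagging is enabled, give a bounce buffer allocation fresh
 * random tags so stale pointers to it fault, and clear the tags again on
 * free. Both helpers require granule-aligned buffers and round the size
 * up to a whole number of tag granules.
 */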
static void *maybe_tag_bb(void *buf, size_t sz)
{
	static_assert(MEMTAG_GRANULE_SIZE <= BB_ALIGNMENT);

	if (!MEMTAG_IS_ENABLED)
		return buf;

	assert(!((vaddr_t)buf % MEMTAG_GRANULE_SIZE));
	return memtag_set_random_tags(buf, ROUNDUP(sz, MEMTAG_GRANULE_SIZE));
}

static void maybe_untag_bb(void *buf, size_t sz)
{
	if (MEMTAG_IS_ENABLED) {
		assert(!((vaddr_t)buf % MEMTAG_GRANULE_SIZE));
		memtag_set_tags(buf, ROUNDUP(sz, MEMTAG_GRANULE_SIZE), 0);
	}
}

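/*
 * Allocate @len bytes from the per-context bounce buffer (bbuf), a
 * simple bump allocator: the current offset is advanced and rounded up
 * to BB_ALIGNMENT. Returns NULL if there is no user mode context or not
 * enough room left. The whole buffer is reclaimed in one go with
 * bb_reset().
 */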
void *bb_alloc(size_t len)
{
	struct user_mode_ctx *uctx = get_current_uctx();
	size_t offs = 0;
	void *bb = NULL;

	if (uctx && !ADD_OVERFLOW(uctx->bbuf_offs, len, &offs) &&
	    offs <= uctx->bbuf_size) {
		bb = maybe_tag_bb(uctx->bbuf + uctx->bbuf_offs, len);
		uctx->bbuf_offs = ROUNDUP(offs, BB_ALIGNMENT);
	}

	return bb;
}

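/*
 * Free a bounce buffer allocation. Since this is a bump allocator the
 * offset can only be rewound when the freed block is the most recent
 * allocation; in any case the tags are cleared so a use-after-free is
 * caught when MEMTAG is enabled.
 */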
static void bb_free_helper(struct user_mode_ctx *uctx, vaddr_t bb, size_t len)
{
	vaddr_t bbuf = (vaddr_t)uctx->bbuf;

	if (bb >= bbuf && IS_ALIGNED(bb, BB_ALIGNMENT)) {
		size_t prev_offs = bb - bbuf;

		/*
		 * Even if we can't update the offset we can still
		 * invalidate the memory allocation.
		 */
		maybe_untag_bb((void *)bb, len);

		if (prev_offs + ROUNDUP(len, BB_ALIGNMENT) == uctx->bbuf_offs)
			uctx->bbuf_offs = prev_offs;
	}
}

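/* Free a bb_alloc()ed block, stripping any memory tag from the pointer. */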
void bb_free(void *bb, size_t len)
{
	struct user_mode_ctx *uctx = get_current_uctx();

	if (uctx)
		bb_free_helper(uctx, memtag_strip_tag_vaddr(bb), len);
}

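/* As bb_free(), but wipe the contents first (for sensitive data). */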
void bb_free_wipe(void *bb, size_t len)
{
	if (bb)
		memset(bb, 0, len);
	bb_free(bb, len);
}

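/*
 * Release every bounce buffer allocation at once by rewinding the
 * allocation offset to zero.
 */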
void bb_reset(void)
{
	struct user_mode_ctx *uctx = get_current_uctx();

	if (uctx) {
		/*
		 * Only the part up to the offset has been allocated, so
		 * there is no need to clear tags beyond that.
		 */
		maybe_untag_bb(uctx->bbuf, uctx->bbuf_offs);

		uctx->bbuf_offs = 0;
	}
}

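/* Zero @n bytes of user space memory at @uaddr, shared memory included. */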
TEE_Result clear_user(void *uaddr, size_t n)
{
	uint32_t flags = TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER;
	TEE_Result res = TEE_SUCCESS;

	uaddr = memtag_strip_tag(uaddr);
	res = check_user_access(flags, uaddr, n);
	if (res)
		return res;

	enter_user_access();
	memset(uaddr, 0, n);
	exit_user_access();

	return TEE_SUCCESS;
}

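/*
 * strnlen() on a user space string: returns the length of the string at
 * @uaddr, or @len if no terminating NUL is found within @len bytes.
 * Returns 0 if @len is zero or the memory is not readable.
 */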
size_t strnlen_user(const void *uaddr, size_t len)
{
	uint32_t flags = TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER;
	TEE_Result res = TEE_SUCCESS;
	size_t n = 0;

	if (!len)
		return 0;

	uaddr = memtag_strip_tag_const(uaddr);
	res = check_user_access(flags, uaddr, len);
	if (!res) {
		enter_user_access();
		n = strnlen(uaddr, len);
		exit_user_access();
	}

	return n;
}

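/*
 * Duplicate @len bytes of user memory into a fresh bounce buffer using
 * @copy_func (copy_from_user() or copy_from_user_private()) and return
 * it in *@p. The buffer is released again if the copy fails; len == 0
 * succeeds with a valid (empty) allocation.
 *
 * A minimal usage sketch, with a hypothetical syscall handler and a
 * hypothetical frob() helper, both invented for illustration:
 *
 *	TEE_Result syscall_frob(const void *ubuf, size_t ulen)
 *	{
 *		TEE_Result res = TEE_SUCCESS;
 *		void *kbuf = NULL;
 *
 *		res = bb_memdup_user(ubuf, ulen, &kbuf);
 *		if (res)
 *			return res;
 *		res = frob(kbuf, ulen);	// operate on the kernel copy
 *		bb_free(kbuf, ulen);
 *		return res;
 *	}
 */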
static TEE_Result __bb_memdup_user(TEE_Result (*copy_func)(void *uaddr,
							    const void *kaddr,
							    size_t len),
				   const void *src, size_t len, void **p)
{
	TEE_Result res = TEE_SUCCESS;
	void *buf = NULL;

	buf = bb_alloc(len);
	if (!buf)
		return TEE_ERROR_OUT_OF_MEMORY;

	if (len)
		res = copy_func(buf, src, len);

	if (res)
		bb_free(buf, len);
	else
		*p = buf;

	return res;
}

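/* Duplicate user memory (shared with any owner) into a bounce buffer. */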
TEE_Result bb_memdup_user(const void *src, size_t len, void **p)
{
	return __bb_memdup_user(copy_from_user, src, len, p);
}

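/* As bb_memdup_user(), but the source must be private to the context. */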
TEE_Result bb_memdup_user_private(const void *src, size_t len, void **p)
{
	return __bb_memdup_user(copy_from_user_private, src, len, p);
}

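/*
 * Duplicate a NUL-terminated user space string into a bounce buffer,
 * reading at most @maxlen bytes. On success *@dst is the NUL-terminated
 * copy and *@dstlen its length, excluding the NUL.
 */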
TEE_Result bb_strndup_user(const char *src, size_t maxlen, char **dst,
			   size_t *dstlen)
{
	uint32_t flags = TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER;
	TEE_Result res = TEE_SUCCESS;
	size_t l = 0;
	char *d = NULL;

	src = memtag_strip_tag_const(src);
	if (maxlen) {
		res = check_user_access(flags, src, maxlen);
		if (res)
			return res;

		enter_user_access();
		l = strnlen(src, maxlen);
		exit_user_access();
	}

	d = bb_alloc(l + 1);
	if (!d)
		return TEE_ERROR_OUT_OF_MEMORY;

	if (l && src) {
		enter_user_access();
		memcpy(d, src, l);
		exit_user_access();
	}

	d[l] = 0;

	*dst = d;
	*dstlen = l;

	return TEE_SUCCESS;
}

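/*
 * Convert a kernel address into its 32-bit user reference and write it
 * to the user space location @uref, which must be private memory.
 */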
TEE_Result copy_kaddr_to_uref(uint32_t *uref, void *kaddr)
{
	uint32_t ref = kaddr_to_uref(kaddr);

	return copy_to_user_private(uref, &ref, sizeof(ref));
}

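/*
 * Encode a kernel address as a 32-bit user reference: the offset from
 * VCORE_START_VA, with the pointer's memory tag (when MEMTAG is
 * enabled) packed into the top MEMTAG_TAG_WIDTH bits so the tag
 * survives the round trip through user space.
 */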
uint32_t kaddr_to_uref(void *kaddr)
{
	if (MEMTAG_IS_ENABLED) {
		unsigned int uref_tag_shift = 32 - MEMTAG_TAG_WIDTH;
		vaddr_t uref = memtag_strip_tag_vaddr(kaddr);

		uref -= VCORE_START_VA;
		assert(uref < (UINT32_MAX >> MEMTAG_TAG_WIDTH));
		uref |= (vaddr_t)memtag_get_tag(kaddr) << uref_tag_shift;
		return uref;
	}

	assert(((vaddr_t)kaddr - VCORE_START_VA) < UINT32_MAX);
	return (vaddr_t)kaddr - VCORE_START_VA;
}

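/*
 * Inverse of kaddr_to_uref(): rebuild the (tagged) kernel virtual
 * address from a 32-bit user reference.
 */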
vaddr_t uref_to_vaddr(uint32_t uref)
{
	if (MEMTAG_IS_ENABLED) {
		vaddr_t u = uref & (UINT32_MAX >> MEMTAG_TAG_WIDTH);
		unsigned int uref_tag_shift = 32 - MEMTAG_TAG_WIDTH;
		uint8_t tag = uref >> uref_tag_shift;

		return memtag_insert_tag_vaddr(VCORE_START_VA + u, tag);
	}

	return VCORE_START_VA + uref;
}