xref: /optee_os/core/kernel/user_access.c (revision 4edd96e6d7a7228e907cf498b23e5b5fbdaf39a0)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2015-2020, 2022 Linaro Limited
 */

#include <assert.h>
#include <initcall.h>
#include <kernel/linker.h>
#include <kernel/user_access.h>
#include <kernel/user_mode_ctx.h>
#include <memtag.h>
#include <mm/vm.h>
#include <string.h>
#include <tee_api_types.h>
#include <types_ext.h>

/* Alignment and granularity of bounce buffer ("bb") allocations, see bb_alloc() */
#define BB_ALIGNMENT	(sizeof(long) * 2)

static struct user_mode_ctx *get_current_uctx(void)
{
	struct ts_session *s = ts_get_current_session();

	if (!is_user_mode_ctx(s->ctx)) {
		/*
		 * We may be called within a PTA session, which doesn't
		 * have a user_mode_ctx. Here, try to retrieve the
		 * user_mode_ctx associated with the calling session.
		 */
		s = TAILQ_NEXT(s, link_tsd);
		if (!s || !is_user_mode_ctx(s->ctx))
			return NULL;
	}

	return to_user_mode_ctx(s->ctx);
}
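
/*
 * Illustrative scenario (a sketch, not code in this file): if a TA
 * invokes a PTA and the PTA calls copy_from_user(), the current session
 * is the PTA's, so we step one entry down the session stack to reach
 * the TA's user_mode_ctx:
 *
 *	ts_get_current_session() -> PTA session (no user_mode_ctx)
 *	TAILQ_NEXT(s, link_tsd)  -> TA session  (user_mode_ctx)
 */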

TEE_Result check_user_access(uint32_t flags, const void *uaddr, size_t len)
{
	struct user_mode_ctx *uctx = get_current_uctx();

	if (!uctx)
		return TEE_ERROR_GENERIC;

	return vm_check_access_rights(uctx, flags, (vaddr_t)uaddr, len);
}
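
/*
 * Usage sketch (hypothetical caller, not part of this file): validate a
 * user-supplied range before walking it:
 *
 *	res = check_user_access(TEE_MEMORY_ACCESS_READ |
 *				TEE_MEMORY_ACCESS_ANY_OWNER, uaddr, len);
 *	if (res)
 *		return res;
 */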

TEE_Result copy_from_user(void *kaddr, const void *uaddr, size_t len)
{
	uint32_t flags = TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER;
	TEE_Result res = TEE_SUCCESS;

	uaddr = memtag_strip_tag_const(uaddr);
	res = check_user_access(flags, uaddr, len);
	if (!res) {
		enter_user_access();
		memcpy(kaddr, uaddr, len);
		exit_user_access();
	}

	return res;
}
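
/*
 * Usage sketch (hypothetical): a syscall handler pulling a small value
 * out of user memory into a kernel-stack copy:
 *
 *	uint32_t val = 0;
 *
 *	res = copy_from_user(&val, (const void *)usr_ptr, sizeof(val));
 *	if (res)
 *		return res;
 */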

TEE_Result copy_to_user(void *uaddr, const void *kaddr, size_t len)
{
	uint32_t flags = TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER;
	TEE_Result res = TEE_SUCCESS;

	uaddr = memtag_strip_tag(uaddr);
	res = check_user_access(flags, uaddr, len);
	if (!res) {
		enter_user_access();
		memcpy(uaddr, kaddr, len);
		exit_user_access();
	}

	return res;
}

/*
 * The *_private() variants below omit TEE_MEMORY_ACCESS_ANY_OWNER, so
 * they are meant for memory private to the calling TA, not for buffers
 * shared with the TA's client.
 */
TEE_Result copy_from_user_private(void *kaddr, const void *uaddr, size_t len)
{
	uint32_t flags = TEE_MEMORY_ACCESS_READ;
	TEE_Result res = TEE_SUCCESS;

	uaddr = memtag_strip_tag_const(uaddr);
	res = check_user_access(flags, uaddr, len);
	if (!res) {
		enter_user_access();
		memcpy(kaddr, uaddr, len);
		exit_user_access();
	}

	return res;
}

TEE_Result copy_to_user_private(void *uaddr, const void *kaddr, size_t len)
{
	uint32_t flags = TEE_MEMORY_ACCESS_WRITE;
	TEE_Result res = TEE_SUCCESS;

	uaddr = memtag_strip_tag(uaddr);
	res = check_user_access(flags, uaddr, len);
	if (!res) {
		enter_user_access();
		memcpy(uaddr, kaddr, len);
		exit_user_access();
	}

	return res;
}

static void *maybe_tag_bb(void *buf, size_t sz)
{
	static_assert(MEMTAG_GRANULE_SIZE <= BB_ALIGNMENT);

	if (!MEMTAG_IS_ENABLED)
		return buf;

	assert(!((vaddr_t)buf % MEMTAG_GRANULE_SIZE));
	return memtag_set_random_tags(buf, ROUNDUP(sz, MEMTAG_GRANULE_SIZE));
}

static void maybe_untag_bb(void *buf, size_t sz)
{
	if (MEMTAG_IS_ENABLED) {
		assert(!((vaddr_t)buf % MEMTAG_GRANULE_SIZE));
		memtag_set_tags(buf, ROUNDUP(sz, MEMTAG_GRANULE_SIZE), 0);
	}
}
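
/*
 * Worked example (assuming a 16-byte MEMTAG_GRANULE_SIZE, as with Arm
 * MTE): a 24-byte allocation is tagged over ROUNDUP(24, 16) = 32 bytes,
 * that is, two granules. Since BB_ALIGNMENT is a multiple of the
 * granule size, the next allocation starts on a fresh granule, so two
 * allocations never share a tag granule.
 */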

/*
 * Allocate from the per-context bounce buffer (bbuf), a scratch area
 * for temporary kernel copies of user-supplied data. Allocations are
 * carved off linearly and released with bb_free() or bb_reset().
 */
void *bb_alloc(size_t len)
{
	struct user_mode_ctx *uctx = get_current_uctx();
	size_t offs = 0;
	void *bb = NULL;

	if (uctx && !ADD_OVERFLOW(uctx->bbuf_offs, len, &offs) &&
	    offs <= uctx->bbuf_size) {
		bb = maybe_tag_bb(uctx->bbuf + uctx->bbuf_offs, len);
		uctx->bbuf_offs = ROUNDUP(offs, BB_ALIGNMENT);
	}
	return bb;
}
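
/*
 * Usage sketch (hypothetical): stage a user buffer through the bounce
 * buffer before parsing it in kernel context:
 *
 *	void *tmp = bb_alloc(len);
 *
 *	if (!tmp)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *	res = copy_from_user(tmp, uaddr, len);
 *	...
 *	bb_free(tmp, len);
 */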

static void bb_free_helper(struct user_mode_ctx *uctx, vaddr_t bb, size_t len)
{
	vaddr_t bbuf = (vaddr_t)uctx->bbuf;

	if (bb >= bbuf && IS_ALIGNED(bb, BB_ALIGNMENT)) {
		size_t prev_offs = bb - bbuf;

		/*
		 * Even if we can't update the offset we can still
		 * invalidate the memory allocation.
		 */
		maybe_untag_bb((void *)bb, len);

		if (prev_offs + ROUNDUP(len, BB_ALIGNMENT) == uctx->bbuf_offs)
			uctx->bbuf_offs = prev_offs;
	}
}

void bb_free(void *bb, size_t len)
{
	struct user_mode_ctx *uctx = get_current_uctx();

	if (uctx)
		bb_free_helper(uctx, memtag_strip_tag_vaddr(bb), len);
}
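
/*
 * Note on the logic above (an observation, not new behaviour): the
 * offset only rewinds when the freed block is the most recent
 * allocation, so bb_free() reclaims space in LIFO order:
 *
 *	void *a = bb_alloc(32);
 *	void *b = bb_alloc(32);
 *
 *	bb_free(b, 32);		// rewinds bbuf_offs to b's start
 *	bb_free(a, 32);		// rewinds again, buffer fully reclaimed
 */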

void bb_reset(void)
{
	struct user_mode_ctx *uctx = get_current_uctx();

	if (uctx) {
		/*
		 * Only the part up to the offset has been allocated, so
		 * there's no need to clear tags beyond that.
		 */
		maybe_untag_bb(uctx->bbuf, uctx->bbuf_offs);

		uctx->bbuf_offs = 0;
	}
}

TEE_Result clear_user(void *uaddr, size_t n)
{
	uint32_t flags = TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER;
	TEE_Result res = TEE_SUCCESS;

	uaddr = memtag_strip_tag(uaddr);
	res = check_user_access(flags, uaddr, n);
	if (res)
		return res;

	enter_user_access();
	memset(uaddr, 0, n);
	exit_user_access();

	return TEE_SUCCESS;
}
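
/*
 * Usage sketch (hypothetical, the policy is the caller's): wipe a
 * user-visible output buffer on an error path so no stale data is left
 * behind:
 *
 *	if (res)
 *		return clear_user(out_uaddr, out_len);
 */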

/*
 * Returns the length of the user string, not counting the terminating
 * zero, or len if no terminator was found within len bytes. Returns 0
 * if the access check fails.
 */
size_t strnlen_user(const void *uaddr, size_t len)
{
	uint32_t flags = TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER;
	TEE_Result res = TEE_SUCCESS;
	size_t n = 0;

	uaddr = memtag_strip_tag_const(uaddr);
	res = check_user_access(flags, uaddr, len);
	if (!res) {
		enter_user_access();
		n = strnlen(uaddr, len);
		exit_user_access();
	}

	return n;
}

TEE_Result bb_memdup_user(const void *src, size_t len, void **p)
{
	TEE_Result res = TEE_SUCCESS;
	void *buf = NULL;

	buf = bb_alloc(len);
	if (!buf)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = copy_from_user(buf, src, len);
	if (res)
		bb_free(buf, len);
	else
		*p = buf;

	return res;
}

TEE_Result bb_memdup_user_private(const void *src, size_t len, void **p)
{
	TEE_Result res = TEE_SUCCESS;
	void *buf = NULL;

	buf = bb_alloc(len);
	if (!buf)
		return TEE_ERROR_OUT_OF_MEMORY;

	res = copy_from_user_private(buf, src, len);
	if (res)
		bb_free(buf, len);
	else
		*p = buf;

	return res;
}
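
/*
 * Usage sketch (hypothetical): duplicate a user-supplied parameter
 * block into the bounce buffer in one step:
 *
 *	void *params = NULL;
 *
 *	res = bb_memdup_user(usr_params, params_len, &params);
 *	if (res)
 *		return res;
 */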

TEE_Result bb_strndup_user(const char *src, size_t maxlen, char **dst,
			   size_t *dstlen)
{
	uint32_t flags = TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER;
	TEE_Result res = TEE_SUCCESS;
	size_t l = 0;
	char *d = NULL;

	src = memtag_strip_tag_const(src);
	res = check_user_access(flags, src, maxlen);
	if (res)
		return res;

	enter_user_access();
	l = strnlen(src, maxlen);
	exit_user_access();

	d = bb_alloc(l + 1);
	if (!d)
		return TEE_ERROR_OUT_OF_MEMORY;

	enter_user_access();
	memcpy(d, src, l);
	exit_user_access();

	d[l] = 0;

	*dst = d;
	*dstlen = l;
	return TEE_SUCCESS;
}
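
/*
 * Usage sketch (hypothetical names, MAX_NAME_LEN is an assumed caller
 * bound): duplicate a NUL-terminated string from user space:
 *
 *	char *name = NULL;
 *	size_t name_len = 0;
 *
 *	res = bb_strndup_user(usr_name, MAX_NAME_LEN, &name, &name_len);
 *	if (res)
 *		return res;
 */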

TEE_Result copy_kaddr_to_uref(uint32_t *uref, void *kaddr)
{
	uint32_t ref = kaddr_to_uref(kaddr);

	return copy_to_user_private(uref, &ref, sizeof(ref));
}

uint32_t kaddr_to_uref(void *kaddr)
{
	if (MEMTAG_IS_ENABLED) {
		unsigned int uref_tag_shift = 32 - MEMTAG_TAG_WIDTH;
		vaddr_t uref = memtag_strip_tag_vaddr(kaddr);

		uref -= VCORE_START_VA;
		assert(uref < (UINT32_MAX >> MEMTAG_TAG_WIDTH));
		uref |= memtag_get_tag(kaddr) << uref_tag_shift;
		return uref;
	}

	assert(((vaddr_t)kaddr - VCORE_START_VA) < UINT32_MAX);
	return (vaddr_t)kaddr - VCORE_START_VA;
}

vaddr_t uref_to_vaddr(uint32_t uref)
{
	if (MEMTAG_IS_ENABLED) {
		vaddr_t u = uref & (UINT32_MAX >> MEMTAG_TAG_WIDTH);
		unsigned int uref_tag_shift = 32 - MEMTAG_TAG_WIDTH;
		uint8_t tag = uref >> uref_tag_shift;

		return memtag_insert_tag_vaddr(VCORE_START_VA + u, tag);
	}

	return VCORE_START_VA + uref;
}
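
/*
 * Worked example (assuming MEMTAG_TAG_WIDTH == 4, as with Arm MTE): a
 * uref packs the offset from VCORE_START_VA into bits [27:0] and the
 * memory tag into bits [31:28]. For kaddr = VCORE_START_VA + 0x1000
 * carrying tag 0x5, kaddr_to_uref() returns 0x50001000, and
 * uref_to_vaddr(0x50001000) reconstructs the tagged address, so the two
 * functions are inverses for in-range core addresses.
 */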
322