xref: /optee_os/core/kernel/user_access.c (revision 9c99bb1d8d879682f8bf2ca6f27a4a910cad20f5)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * Copyright (c) 2015-2020, 2022 Linaro Limited
5  */
6 
7 #include <initcall.h>
8 #include <kernel/linker.h>
9 #include <kernel/user_access.h>
10 #include <kernel/user_mode_ctx.h>
11 #include <memtag.h>
12 #include <mm/vm.h>
13 #include <string.h>
14 #include <tee_api_types.h>
15 #include <types_ext.h>
16 
17 #define BB_ALIGNMENT	(sizeof(long) * 2)
18 
19 static struct user_mode_ctx *get_current_uctx(void)
20 {
21 	struct ts_session *s = ts_get_current_session();
22 
23 	if (!is_user_mode_ctx(s->ctx)) {
24 		/*
25 		 * We may be called within a PTA session, which doesn't
26 		 * have a user_mode_ctx. Here, try to retrieve the
27 		 * user_mode_ctx associated with the calling session.
28 		 */
29 		s = TAILQ_NEXT(s, link_tsd);
30 		if (!s || !is_user_mode_ctx(s->ctx))
31 			return NULL;
32 	}
33 
34 	return to_user_mode_ctx(s->ctx);
35 }
36 
37 static TEE_Result check_access(uint32_t flags, const void *uaddr, size_t len)
38 {
39 	struct user_mode_ctx *uctx = get_current_uctx();
40 
41 	if (!uctx)
42 		return TEE_ERROR_GENERIC;
43 
44 	return vm_check_access_rights(uctx, flags, (vaddr_t)uaddr, len);
45 }
46 
47 TEE_Result copy_from_user(void *kaddr, const void *uaddr, size_t len)
48 {
49 	uint32_t flags = TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER;
50 	TEE_Result res = TEE_SUCCESS;
51 
52 	uaddr = memtag_strip_tag_const(uaddr);
53 	res = check_access(flags, uaddr, len);
54 	if (!res)
55 		memcpy(kaddr, uaddr, len);
56 
57 	return res;
58 }
59 
60 TEE_Result copy_to_user(void *uaddr, const void *kaddr, size_t len)
61 {
62 	uint32_t flags = TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER;
63 	TEE_Result res = TEE_SUCCESS;
64 
65 	uaddr = memtag_strip_tag(uaddr);
66 	res = check_access(flags, uaddr, len);
67 	if (!res)
68 		memcpy(uaddr, kaddr, len);
69 
70 	return res;
71 }
72 
73 TEE_Result copy_from_user_private(void *kaddr, const void *uaddr, size_t len)
74 {
75 	uint32_t flags = TEE_MEMORY_ACCESS_READ;
76 	TEE_Result res = TEE_SUCCESS;
77 
78 	uaddr = memtag_strip_tag_const(uaddr);
79 	res = check_access(flags, uaddr, len);
80 	if (!res)
81 		memcpy(kaddr, uaddr, len);
82 
83 	return res;
84 }
85 
86 TEE_Result copy_to_user_private(void *uaddr, const void *kaddr, size_t len)
87 {
88 	uint32_t flags = TEE_MEMORY_ACCESS_WRITE;
89 	TEE_Result res = TEE_SUCCESS;
90 
91 	uaddr = memtag_strip_tag(uaddr);
92 	res = check_access(flags, uaddr, len);
93 	if (!res)
94 		memcpy(uaddr, kaddr, len);
95 
96 	return res;
97 }
98 
99 void *bb_alloc(size_t len)
100 {
101 	struct user_mode_ctx *uctx = get_current_uctx();
102 	size_t offs = 0;
103 	void *bb = NULL;
104 
105 	if (uctx && !ADD_OVERFLOW(uctx->bbuf_offs, len, &offs) &&
106 	    offs <= uctx->bbuf_size) {
107 		bb = uctx->bbuf + uctx->bbuf_offs;
108 		uctx->bbuf_offs = ROUNDUP(offs, BB_ALIGNMENT);
109 	}
110 	return bb;
111 }
112 
113 static void bb_free_helper(struct user_mode_ctx *uctx, vaddr_t bb, size_t len)
114 {
115 	vaddr_t bbuf = (vaddr_t)uctx->bbuf;
116 
117 	if (bb >= bbuf && IS_ALIGNED(bb, BB_ALIGNMENT)) {
118 		size_t prev_offs = bb - bbuf;
119 
120 		if (prev_offs + ROUNDUP(len, BB_ALIGNMENT) == uctx->bbuf_offs)
121 			uctx->bbuf_offs = prev_offs;
122 	}
123 }
124 
125 void bb_free(void *bb, size_t len)
126 {
127 	struct user_mode_ctx *uctx = get_current_uctx();
128 
129 	if (uctx)
130 		bb_free_helper(uctx, (vaddr_t)bb, len);
131 }
132 
133 void bb_reset(void)
134 {
135 	struct user_mode_ctx *uctx = get_current_uctx();
136 
137 	if (uctx)
138 		uctx->bbuf_offs = 0;
139 }
140 
141 TEE_Result clear_user(void *uaddr, size_t n)
142 {
143 	uint32_t flags = TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER;
144 	TEE_Result res = TEE_SUCCESS;
145 
146 	uaddr = memtag_strip_tag(uaddr);
147 	res = check_access(flags, uaddr, n);
148 	if (res)
149 		return res;
150 
151 	memset(uaddr, 0, n);
152 
153 	return TEE_SUCCESS;
154 }
155 
156 size_t strnlen_user(const void *uaddr, size_t len)
157 {
158 	uint32_t flags = TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER;
159 	TEE_Result res = TEE_SUCCESS;
160 	size_t n = 0;
161 
162 	uaddr = memtag_strip_tag_const(uaddr);
163 	res = check_access(flags, uaddr, len);
164 	if (!res)
165 		n = strnlen(uaddr, len);
166 
167 	return n;
168 }
169 
170 TEE_Result bb_memdup_user(const void *src, size_t len, void **p)
171 {
172 	TEE_Result res = TEE_SUCCESS;
173 	void *buf = NULL;
174 
175 	buf = bb_alloc(len);
176 	if (!buf)
177 		return TEE_ERROR_OUT_OF_MEMORY;
178 
179 	res = copy_from_user(buf, src, len);
180 	if (res)
181 		bb_free(buf, len);
182 	else
183 		*p = buf;
184 
185 	return res;
186 }
187 
188 TEE_Result bb_memdup_user_private(const void *src, size_t len, void **p)
189 {
190 	TEE_Result res = TEE_SUCCESS;
191 	void *buf = NULL;
192 
193 	buf = bb_alloc(len);
194 	if (!buf)
195 		return TEE_ERROR_OUT_OF_MEMORY;
196 
197 	res = copy_from_user_private(buf, src, len);
198 	if (res)
199 		bb_free(buf, len);
200 	else
201 		*p = buf;
202 
203 	return res;
204 }
205 
206 TEE_Result bb_strndup_user(const char *src, size_t maxlen, char **dst,
207 			   size_t *dstlen)
208 {
209 	uint32_t flags = TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER;
210 	TEE_Result res = TEE_SUCCESS;
211 	size_t l = 0;
212 	char *d = NULL;
213 
214 	src = memtag_strip_tag_const(src);
215 	res = check_access(flags, src, maxlen);
216 	if (res)
217 		return res;
218 	l = strnlen(src, maxlen);
219 	d = bb_alloc(l + 1);
220 	if (!d)
221 		return TEE_ERROR_OUT_OF_MEMORY;
222 	memcpy(d, src, l);
223 	d[l] = 0;
224 
225 	*dst = d;
226 	*dstlen = l;
227 	return TEE_SUCCESS;
228 }
229 
230 TEE_Result copy_kaddr_to_uref(uint32_t *uref, void *kaddr)
231 {
232 	uint32_t ref = kaddr_to_uref(kaddr);
233 
234 	return copy_to_user_private(uref, &ref, sizeof(ref));
235 }
236 
/*
 * Convert a kernel virtual address into a compact 32-bit "user
 * reference": the offset of @kaddr from VCORE_START_VA. With memory
 * tagging enabled, the address tag is preserved in the top
 * MEMTAG_TAG_WIDTH bits of the reference so uref_to_vaddr() can
 * reconstruct the tagged address. Asserts that the offset fits in the
 * available bits.
 */
uint32_t kaddr_to_uref(void *kaddr)
{
	if (MEMTAG_IS_ENABLED) {
		/* Tag goes in the uppermost bits of the 32-bit reference */
		unsigned int uref_tag_shift = 32 - MEMTAG_TAG_WIDTH;
		/* Strip the tag so the offset arithmetic below is clean */
		vaddr_t uref = memtag_strip_tag_vaddr(kaddr);

		uref -= VCORE_START_VA;
		/* Offset must leave room for the tag bits */
		assert(uref < (UINT32_MAX >> MEMTAG_TAG_WIDTH));
		uref |= memtag_get_tag(kaddr) << uref_tag_shift;
		return uref;
	}

	assert(((vaddr_t)kaddr - VCORE_START_VA) < UINT32_MAX);
	return (vaddr_t)kaddr - VCORE_START_VA;
}
252 
/*
 * Inverse of kaddr_to_uref(): rebuild a kernel virtual address from a
 * 32-bit user reference. With memory tagging enabled, the tag stored
 * in the top MEMTAG_TAG_WIDTH bits is re-inserted into the resulting
 * address.
 */
vaddr_t uref_to_vaddr(uint32_t uref)
{
	if (MEMTAG_IS_ENABLED) {
		/* Low bits hold the offset from VCORE_START_VA */
		vaddr_t u = uref & (UINT32_MAX >> MEMTAG_TAG_WIDTH);
		unsigned int uref_tag_shift = 32 - MEMTAG_TAG_WIDTH;
		uint8_t tag = uref >> uref_tag_shift;

		return memtag_insert_tag_vaddr(VCORE_START_VA + u, tag);
	}

	return VCORE_START_VA + uref;
}
265