// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2019-2021, Linaro Limited
 */

#include <assert.h>
#include <compiler.h>
#include <config.h>
#include <io.h>
#include <kernel/misc.h>
#include <kernel/msg_param.h>
#include <kernel/notif.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/virtualization.h>
#include <mm/core_mmu.h>
#include <optee_msg.h>
#include <optee_rpc_cmd.h>
#include <sm/optee_smc.h>
#include <sm/sm.h>
#include <string.h>
#include <tee/entry_fast.h>
#include <tee/entry_std.h>
#include <tee/tee_cryp_utl.h>

static bool thread_prealloc_rpc_cache;
static unsigned int thread_rpc_pnum;

static_assert(NOTIF_VALUE_DO_BOTTOM_HALF ==
	      OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF);

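/*
 * Entry for fast SMCs. Runs to completion with all exceptions masked,
 * optionally switching to the calling guest first when NS-virtualization
 * is enabled.
 */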
void thread_handle_fast_smc(struct thread_smc_args *args)
{
	thread_check_canaries();

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
	    virt_set_guest(args->a7) && args->a7 != HYP_CLNT_ID) {
		args->a0 = OPTEE_SMC_RETURN_ENOTAVAIL;
		goto out;
	}

	tee_entry_fast(args);

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

out:
	/* Fast handlers must not unmask any exceptions */
	assert(thread_get_exceptions() == THREAD_EXCP_ALL);
}

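/*
 * Entry for yielding (standard) SMCs: either resumes a thread suspended
 * in an RPC or allocates a new thread for the call. A return from this
 * function means the resume or allocation failed; successful calls exit
 * through thread_exit() or thread_rpc() instead.
 */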
uint32_t thread_handle_std_smc(uint32_t a0, uint32_t a1, uint32_t a2,
			       uint32_t a3, uint32_t a4, uint32_t a5,
			       uint32_t a6 __unused, uint32_t a7 __maybe_unused)
{
	uint32_t rv = OPTEE_SMC_RETURN_OK;

	thread_check_canaries();

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) && virt_set_guest(a7))
		return OPTEE_SMC_RETURN_ENOTAVAIL;

	/*
	 * thread_resume_from_rpc() and thread_alloc_and_run() only return
	 * on error. Successful return is done via thread_exit() or
	 * thread_rpc().
	 */
	if (a0 == OPTEE_SMC_CALL_RETURN_FROM_RPC) {
		thread_resume_from_rpc(a3, a1, a2, a4, a5);
		rv = OPTEE_SMC_RETURN_ERESUME;
	} else {
		thread_alloc_and_run(a0, a1, a2, a3, 0, 0);
		rv = OPTEE_SMC_RETURN_ETHREAD_LIMIT;
	}

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

	return rv;
}

/**
 * Free physical memory previously allocated with thread_rpc_alloc_arg()
 *
 * @cookie:	cookie received when allocating the buffer
 */
static void thread_rpc_free_arg(uint64_t cookie)
{
	if (cookie) {
		uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
			OPTEE_SMC_RETURN_RPC_FREE
		};

		reg_pair_from_64(cookie, rpc_args + 1, rpc_args + 2);
		thread_rpc(rpc_args);
	}
}

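/*
 * Map and validate the struct optee_msg_arg at @offset in @mobj: the
 * struct must be properly aligned, num_params may not exceed
 * OPTEE_MSG_MAX_NUM_PARAMS and the whole argument must fit inside the
 * mobj. If @rpc_arg is non-NULL the RPC argument struct expected right
 * after the message argument is mapped as well.
 */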
static uint32_t get_msg_arg(struct mobj *mobj, size_t offset,
			    size_t *num_params, struct optee_msg_arg **arg,
			    struct optee_msg_arg **rpc_arg)
{
	void *p = NULL;
	size_t sz = 0;

	if (!mobj)
		return OPTEE_SMC_RETURN_EBADADDR;

	p = mobj_get_va(mobj, offset, sizeof(struct optee_msg_arg));
	if (!p || !IS_ALIGNED_WITH_TYPE(p, struct optee_msg_arg))
		return OPTEE_SMC_RETURN_EBADADDR;

	*arg = p;
	*num_params = READ_ONCE((*arg)->num_params);
	if (*num_params > OPTEE_MSG_MAX_NUM_PARAMS)
		return OPTEE_SMC_RETURN_EBADADDR;

	sz = OPTEE_MSG_GET_ARG_SIZE(*num_params);
	if (!mobj_get_va(mobj, offset, sz))
		return OPTEE_SMC_RETURN_EBADADDR;

	if (rpc_arg) {
		size_t rpc_sz = 0;

		rpc_sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
		p = mobj_get_va(mobj, offset + sz, rpc_sz);
		if (!p)
			return OPTEE_SMC_RETURN_EBADADDR;
		*rpc_arg = p;
	}

	return OPTEE_SMC_RETURN_OK;
}

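/*
 * Hand the current thread's preallocated RPC argument buffer back to
 * the normal world and drop the reference to its mobj.
 */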
static void clear_prealloc_rpc_cache(struct thread_ctx *thr)
{
	thread_rpc_free_arg(mobj_get_cookie(thr->rpc_mobj));
	mobj_put(thr->rpc_mobj);
	thr->rpc_arg = NULL;
	thr->rpc_mobj = NULL;
}

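/*
 * Pass a validated message argument to tee_entry_std() with this
 * thread's RPC argument struct installed, then clear the per-thread
 * shared memory cache before control goes back to the normal world.
 */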
static uint32_t call_entry_std(struct optee_msg_arg *arg, size_t num_params,
			       struct optee_msg_arg *rpc_arg)
{
	struct thread_ctx *thr = threads + thread_get_id();
	uint32_t rv = 0;

	if (rpc_arg) {
		/*
		 * In case the prealloc RPC arg cache is enabled, clear the
		 * cached object for this thread.
		 *
		 * Normally it doesn't make sense to have the prealloc RPC
		 * arg cache enabled together with a supplied RPC arg
		 * struct. But if it is, we must use the supplied struct
		 * and at the same time make sure not to break anything.
		 */
		if (IS_ENABLED(CFG_PREALLOC_RPC_CACHE) &&
		    thread_prealloc_rpc_cache)
			clear_prealloc_rpc_cache(thr);
		thr->rpc_arg = rpc_arg;
	}

	if (tee_entry_std(arg, num_params))
		rv = OPTEE_SMC_RETURN_EBADCMD;
	else
		rv = OPTEE_SMC_RETURN_OK;

	thread_rpc_shm_cache_clear(&thr->shm_cache);
	if (rpc_arg)
		thr->rpc_arg = NULL;

	if (rv == OPTEE_SMC_RETURN_OK &&
	    !(IS_ENABLED(CFG_PREALLOC_RPC_CACHE) && thread_prealloc_rpc_cache))
		clear_prealloc_rpc_cache(thr);

	return rv;
}

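/*
 * Handle a yielding call where @parg is the physical address of the
 * argument struct, located either in static shared memory or, with
 * dynamic shared memory, in a normal world page that is mapped
 * temporarily.
 */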
static uint32_t std_entry_with_parg(paddr_t parg, bool with_rpc_arg)
{
	size_t sz = sizeof(struct optee_msg_arg);
	struct optee_msg_arg *rpc_arg = NULL;
	struct optee_msg_arg *arg = NULL;
	struct mobj *mobj = NULL;
	size_t num_params = 0;
	uint32_t rv = 0;

	/* Check if this region is in static shared space */
	if (core_pbuf_is(CORE_MEM_NSEC_SHM, parg, sz)) {
		if (!IS_ALIGNED_WITH_TYPE(parg, struct optee_msg_arg))
			goto bad_addr;

		arg = phys_to_virt(parg, MEM_AREA_NSEC_SHM,
				   sizeof(struct optee_msg_arg));
		if (!arg)
			goto bad_addr;

		num_params = READ_ONCE(arg->num_params);
		if (num_params > OPTEE_MSG_MAX_NUM_PARAMS)
			return OPTEE_SMC_RETURN_EBADADDR;

		sz = OPTEE_MSG_GET_ARG_SIZE(num_params);
		if (with_rpc_arg) {
			rpc_arg = (void *)((uint8_t *)arg + sz);
			sz += OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
		}
		if (!core_pbuf_is(CORE_MEM_NSEC_SHM, parg, sz))
			goto bad_addr;

		return call_entry_std(arg, num_params, rpc_arg);
	} else {
		if (parg & SMALL_PAGE_MASK)
			goto bad_addr;
		/*
		 * mobj_mapped_shm_alloc() checks that parg resides in
		 * non-secure DDR.
		 */
		mobj = mobj_mapped_shm_alloc(&parg, 1, 0, 0);
		if (!mobj)
			goto bad_addr;
		if (with_rpc_arg)
			rv = get_msg_arg(mobj, 0, &num_params, &arg, &rpc_arg);
		else
			rv = get_msg_arg(mobj, 0, &num_params, &arg, NULL);
		if (!rv)
			rv = call_entry_std(arg, num_params, rpc_arg);
		mobj_put(mobj);
		return rv;
	}

bad_addr:
	EMSG("Bad arg address 0x%"PRIxPA, parg);
	return OPTEE_SMC_RETURN_EBADADDR;
}

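/*
 * Handle a yielding call where the argument struct resides at @offset
 * in previously registered shared memory identified by @cookie.
 */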
static uint32_t std_entry_with_regd_arg(uint64_t cookie, size_t offset)
{
	struct optee_msg_arg *rpc_arg = NULL;
	struct optee_msg_arg *arg = NULL;
	size_t num_params = 0;
	struct mobj *mobj = NULL;
	uint32_t rv = 0;

	mobj = mobj_reg_shm_get_by_cookie(cookie);
	if (!mobj) {
		EMSG("Bad arg cookie 0x%"PRIx64, cookie);
		return OPTEE_SMC_RETURN_EBADADDR;
	}

	if (mobj_inc_map(mobj)) {
		rv = OPTEE_SMC_RETURN_ENOMEM;
		goto out;
	}

	rv = get_msg_arg(mobj, offset, &num_params, &arg, &rpc_arg);
	if (!rv)
		rv = call_entry_std(arg, num_params, rpc_arg);

	mobj_dec_map(mobj);
out:
	mobj_put(mobj);

	return rv;
}

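/* Decode the yielding call and dispatch to the matching handler */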
static uint32_t std_smc_entry(uint32_t a0, uint32_t a1, uint32_t a2,
			      uint32_t a3 __unused)
{
	const bool with_rpc_arg = true;

	switch (a0) {
	case OPTEE_SMC_CALL_WITH_ARG:
		return std_entry_with_parg(reg_pair_to_64(a1, a2),
					   !with_rpc_arg);
	case OPTEE_SMC_CALL_WITH_RPC_ARG:
		return std_entry_with_parg(reg_pair_to_64(a1, a2),
					   with_rpc_arg);
	case OPTEE_SMC_CALL_WITH_REGD_ARG:
		return std_entry_with_regd_arg(reg_pair_to_64(a1, a2), a3);
	default:
		EMSG("Unknown SMC 0x%"PRIx32, a0);
		return OPTEE_SMC_RETURN_EBADCMD;
	}
}

/*
 * Helper routine for the assembly function thread_std_smc_entry()
 *
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
uint32_t __weak __thread_std_smc_entry(uint32_t a0, uint32_t a1, uint32_t a2,
				       uint32_t a3, uint32_t a4 __unused,
				       uint32_t a5 __unused)
{
	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_on_stdcall();

	return std_smc_entry(a0, a1, a2, a3);
}

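/*
 * Disable the preallocated RPC argument cache, only possible while all
 * threads are free. Each successful call hands back the cookie of one
 * cached buffer for the normal world to free; once none remain, *cookie
 * is set to 0 and the cache is turned off.
 */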
bool thread_disable_prealloc_rpc_cache(uint64_t *cookie)
{
	bool rv = false;
	size_t n = 0;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	thread_lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state != THREAD_STATE_FREE) {
			rv = false;
			goto out;
		}
	}

	rv = true;

	if (IS_ENABLED(CFG_PREALLOC_RPC_CACHE)) {
		for (n = 0; n < CFG_NUM_THREADS; n++) {
			if (threads[n].rpc_arg) {
				*cookie = mobj_get_cookie(threads[n].rpc_mobj);
				mobj_put(threads[n].rpc_mobj);
				threads[n].rpc_arg = NULL;
				threads[n].rpc_mobj = NULL;
				goto out;
			}
		}
	}

	*cookie = 0;
	thread_prealloc_rpc_cache = false;
out:
	thread_unlock_global();
	thread_unmask_exceptions(exceptions);
	return rv;
}

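/*
 * Enable the preallocated RPC argument cache, only possible while all
 * threads are free. A no-op reporting success when
 * CFG_PREALLOC_RPC_CACHE is disabled.
 */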
bool thread_enable_prealloc_rpc_cache(void)
{
	bool rv = false;
	size_t n = 0;
	uint32_t exceptions = 0;

	if (!IS_ENABLED(CFG_PREALLOC_RPC_CACHE))
		return true;

	exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	thread_lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state != THREAD_STATE_FREE) {
			rv = false;
			goto out;
		}
	}

	rv = true;
	thread_prealloc_rpc_cache = true;
out:
	thread_unlock_global();
	thread_unmask_exceptions(exceptions);
	return rv;
}

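/*
 * Wrap a normal world buffer in a mobj: static shared memory is used
 * directly, otherwise a single small page of dynamic shared memory is
 * mapped.
 */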
static struct mobj *rpc_shm_mobj_alloc(paddr_t pa, size_t sz, uint64_t cookie)
{
	/* Check if this region is in static shared space */
	if (core_pbuf_is(CORE_MEM_NSEC_SHM, pa, sz))
		return mobj_shm_alloc(pa, sz, cookie);

	if (IS_ENABLED(CFG_CORE_DYN_SHM) &&
	    !(pa & SMALL_PAGE_MASK) && sz <= SMALL_PAGE_SIZE)
		return mobj_mapped_shm_alloc(&pa, 1, 0, cookie);

	return NULL;
}

/**
 * Allocates data for struct optee_msg_arg.
 *
 * @size:	size in bytes of struct optee_msg_arg
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
static struct mobj *thread_rpc_alloc_arg(size_t size)
{
	paddr_t pa;
	uint64_t co;
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		OPTEE_SMC_RETURN_RPC_ALLOC, size
	};
	struct mobj *mobj = NULL;

	thread_rpc(rpc_args);

	/* Registers a1 and a2 passed from normal world */
	pa = reg_pair_to_64(rpc_args[0], rpc_args[1]);
	/* Registers a4 and a5 passed from normal world */
	co = reg_pair_to_64(rpc_args[2], rpc_args[3]);

	if (!IS_ALIGNED_WITH_TYPE(pa, struct optee_msg_arg))
		goto err;

	mobj = rpc_shm_mobj_alloc(pa, size, co);
	if (!mobj)
		goto err;

	return mobj;
err:
	thread_rpc_free_arg(co);
	mobj_put(mobj);
	return NULL;
}

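/*
 * Fill in an OPTEE_MSG rmem parameter from a registered-memory
 * thread_param, translating the mobj into its shared memory cookie.
 */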
static bool set_rmem(struct optee_msg_param *param,
		     struct thread_param *tpm)
{
	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
		      OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	param->u.rmem.offs = tpm->u.memref.offs;
	param->u.rmem.size = tpm->u.memref.size;
	if (tpm->u.memref.mobj) {
		param->u.rmem.shm_ref = mobj_get_cookie(tpm->u.memref.mobj);
		if (!param->u.rmem.shm_ref)
			return false;
	} else {
		param->u.rmem.shm_ref = 0;
	}

	return true;
}

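/*
 * Fill in an OPTEE_MSG tmem parameter from a thread_param memref,
 * resolving the physical address and shared memory cookie of the mobj.
 */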
static bool set_tmem(struct optee_msg_param *param,
		     struct thread_param *tpm)
{
	paddr_t pa = 0;
	uint64_t shm_ref = 0;
	struct mobj *mobj = tpm->u.memref.mobj;

	param->attr = tpm->attr - THREAD_PARAM_ATTR_MEMREF_IN +
		      OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
	if (mobj) {
		shm_ref = mobj_get_cookie(mobj);
		if (!shm_ref)
			return false;
		if (mobj_get_pa(mobj, tpm->u.memref.offs, 0, &pa))
			return false;
	}

	param->u.tmem.size = tpm->u.memref.size;
	param->u.tmem.buf_ptr = pa;
	param->u.tmem.shm_ref = shm_ref;

	return true;
}

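/*
 * Build the struct optee_msg_arg for an RPC in this thread's RPC
 * argument buffer, allocating and caching that buffer on first use.
 * On success *arg_ret points at the filled-in struct and *carg_ret
 * holds the cookie identifying it to the normal world.
 */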
static uint32_t get_rpc_arg(uint32_t cmd, size_t num_params,
			    struct thread_param *params, void **arg_ret,
			    uint64_t *carg_ret)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct optee_msg_arg *arg = thr->rpc_arg;
	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);

	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
		return TEE_ERROR_BAD_PARAMETERS;

	if (!arg) {
		struct mobj *mobj = thread_rpc_alloc_arg(sz);

		if (!mobj)
			return TEE_ERROR_OUT_OF_MEMORY;

		arg = mobj_get_va(mobj, 0, sz);
		if (!arg) {
			thread_rpc_free_arg(mobj_get_cookie(mobj));
			return TEE_ERROR_OUT_OF_MEMORY;
		}

		thr->rpc_arg = arg;
		thr->rpc_mobj = mobj;
	}

	memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
	arg->cmd = cmd;
	arg->num_params = num_params;
	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */

	for (size_t n = 0; n < num_params; n++) {
		switch (params[n].attr) {
		case THREAD_PARAM_ATTR_NONE:
			arg->params[n].attr = OPTEE_MSG_ATTR_TYPE_NONE;
			break;
		case THREAD_PARAM_ATTR_VALUE_IN:
		case THREAD_PARAM_ATTR_VALUE_OUT:
		case THREAD_PARAM_ATTR_VALUE_INOUT:
			arg->params[n].attr = params[n].attr -
					      THREAD_PARAM_ATTR_VALUE_IN +
					      OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
			arg->params[n].u.value.a = params[n].u.value.a;
			arg->params[n].u.value.b = params[n].u.value.b;
			arg->params[n].u.value.c = params[n].u.value.c;
			break;
		case THREAD_PARAM_ATTR_MEMREF_IN:
		case THREAD_PARAM_ATTR_MEMREF_OUT:
		case THREAD_PARAM_ATTR_MEMREF_INOUT:
			if (!params[n].u.memref.mobj ||
			    mobj_matches(params[n].u.memref.mobj,
					 CORE_MEM_NSEC_SHM)) {
				if (!set_tmem(arg->params + n, params + n))
					return TEE_ERROR_BAD_PARAMETERS;
			} else if (mobj_matches(params[n].u.memref.mobj,
						CORE_MEM_REG_SHM)) {
				if (!set_rmem(arg->params + n, params + n))
					return TEE_ERROR_BAD_PARAMETERS;
			} else {
				return TEE_ERROR_BAD_PARAMETERS;
			}
			break;
		default:
			return TEE_ERROR_BAD_PARAMETERS;
		}
	}

	*arg_ret = arg;
	*carg_ret = mobj_get_cookie(thr->rpc_mobj);

	return TEE_SUCCESS;
}

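/*
 * Copy output values and updated memref sizes from the returned message
 * argument back into @params and return the RPC result code.
 */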
static uint32_t get_rpc_arg_res(struct optee_msg_arg *arg, size_t num_params,
				struct thread_param *params)
{
	for (size_t n = 0; n < num_params; n++) {
		switch (params[n].attr) {
		case THREAD_PARAM_ATTR_VALUE_OUT:
		case THREAD_PARAM_ATTR_VALUE_INOUT:
			params[n].u.value.a = arg->params[n].u.value.a;
			params[n].u.value.b = arg->params[n].u.value.b;
			params[n].u.value.c = arg->params[n].u.value.c;
			break;
		case THREAD_PARAM_ATTR_MEMREF_OUT:
		case THREAD_PARAM_ATTR_MEMREF_INOUT:
			/*
			 * rmem.size and tmem.size have the same type and
			 * location.
			 */
			params[n].u.memref.size = arg->params[n].u.rmem.size;
			break;
		default:
			break;
		}
	}

	return arg->ret;
}

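/*
 * Issue an RPC to the normal world and collect the results. The call
 * also feeds the RPC event counter to the PRNG as jitter entropy.
 */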
uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
			struct thread_param *params)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	void *arg = NULL;
	uint64_t carg = 0;
	uint32_t ret = 0;

	/* The source CRYPTO_RNG_SRC_JITTER_RPC is safe to use here */
	plat_prng_add_jitter_entropy(CRYPTO_RNG_SRC_JITTER_RPC,
				     &thread_rpc_pnum);

	ret = get_rpc_arg(cmd, num_params, params, &arg, &carg);
	if (ret)
		return ret;

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);

	return get_rpc_arg_res(arg, num_params, params);
}

/**
 * Free physical memory previously allocated with thread_rpc_alloc()
 *
 * @cookie:	cookie received when allocating the buffer
 * @bt:		must be the same as supplied when allocating
 * @mobj:	mobj that describes allocated buffer
 *
 * This function also frees the corresponding mobj.
 */
static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	void *arg = NULL;
	uint64_t carg = 0;
	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, cookie, 0);
	uint32_t ret = get_rpc_arg(OPTEE_RPC_CMD_SHM_FREE, 1, &param,
				   &arg, &carg);

	mobj_put(mobj);

	if (!ret) {
		reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
		thread_rpc(rpc_args);
	}
}

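/*
 * Parse the normal world's reply to an OPTEE_RPC_CMD_SHM_ALLOC request
 * and wrap the returned buffer in a mobj, contiguous or as a
 * non-contiguous page list depending on the parameter attributes.
 */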
static struct mobj *get_rpc_alloc_res(struct optee_msg_arg *arg,
				      unsigned int bt, size_t size)
{
	struct mobj *mobj = NULL;
	uint64_t cookie = 0;
	size_t sz = 0;
	paddr_t p = 0;

	if (arg->ret || arg->num_params != 1)
		goto err;

	if (arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT &&
	    arg->params[0].attr != (OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				    OPTEE_MSG_ATTR_NONCONTIG))
		goto err;

	p = arg->params[0].u.tmem.buf_ptr;
	sz = READ_ONCE(arg->params[0].u.tmem.size);
	cookie = arg->params[0].u.tmem.shm_ref;
	if (sz < size)
		goto err;

	if (arg->params[0].attr == OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT)
		mobj = rpc_shm_mobj_alloc(p, sz, cookie);
	else
		mobj = msg_param_mobj_from_noncontig(p, sz, cookie, true);

	if (!mobj) {
		thread_rpc_free(bt, cookie, mobj);
		goto err;
	}

	assert(mobj_is_nonsec(mobj));
	return mobj;
err:
	EMSG("RPC allocation failed. Non-secure world result: ret=%#"
	     PRIx32" ret_origin=%#"PRIx32, arg->ret, arg->ret_origin);
	return NULL;
}

/**
 * Allocates shared memory buffer via RPC
 *
 * @size:	size in bytes of shared memory buffer
 * @align:	required alignment of buffer
 * @bt:		buffer type OPTEE_RPC_SHM_TYPE_*
 *
 * Returns a pointer to MOBJ for the memory on success, or NULL on failure.
 */
static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	void *arg = NULL;
	uint64_t carg = 0;
	struct thread_param param = THREAD_PARAM_VALUE(IN, bt, size, align);
	uint32_t ret = get_rpc_arg(OPTEE_RPC_CMD_SHM_ALLOC, 1, &param,
				   &arg, &carg);

	if (ret)
		return NULL;

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);

	return get_rpc_alloc_res(arg, bt, size);
}

struct mobj *thread_rpc_alloc_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_APPL);
}

struct mobj *thread_rpc_alloc_kernel_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_KERNEL);
}

void thread_rpc_free_kernel_payload(struct mobj *mobj)
{
	if (mobj)
		thread_rpc_free(OPTEE_RPC_SHM_TYPE_KERNEL,
				mobj_get_cookie(mobj), mobj);
}

void thread_rpc_free_payload(struct mobj *mobj)
{
	if (mobj)
		thread_rpc_free(OPTEE_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
				mobj);
}

struct mobj *thread_rpc_alloc_global_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_RPC_SHM_TYPE_GLOBAL);
}

void thread_rpc_free_global_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_RPC_SHM_TYPE_GLOBAL, mobj_get_cookie(mobj),
			mobj);
}