xref: /OK3568_Linux_fs/kernel/drivers/tee/optee/call.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Linaro Limited
 */
#include <linux/arm-smccc.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "optee_private.h"
#include "optee_smc.h"

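/*
 * OP-TEE has a limited number of threads available for standard calls.
 * The optee_call_queue serializes callers when secure world runs out of
 * threads: each caller registers an optee_call_waiter, retries the SMC,
 * and is completed by whichever caller returns a thread to secure world.
 */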
struct optee_call_waiter {
	struct list_head list_node;
	struct completion c;
};

static void optee_cq_wait_init(struct optee_call_queue *cq,
			       struct optee_call_waiter *w)
{
	/*
	 * We're preparing to make a call to secure world. In case we can't
	 * allocate a thread in secure world we'll end up waiting in
	 * optee_cq_wait_for_completion().
	 *
	 * Normally, if there's no contention in secure world, the call
	 * will complete and we can clean up directly with
	 * optee_cq_wait_final().
	 */
	mutex_lock(&cq->mutex);

	/*
	 * We add ourselves to the queue, but we don't wait. This
	 * guarantees that we don't lose a completion if secure world
	 * returns busy and another thread that just exited tries to
	 * complete someone.
	 */
	init_completion(&w->c);
	list_add_tail(&w->list_node, &cq->waiters);

	mutex_unlock(&cq->mutex);
}

static void optee_cq_wait_for_completion(struct optee_call_queue *cq,
					 struct optee_call_waiter *w)
{
	wait_for_completion(&w->c);

	mutex_lock(&cq->mutex);

	/* Move to the end of the list to get out of the way of other waiters */
	list_del(&w->list_node);
	reinit_completion(&w->c);
	list_add_tail(&w->list_node, &cq->waiters);

	mutex_unlock(&cq->mutex);
}

static void optee_cq_complete_one(struct optee_call_queue *cq)
{
	struct optee_call_waiter *w;

	list_for_each_entry(w, &cq->waiters, list_node) {
		if (!completion_done(&w->c)) {
			complete(&w->c);
			break;
		}
	}
}

static void optee_cq_wait_final(struct optee_call_queue *cq,
				struct optee_call_waiter *w)
{
	/*
	 * We're done with the call to secure world. The thread in secure
	 * world that was used for this call is now available for some
	 * other task to use.
	 */
	mutex_lock(&cq->mutex);

	/* Get out of the list */
	list_del(&w->list_node);

	/* Wake up one waiting task, if any */
	optee_cq_complete_one(cq);

	/*
	 * If our completion is done, we received a completion from
	 * another task that just finished its call to secure world.
	 * Since yet another thread is now available in secure world,
	 * wake up another waiting task, if any.
	 */
	if (completion_done(&w->c))
		optee_cq_complete_one(cq);

	mutex_unlock(&cq->mutex);
}

/* Requires the filpstate mutex to be held */
static struct optee_session *find_session(struct optee_context_data *ctxdata,
					  u32 session_id)
{
	struct optee_session *sess;

	list_for_each_entry(sess, &ctxdata->sess_list, list_node)
		if (sess->session_id == session_id)
			return sess;

	return NULL;
}

/**
 * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world
 * @ctx:	calling context
 * @parg:	physical address of message to pass to secure world
 *
 * Does an SMC to OP-TEE in secure world and handles any resulting
 * Remote Procedure Calls (RPCs) from OP-TEE.
 *
 * Returns the return code from secure world; 0 is OK.
 */
u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_call_waiter w;
	struct optee_rpc_param param = { };
	struct optee_call_ctx call_ctx = { };
	u32 ret;

	param.a0 = OPTEE_SMC_CALL_WITH_ARG;
	reg_pair_from_64(&param.a1, &param.a2, parg);
	/* Initialize waiter */
	optee_cq_wait_init(&optee->call_queue, &w);
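	/*
	 * Each SMC below has one of three outcomes: secure world is out
	 * of threads (wait for a completion and retry), secure world
	 * issued an RPC (service it and resume the call with the RPC
	 * result in a0..a3), or the call finished and a0 holds the
	 * return code.
	 */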
	while (true) {
		struct arm_smccc_res res;

		optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
				 param.a4, param.a5, param.a6, param.a7,
				 &res);

		if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
			/*
			 * Out of threads in secure world, wait for a
			 * thread to become available.
			 */
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
			if (need_resched())
				cond_resched();
			param.a0 = res.a0;
			param.a1 = res.a1;
			param.a2 = res.a2;
			param.a3 = res.a3;
			optee_handle_rpc(ctx, &param, &call_ctx);
		} else {
			ret = res.a0;
			break;
		}
	}

	optee_rpc_finalize_call(&call_ctx);
	/*
	 * We're done with our thread in secure world, if there are any
	 * waiters wake up one.
	 */
	optee_cq_wait_final(&optee->call_queue, &w);

	return ret;
}

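/*
 * Allocates driver-private shared memory for an OP-TEE message holding
 * @num_params parameters, zeroes it, and returns both the kernel virtual
 * address (via @msg_arg) and the physical address (via @msg_parg) that
 * is passed to secure world.
 */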
static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
				   struct optee_msg_arg **msg_arg,
				   phys_addr_t *msg_parg)
{
	int rc;
	struct tee_shm *shm;
	struct optee_msg_arg *ma;

	shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params),
			    TEE_SHM_MAPPED | TEE_SHM_PRIV);
	if (IS_ERR(shm))
		return shm;

	ma = tee_shm_get_va(shm, 0);
	if (IS_ERR(ma)) {
		rc = PTR_ERR(ma);
		goto out;
	}

	rc = tee_shm_get_pa(shm, 0, msg_parg);
	if (rc)
		goto out;

	memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
	ma->num_params = num_params;
	*msg_arg = ma;
out:
	if (rc) {
		tee_shm_free(shm);
		return ERR_PTR(rc);
	}

	return shm;
}

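/*
 * For OPEN_SESSION the message carries two meta parameters ahead of the
 * caller's parameters: params[0] holds the UUID of the TA to open and
 * params[1] holds the client login method and client UUID.
 */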
int optee_open_session(struct tee_context *ctx,
		       struct tee_ioctl_open_session_arg *arg,
		       struct tee_param *param)
{
	struct optee_context_data *ctxdata = ctx->data;
	int rc;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess = NULL;
	uuid_t client_uuid;

	/* +2 for the meta parameters added below */
	shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
	msg_arg->cancel_id = arg->cancel_id;

	/*
	 * Initialize and add the meta parameters needed when opening a
	 * session.
	 */
	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
				  OPTEE_MSG_ATTR_META;
	msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
				  OPTEE_MSG_ATTR_META;
	memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
	msg_arg->params[1].u.value.c = arg->clnt_login;

	rc = tee_session_calc_client_uuid(&client_uuid, arg->clnt_login,
					  arg->clnt_uuid);
	if (rc)
		goto out;
	export_uuid(msg_arg->params[1].u.octets, &client_uuid);

	rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
	if (rc)
		goto out;

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		rc = -ENOMEM;
		goto out;
	}

	if (optee_do_call_with_arg(ctx, msg_parg)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	if (msg_arg->ret == TEEC_SUCCESS) {
		/* A new session has been created, add it to the list. */
		sess->session_id = msg_arg->session;
		mutex_lock(&ctxdata->mutex);
		list_add(&sess->list_node, &ctxdata->sess_list);
		mutex_unlock(&ctxdata->mutex);
	} else {
		kfree(sess);
	}

	if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) {
		arg->ret = TEEC_ERROR_COMMUNICATION;
		arg->ret_origin = TEEC_ORIGIN_COMMS;
		/* Close the session again to avoid leakage */
		optee_close_session(ctx, msg_arg->session);
	} else {
		arg->session = msg_arg->session;
		arg->ret = msg_arg->ret;
		arg->ret_origin = msg_arg->ret_origin;
	}
out:
	tee_shm_free(shm);

	return rc;
}

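/*
 * The session is removed from the context's list before the
 * CLOSE_SESSION message is sent, so a concurrent invoke or cancel on
 * the same session id fails with -EINVAL instead of racing with the
 * teardown.
 */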
int optee_close_session(struct tee_context *ctx, u32 session)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess;

	/* Check that the session is valid and remove it from the list */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, session);
	if (sess)
		list_del(&sess->list_node);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;
	kfree(sess);

	shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
	msg_arg->session = session;
	optee_do_call_with_arg(ctx, msg_parg);

	tee_shm_free(shm);
	return 0;
}

int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
		      struct tee_param *param)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess;
	int rc;

	/* Check that the session is valid */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, arg->session);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;

	shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);
	msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
	msg_arg->func = arg->func;
	msg_arg->session = arg->session;
	msg_arg->cancel_id = arg->cancel_id;

	rc = optee_to_msg_param(msg_arg->params, arg->num_params, param);
	if (rc)
		goto out;

	if (optee_do_call_with_arg(ctx, msg_parg)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	arg->ret = msg_arg->ret;
	arg->ret_origin = msg_arg->ret_origin;
out:
	tee_shm_free(shm);
	return rc;
}

int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess;

	/* Check that the session is valid */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, session);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;

	shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
	msg_arg->session = session;
	msg_arg->cancel_id = cancel_id;
	optee_do_call_with_arg(ctx, msg_parg);

	tee_shm_free(shm);
	return 0;
}

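/*
 * OP-TEE can keep references to driver-allocated shared memory objects
 * cached between calls. The helpers below toggle that caching and, on
 * disable, drain and free any objects secure world still holds. Both
 * must retry while secure world is busy, using the same call queue as
 * ordinary calls.
 */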
/**
 * optee_enable_shm_cache() - Enables caching of some shared memory
 *			      allocations in OP-TEE
 * @optee:	main service struct
 */
void optee_enable_shm_cache(struct optee *optee)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
				 0, &res);
		if (res.a0 == OPTEE_SMC_RETURN_OK)
			break;
		optee_cq_wait_for_completion(&optee->call_queue, &w);
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}

/**
 * __optee_disable_shm_cache() - Disables caching of some shared memory
 *                               allocations in OP-TEE
 * @optee:	main service struct
 * @is_mapped:	true if the cached shared memory addresses were mapped by this
 *		kernel, are safe to dereference, and should be freed
 */
static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		union {
			struct arm_smccc_res smccc;
			struct optee_smc_disable_shm_cache_result result;
		} res;

		optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
				 0, &res.smccc);
		if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
			break; /* All shms freed */
		if (res.result.status == OPTEE_SMC_RETURN_OK) {
			struct tee_shm *shm;

			/*
			 * Shared memory references that were not mapped by
			 * this kernel must be ignored to prevent a crash.
			 */
			if (!is_mapped)
				continue;

			shm = reg_pair_to_ptr(res.result.shm_upper32,
					      res.result.shm_lower32);
			tee_shm_free(shm);
		} else {
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		}
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}

/**
 * optee_disable_shm_cache() - Disables caching of mapped shared memory
 *                             allocations in OP-TEE
 * @optee:	main service struct
 */
void optee_disable_shm_cache(struct optee *optee)
{
	return __optee_disable_shm_cache(optee, true);
}

/**
 * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
 *                                      allocations in OP-TEE which are not
 *                                      currently mapped
 * @optee:	main service struct
 */
void optee_disable_unmapped_shm_cache(struct optee *optee)
{
	return __optee_disable_shm_cache(optee, false);
}

#define PAGELIST_ENTRIES_PER_PAGE				\
	((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)

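/*
 * Worked example, assuming OPTEE_MSG_NONCONTIG_PAGE_SIZE is 4096: each
 * pagelist page holds 4096 / 8 - 1 = 511 u64 page addresses, and the
 * final u64 holds the physical address of the next pagelist page.
 */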
/**
 * optee_fill_pages_list() - write a list of user pages to the given shared
 * buffer.
 *
 * @dst: page-aligned buffer where the list of pages will be stored
 * @pages: array of pages that represents the shared buffer
 * @num_pages: number of entries in @pages
 * @page_offset: offset of the user buffer from the page start
 *
 * @dst should be big enough to hold the list of user page addresses and
 *	the links to the next pages of the buffer
 */
void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
			   size_t page_offset)
{
	int n = 0;
	phys_addr_t optee_page;
	/*
	 * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
	 * for details.
	 */
	struct {
		u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
		u64 next_page_data;
	} *pages_data;

	/*
	 * Currently OP-TEE uses a 4k page size and it does not look like
	 * this will change in the future. On the other hand, there are
	 * no known ARM architectures with a page size < 4k. Thus the
	 * build assert below looks redundant, but the following code
	 * relies heavily on this assumption, so it is better to be safe
	 * than sorry.
	 */
	BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	pages_data = (void *)dst;
	/*
	 * If a Linux page is bigger than 4k and the user buffer offset
	 * is larger than 4k/8k/12k/etc., this will skip the first 4k
	 * chunks, because they carry no data of value for OP-TEE.
	 */
	optee_page = page_to_phys(*pages) +
		round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	while (true) {
		pages_data->pages_list[n++] = optee_page;

		if (n == PAGELIST_ENTRIES_PER_PAGE) {
			pages_data->next_page_data =
				virt_to_phys(pages_data + 1);
			pages_data++;
			n = 0;
		}

		optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
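		/*
		 * optee_page has just crossed into the next Linux page
		 * when its offset bits within the Linux page are all
		 * zero; advance to the next entry in @pages then.
		 */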
		if (!(optee_page & ~PAGE_MASK)) {
			if (!--num_pages)
				break;
			pages++;
			optee_page = page_to_phys(*pages);
		}
	}
}

/*
 * The final entry in each pagelist page is a pointer to the next
 * pagelist page.
 */
static size_t get_pages_list_size(size_t num_entries)
{
	int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);

	return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
}

u64 *optee_allocate_pages_list(size_t num_entries)
{
	return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
}

void optee_free_pages_list(void *list, size_t num_entries)
{
	free_pages_exact(list, get_pages_list_size(num_entries));
}

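/*
 * Only normal (cacheable) memory may be shared with OP-TEE: secure
 * world maps shared buffers as normal memory, so registering device or
 * strongly-ordered mappings would give mismatched memory attributes
 * between the two worlds. The checks below reject anything else.
 */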
static bool is_normal_memory(pgprot_t p)
{
#if defined(CONFIG_ARM)
	return (((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC) ||
		((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK));
#elif defined(CONFIG_ARM64)
	return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
#else
#error "Unsupported architecture"
#endif
}

static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
{
	while (vma && is_normal_memory(vma->vm_page_prot)) {
		if (vma->vm_end >= end)
			return 0;
		vma = vma->vm_next;
	}

	return -EINVAL;
}

static int check_mem_type(unsigned long start, size_t num_pages)
{
	struct mm_struct *mm = current->mm;
	int rc;

	/*
	 * Allow kernel addresses to be registered with OP-TEE, as
	 * kernel pages are configured as normal memory only.
	 */
	if (virt_addr_valid(start))
		return 0;

	mmap_read_lock(mm);
	rc = __check_mem_type(find_vma(mm, start),
			      start + num_pages * PAGE_SIZE);
	mmap_read_unlock(mm);

	return rc;
}

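/*
 * Registration builds a non-contiguous page list (see
 * optee_fill_pages_list() above) and passes its physical address to
 * secure world in a single TMEM parameter; the low bits of buf_ptr
 * carry the buffer's offset within the first 4k page, per the
 * OPTEE_MSG_ATTR_NONCONTIG ABI described in optee_msg.h.
 */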
int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
		       struct page **pages, size_t num_pages,
		       unsigned long start)
{
	struct tee_shm *shm_arg = NULL;
	struct optee_msg_arg *msg_arg;
	u64 *pages_list;
	phys_addr_t msg_parg;
	int rc;

	if (!num_pages)
		return -EINVAL;

	rc = check_mem_type(start, num_pages);
	if (rc)
		return rc;

	pages_list = optee_allocate_pages_list(num_pages);
	if (!pages_list)
		return -ENOMEM;

	shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
	if (IS_ERR(shm_arg)) {
		rc = PTR_ERR(shm_arg);
		goto out;
	}

	optee_fill_pages_list(pages_list, pages, num_pages,
			      tee_shm_get_page_offset(shm));

	msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
	msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				OPTEE_MSG_ATTR_NONCONTIG;
	msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
	msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
	/*
	 * In the least significant bits of msg_arg->params->u.tmem.buf_ptr
	 * we store the buffer offset from the 4k page, as described in
	 * the OP-TEE ABI.
	 */
	msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
	  (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));

	if (optee_do_call_with_arg(ctx, msg_parg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;

	tee_shm_free(shm_arg);
out:
	optee_free_pages_list(pages_list, num_pages);
	return rc;
}

int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
	struct tee_shm *shm_arg;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	int rc = 0;

	shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
	if (IS_ERR(shm_arg))
		return PTR_ERR(shm_arg);

	msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;

	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;

	if (optee_do_call_with_arg(ctx, msg_parg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;
	tee_shm_free(shm_arg);
	return rc;
}

int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
			    struct page **pages, size_t num_pages,
			    unsigned long start)
{
	/*
	 * We don't want to register supplicant memory in OP-TEE.
	 * Instead, information about it is passed in the RPC code.
	 */
	return check_mem_type(start, num_pages);
}

int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm)
{
	return 0;
}
701