1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (c) 2015, Linaro Limited
4*4882a593Smuzhiyun */
5*4882a593Smuzhiyun #include <linux/device.h>
6*4882a593Smuzhiyun #include <linux/slab.h>
7*4882a593Smuzhiyun #include <linux/uaccess.h>
8*4882a593Smuzhiyun #include "optee_private.h"
9*4882a593Smuzhiyun
/**
 * struct optee_supp_req - a request queued for the user space supplicant
 * @link:	list element in optee_supp::reqs while the request is queued
 * @in_queue:	true while @link is on the optee_supp::reqs list
 * @func:	supplicant function to invoke
 * @ret:	TEEC_* result code, filled in before @c is completed
 * @num_params:	number of elements in @param
 * @param:	parameters for @func, owned by the requesting thread
 * @c:		completed once the supplicant has answered (or the request
 *		has been aborted)
 */
struct optee_supp_req {
	struct list_head link;

	bool in_queue;
	u32 func;
	u32 ret;
	size_t num_params;
	struct tee_param *param;

	struct completion c;
};
21*4882a593Smuzhiyun
optee_supp_init(struct optee_supp * supp)22*4882a593Smuzhiyun void optee_supp_init(struct optee_supp *supp)
23*4882a593Smuzhiyun {
24*4882a593Smuzhiyun memset(supp, 0, sizeof(*supp));
25*4882a593Smuzhiyun mutex_init(&supp->mutex);
26*4882a593Smuzhiyun init_completion(&supp->reqs_c);
27*4882a593Smuzhiyun idr_init(&supp->idr);
28*4882a593Smuzhiyun INIT_LIST_HEAD(&supp->reqs);
29*4882a593Smuzhiyun supp->req_id = -1;
30*4882a593Smuzhiyun }
31*4882a593Smuzhiyun
optee_supp_uninit(struct optee_supp * supp)32*4882a593Smuzhiyun void optee_supp_uninit(struct optee_supp *supp)
33*4882a593Smuzhiyun {
34*4882a593Smuzhiyun mutex_destroy(&supp->mutex);
35*4882a593Smuzhiyun idr_destroy(&supp->idr);
36*4882a593Smuzhiyun }
37*4882a593Smuzhiyun
optee_supp_release(struct optee_supp * supp)38*4882a593Smuzhiyun void optee_supp_release(struct optee_supp *supp)
39*4882a593Smuzhiyun {
40*4882a593Smuzhiyun int id;
41*4882a593Smuzhiyun struct optee_supp_req *req;
42*4882a593Smuzhiyun struct optee_supp_req *req_tmp;
43*4882a593Smuzhiyun
44*4882a593Smuzhiyun mutex_lock(&supp->mutex);
45*4882a593Smuzhiyun
46*4882a593Smuzhiyun /* Abort all request retrieved by supplicant */
47*4882a593Smuzhiyun idr_for_each_entry(&supp->idr, req, id) {
48*4882a593Smuzhiyun idr_remove(&supp->idr, id);
49*4882a593Smuzhiyun req->ret = TEEC_ERROR_COMMUNICATION;
50*4882a593Smuzhiyun complete(&req->c);
51*4882a593Smuzhiyun }
52*4882a593Smuzhiyun
53*4882a593Smuzhiyun /* Abort all queued requests */
54*4882a593Smuzhiyun list_for_each_entry_safe(req, req_tmp, &supp->reqs, link) {
55*4882a593Smuzhiyun list_del(&req->link);
56*4882a593Smuzhiyun req->in_queue = false;
57*4882a593Smuzhiyun req->ret = TEEC_ERROR_COMMUNICATION;
58*4882a593Smuzhiyun complete(&req->c);
59*4882a593Smuzhiyun }
60*4882a593Smuzhiyun
61*4882a593Smuzhiyun supp->ctx = NULL;
62*4882a593Smuzhiyun supp->req_id = -1;
63*4882a593Smuzhiyun
64*4882a593Smuzhiyun mutex_unlock(&supp->mutex);
65*4882a593Smuzhiyun }
66*4882a593Smuzhiyun
67*4882a593Smuzhiyun /**
68*4882a593Smuzhiyun * optee_supp_thrd_req() - request service from supplicant
69*4882a593Smuzhiyun * @ctx: context doing the request
70*4882a593Smuzhiyun * @func: function requested
71*4882a593Smuzhiyun * @num_params: number of elements in @param array
72*4882a593Smuzhiyun * @param: parameters for function
73*4882a593Smuzhiyun *
74*4882a593Smuzhiyun * Returns result of operation to be passed to secure world
75*4882a593Smuzhiyun */
optee_supp_thrd_req(struct tee_context * ctx,u32 func,size_t num_params,struct tee_param * param)76*4882a593Smuzhiyun u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
77*4882a593Smuzhiyun struct tee_param *param)
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun {
80*4882a593Smuzhiyun struct optee *optee = tee_get_drvdata(ctx->teedev);
81*4882a593Smuzhiyun struct optee_supp *supp = &optee->supp;
82*4882a593Smuzhiyun struct optee_supp_req *req;
83*4882a593Smuzhiyun bool interruptable;
84*4882a593Smuzhiyun u32 ret;
85*4882a593Smuzhiyun unsigned long timeleft;
86*4882a593Smuzhiyun int id;
87*4882a593Smuzhiyun struct optee_supp_req *get_req;
88*4882a593Smuzhiyun
89*4882a593Smuzhiyun /*
90*4882a593Smuzhiyun * Return in case there is no supplicant available and
91*4882a593Smuzhiyun * non-blocking request.
92*4882a593Smuzhiyun */
93*4882a593Smuzhiyun if (!supp->ctx && ctx->supp_nowait)
94*4882a593Smuzhiyun return TEEC_ERROR_COMMUNICATION;
95*4882a593Smuzhiyun
96*4882a593Smuzhiyun req = kzalloc(sizeof(*req), GFP_KERNEL);
97*4882a593Smuzhiyun if (!req)
98*4882a593Smuzhiyun return TEEC_ERROR_OUT_OF_MEMORY;
99*4882a593Smuzhiyun
100*4882a593Smuzhiyun init_completion(&req->c);
101*4882a593Smuzhiyun req->func = func;
102*4882a593Smuzhiyun req->num_params = num_params;
103*4882a593Smuzhiyun req->param = param;
104*4882a593Smuzhiyun
105*4882a593Smuzhiyun /* Insert the request in the request list */
106*4882a593Smuzhiyun mutex_lock(&supp->mutex);
107*4882a593Smuzhiyun list_add_tail(&req->link, &supp->reqs);
108*4882a593Smuzhiyun req->in_queue = true;
109*4882a593Smuzhiyun mutex_unlock(&supp->mutex);
110*4882a593Smuzhiyun
111*4882a593Smuzhiyun /* Tell an eventual waiter there's a new request */
112*4882a593Smuzhiyun complete(&supp->reqs_c);
113*4882a593Smuzhiyun
114*4882a593Smuzhiyun /*
115*4882a593Smuzhiyun * Wait for supplicant to process and return result, once we've
116*4882a593Smuzhiyun * returned from wait_for_completion(&req->c) successfully we have
117*4882a593Smuzhiyun * exclusive access again.
118*4882a593Smuzhiyun */
119*4882a593Smuzhiyun while (wait_for_completion_interruptible(&req->c)) {
120*4882a593Smuzhiyun pr_err("Warning, Interrupting an RPC to supplicant!\n");
121*4882a593Smuzhiyun timeleft = wait_for_completion_timeout(&req->c, msecs_to_jiffies(2000));
122*4882a593Smuzhiyun if (timeleft) {
123*4882a593Smuzhiyun /* get completion, it means tee-supplicant is alive. */
124*4882a593Smuzhiyun break;
125*4882a593Smuzhiyun } else {
126*4882a593Smuzhiyun /* timeout, it means tee-supplicant is dead, interrupting an RPC. */
127*4882a593Smuzhiyun interruptable = true;
128*4882a593Smuzhiyun }
129*4882a593Smuzhiyun
130*4882a593Smuzhiyun mutex_lock(&supp->mutex);
131*4882a593Smuzhiyun if (interruptable) {
132*4882a593Smuzhiyun /*
133*4882a593Smuzhiyun * There's no supplicant available and since the
134*4882a593Smuzhiyun * supp->mutex currently is held none can
135*4882a593Smuzhiyun * become available until the mutex released
136*4882a593Smuzhiyun * again.
137*4882a593Smuzhiyun *
138*4882a593Smuzhiyun * Interrupting an RPC to supplicant is only
139*4882a593Smuzhiyun * allowed as a way of slightly improving the user
140*4882a593Smuzhiyun * experience in case the supplicant hasn't been
141*4882a593Smuzhiyun * started yet. During normal operation the supplicant
142*4882a593Smuzhiyun * will serve all requests in a timely manner and
143*4882a593Smuzhiyun * interrupting then wouldn't make sense.
144*4882a593Smuzhiyun */
145*4882a593Smuzhiyun if (req->in_queue) {
146*4882a593Smuzhiyun list_del(&req->link);
147*4882a593Smuzhiyun req->in_queue = false;
148*4882a593Smuzhiyun }
149*4882a593Smuzhiyun
150*4882a593Smuzhiyun idr_for_each_entry(&supp->idr, get_req, id) {
151*4882a593Smuzhiyun if (get_req == req) {
152*4882a593Smuzhiyun idr_remove(&supp->idr, id);
153*4882a593Smuzhiyun supp->req_id = -1;
154*4882a593Smuzhiyun break;
155*4882a593Smuzhiyun }
156*4882a593Smuzhiyun }
157*4882a593Smuzhiyun }
158*4882a593Smuzhiyun mutex_unlock(&supp->mutex);
159*4882a593Smuzhiyun
160*4882a593Smuzhiyun if (interruptable) {
161*4882a593Smuzhiyun req->ret = TEEC_ERROR_COMMUNICATION;
162*4882a593Smuzhiyun break;
163*4882a593Smuzhiyun }
164*4882a593Smuzhiyun }
165*4882a593Smuzhiyun
166*4882a593Smuzhiyun ret = req->ret;
167*4882a593Smuzhiyun kfree(req);
168*4882a593Smuzhiyun
169*4882a593Smuzhiyun return ret;
170*4882a593Smuzhiyun }
171*4882a593Smuzhiyun
supp_pop_entry(struct optee_supp * supp,int num_params,int * id)172*4882a593Smuzhiyun static struct optee_supp_req *supp_pop_entry(struct optee_supp *supp,
173*4882a593Smuzhiyun int num_params, int *id)
174*4882a593Smuzhiyun {
175*4882a593Smuzhiyun struct optee_supp_req *req;
176*4882a593Smuzhiyun
177*4882a593Smuzhiyun if (supp->req_id != -1) {
178*4882a593Smuzhiyun /*
179*4882a593Smuzhiyun * Supplicant should not mix synchronous and asnynchronous
180*4882a593Smuzhiyun * requests.
181*4882a593Smuzhiyun */
182*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
183*4882a593Smuzhiyun }
184*4882a593Smuzhiyun
185*4882a593Smuzhiyun if (list_empty(&supp->reqs))
186*4882a593Smuzhiyun return NULL;
187*4882a593Smuzhiyun
188*4882a593Smuzhiyun req = list_first_entry(&supp->reqs, struct optee_supp_req, link);
189*4882a593Smuzhiyun
190*4882a593Smuzhiyun if (num_params < req->num_params) {
191*4882a593Smuzhiyun /* Not enough room for parameters */
192*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
193*4882a593Smuzhiyun }
194*4882a593Smuzhiyun
195*4882a593Smuzhiyun *id = idr_alloc(&supp->idr, req, 1, 0, GFP_KERNEL);
196*4882a593Smuzhiyun if (*id < 0)
197*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
198*4882a593Smuzhiyun
199*4882a593Smuzhiyun list_del(&req->link);
200*4882a593Smuzhiyun req->in_queue = false;
201*4882a593Smuzhiyun
202*4882a593Smuzhiyun return req;
203*4882a593Smuzhiyun }
204*4882a593Smuzhiyun
static int supp_check_recv_params(size_t num_params, struct tee_param *params,
				  size_t *num_meta)
{
	size_t i;

	if (!num_params)
		return -EINVAL;

	/*
	 * Drop the shm references taken on memrefs earlier; any memref
	 * showing up here is unexpected and will be refused below.
	 */
	for (i = 0; i < num_params; i++)
		if (tee_param_is_memref(params + i) && params[i].u.memref.shm)
			tee_shm_put(params[i].u.memref.shm);

	/*
	 * Only TEE_IOCTL_PARAM_ATTR_TYPE_NONE is accepted, optionally
	 * with the TEE_IOCTL_PARAM_ATTR_META bit set.
	 */
	for (i = 0; i < num_params; i++)
		if (params[i].attr &&
		    params[i].attr != TEE_IOCTL_PARAM_ATTR_META)
			return -EINVAL;

	/* At most one meta parameter is ever needed, and only the first */
	*num_meta = (params->attr == TEE_IOCTL_PARAM_ATTR_META) ? 1 : 0;

	return 0;
}
238*4882a593Smuzhiyun
/**
 * optee_supp_recv() - receive request for supplicant
 * @ctx:	context receiving the request
 * @func:	requested function in supplicant
 * @num_params:	number of elements allocated in @param, updated with number
 *		used elements
 * @param:	space for parameters for @func
 *
 * Returns 0 on success or <0 on failure
 */
int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
		    struct tee_param *param)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct optee_supp *supp = &optee->supp;
	struct optee_supp_req *req = NULL;
	int id;
	size_t num_meta;
	int rc;

	/* Drops memref references and validates the attrs of @param */
	rc = supp_check_recv_params(*num_params, param, &num_meta);
	if (rc)
		return rc;

	while (true) {
		mutex_lock(&supp->mutex);
		req = supp_pop_entry(supp, *num_params - num_meta, &id);
		mutex_unlock(&supp->mutex);

		if (req) {
			if (IS_ERR(req))
				return PTR_ERR(req);
			break;
		}

		/*
		 * If we didn't get a request we'll block in
		 * wait_for_completion() to avoid needless spinning.
		 *
		 * This is where supplicant will be hanging most of
		 * the time, let's make this interruptable so we
		 * can easily restart supplicant if needed.
		 */
		if (wait_for_completion_interruptible(&supp->reqs_c))
			return -ERESTARTSYS;
	}

	if (num_meta) {
		/*
		 * tee-supplicant supports meta parameters -> requests can
		 * be processed asynchronously; report the tracking id in
		 * the meta value so the answer can be matched later.
		 */
		param->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
			      TEE_IOCTL_PARAM_ATTR_META;
		param->u.value.a = id;
		param->u.value.b = 0;
		param->u.value.c = 0;
	} else {
		/* Synchronous operation: remember which request is active */
		mutex_lock(&supp->mutex);
		supp->req_id = id;
		mutex_unlock(&supp->mutex);
	}

	/*
	 * NOTE(review): req is dereferenced here after supp->mutex was
	 * released; this assumes the requesting thread does not free req
	 * in this window (it only does so after removing it from the idr
	 * on its abort path) — verify against optee_supp_thrd_req().
	 */
	*func = req->func;
	*num_params = req->num_params + num_meta;
	memcpy(param + num_meta, req->param,
	       sizeof(struct tee_param) * req->num_params);

	return 0;
}
310*4882a593Smuzhiyun
static struct optee_supp_req *supp_pop_req(struct optee_supp *supp,
					   size_t num_params,
					   struct tee_param *param,
					   size_t *num_meta)
{
	const u32 meta_attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
			      TEE_IOCTL_PARAM_ATTR_META;
	struct optee_supp_req *req;
	size_t nm;
	int id;

	if (!num_params)
		return ERR_PTR(-EINVAL);

	if (supp->req_id != -1) {
		/* Synchronous operation: id was stashed at receive time */
		id = supp->req_id;
		nm = 0;
	} else {
		/* Asynchronous: first param is meta and carries the id */
		if (param->attr != meta_attr)
			return ERR_PTR(-EINVAL);
		id = param->u.value.a;
		nm = 1;
	}

	req = idr_find(&supp->idr, id);
	if (!req)
		return ERR_PTR(-ENOENT);

	/* Answer must carry exactly the parameters the request had */
	if ((num_params - nm) != req->num_params)
		return ERR_PTR(-EINVAL);

	idr_remove(&supp->idr, id);
	supp->req_id = -1;
	*num_meta = nm;

	return req;
}
348*4882a593Smuzhiyun
349*4882a593Smuzhiyun /**
350*4882a593Smuzhiyun * optee_supp_send() - send result of request from supplicant
351*4882a593Smuzhiyun * @ctx: context sending result
352*4882a593Smuzhiyun * @ret: return value of request
353*4882a593Smuzhiyun * @num_params: number of parameters returned
354*4882a593Smuzhiyun * @param: returned parameters
355*4882a593Smuzhiyun *
356*4882a593Smuzhiyun * Returns 0 on success or <0 on failure.
357*4882a593Smuzhiyun */
optee_supp_send(struct tee_context * ctx,u32 ret,u32 num_params,struct tee_param * param)358*4882a593Smuzhiyun int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
359*4882a593Smuzhiyun struct tee_param *param)
360*4882a593Smuzhiyun {
361*4882a593Smuzhiyun struct tee_device *teedev = ctx->teedev;
362*4882a593Smuzhiyun struct optee *optee = tee_get_drvdata(teedev);
363*4882a593Smuzhiyun struct optee_supp *supp = &optee->supp;
364*4882a593Smuzhiyun struct optee_supp_req *req;
365*4882a593Smuzhiyun size_t n;
366*4882a593Smuzhiyun size_t num_meta;
367*4882a593Smuzhiyun
368*4882a593Smuzhiyun mutex_lock(&supp->mutex);
369*4882a593Smuzhiyun req = supp_pop_req(supp, num_params, param, &num_meta);
370*4882a593Smuzhiyun mutex_unlock(&supp->mutex);
371*4882a593Smuzhiyun
372*4882a593Smuzhiyun if (IS_ERR(req)) {
373*4882a593Smuzhiyun /* Something is wrong, let supplicant restart. */
374*4882a593Smuzhiyun return PTR_ERR(req);
375*4882a593Smuzhiyun }
376*4882a593Smuzhiyun
377*4882a593Smuzhiyun /* Update out and in/out parameters */
378*4882a593Smuzhiyun for (n = 0; n < req->num_params; n++) {
379*4882a593Smuzhiyun struct tee_param *p = req->param + n;
380*4882a593Smuzhiyun
381*4882a593Smuzhiyun switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
382*4882a593Smuzhiyun case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
383*4882a593Smuzhiyun case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
384*4882a593Smuzhiyun p->u.value.a = param[n + num_meta].u.value.a;
385*4882a593Smuzhiyun p->u.value.b = param[n + num_meta].u.value.b;
386*4882a593Smuzhiyun p->u.value.c = param[n + num_meta].u.value.c;
387*4882a593Smuzhiyun break;
388*4882a593Smuzhiyun case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
389*4882a593Smuzhiyun case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
390*4882a593Smuzhiyun p->u.memref.size = param[n + num_meta].u.memref.size;
391*4882a593Smuzhiyun break;
392*4882a593Smuzhiyun default:
393*4882a593Smuzhiyun break;
394*4882a593Smuzhiyun }
395*4882a593Smuzhiyun }
396*4882a593Smuzhiyun req->ret = ret;
397*4882a593Smuzhiyun
398*4882a593Smuzhiyun /* Let the requesting thread continue */
399*4882a593Smuzhiyun complete(&req->c);
400*4882a593Smuzhiyun
401*4882a593Smuzhiyun return 0;
402*4882a593Smuzhiyun }
403