xref: /OK3568_Linux_fs/kernel/drivers/media/mc/mc-request.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Media device request objects
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
6*4882a593Smuzhiyun  * Copyright (C) 2018 Intel Corporation
7*4882a593Smuzhiyun  * Copyright (C) 2018 Google, Inc.
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  * Author: Hans Verkuil <hans.verkuil@cisco.com>
10*4882a593Smuzhiyun  * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
11*4882a593Smuzhiyun  */
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun #include <linux/anon_inodes.h>
14*4882a593Smuzhiyun #include <linux/file.h>
15*4882a593Smuzhiyun #include <linux/refcount.h>
16*4882a593Smuzhiyun 
17*4882a593Smuzhiyun #include <media/media-device.h>
18*4882a593Smuzhiyun #include <media/media-request.h>
19*4882a593Smuzhiyun 
/*
 * Human-readable names for the request states, indexed by
 * enum media_request_state. media_request_state_str() checks at
 * compile time that this table covers every state.
 */
static const char * const request_state[] = {
	[MEDIA_REQUEST_STATE_IDLE]	 = "idle",
	[MEDIA_REQUEST_STATE_VALIDATING] = "validating",
	[MEDIA_REQUEST_STATE_QUEUED]	 = "queued",
	[MEDIA_REQUEST_STATE_COMPLETE]	 = "complete",
	[MEDIA_REQUEST_STATE_CLEANING]	 = "cleaning",
	[MEDIA_REQUEST_STATE_UPDATING]	 = "updating",
};
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun static const char *
media_request_state_str(enum media_request_state state)30*4882a593Smuzhiyun media_request_state_str(enum media_request_state state)
31*4882a593Smuzhiyun {
32*4882a593Smuzhiyun 	BUILD_BUG_ON(ARRAY_SIZE(request_state) != NR_OF_MEDIA_REQUEST_STATE);
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun 	if (WARN_ON(state >= ARRAY_SIZE(request_state)))
35*4882a593Smuzhiyun 		return "invalid";
36*4882a593Smuzhiyun 	return request_state[state];
37*4882a593Smuzhiyun }
38*4882a593Smuzhiyun 
media_request_clean(struct media_request * req)39*4882a593Smuzhiyun static void media_request_clean(struct media_request *req)
40*4882a593Smuzhiyun {
41*4882a593Smuzhiyun 	struct media_request_object *obj, *obj_safe;
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun 	/* Just a sanity check. No other code path is allowed to change this. */
44*4882a593Smuzhiyun 	WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING);
45*4882a593Smuzhiyun 	WARN_ON(req->updating_count);
46*4882a593Smuzhiyun 	WARN_ON(req->access_count);
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun 	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
49*4882a593Smuzhiyun 		media_request_object_unbind(obj);
50*4882a593Smuzhiyun 		media_request_object_put(obj);
51*4882a593Smuzhiyun 	}
52*4882a593Smuzhiyun 
53*4882a593Smuzhiyun 	req->updating_count = 0;
54*4882a593Smuzhiyun 	req->access_count = 0;
55*4882a593Smuzhiyun 	WARN_ON(req->num_incomplete_objects);
56*4882a593Smuzhiyun 	req->num_incomplete_objects = 0;
57*4882a593Smuzhiyun 	wake_up_interruptible_all(&req->poll_wait);
58*4882a593Smuzhiyun }
59*4882a593Smuzhiyun 
media_request_release(struct kref * kref)60*4882a593Smuzhiyun static void media_request_release(struct kref *kref)
61*4882a593Smuzhiyun {
62*4882a593Smuzhiyun 	struct media_request *req =
63*4882a593Smuzhiyun 		container_of(kref, struct media_request, kref);
64*4882a593Smuzhiyun 	struct media_device *mdev = req->mdev;
65*4882a593Smuzhiyun 
66*4882a593Smuzhiyun 	dev_dbg(mdev->dev, "request: release %s\n", req->debug_str);
67*4882a593Smuzhiyun 
68*4882a593Smuzhiyun 	/* No other users, no need for a spinlock */
69*4882a593Smuzhiyun 	req->state = MEDIA_REQUEST_STATE_CLEANING;
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun 	media_request_clean(req);
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun 	if (mdev->ops->req_free)
74*4882a593Smuzhiyun 		mdev->ops->req_free(req);
75*4882a593Smuzhiyun 	else
76*4882a593Smuzhiyun 		kfree(req);
77*4882a593Smuzhiyun }
78*4882a593Smuzhiyun 
/**
 * media_request_put - Put a media request
 * @req: The media request
 *
 * Drops one reference; media_request_release() tears the request down
 * once the last reference is gone.
 */
void media_request_put(struct media_request *req)
{
	kref_put(&req->kref, media_request_release);
}
EXPORT_SYMBOL_GPL(media_request_put);
84*4882a593Smuzhiyun 
media_request_close(struct inode * inode,struct file * filp)85*4882a593Smuzhiyun static int media_request_close(struct inode *inode, struct file *filp)
86*4882a593Smuzhiyun {
87*4882a593Smuzhiyun 	struct media_request *req = filp->private_data;
88*4882a593Smuzhiyun 
89*4882a593Smuzhiyun 	media_request_put(req);
90*4882a593Smuzhiyun 	return 0;
91*4882a593Smuzhiyun }
92*4882a593Smuzhiyun 
media_request_poll(struct file * filp,struct poll_table_struct * wait)93*4882a593Smuzhiyun static __poll_t media_request_poll(struct file *filp,
94*4882a593Smuzhiyun 				   struct poll_table_struct *wait)
95*4882a593Smuzhiyun {
96*4882a593Smuzhiyun 	struct media_request *req = filp->private_data;
97*4882a593Smuzhiyun 	unsigned long flags;
98*4882a593Smuzhiyun 	__poll_t ret = 0;
99*4882a593Smuzhiyun 
100*4882a593Smuzhiyun 	if (!(poll_requested_events(wait) & EPOLLPRI))
101*4882a593Smuzhiyun 		return 0;
102*4882a593Smuzhiyun 
103*4882a593Smuzhiyun 	poll_wait(filp, &req->poll_wait, wait);
104*4882a593Smuzhiyun 	spin_lock_irqsave(&req->lock, flags);
105*4882a593Smuzhiyun 	if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
106*4882a593Smuzhiyun 		ret = EPOLLPRI;
107*4882a593Smuzhiyun 		goto unlock;
108*4882a593Smuzhiyun 	}
109*4882a593Smuzhiyun 	if (req->state != MEDIA_REQUEST_STATE_QUEUED) {
110*4882a593Smuzhiyun 		ret = EPOLLERR;
111*4882a593Smuzhiyun 		goto unlock;
112*4882a593Smuzhiyun 	}
113*4882a593Smuzhiyun 
114*4882a593Smuzhiyun unlock:
115*4882a593Smuzhiyun 	spin_unlock_irqrestore(&req->lock, flags);
116*4882a593Smuzhiyun 	return ret;
117*4882a593Smuzhiyun }
118*4882a593Smuzhiyun 
/*
 * Implement MEDIA_REQUEST_IOC_QUEUE: validate the request and hand it
 * to the driver via the req_queue op.
 *
 * Returns 0 on success, -EBUSY if the request was not in the IDLE
 * state, or the error returned by req_validate().
 */
static long media_request_ioctl_queue(struct media_request *req)
{
	struct media_device *mdev = req->mdev;
	enum media_request_state state;
	unsigned long flags;
	int ret;

	dev_dbg(mdev->dev, "request: queue %s\n", req->debug_str);

	/*
	 * Ensure the request that is validated will be the one that gets queued
	 * next by serialising the queueing process. This mutex is also used
	 * to serialize with canceling a vb2 queue and with setting values such
	 * as controls in a request.
	 */
	mutex_lock(&mdev->req_queue_mutex);

	/*
	 * Take a reference on behalf of the queued request. It is dropped
	 * below on failure, or by the completion path (see
	 * media_request_object_unbind()/media_request_object_complete(),
	 * which call media_request_put() once the request completes).
	 */
	media_request_get(req);

	/* Only an IDLE request may move to VALIDATING. */
	spin_lock_irqsave(&req->lock, flags);
	if (req->state == MEDIA_REQUEST_STATE_IDLE)
		req->state = MEDIA_REQUEST_STATE_VALIDATING;
	state = req->state;
	spin_unlock_irqrestore(&req->lock, flags);
	if (state != MEDIA_REQUEST_STATE_VALIDATING) {
		dev_dbg(mdev->dev,
			"request: unable to queue %s, request in state %s\n",
			req->debug_str, media_request_state_str(state));
		media_request_put(req);
		mutex_unlock(&mdev->req_queue_mutex);
		return -EBUSY;
	}

	ret = mdev->ops->req_validate(req);

	/*
	 * If the req_validate was successful, then we mark the state as QUEUED
	 * and call req_queue. The reason we set the state first is that this
	 * allows req_queue to unbind or complete the queued objects in case
	 * they are immediately 'consumed'. State changes from QUEUED to another
	 * state can only happen if either the driver changes the state or if
	 * the user cancels the vb2 queue. The driver can only change the state
	 * after each object is queued through the req_queue op (and note that
	 * that op cannot fail), so setting the state to QUEUED up front is
	 * safe.
	 *
	 * The other reason for changing the state is if the vb2 queue is
	 * canceled, and that uses the req_queue_mutex which is still locked
	 * while req_queue is called, so that's safe as well.
	 */
	spin_lock_irqsave(&req->lock, flags);
	req->state = ret ? MEDIA_REQUEST_STATE_IDLE
			 : MEDIA_REQUEST_STATE_QUEUED;
	spin_unlock_irqrestore(&req->lock, flags);

	if (!ret)
		mdev->ops->req_queue(req);

	mutex_unlock(&mdev->req_queue_mutex);

	if (ret) {
		/* Validation failed: roll back the reference taken above. */
		dev_dbg(mdev->dev, "request: can't queue %s (%d)\n",
			req->debug_str, ret);
		media_request_put(req);
	}

	return ret;
}
187*4882a593Smuzhiyun 
media_request_ioctl_reinit(struct media_request * req)188*4882a593Smuzhiyun static long media_request_ioctl_reinit(struct media_request *req)
189*4882a593Smuzhiyun {
190*4882a593Smuzhiyun 	struct media_device *mdev = req->mdev;
191*4882a593Smuzhiyun 	unsigned long flags;
192*4882a593Smuzhiyun 
193*4882a593Smuzhiyun 	spin_lock_irqsave(&req->lock, flags);
194*4882a593Smuzhiyun 	if (req->state != MEDIA_REQUEST_STATE_IDLE &&
195*4882a593Smuzhiyun 	    req->state != MEDIA_REQUEST_STATE_COMPLETE) {
196*4882a593Smuzhiyun 		dev_dbg(mdev->dev,
197*4882a593Smuzhiyun 			"request: %s not in idle or complete state, cannot reinit\n",
198*4882a593Smuzhiyun 			req->debug_str);
199*4882a593Smuzhiyun 		spin_unlock_irqrestore(&req->lock, flags);
200*4882a593Smuzhiyun 		return -EBUSY;
201*4882a593Smuzhiyun 	}
202*4882a593Smuzhiyun 	if (req->access_count) {
203*4882a593Smuzhiyun 		dev_dbg(mdev->dev,
204*4882a593Smuzhiyun 			"request: %s is being accessed, cannot reinit\n",
205*4882a593Smuzhiyun 			req->debug_str);
206*4882a593Smuzhiyun 		spin_unlock_irqrestore(&req->lock, flags);
207*4882a593Smuzhiyun 		return -EBUSY;
208*4882a593Smuzhiyun 	}
209*4882a593Smuzhiyun 	req->state = MEDIA_REQUEST_STATE_CLEANING;
210*4882a593Smuzhiyun 	spin_unlock_irqrestore(&req->lock, flags);
211*4882a593Smuzhiyun 
212*4882a593Smuzhiyun 	media_request_clean(req);
213*4882a593Smuzhiyun 
214*4882a593Smuzhiyun 	spin_lock_irqsave(&req->lock, flags);
215*4882a593Smuzhiyun 	req->state = MEDIA_REQUEST_STATE_IDLE;
216*4882a593Smuzhiyun 	spin_unlock_irqrestore(&req->lock, flags);
217*4882a593Smuzhiyun 
218*4882a593Smuzhiyun 	return 0;
219*4882a593Smuzhiyun }
220*4882a593Smuzhiyun 
media_request_ioctl(struct file * filp,unsigned int cmd,unsigned long arg)221*4882a593Smuzhiyun static long media_request_ioctl(struct file *filp, unsigned int cmd,
222*4882a593Smuzhiyun 				unsigned long arg)
223*4882a593Smuzhiyun {
224*4882a593Smuzhiyun 	struct media_request *req = filp->private_data;
225*4882a593Smuzhiyun 
226*4882a593Smuzhiyun 	switch (cmd) {
227*4882a593Smuzhiyun 	case MEDIA_REQUEST_IOC_QUEUE:
228*4882a593Smuzhiyun 		return media_request_ioctl_queue(req);
229*4882a593Smuzhiyun 	case MEDIA_REQUEST_IOC_REINIT:
230*4882a593Smuzhiyun 		return media_request_ioctl_reinit(req);
231*4882a593Smuzhiyun 	default:
232*4882a593Smuzhiyun 		return -ENOIOCTLCMD;
233*4882a593Smuzhiyun 	}
234*4882a593Smuzhiyun }
235*4882a593Smuzhiyun 
/*
 * File operations for request file descriptors (anon inodes created by
 * media_request_alloc()). The compat path reuses the native ioctl
 * handler: media_request_ioctl() never dereferences its argument, so
 * no 32/64-bit translation is needed.
 */
static const struct file_operations request_fops = {
	.owner = THIS_MODULE,
	.poll = media_request_poll,
	.unlocked_ioctl = media_request_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = media_request_ioctl,
#endif /* CONFIG_COMPAT */
	.release = media_request_close,
};
245*4882a593Smuzhiyun 
246*4882a593Smuzhiyun struct media_request *
media_request_get_by_fd(struct media_device * mdev,int request_fd)247*4882a593Smuzhiyun media_request_get_by_fd(struct media_device *mdev, int request_fd)
248*4882a593Smuzhiyun {
249*4882a593Smuzhiyun 	struct fd f;
250*4882a593Smuzhiyun 	struct media_request *req;
251*4882a593Smuzhiyun 
252*4882a593Smuzhiyun 	if (!mdev || !mdev->ops ||
253*4882a593Smuzhiyun 	    !mdev->ops->req_validate || !mdev->ops->req_queue)
254*4882a593Smuzhiyun 		return ERR_PTR(-EBADR);
255*4882a593Smuzhiyun 
256*4882a593Smuzhiyun 	f = fdget(request_fd);
257*4882a593Smuzhiyun 	if (!f.file)
258*4882a593Smuzhiyun 		goto err_no_req_fd;
259*4882a593Smuzhiyun 
260*4882a593Smuzhiyun 	if (f.file->f_op != &request_fops)
261*4882a593Smuzhiyun 		goto err_fput;
262*4882a593Smuzhiyun 	req = f.file->private_data;
263*4882a593Smuzhiyun 	if (req->mdev != mdev)
264*4882a593Smuzhiyun 		goto err_fput;
265*4882a593Smuzhiyun 
266*4882a593Smuzhiyun 	/*
267*4882a593Smuzhiyun 	 * Note: as long as someone has an open filehandle of the request,
268*4882a593Smuzhiyun 	 * the request can never be released. The fdget() above ensures that
269*4882a593Smuzhiyun 	 * even if userspace closes the request filehandle, the release()
270*4882a593Smuzhiyun 	 * fop won't be called, so the media_request_get() always succeeds
271*4882a593Smuzhiyun 	 * and there is no race condition where the request was released
272*4882a593Smuzhiyun 	 * before media_request_get() is called.
273*4882a593Smuzhiyun 	 */
274*4882a593Smuzhiyun 	media_request_get(req);
275*4882a593Smuzhiyun 	fdput(f);
276*4882a593Smuzhiyun 
277*4882a593Smuzhiyun 	return req;
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun err_fput:
280*4882a593Smuzhiyun 	fdput(f);
281*4882a593Smuzhiyun 
282*4882a593Smuzhiyun err_no_req_fd:
283*4882a593Smuzhiyun 	dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd);
284*4882a593Smuzhiyun 	return ERR_PTR(-EINVAL);
285*4882a593Smuzhiyun }
286*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(media_request_get_by_fd);
287*4882a593Smuzhiyun 
/**
 * media_request_alloc - Allocate a new request and a file descriptor for it
 * @mdev: The media device the request is allocated for
 * @alloc_fd: Filled with the new request's file descriptor on success
 *
 * Uses the driver's req_alloc op when provided (req_alloc and req_free
 * must be both set or both NULL), otherwise falls back to kzalloc().
 *
 * Returns 0 on success or a negative error code on failure.
 */
int media_request_alloc(struct media_device *mdev, int *alloc_fd)
{
	struct media_request *req;
	struct file *filp;
	int fd;
	int ret;

	/* Either both are NULL or both are non-NULL */
	if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free))
		return -ENOMEM;

	if (mdev->ops->req_alloc)
		req = mdev->ops->req_alloc(mdev);
	else
		req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto err_free_req;
	}

	filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_put_fd;
	}

	filp->private_data = req;
	req->mdev = mdev;
	req->state = MEDIA_REQUEST_STATE_IDLE;
	req->num_incomplete_objects = 0;
	kref_init(&req->kref);
	INIT_LIST_HEAD(&req->objects);
	spin_lock_init(&req->lock);
	init_waitqueue_head(&req->poll_wait);
	req->updating_count = 0;
	req->access_count = 0;

	*alloc_fd = fd;

	snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
		 atomic_inc_return(&mdev->request_id), fd);
	dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);

	/*
	 * Install the fd only after the request is fully initialized:
	 * once installed, userspace can reach the file and its fops.
	 */
	fd_install(fd, filp);

	return 0;

err_put_fd:
	/* The fd was never installed, so put_unused_fd() is correct here. */
	put_unused_fd(fd);

err_free_req:
	if (mdev->ops->req_free)
		mdev->ops->req_free(req);
	else
		kfree(req);

	return ret;
}
350*4882a593Smuzhiyun 
media_request_object_release(struct kref * kref)351*4882a593Smuzhiyun static void media_request_object_release(struct kref *kref)
352*4882a593Smuzhiyun {
353*4882a593Smuzhiyun 	struct media_request_object *obj =
354*4882a593Smuzhiyun 		container_of(kref, struct media_request_object, kref);
355*4882a593Smuzhiyun 	struct media_request *req = obj->req;
356*4882a593Smuzhiyun 
357*4882a593Smuzhiyun 	if (WARN_ON(req))
358*4882a593Smuzhiyun 		media_request_object_unbind(obj);
359*4882a593Smuzhiyun 	obj->ops->release(obj);
360*4882a593Smuzhiyun }
361*4882a593Smuzhiyun 
362*4882a593Smuzhiyun struct media_request_object *
media_request_object_find(struct media_request * req,const struct media_request_object_ops * ops,void * priv)363*4882a593Smuzhiyun media_request_object_find(struct media_request *req,
364*4882a593Smuzhiyun 			  const struct media_request_object_ops *ops,
365*4882a593Smuzhiyun 			  void *priv)
366*4882a593Smuzhiyun {
367*4882a593Smuzhiyun 	struct media_request_object *obj;
368*4882a593Smuzhiyun 	struct media_request_object *found = NULL;
369*4882a593Smuzhiyun 	unsigned long flags;
370*4882a593Smuzhiyun 
371*4882a593Smuzhiyun 	if (WARN_ON(!ops || !priv))
372*4882a593Smuzhiyun 		return NULL;
373*4882a593Smuzhiyun 
374*4882a593Smuzhiyun 	spin_lock_irqsave(&req->lock, flags);
375*4882a593Smuzhiyun 	list_for_each_entry(obj, &req->objects, list) {
376*4882a593Smuzhiyun 		if (obj->ops == ops && obj->priv == priv) {
377*4882a593Smuzhiyun 			media_request_object_get(obj);
378*4882a593Smuzhiyun 			found = obj;
379*4882a593Smuzhiyun 			break;
380*4882a593Smuzhiyun 		}
381*4882a593Smuzhiyun 	}
382*4882a593Smuzhiyun 	spin_unlock_irqrestore(&req->lock, flags);
383*4882a593Smuzhiyun 	return found;
384*4882a593Smuzhiyun }
385*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(media_request_object_find);
386*4882a593Smuzhiyun 
/**
 * media_request_object_put - Put a media request object
 * @obj: The object
 *
 * Drops one reference; media_request_object_release() runs once the
 * last reference is gone.
 */
void media_request_object_put(struct media_request_object *obj)
{
	kref_put(&obj->kref, media_request_object_release);
}
EXPORT_SYMBOL_GPL(media_request_object_put);
392*4882a593Smuzhiyun 
media_request_object_init(struct media_request_object * obj)393*4882a593Smuzhiyun void media_request_object_init(struct media_request_object *obj)
394*4882a593Smuzhiyun {
395*4882a593Smuzhiyun 	obj->ops = NULL;
396*4882a593Smuzhiyun 	obj->req = NULL;
397*4882a593Smuzhiyun 	obj->priv = NULL;
398*4882a593Smuzhiyun 	obj->completed = false;
399*4882a593Smuzhiyun 	INIT_LIST_HEAD(&obj->list);
400*4882a593Smuzhiyun 	kref_init(&obj->kref);
401*4882a593Smuzhiyun }
402*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(media_request_object_init);
403*4882a593Smuzhiyun 
/**
 * media_request_object_bind - Bind a media request object to a request
 * @req: The media request
 * @ops: The object ops for this object
 * @priv: A driver-specific priv value associated with this object
 * @is_buffer: True if the object is a buffer object
 * @obj: The object to bind
 *
 * The request must be in the UPDATING state; binding is refused
 * (with a WARN) otherwise. @ops->release is mandatory since it is
 * called from the object's kref release path.
 *
 * Returns 0 on success, -EBADR if @ops->release is missing, -EBUSY if
 * the request is not in the UPDATING state.
 */
int media_request_object_bind(struct media_request *req,
			      const struct media_request_object_ops *ops,
			      void *priv, bool is_buffer,
			      struct media_request_object *obj)
{
	unsigned long flags;
	int ret = -EBUSY;

	if (WARN_ON(!ops->release))
		return -EBADR;

	spin_lock_irqsave(&req->lock, flags);

	if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING))
		goto unlock;

	obj->req = req;
	obj->ops = ops;
	obj->priv = priv;

	/*
	 * Buffer objects go to the tail of the list, other objects to the
	 * head — presumably so buffers come after all non-buffer objects
	 * at queue time; NOTE(review): confirm against req_queue users.
	 */
	if (is_buffer)
		list_add_tail(&obj->list, &req->objects);
	else
		list_add(&obj->list, &req->objects);
	req->num_incomplete_objects++;
	ret = 0;

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(media_request_object_bind);
436*4882a593Smuzhiyun 
/**
 * media_request_object_unbind - Unbind a media request object
 * @obj: The object
 *
 * Removes the object from its request's object list and updates the
 * request's bookkeeping. If unbinding removes the last incomplete
 * object of a QUEUED request, the request moves to COMPLETE, pollers
 * are woken and the reference held by the queued request is dropped.
 */
void media_request_object_unbind(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	if (WARN_ON(!req))
		return;

	spin_lock_irqsave(&req->lock, flags);
	list_del(&obj->list);
	obj->req = NULL;

	/* A completed request no longer tracks incomplete objects. */
	if (req->state == MEDIA_REQUEST_STATE_COMPLETE)
		goto unlock;

	/* Unbinding during validation indicates a driver bug. */
	if (WARN_ON(req->state == MEDIA_REQUEST_STATE_VALIDATING))
		goto unlock;

	if (req->state == MEDIA_REQUEST_STATE_CLEANING) {
		/* Cleaning: only fix up the incomplete-object count. */
		if (!obj->completed)
			req->num_incomplete_objects--;
		goto unlock;
	}

	if (WARN_ON(!req->num_incomplete_objects))
		goto unlock;

	req->num_incomplete_objects--;
	if (req->state == MEDIA_REQUEST_STATE_QUEUED &&
	    !req->num_incomplete_objects) {
		/* Last incomplete object is gone: the request completes. */
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		completed = true;
		wake_up_interruptible_all(&req->poll_wait);
	}

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	if (obj->ops->unbind)
		obj->ops->unbind(obj);
	/* Drop the reference taken in media_request_ioctl_queue(). */
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_unbind);
481*4882a593Smuzhiyun 
/**
 * media_request_object_complete - Mark the media request object as complete
 * @obj: The object
 *
 * When the last incomplete object of a QUEUED request completes, the
 * request moves to COMPLETE, pollers are woken and the reference held
 * by the queued request is dropped.
 */
void media_request_object_complete(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	spin_lock_irqsave(&req->lock, flags);
	/* Completing an already-completed object is a no-op. */
	if (obj->completed)
		goto unlock;
	obj->completed = true;
	/* Only objects of a QUEUED request may complete. */
	if (WARN_ON(!req->num_incomplete_objects) ||
	    WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
		goto unlock;

	if (!--req->num_incomplete_objects) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		wake_up_interruptible_all(&req->poll_wait);
		completed = true;
	}
unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	/* Drop the reference taken in media_request_ioctl_queue(). */
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_complete);
507