xref: /OK3568_Linux_fs/kernel/drivers/dma-buf/sw_sync.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Sync File validation framework
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2012 Google, Inc.
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include <linux/file.h>
9*4882a593Smuzhiyun #include <linux/fs.h>
10*4882a593Smuzhiyun #include <linux/miscdevice.h>
11*4882a593Smuzhiyun #include <linux/module.h>
12*4882a593Smuzhiyun #include <linux/uaccess.h>
13*4882a593Smuzhiyun #include <linux/slab.h>
14*4882a593Smuzhiyun #include <linux/sync_file.h>
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun #include "sync_debug.h"
17*4882a593Smuzhiyun 
18*4882a593Smuzhiyun #define CREATE_TRACE_POINTS
19*4882a593Smuzhiyun #include "sync_trace.h"
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun /*
22*4882a593Smuzhiyun  * SW SYNC validation framework
23*4882a593Smuzhiyun  *
24*4882a593Smuzhiyun  * A sync object driver that uses a 32bit counter to coordinate
25*4882a593Smuzhiyun  * synchronization.  Useful when there is no hardware primitive backing
26*4882a593Smuzhiyun  * the synchronization.
27*4882a593Smuzhiyun  *
28*4882a593Smuzhiyun  * To start the framework just open:
29*4882a593Smuzhiyun  *
30*4882a593Smuzhiyun  * <debugfs>/sync/sw_sync
31*4882a593Smuzhiyun  *
32*4882a593Smuzhiyun  * That will create a sync timeline, all fences created under this timeline
33*4882a593Smuzhiyun  * file descriptor will belong to the this timeline.
34*4882a593Smuzhiyun  *
35*4882a593Smuzhiyun  * The 'sw_sync' file can be opened many times as to create different
36*4882a593Smuzhiyun  * timelines.
37*4882a593Smuzhiyun  *
38*4882a593Smuzhiyun  * Fences can be created with SW_SYNC_IOC_CREATE_FENCE ioctl with struct
39*4882a593Smuzhiyun  * sw_sync_create_fence_data as parameter.
40*4882a593Smuzhiyun  *
41*4882a593Smuzhiyun  * To increment the timeline counter, SW_SYNC_IOC_INC ioctl should be used
42*4882a593Smuzhiyun  * with the increment as u32. This will update the last signaled value
43*4882a593Smuzhiyun  * from the timeline and signal any fence that has a seqno smaller or equal
44*4882a593Smuzhiyun  * to it.
45*4882a593Smuzhiyun  *
46*4882a593Smuzhiyun  * struct sw_sync_create_fence_data
47*4882a593Smuzhiyun  * @value:	the seqno to initialise the fence with
48*4882a593Smuzhiyun  * @name:	the name of the new sync point
49*4882a593Smuzhiyun  * @fence:	return the fd of the new sync_file with the created fence
50*4882a593Smuzhiyun  */
/* UAPI argument for SW_SYNC_IOC_CREATE_FENCE; layout is ABI, do not change. */
struct sw_sync_create_fence_data {
	__u32	value;		/* seqno to initialise the fence with */
	char	name[32];	/* name of the new sync point */
	__s32	fence;		/* out: fd of new fence */
};
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun #define SW_SYNC_IOC_MAGIC	'W'
58*4882a593Smuzhiyun 
59*4882a593Smuzhiyun #define SW_SYNC_IOC_CREATE_FENCE	_IOWR(SW_SYNC_IOC_MAGIC, 0,\
60*4882a593Smuzhiyun 		struct sw_sync_create_fence_data)
61*4882a593Smuzhiyun 
62*4882a593Smuzhiyun #define SW_SYNC_IOC_INC			_IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun static const struct dma_fence_ops timeline_fence_ops;
65*4882a593Smuzhiyun 
dma_fence_to_sync_pt(struct dma_fence * fence)66*4882a593Smuzhiyun static inline struct sync_pt *dma_fence_to_sync_pt(struct dma_fence *fence)
67*4882a593Smuzhiyun {
68*4882a593Smuzhiyun 	if (fence->ops != &timeline_fence_ops)
69*4882a593Smuzhiyun 		return NULL;
70*4882a593Smuzhiyun 	return container_of(fence, struct sync_pt, base);
71*4882a593Smuzhiyun }
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun /**
74*4882a593Smuzhiyun  * sync_timeline_create() - creates a sync object
75*4882a593Smuzhiyun  * @name:	sync_timeline name
76*4882a593Smuzhiyun  *
77*4882a593Smuzhiyun  * Creates a new sync_timeline. Returns the sync_timeline object or NULL in
78*4882a593Smuzhiyun  * case of error.
79*4882a593Smuzhiyun  */
sync_timeline_create(const char * name)80*4882a593Smuzhiyun static struct sync_timeline *sync_timeline_create(const char *name)
81*4882a593Smuzhiyun {
82*4882a593Smuzhiyun 	struct sync_timeline *obj;
83*4882a593Smuzhiyun 
84*4882a593Smuzhiyun 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
85*4882a593Smuzhiyun 	if (!obj)
86*4882a593Smuzhiyun 		return NULL;
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun 	kref_init(&obj->kref);
89*4882a593Smuzhiyun 	obj->context = dma_fence_context_alloc(1);
90*4882a593Smuzhiyun 	strlcpy(obj->name, name, sizeof(obj->name));
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun 	obj->pt_tree = RB_ROOT;
93*4882a593Smuzhiyun 	INIT_LIST_HEAD(&obj->pt_list);
94*4882a593Smuzhiyun 	spin_lock_init(&obj->lock);
95*4882a593Smuzhiyun 
96*4882a593Smuzhiyun 	sync_timeline_debug_add(obj);
97*4882a593Smuzhiyun 
98*4882a593Smuzhiyun 	return obj;
99*4882a593Smuzhiyun }
100*4882a593Smuzhiyun 
/* kref release callback: tear down a timeline once the last ref is dropped. */
static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);

	/* Unhook from the debugfs listing before the memory goes away. */
	sync_timeline_debug_remove(obj);

	kfree(obj);
}
110*4882a593Smuzhiyun 
/* Take a reference on @obj; paired with sync_timeline_put(). */
static void sync_timeline_get(struct sync_timeline *obj)
{
	kref_get(&obj->kref);
}
115*4882a593Smuzhiyun 
/* Drop a reference on @obj; frees it via sync_timeline_free() on last put. */
static void sync_timeline_put(struct sync_timeline *obj)
{
	kref_put(&obj->kref, sync_timeline_free);
}
120*4882a593Smuzhiyun 
/* dma_fence_ops::get_driver_name — constant driver identifier. */
static const char *timeline_fence_get_driver_name(struct dma_fence *fence)
{
	return "sw_sync";
}
125*4882a593Smuzhiyun 
timeline_fence_get_timeline_name(struct dma_fence * fence)126*4882a593Smuzhiyun static const char *timeline_fence_get_timeline_name(struct dma_fence *fence)
127*4882a593Smuzhiyun {
128*4882a593Smuzhiyun 	struct sync_timeline *parent = dma_fence_parent(fence);
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun 	return parent->name;
131*4882a593Smuzhiyun }
132*4882a593Smuzhiyun 
/*
 * dma_fence_ops::release — final teardown of a sync_pt.
 *
 * If the fence is still linked on the timeline (i.e. it was never
 * signaled), unlink it from both the active list and the rbtree under
 * the timeline lock, then drop the timeline reference taken in
 * sync_pt_create() and free the fence.
 */
static void timeline_fence_release(struct dma_fence *fence)
{
	struct sync_pt *pt = dma_fence_to_sync_pt(fence);
	struct sync_timeline *parent = dma_fence_parent(fence);
	unsigned long flags;

	spin_lock_irqsave(fence->lock, flags);
	/* Non-empty link means the pt was never removed by signaling. */
	if (!list_empty(&pt->link)) {
		list_del(&pt->link);
		rb_erase(&pt->node, &parent->pt_tree);
	}
	spin_unlock_irqrestore(fence->lock, flags);

	sync_timeline_put(parent);
	dma_fence_free(fence);
}
149*4882a593Smuzhiyun 
/*
 * dma_fence_ops::signaled — a pt is signaled once the timeline's counter
 * has reached (or passed) the fence's seqno.
 */
static bool timeline_fence_signaled(struct dma_fence *fence)
{
	struct sync_timeline *parent = dma_fence_parent(fence);

	return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops);
}
156*4882a593Smuzhiyun 
/*
 * dma_fence_ops::enable_signaling — nothing to arm; signaling happens
 * synchronously from sync_timeline_signal(), so just report success.
 */
static bool timeline_fence_enable_signaling(struct dma_fence *fence)
{
	return true;
}
161*4882a593Smuzhiyun 
/* dma_fence_ops::fence_value_str — print this fence's seqno into @str. */
static void timeline_fence_value_str(struct dma_fence *fence,
				    char *str, int size)
{
	snprintf(str, size, "%lld", fence->seqno);
}
167*4882a593Smuzhiyun 
timeline_fence_timeline_value_str(struct dma_fence * fence,char * str,int size)168*4882a593Smuzhiyun static void timeline_fence_timeline_value_str(struct dma_fence *fence,
169*4882a593Smuzhiyun 					     char *str, int size)
170*4882a593Smuzhiyun {
171*4882a593Smuzhiyun 	struct sync_timeline *parent = dma_fence_parent(fence);
172*4882a593Smuzhiyun 
173*4882a593Smuzhiyun 	snprintf(str, size, "%d", parent->value);
174*4882a593Smuzhiyun }
175*4882a593Smuzhiyun 
/* Fence ops backing every sync_pt created on a sw_sync timeline. */
static const struct dma_fence_ops timeline_fence_ops = {
	.get_driver_name = timeline_fence_get_driver_name,
	.get_timeline_name = timeline_fence_get_timeline_name,
	.enable_signaling = timeline_fence_enable_signaling,
	.signaled = timeline_fence_signaled,
	.release = timeline_fence_release,
	.fence_value_str = timeline_fence_value_str,
	.timeline_value_str = timeline_fence_timeline_value_str,
};
185*4882a593Smuzhiyun 
186*4882a593Smuzhiyun /**
187*4882a593Smuzhiyun  * sync_timeline_signal() - signal a status change on a sync_timeline
188*4882a593Smuzhiyun  * @obj:	sync_timeline to signal
189*4882a593Smuzhiyun  * @inc:	num to increment on timeline->value
190*4882a593Smuzhiyun  *
191*4882a593Smuzhiyun  * A sync implementation should call this any time one of it's fences
192*4882a593Smuzhiyun  * has signaled or has an error condition.
193*4882a593Smuzhiyun  */
static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
{
	struct sync_pt *pt, *next;

	trace_sync_timeline(obj);

	spin_lock_irq(&obj->lock);

	/* Advance the last-signaled counter; may wrap (32-bit semantics). */
	obj->value += inc;

	/*
	 * pt_list is kept sorted by seqno (see sync_pt_create()), so we can
	 * stop at the first fence that is not yet signaled.
	 */
	list_for_each_entry_safe(pt, next, &obj->pt_list, link) {
		if (!timeline_fence_signaled(&pt->base))
			break;

		/* list_del_init so timeline_fence_release() sees it unlinked */
		list_del_init(&pt->link);
		rb_erase(&pt->node, &obj->pt_tree);

		/*
		 * A signal callback may release the last reference to this
		 * fence, causing it to be freed. That operation has to be
		 * last to avoid a use after free inside this loop, and must
		 * be after we remove the fence from the timeline in order to
		 * prevent deadlocking on timeline->lock inside
		 * timeline_fence_release().
		 */
		dma_fence_signal_locked(&pt->base);
	}

	spin_unlock_irq(&obj->lock);
}
224*4882a593Smuzhiyun 
225*4882a593Smuzhiyun /**
226*4882a593Smuzhiyun  * sync_pt_create() - creates a sync pt
227*4882a593Smuzhiyun  * @obj:	parent sync_timeline
228*4882a593Smuzhiyun  * @value:	value of the fence
229*4882a593Smuzhiyun  *
230*4882a593Smuzhiyun  * Creates a new sync_pt (fence) as a child of @parent.  @size bytes will be
231*4882a593Smuzhiyun  * allocated allowing for implementation specific data to be kept after
232*4882a593Smuzhiyun  * the generic sync_timeline struct. Returns the sync_pt object or
233*4882a593Smuzhiyun  * NULL in case of error.
234*4882a593Smuzhiyun  */
static struct sync_pt *sync_pt_create(struct sync_timeline *obj,
				      unsigned int value)
{
	struct sync_pt *pt;

	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
	if (!pt)
		return NULL;

	/* Reference dropped in timeline_fence_release(). */
	sync_timeline_get(obj);
	dma_fence_init(&pt->base, &timeline_fence_ops, &obj->lock,
		       obj->context, value);
	INIT_LIST_HEAD(&pt->link);

	spin_lock_irq(&obj->lock);
	/* Already-signaled fences are not tracked on the timeline at all. */
	if (!dma_fence_is_signaled_locked(&pt->base)) {
		struct rb_node **p = &obj->pt_tree.rb_node;
		struct rb_node *parent = NULL;

		/* Insert into the seqno-ordered rbtree, deduplicating. */
		while (*p) {
			struct sync_pt *other;
			int cmp;

			parent = *p;
			other = rb_entry(parent, typeof(*pt), node);
			cmp = value - other->base.seqno;
			if (cmp > 0) {
				p = &parent->rb_right;
			} else if (cmp < 0) {
				p = &parent->rb_left;
			} else {
				/*
				 * Same seqno already exists: reuse it if we
				 * can still grab a reference; otherwise it is
				 * being torn down, so insert ours beside it.
				 */
				if (dma_fence_get_rcu(&other->base)) {
					sync_timeline_put(obj);
					kfree(pt);
					pt = other;
					goto unlock;
				}
				p = &parent->rb_left;
			}
		}
		rb_link_node(&pt->node, parent, p);
		rb_insert_color(&pt->node, &obj->pt_tree);

		/* Keep pt_list sorted: link just before our rbtree successor. */
		parent = rb_next(&pt->node);
		list_add_tail(&pt->link,
			      parent ? &rb_entry(parent, typeof(*pt), node)->link : &obj->pt_list);
	}
unlock:
	spin_unlock_irq(&obj->lock);

	return pt;
}
287*4882a593Smuzhiyun 
288*4882a593Smuzhiyun /*
289*4882a593Smuzhiyun  * *WARNING*
290*4882a593Smuzhiyun  *
291*4882a593Smuzhiyun  * improper use of this can result in deadlocking kernel drivers from userspace.
292*4882a593Smuzhiyun  */
293*4882a593Smuzhiyun 
294*4882a593Smuzhiyun /* opening sw_sync create a new sync obj */
sw_sync_debugfs_open(struct inode * inode,struct file * file)295*4882a593Smuzhiyun static int sw_sync_debugfs_open(struct inode *inode, struct file *file)
296*4882a593Smuzhiyun {
297*4882a593Smuzhiyun 	struct sync_timeline *obj;
298*4882a593Smuzhiyun 	char task_comm[TASK_COMM_LEN];
299*4882a593Smuzhiyun 
300*4882a593Smuzhiyun 	get_task_comm(task_comm, current);
301*4882a593Smuzhiyun 
302*4882a593Smuzhiyun 	obj = sync_timeline_create(task_comm);
303*4882a593Smuzhiyun 	if (!obj)
304*4882a593Smuzhiyun 		return -ENOMEM;
305*4882a593Smuzhiyun 
306*4882a593Smuzhiyun 	file->private_data = obj;
307*4882a593Smuzhiyun 
308*4882a593Smuzhiyun 	return 0;
309*4882a593Smuzhiyun }
310*4882a593Smuzhiyun 
/*
 * Closing the device tears the timeline down: every still-pending fence is
 * forced to completion with -ENOENT so waiters are not stuck forever, then
 * the file's timeline reference is dropped.
 */
static int sw_sync_debugfs_release(struct inode *inode, struct file *file)
{
	struct sync_timeline *obj = file->private_data;
	struct sync_pt *pt, *next;

	spin_lock_irq(&obj->lock);

	list_for_each_entry_safe(pt, next, &obj->pt_list, link) {
		dma_fence_set_error(&pt->base, -ENOENT);
		dma_fence_signal_locked(&pt->base);
	}

	spin_unlock_irq(&obj->lock);

	sync_timeline_put(obj);
	return 0;
}
328*4882a593Smuzhiyun 
/*
 * SW_SYNC_IOC_CREATE_FENCE handler: create a fence at @data.value on @obj,
 * wrap it in a sync_file, and hand the fd back through the user struct.
 *
 * Returns 0 on success or a negative errno. Note the ordering: the fd is
 * only fd_install()ed after copy_to_user() succeeds, so no fd escapes to
 * userspace on the error paths.
 */
static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
				       unsigned long arg)
{
	int fd = get_unused_fd_flags(O_CLOEXEC);
	int err;
	struct sync_pt *pt;
	struct sync_file *sync_file;
	struct sw_sync_create_fence_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err;
	}

	pt = sync_pt_create(obj, data.value);
	if (!pt) {
		err = -ENOMEM;
		goto err;
	}

	/* sync_file_create() takes its own reference; drop ours either way. */
	sync_file = sync_file_create(&pt->base);
	dma_fence_put(&pt->base);
	if (!sync_file) {
		err = -ENOMEM;
		goto err;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		/* fput drops the sync_file (and its fence reference). */
		fput(sync_file->file);
		err = -EFAULT;
		goto err;
	}

	fd_install(fd, sync_file->file);

	return 0;

err:
	put_unused_fd(fd);
	return err;
}
374*4882a593Smuzhiyun 
/*
 * SW_SYNC_IOC_INC handler: advance the timeline by a user-supplied u32.
 *
 * The increment is applied in chunks of at most INT_MAX because the
 * fence machinery compares seqnos with signed arithmetic; a single jump
 * larger than INT_MAX would be seen as going backwards.
 */
static long sw_sync_ioctl_inc(struct sync_timeline *obj, unsigned long arg)
{
	u32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	while (value > INT_MAX)  {
		sync_timeline_signal(obj, INT_MAX);
		value -= INT_MAX;
	}

	sync_timeline_signal(obj, value);

	return 0;
}
391*4882a593Smuzhiyun 
sw_sync_ioctl(struct file * file,unsigned int cmd,unsigned long arg)392*4882a593Smuzhiyun static long sw_sync_ioctl(struct file *file, unsigned int cmd,
393*4882a593Smuzhiyun 			  unsigned long arg)
394*4882a593Smuzhiyun {
395*4882a593Smuzhiyun 	struct sync_timeline *obj = file->private_data;
396*4882a593Smuzhiyun 
397*4882a593Smuzhiyun 	switch (cmd) {
398*4882a593Smuzhiyun 	case SW_SYNC_IOC_CREATE_FENCE:
399*4882a593Smuzhiyun 		return sw_sync_ioctl_create_fence(obj, arg);
400*4882a593Smuzhiyun 
401*4882a593Smuzhiyun 	case SW_SYNC_IOC_INC:
402*4882a593Smuzhiyun 		return sw_sync_ioctl_inc(obj, arg);
403*4882a593Smuzhiyun 
404*4882a593Smuzhiyun 	default:
405*4882a593Smuzhiyun 		return -ENOTTY;
406*4882a593Smuzhiyun 	}
407*4882a593Smuzhiyun }
408*4882a593Smuzhiyun 
/* File operations for the sw_sync misc device (also exposed via debugfs). */
const struct file_operations sw_sync_debugfs_fops = {
	.open           = sw_sync_debugfs_open,
	.release        = sw_sync_debugfs_release,
	.unlocked_ioctl = sw_sync_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};
415*4882a593Smuzhiyun 
/* Misc device registration: /dev/sw_sync with a dynamically assigned minor. */
static struct miscdevice sw_sync_dev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "sw_sync",
	.fops	= &sw_sync_debugfs_fops,
};
421*4882a593Smuzhiyun 
422*4882a593Smuzhiyun module_misc_device(sw_sync_dev);
423*4882a593Smuzhiyun 
424*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
425