/*
 * Copyright 2017 Red Hat
 * Parts ported from amdgpu (fence wait code).
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *
 */

/**
 * DOC: Overview
 *
 * DRM synchronisation objects (syncobj, see struct &drm_syncobj) provide a
 * container for a synchronization primitive which can be used by userspace
 * to explicitly synchronize GPU commands, can be shared between userspace
 * processes, and can be shared between different DRM drivers.
 * Their primary use-case is to implement Vulkan fences and semaphores.
 * The syncobj userspace API provides ioctls for several operations:
 *
 * - Creation and destruction of syncobjs
 * - Import and export of syncobjs to/from a syncobj file descriptor
 * - Import and export a syncobj's underlying fence to/from a sync file
 * - Reset a syncobj (set its fence to NULL)
 * - Signal a syncobj (set a trivially signaled fence)
 * - Wait for a syncobj's fence to appear and be signaled
 *
 * The syncobj userspace API also provides operations to manipulate a syncobj
 * in terms of a timeline of struct &dma_fence_chain rather than a single
 * struct &dma_fence, through the following operations:
 *
 * - Signal a given point on the timeline
 * - Wait for a given point to appear and/or be signaled
 * - Import and export from/to a given point of a timeline
 *
 * At its core, a syncobj is simply a wrapper around a pointer to a struct
 * &dma_fence which may be NULL.
 * When a syncobj is first created, its pointer is either NULL or a pointer
 * to an already signaled fence depending on whether the
 * &DRM_SYNCOBJ_CREATE_SIGNALED flag is passed to
 * &DRM_IOCTL_SYNCOBJ_CREATE.
 *
 * If the syncobj is considered as a binary (its state is either signaled or
 * unsignaled) primitive, when GPU work is enqueued in a DRM driver to signal
 * the syncobj, the syncobj's fence is replaced with a fence which will be
 * signaled by the completion of that work.
 * If the syncobj is considered as a timeline primitive, when GPU work is
 * enqueued in a DRM driver to signal a given point on the syncobj, a new
 * struct &dma_fence_chain is created which points to the DRM driver's fence
 * and also to the previous fence that was in the syncobj. The new struct
 * &dma_fence_chain fence replaces the syncobj's fence and will be signaled
 * by the completion of the DRM driver's work and also of any work associated
 * with the fence previously in the syncobj.
 *
 * When GPU work which waits on a syncobj is enqueued in a DRM driver, at the
 * time the work is enqueued, it waits on the syncobj's fence before
 * submitting the work to hardware. That fence is either:
 *
 * - The syncobj's current fence if the syncobj is considered as a binary
 *   primitive.
 * - The struct &dma_fence associated with a given point if the syncobj is
 *   considered as a timeline primitive.
 *
 * If the syncobj's fence is NULL or not present in the syncobj's timeline,
 * the enqueue operation is expected to fail.
 *
 * With a binary syncobj, all manipulation of the syncobj's fence happens in
 * terms of the current fence at the time the ioctl is called by userspace
 * regardless of whether that operation is an immediate host-side operation
 * (signal or reset) or an operation which is enqueued in some driver
 * queue. &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used
 * to manipulate a syncobj from the host by resetting its pointer to NULL or
 * setting its pointer to a fence which is already signaled.
 *
 * With a timeline syncobj, all manipulation of the syncobj's fence happens
 * in terms of a u64 value referring to a point in the timeline. See
 * dma_fence_chain_find_seqno() to see how a given point is found in the
 * timeline.
 *
 * Note that applications should be careful to always use the timeline set of
 * ioctls when dealing with a syncobj considered as a timeline. Using the
 * binary set of ioctls with a syncobj considered as a timeline could result
 * in incorrect synchronization. The use of binary syncobjs is supported
 * through the timeline set of ioctls by using a point value of 0, which
 * reproduces the behavior of the binary set of ioctls (for example replacing
 * the syncobj's fence when signaling).
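 *
 * As a minimal userspace sketch (error handling omitted; ``drm_fd`` is an
 * assumed open DRM device file descriptor), creating a binary syncobj and
 * then signaling it from the host looks like::
 *
 *     struct drm_syncobj_create create = { .flags = 0 };
 *     ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
 *
 *     struct drm_syncobj_array array = {
 *             .handles = (uintptr_t)&create.handle,
 *             .count_handles = 1,
 *     };
 *     ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &array);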
 *
 *
 * Host-side wait on syncobjs
 * --------------------------
 *
 * &DRM_IOCTL_SYNCOBJ_WAIT takes an array of syncobj handles and does a
 * host-side wait on all of the syncobj fences simultaneously.
 * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL is set, the wait ioctl will wait on
 * all of the syncobj fences to be signaled before it returns.
 * Otherwise, it returns once at least one syncobj fence has been signaled
 * and the index of a signaled fence is written back to the client.
 *
 * Unlike the enqueued GPU work dependencies which fail if they see a NULL
 * fence in a syncobj, if &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is set,
 * the host-side wait will first wait for the syncobj to receive a non-NULL
 * fence and then wait on that fence.
 * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is not set and any one of the
 * syncobjs in the array has a NULL fence, -EINVAL will be returned.
 * Assuming the syncobj starts off with a NULL fence, this allows a client
 * to do a host wait in one thread (or process) which waits on GPU work
 * submitted in another thread (or process) without having to manually
 * synchronize between the two.
 * This requirement is inherited from the Vulkan fence API.
 *
 * Similarly, &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT takes an array of syncobj
 * handles as well as an array of u64 points and does a host-side wait on all
 * of the syncobj fences at the given points simultaneously.
 *
 * &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT also adds the ability to wait for a given
 * fence to materialize on the timeline without waiting for the fence to be
 * signaled by using the &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE flag. This
 * requirement is inherited from the wait-before-signal behavior required by
 * the Vulkan timeline semaphore API.
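 *
 * For example, a sketch of waiting until both of two syncobjs are signaled.
 * Note that &struct drm_syncobj_wait takes an absolute timeout;
 * ``abs_timeout_ns`` is an assumed CLOCK_MONOTONIC deadline computed by the
 * caller::
 *
 *     uint32_t handles[2] = { handle_a, handle_b };
 *     struct drm_syncobj_wait wait = {
 *             .handles = (uintptr_t)handles,
 *             .timeout_nsec = abs_timeout_ns,
 *             .count_handles = 2,
 *             .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
 *     };
 *     ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);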
 *
 *
 * Import/export of syncobjs
 * -------------------------
 *
 * &DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE and &DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD
 * provide two mechanisms for import/export of syncobjs.
 *
 * The first lets the client import or export an entire syncobj to a file
 * descriptor.
 * These fd's are opaque and have no other use case, except passing the
 * syncobj between processes.
 * All exported file descriptors and any syncobj handles created as a
 * result of importing those file descriptors own a reference to the
 * same underlying struct &drm_syncobj and the syncobj can be used
 * persistently across all the processes with which it is shared.
 * The syncobj is freed only once the last reference is dropped.
 * Unlike dma-buf, importing a syncobj creates a new handle (with its own
 * reference) for every import instead of de-duplicating.
 * The primary use-case of this persistent import/export is for shared
 * Vulkan fences and semaphores.
 *
 * The second import/export mechanism, which is indicated by
 * &DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE or
 * &DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE, lets the client
 * import/export the syncobj's current fence from/to a &sync_file.
 * When a syncobj is exported to a sync file, that sync file wraps the
 * syncobj's fence at the time of export and any later signal or reset
 * operations on the syncobj will not affect the exported sync file.
 * When a sync file is imported into a syncobj, the syncobj's fence is set
 * to the fence wrapped by that sync file.
 * Because sync files are immutable, resetting or signaling the syncobj
 * will not affect any sync files whose fences have been imported into the
 * syncobj.
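 *
 * A short sketch of the second mechanism (``handle`` and ``drm_fd`` are
 * assumed), exporting a syncobj's current fence as a sync file::
 *
 *     struct drm_syncobj_handle args = {
 *             .handle = handle,
 *             .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
 *     };
 *     ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
 *
 * On success, ``args.fd`` wraps the syncobj's fence at the time of the call
 * and is unaffected by later signal or reset operations on the syncobj.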
 *
 *
 * Import/export of timeline points in timeline syncobjs
 * -----------------------------------------------------
 *
 * &DRM_IOCTL_SYNCOBJ_TRANSFER provides a mechanism to transfer a struct
 * &dma_fence_chain of a syncobj at a given u64 point to another u64 point
 * into another syncobj.
 *
 * Note that if you want to transfer a struct &dma_fence_chain from a given
 * point on a timeline syncobj from/into a binary syncobj, you can use the
 * point 0 to mean take/replace the fence in the syncobj.
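 *
 * For instance (a sketch; handles and points are assumed), copying the
 * fence at point 3 of one timeline syncobj to point 7 of another::
 *
 *     struct drm_syncobj_transfer xfer = {
 *             .src_handle = src,
 *             .dst_handle = dst,
 *             .src_point = 3,
 *             .dst_point = 7,
 *     };
 *     ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_TRANSFER, &xfer);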
 */

#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>

#include "drm_internal.h"

struct syncobj_wait_entry {
	struct list_head node;
	struct task_struct *task;
	struct dma_fence *fence;
	struct dma_fence_cb fence_cb;
	u64 point;
};

static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct syncobj_wait_entry *wait);

/**
 * drm_syncobj_find - lookup and reference a sync object.
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 *
 * Returns a reference to the syncobj pointed to by handle or NULL. The
 * reference must be released by calling drm_syncobj_put().
 */
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
				     u32 handle)
{
	struct drm_syncobj *syncobj;

	spin_lock(&file_private->syncobj_table_lock);

	/* Check if we currently have a reference on the object */
	syncobj = idr_find(&file_private->syncobj_idr, handle);
	if (syncobj)
		drm_syncobj_get(syncobj);

	spin_unlock(&file_private->syncobj_table_lock);

	return syncobj;
}
EXPORT_SYMBOL(drm_syncobj_find);
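
/*
 * A typical driver-side lookup pairs drm_syncobj_find() with
 * drm_syncobj_put() (a sketch; the surrounding code is illustrative only):
 *
 *	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
 *
 *	if (!syncobj)
 *		return -ENOENT;
 *	...
 *	drm_syncobj_put(syncobj);
 */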

static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
				       struct syncobj_wait_entry *wait)
{
	struct dma_fence *fence;

	if (wait->fence)
		return;

	spin_lock(&syncobj->lock);
	/* We've already tried once to get a fence and failed. Now that we
	 * have the lock, try one more time just to be sure we don't add a
	 * callback when a fence has already been set.
	 */
	fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
	if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
		dma_fence_put(fence);
		list_add_tail(&wait->node, &syncobj->cb_list);
	} else if (!fence) {
		/* dma_fence_chain_find_seqno() set the fence to NULL because
		 * the requested point is already signaled; hand the waiter a
		 * signaled stub fence instead.
		 */
		wait->fence = dma_fence_get_stub();
	} else {
		wait->fence = fence;
	}
	spin_unlock(&syncobj->lock);
}

static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
				    struct syncobj_wait_entry *wait)
{
	if (!wait->node.next)
		return;

	spin_lock(&syncobj->lock);
	list_del_init(&wait->node);
	spin_unlock(&syncobj->lock);
}

/**
 * drm_syncobj_add_point - add new timeline point to the syncobj
 * @syncobj: sync object to add the timeline point to
 * @chain: chain node to use to add the point
 * @fence: fence to encapsulate in the chain node
 * @point: sequence number to use for the point
 *
 * Add the chain node as a new timeline point to the syncobj.
 */
void drm_syncobj_add_point(struct drm_syncobj *syncobj,
			   struct dma_fence_chain *chain,
			   struct dma_fence *fence,
			   uint64_t point)
{
	struct syncobj_wait_entry *cur, *tmp;
	struct dma_fence *prev;

	dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	prev = drm_syncobj_fence_get(syncobj);
	/* Adding an unordered point to the timeline could cause the payload
	 * returned from the query ioctl to be 0!
	 */
	if (prev && prev->seqno >= point)
		DRM_DEBUG("You are adding an unordered point to timeline!\n");
	dma_fence_chain_init(chain, prev, fence, point);
	rcu_assign_pointer(syncobj->fence, &chain->base);

	list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
		syncobj_wait_syncobj_func(syncobj, cur);
	spin_unlock(&syncobj->lock);

	/* Walk the chain once to trigger garbage collection */
	dma_fence_chain_for_each(fence, prev);
	dma_fence_put(prev);
}
EXPORT_SYMBOL(drm_syncobj_add_point);
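
/*
 * A sketch of typical usage (mirroring drm_syncobj_transfer_to_timeline()
 * below): allocate the chain node up front, then hand it to
 * drm_syncobj_add_point() together with the fence for the new point:
 *
 *	struct dma_fence_chain *chain;
 *
 *	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
 *	if (!chain)
 *		return -ENOMEM;
 *	drm_syncobj_add_point(syncobj, chain, fence, point);
 */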

/**
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @fence: fence to install in the sync object.
 *
 * This replaces the fence on a sync object.
 */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
			       struct dma_fence *fence)
{
	struct dma_fence *old_fence;
	struct syncobj_wait_entry *cur, *tmp;

	if (fence)
		dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	old_fence = rcu_dereference_protected(syncobj->fence,
					      lockdep_is_held(&syncobj->lock));
	rcu_assign_pointer(syncobj->fence, fence);

	if (fence != old_fence) {
		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
			syncobj_wait_syncobj_func(syncobj, cur);
	}

	spin_unlock(&syncobj->lock);

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);

/**
 * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
 * @syncobj: sync object to assign the fence on
 *
 * Assign an already signaled stub fence to the sync object.
 */
static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
	struct dma_fence *fence = dma_fence_get_stub();

	drm_syncobj_replace_fence(syncobj, fence);
	dma_fence_put(fence);
}
/* 5s default for wait submission */
#define DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT 5000000000ULL
/**
 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 * @point: timeline point
 * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
 * @fence: out parameter for the fence
 *
 * This is just a convenience function that combines drm_syncobj_find() and
 * drm_syncobj_fence_get().
 *
 * Returns 0 on success or a negative error value on failure. On success @fence
 * contains a reference to the fence, which must be released by calling
 * dma_fence_put().
 */
int drm_syncobj_find_fence(struct drm_file *file_private,
			   u32 handle, u64 point, u64 flags,
			   struct dma_fence **fence)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	struct syncobj_wait_entry wait;
	u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT);
	int ret;

	if (!syncobj)
		return -ENOENT;

	*fence = drm_syncobj_fence_get(syncobj);

	if (*fence) {
		ret = dma_fence_chain_find_seqno(fence, point);
		if (!ret) {
			/* If the requested seqno is already signaled
			 * drm_syncobj_find_fence may return a NULL
			 * fence. To make sure the recipient gets
			 * signalled, use a new fence instead.
			 */
			if (!*fence)
				*fence = dma_fence_get_stub();

			goto out;
		}
		dma_fence_put(*fence);
	} else {
		ret = -EINVAL;
	}

	if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
		goto out;

	memset(&wait, 0, sizeof(wait));
	wait.task = current;
	wait.point = point;
	drm_syncobj_fence_add_wait(syncobj, &wait);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		if (wait.fence) {
			ret = 0;
			break;
		}
		if (timeout == 0) {
			ret = -ETIME;
			break;
		}

		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		timeout = schedule_timeout(timeout);
	} while (1);

	__set_current_state(TASK_RUNNING);
	*fence = wait.fence;

	if (wait.node.next)
		drm_syncobj_remove_wait(syncobj, &wait);

out:
	drm_syncobj_put(syncobj);

	return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);
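
/*
 * A sketch of driver usage at command-submission time (the names here are
 * illustrative, not from any driver): resolve the fence behind a syncobj
 * handle and make the job depend on it before it runs:
 *
 *	struct dma_fence *fence;
 *	int ret;
 *
 *	ret = drm_syncobj_find_fence(file_private, handle, point,
 *				     flags, &fence);
 *	if (ret)
 *		return ret;
 *	...add fence as a dependency of the job...
 *	dma_fence_put(fence);
 */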

/**
 * drm_syncobj_free - free a sync object.
 * @kref: kref to free.
 *
 * Only to be called from kref_put in drm_syncobj_put.
 */
void drm_syncobj_free(struct kref *kref)
{
	struct drm_syncobj *syncobj = container_of(kref,
						   struct drm_syncobj,
						   refcount);
	drm_syncobj_replace_fence(syncobj, NULL);
	kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);

/**
 * drm_syncobj_create - create a new syncobj
 * @out_syncobj: returned syncobj
 * @flags: DRM_SYNCOBJ_* flags
 * @fence: if non-NULL, the syncobj will represent this fence
 *
 * This is the first function to create a sync object. After creating, drivers
 * probably want to make it available to userspace, either through
 * drm_syncobj_get_handle() or drm_syncobj_get_fd().
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
		       struct dma_fence *fence)
{
	struct drm_syncobj *syncobj;

	syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
	if (!syncobj)
		return -ENOMEM;

	kref_init(&syncobj->refcount);
	INIT_LIST_HEAD(&syncobj->cb_list);
	spin_lock_init(&syncobj->lock);

	if (flags & DRM_SYNCOBJ_CREATE_SIGNALED)
		drm_syncobj_assign_null_handle(syncobj);

	if (fence)
		drm_syncobj_replace_fence(syncobj, fence);

	*out_syncobj = syncobj;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_create);

/**
 * drm_syncobj_get_handle - get a handle from a syncobj
 * @file_private: drm file private pointer
 * @syncobj: Sync object to export
 * @handle: out parameter with the new handle
 *
 * Exports a sync object created with drm_syncobj_create() as a handle on
 * @file_private to userspace.
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_get_handle(struct drm_file *file_private,
			   struct drm_syncobj *syncobj, u32 *handle)
{
	int ret;

	/* take a reference to put in the idr */
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);

	idr_preload_end();

	if (ret < 0) {
		drm_syncobj_put(syncobj);
		return ret;
	}

	*handle = ret;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_handle);

static int drm_syncobj_create_as_handle(struct drm_file *file_private,
					u32 *handle, uint32_t flags)
{
	int ret;
	struct drm_syncobj *syncobj;

	ret = drm_syncobj_create(&syncobj, flags, NULL);
	if (ret)
		return ret;

	ret = drm_syncobj_get_handle(file_private, syncobj, handle);
	drm_syncobj_put(syncobj);
	return ret;
}

static int drm_syncobj_destroy(struct drm_file *file_private,
			       u32 handle)
{
	struct drm_syncobj *syncobj;

	spin_lock(&file_private->syncobj_table_lock);
	syncobj = idr_remove(&file_private->syncobj_idr, handle);
	spin_unlock(&file_private->syncobj_table_lock);

	if (!syncobj)
		return -EINVAL;

	drm_syncobj_put(syncobj);
	return 0;
}

static int drm_syncobj_file_release(struct inode *inode, struct file *file)
{
	struct drm_syncobj *syncobj = file->private_data;

	drm_syncobj_put(syncobj);
	return 0;
}

static const struct file_operations drm_syncobj_file_fops = {
	.release = drm_syncobj_file_release,
};

/**
 * drm_syncobj_get_fd - get a file descriptor from a syncobj
 * @syncobj: Sync object to export
 * @p_fd: out parameter with the new file descriptor
 *
 * Exports a sync object created with drm_syncobj_create() as a file descriptor.
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("syncobj_file",
				  &drm_syncobj_file_fops,
				  syncobj, 0);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	drm_syncobj_get(syncobj);
	fd_install(fd, file);

	*p_fd = fd;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_fd);

static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
				    u32 handle, int *p_fd)
{
	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
	int ret;

	if (!syncobj)
		return -EINVAL;

	ret = drm_syncobj_get_fd(syncobj, p_fd);
	drm_syncobj_put(syncobj);
	return ret;
}

static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
				    int fd, u32 *handle)
{
	struct drm_syncobj *syncobj;
	struct fd f = fdget(fd);
	int ret;

	if (!f.file)
		return -EINVAL;

	if (f.file->f_op != &drm_syncobj_file_fops) {
		fdput(f);
		return -EINVAL;
	}

	/* take a reference to put in the idr */
	syncobj = f.file->private_data;
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);
	idr_preload_end();

	if (ret > 0) {
		*handle = ret;
		ret = 0;
	} else
		drm_syncobj_put(syncobj);

	fdput(f);
	return ret;
}

static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
					      int fd, int handle)
{
	struct dma_fence *fence = sync_file_get_fence(fd);
	struct drm_syncobj *syncobj;

	if (!fence)
		return -EINVAL;

	syncobj = drm_syncobj_find(file_private, handle);
	if (!syncobj) {
		dma_fence_put(fence);
		return -ENOENT;
	}

	drm_syncobj_replace_fence(syncobj, fence);
	dma_fence_put(fence);
	drm_syncobj_put(syncobj);
	return 0;
}

static int drm_syncobj_export_sync_file(struct drm_file *file_private,
					int handle, int *p_fd)
{
	int ret;
	struct dma_fence *fence;
	struct sync_file *sync_file;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;

	ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
	if (ret)
		goto err_put_fd;

	sync_file = sync_file_create(fence);

	dma_fence_put(fence);

	if (!sync_file) {
		ret = -EINVAL;
		goto err_put_fd;
	}

	fd_install(fd, sync_file->file);

	*p_fd = fd;
	return 0;
err_put_fd:
	put_unused_fd(fd);
	return ret;
}
/**
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of sync objects.
 */
void
drm_syncobj_open(struct drm_file *file_private)
{
	idr_init_base(&file_private->syncobj_idr, 1);
	spin_lock_init(&file_private->syncobj_table_lock);
}

static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	struct drm_syncobj *syncobj = ptr;

	drm_syncobj_put(syncobj);
	return 0;
}

/**
 * drm_syncobj_release - release file-private sync object resources
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_syncobj_release(struct drm_file *file_private)
{
	idr_for_each(&file_private->syncobj_idr,
		     &drm_syncobj_release_handle, file_private);
	idr_destroy(&file_private->syncobj_idr);
}

int
drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_create *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	/* no other valid flags yet */
	if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
		return -EINVAL;

	return drm_syncobj_create_as_handle(file_private,
					    &args->handle, args->flags);
}

int
drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_private)
{
	struct drm_syncobj_destroy *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	/* make sure padding is empty */
	if (args->pad)
		return -EINVAL;
	return drm_syncobj_destroy(file_private, args->handle);
}

int
drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return drm_syncobj_export_sync_file(file_private, args->handle,
						    &args->fd);

	return drm_syncobj_handle_to_fd(file_private, args->handle,
					&args->fd);
}

int
drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad)
		return -EINVAL;

	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return drm_syncobj_import_sync_file_fence(file_private,
							  args->fd,
							  args->handle);

	return drm_syncobj_fd_to_handle(file_private, args->fd,
					&args->handle);
}

static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
					    struct drm_syncobj_transfer *args)
{
	struct drm_syncobj *timeline_syncobj = NULL;
	struct dma_fence *fence;
	struct dma_fence_chain *chain;
	int ret;

	timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
	if (!timeline_syncobj) {
		return -ENOENT;
	}
	ret = drm_syncobj_find_fence(file_private, args->src_handle,
				     args->src_point, args->flags,
				     &fence);
	if (ret)
		goto err;
	chain = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
	if (!chain) {
		ret = -ENOMEM;
		goto err1;
	}
	drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
err1:
	dma_fence_put(fence);
err:
	drm_syncobj_put(timeline_syncobj);

	return ret;
}

static int
drm_syncobj_transfer_to_binary(struct drm_file *file_private,
			       struct drm_syncobj_transfer *args)
{
	struct drm_syncobj *binary_syncobj = NULL;
	struct dma_fence *fence;
	int ret;

	binary_syncobj = drm_syncobj_find(file_private, args->dst_handle);
	if (!binary_syncobj)
		return -ENOENT;
	ret = drm_syncobj_find_fence(file_private, args->src_handle,
				     args->src_point, args->flags, &fence);
	if (ret)
		goto err;
	drm_syncobj_replace_fence(binary_syncobj, fence);
	dma_fence_put(fence);
err:
	drm_syncobj_put(binary_syncobj);

	return ret;
}

int
drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_private)
{
	struct drm_syncobj_transfer *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->pad)
		return -EINVAL;

	if (args->dst_point)
		ret = drm_syncobj_transfer_to_timeline(file_private, args);
	else
		ret = drm_syncobj_transfer_to_binary(file_private, args);

	return ret;
}

static void syncobj_wait_fence_func(struct dma_fence *fence,
				    struct dma_fence_cb *cb)
{
	struct syncobj_wait_entry *wait =
		container_of(cb, struct syncobj_wait_entry, fence_cb);

	wake_up_process(wait->task);
}

static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct syncobj_wait_entry *wait)
{
	struct dma_fence *fence;

	/* This happens inside the syncobj lock */
	fence = rcu_dereference_protected(syncobj->fence,
					  lockdep_is_held(&syncobj->lock));
	dma_fence_get(fence);
	if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
		dma_fence_put(fence);
		return;
	} else if (!fence) {
		/* The requested point is already signaled; see the matching
		 * branch in drm_syncobj_fence_add_wait().
		 */
		wait->fence = dma_fence_get_stub();
	} else {
		wait->fence = fence;
	}

	wake_up_process(wait->task);
	list_del_init(&wait->node);
}

static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
						  void __user *user_points,
						  uint32_t count,
						  uint32_t flags,
						  signed long timeout,
						  uint32_t *idx)
{
	struct syncobj_wait_entry *entries;
	struct dma_fence *fence;
	uint64_t *points;
	uint32_t signaled_count, i;

	points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
	if (points == NULL)
		return -ENOMEM;

	if (!user_points) {
		memset(points, 0, count * sizeof(uint64_t));

	} else if (copy_from_user(points, user_points,
				  sizeof(uint64_t) * count)) {
		timeout = -EFAULT;
		goto err_free_points;
	}

	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
	if (!entries) {
		timeout = -ENOMEM;
		goto err_free_points;
	}
	/* Walk the list of sync objects and initialize entries. We do
	 * this up-front so that we can properly return -EINVAL if there is
	 * a syncobj with a missing fence and then never have the chance of
	 * returning -EINVAL again.
	 */
	signaled_count = 0;
	for (i = 0; i < count; ++i) {
		struct dma_fence *fence;

		entries[i].task = current;
		entries[i].point = points[i];
		fence = drm_syncobj_fence_get(syncobjs[i]);
		if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
			dma_fence_put(fence);
			if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
				continue;
			} else {
				timeout = -EINVAL;
				goto cleanup_entries;
			}
		}

		if (fence)
			entries[i].fence = fence;
		else
			entries[i].fence = dma_fence_get_stub();

		if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
		    dma_fence_is_signaled(entries[i].fence)) {
			if (signaled_count == 0 && idx)
				*idx = i;
			signaled_count++;
		}
	}

	if (signaled_count == count ||
	    (signaled_count > 0 &&
	     !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
		goto cleanup_entries;

	/* There's a very annoying laxness in the dma_fence API here, in
	 * that backends are not required to automatically report when a
	 * fence is signaled prior to fence->ops->enable_signaling() being
	 * called. So here if we fail to match signaled_count, we need to
	 * fall through and try a 0 timeout wait!
	 */

	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		for (i = 0; i < count; ++i)
			drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
	}

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		signaled_count = 0;
		for (i = 0; i < count; ++i) {
			fence = entries[i].fence;
			if (!fence)
				continue;

			if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
			    dma_fence_is_signaled(fence) ||
			    (!entries[i].fence_cb.func &&
			     dma_fence_add_callback(fence,
						    &entries[i].fence_cb,
						    syncobj_wait_fence_func))) {
				/* The fence has been signaled */
				if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
					signaled_count++;
				} else {
					if (idx)
						*idx = i;
					goto done_waiting;
				}
			}
		}

		if (signaled_count == count)
			goto done_waiting;

		if (timeout == 0) {
			timeout = -ETIME;
			goto done_waiting;
		}

		if (signal_pending(current)) {
			timeout = -ERESTARTSYS;
			goto done_waiting;
		}

		timeout = schedule_timeout(timeout);
	} while (1);

done_waiting:
	__set_current_state(TASK_RUNNING);

cleanup_entries:
	for (i = 0; i < count; ++i) {
		drm_syncobj_remove_wait(syncobjs[i], &entries[i]);
		if (entries[i].fence_cb.func)
			dma_fence_remove_callback(entries[i].fence,
						  &entries[i].fence_cb);
		dma_fence_put(entries[i].fence);
	}
	kfree(entries);

err_free_points:
	kfree(points);

	return timeout;
}
1084*4882a593Smuzhiyun
1085*4882a593Smuzhiyun /**
1086*4882a593Smuzhiyun * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
1087*4882a593Smuzhiyun *
1088*4882a593Smuzhiyun * @timeout_nsec: timeout nsec component in ns, 0 for poll
1089*4882a593Smuzhiyun *
1090*4882a593Smuzhiyun * Calculate the timeout in jiffies from an absolute time in sec/nsec.
1091*4882a593Smuzhiyun */
signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
{
	ktime_t abs_timeout, now;
	u64 timeout_ns, timeout_jiffies64;

	/* A 0 timeout means poll - an absolute time of 0 is not valid */
	if (timeout_nsec == 0)
		return 0;

	abs_timeout = ns_to_ktime(timeout_nsec);
	now = ktime_get();

	if (!ktime_after(abs_timeout, now))
		return 0;

	timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));

	timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
	/* clamp timeout to avoid being mistaken for an infinite wait */
	if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies64 + 1;
}
EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);
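
/*
 * Illustrative only (not part of the original file): a driver handling an
 * ioctl that carries an absolute timeout would typically convert it once
 * and feed the result to a schedule_timeout()-based wait, e.g.:
 *
 *	signed long timeout;
 *
 *	timeout = drm_timeout_abs_to_jiffies(args->timeout_nsec);
 *	timeout = my_driver_wait(obj, timeout); // hypothetical helper
 *	if (timeout < 0)
 *		return timeout; // -ETIME or -ERESTARTSYS
 */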

static int drm_syncobj_array_wait(struct drm_device *dev,
				  struct drm_file *file_private,
				  struct drm_syncobj_wait *wait,
				  struct drm_syncobj_timeline_wait *timeline_wait,
				  struct drm_syncobj **syncobjs, bool timeline)
{
	signed long timeout = 0;
	uint32_t first = ~0;

	if (!timeline) {
		timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
		timeout = drm_syncobj_array_wait_timeout(syncobjs,
							 NULL,
							 wait->count_handles,
							 wait->flags,
							 timeout, &first);
		if (timeout < 0)
			return timeout;
		wait->first_signaled = first;
	} else {
		timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec);
		timeout = drm_syncobj_array_wait_timeout(syncobjs,
							 u64_to_user_ptr(timeline_wait->points),
							 timeline_wait->count_handles,
							 timeline_wait->flags,
							 timeout, &first);
		if (timeout < 0)
			return timeout;
		timeline_wait->first_signaled = first;
	}
	return 0;
}

static int drm_syncobj_array_find(struct drm_file *file_private,
				  void __user *user_handles,
				  uint32_t count_handles,
				  struct drm_syncobj ***syncobjs_out)
{
	uint32_t i, *handles;
	struct drm_syncobj **syncobjs;
	int ret;

	handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
	if (handles == NULL)
		return -ENOMEM;

	if (copy_from_user(handles, user_handles,
			   sizeof(uint32_t) * count_handles)) {
		ret = -EFAULT;
		goto err_free_handles;
	}

	syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
	if (syncobjs == NULL) {
		ret = -ENOMEM;
		goto err_free_handles;
	}

	for (i = 0; i < count_handles; i++) {
		syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
		if (!syncobjs[i]) {
			ret = -ENOENT;
			goto err_put_syncobjs;
		}
	}

	kfree(handles);
	*syncobjs_out = syncobjs;
	return 0;

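/* Error unwind: put only the syncobjs we successfully looked up. */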
err_put_syncobjs:
	while (i-- > 0)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
err_free_handles:
	kfree(handles);

	return ret;
}

static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
				   uint32_t count)
{
	uint32_t i;

	for (i = 0; i < count; i++)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
}

int
drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_private)
{
	struct drm_syncobj_wait *args = data;
	struct drm_syncobj **syncobjs;
	int ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	ret = drm_syncobj_array_wait(dev, file_private,
				     args, NULL, syncobjs, false);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}
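
/*
 * Illustrative userspace sketch (not part of the original file): waiting
 * on a set of syncobj handles with an absolute timeout via libdrm's
 * drmIoctl():
 *
 *	struct drm_syncobj_wait wait = {
 *		.handles = (uint64_t)(uintptr_t)handles,
 *		.timeout_nsec = abs_timeout_ns,
 *		.count_handles = count,
 *		.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
 *	};
 *
 *	ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
 *
 * On timeout the ioctl fails with ETIME; when waiting for any rather than
 * all handles, wait.first_signaled reports which fence signaled first.
 */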

int
drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_private)
{
	struct drm_syncobj_timeline_wait *args = data;
	struct drm_syncobj **syncobjs;
	int ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	ret = drm_syncobj_array_wait(dev, file_private,
				     NULL, args, syncobjs, true);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}

int
drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_private)
{
	struct drm_syncobj_array *args = data;
	struct drm_syncobj **syncobjs;
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++)
		drm_syncobj_replace_fence(syncobjs[i], NULL);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return 0;
}

int
drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_array *args = data;
	struct drm_syncobj **syncobjs;
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	for (i = 0; i < args->count_handles; i++)
		drm_syncobj_assign_null_handle(syncobjs[i]);

	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}
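
/*
 * Illustrative userspace sketch (not part of the original file): signaling
 * a set of binary syncobjs, which installs an already-signaled stub fence
 * in each one:
 *
 *	struct drm_syncobj_array array = {
 *		.handles = (uint64_t)(uintptr_t)handles,
 *		.count_handles = count,
 *	};
 *
 *	ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &array);
 */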

int
drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_private)
{
	struct drm_syncobj_timeline_array *args = data;
	struct drm_syncobj **syncobjs;
	struct dma_fence_chain **chains;
	uint64_t *points;
	uint32_t i, j;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->flags != 0)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

	points = kmalloc_array(args->count_handles, sizeof(*points),
			       GFP_KERNEL);
	if (!points) {
		ret = -ENOMEM;
		goto out;
	}
	if (!u64_to_user_ptr(args->points)) {
		memset(points, 0, args->count_handles * sizeof(uint64_t));
	} else if (copy_from_user(points, u64_to_user_ptr(args->points),
				  sizeof(uint64_t) * args->count_handles)) {
		ret = -EFAULT;
		goto err_points;
	}

	chains = kmalloc_array(args->count_handles, sizeof(void *), GFP_KERNEL);
	if (!chains) {
		ret = -ENOMEM;
		goto err_points;
	}
	for (i = 0; i < args->count_handles; i++) {
		chains[i] = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
		if (!chains[i]) {
			for (j = 0; j < i; j++)
				kfree(chains[j]);
			ret = -ENOMEM;
			goto err_chains;
		}
	}

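	/*
	 * Attach an already-signaled stub fence at each requested point;
	 * drm_syncobj_add_point() takes over the chain nodes allocated
	 * above, so they must not be freed on this path.
	 */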
	for (i = 0; i < args->count_handles; i++) {
		struct dma_fence *fence = dma_fence_get_stub();

		drm_syncobj_add_point(syncobjs[i], chains[i],
				      fence, points[i]);
		dma_fence_put(fence);
	}
err_chains:
	kfree(chains);
err_points:
	kfree(points);
out:
	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}

int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_private)
{
	struct drm_syncobj_timeline_array *args = data;
	struct drm_syncobj **syncobjs;
	uint64_t __user *points = u64_to_user_ptr(args->points);
	uint32_t i;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
		return -EOPNOTSUPP;

	if (args->flags & ~DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED)
		return -EINVAL;

	if (args->count_handles == 0)
		return -EINVAL;

	ret = drm_syncobj_array_find(file_private,
				     u64_to_user_ptr(args->handles),
				     args->count_handles,
				     &syncobjs);
	if (ret < 0)
		return ret;

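	/*
	 * For each handle, either report the last submitted point
	 * (DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED) or walk the fence
	 * chain to find the last signaled point on the timeline.
	 */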
	for (i = 0; i < args->count_handles; i++) {
		struct dma_fence_chain *chain;
		struct dma_fence *fence;
		uint64_t point;

		fence = drm_syncobj_fence_get(syncobjs[i]);
		chain = to_dma_fence_chain(fence);
		if (chain) {
			struct dma_fence *iter, *last_signaled =
				dma_fence_get(fence);

			if (args->flags &
			    DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED) {
				point = fence->seqno;
			} else {
				dma_fence_chain_for_each(iter, fence) {
					if (iter->context != fence->context) {
						dma_fence_put(iter);
						/* The timeline most likely
						 * has unordered points. */
						break;
					}
					dma_fence_put(last_signaled);
					last_signaled = dma_fence_get(iter);
				}
				point = dma_fence_is_signaled(last_signaled) ?
					last_signaled->seqno :
					to_dma_fence_chain(last_signaled)->prev_seqno;
			}
			dma_fence_put(last_signaled);
		} else {
			point = 0;
		}
		dma_fence_put(fence);
		ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
		ret = ret ? -EFAULT : 0;
		if (ret)
			break;
	}
	drm_syncobj_array_free(syncobjs, args->count_handles);

	return ret;
}