xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/vc4/vc4_gem.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/sched/signal.h>
#include <linux/dma-fence-array.h>

#include <drm/drm_syncobj.h>

#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
#include "vc4_trace.h"

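/* Schedules (or pushes back) the hangcheck timer to fire roughly
 * 100ms from now, rounded up to the next jiffy.
 */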
static void
vc4_queue_hangcheck(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mod_timer(&vc4->hangcheck.timer,
		  round_jiffies_up(jiffies + msecs_to_jiffies(100)));
}

struct vc4_hang_state {
	struct drm_vc4_get_hang_state user_state;

	u32 bo_count;
	struct drm_gem_object **bo;
};

static void
vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
{
	unsigned int i;

	for (i = 0; i < state->user_state.bo_count; i++)
		drm_gem_object_put(state->bo[i]);

	kfree(state);
}

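/* Returns the hang state most recently captured by vc4_save_hang_state()
 * to userspace: the saved register snapshot is copied out, and GEM
 * handles are created for each BO that was part of the hung jobs.
 */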
int
vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_hang_state *get_state = data;
	struct drm_vc4_get_hang_state_bo *bo_state;
	struct vc4_hang_state *kernel_state;
	struct drm_vc4_get_hang_state *state;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	u32 i;
	int ret = 0;

	if (!vc4->v3d) {
		DRM_DEBUG("VC4_GET_HANG_STATE with no VC4 V3D probed\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	kernel_state = vc4->hang_state;
	if (!kernel_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return -ENOENT;
	}
	state = &kernel_state->user_state;

	/* If the user's array isn't big enough, just return the
	 * required array size.
	 */
	if (get_state->bo_count < state->bo_count) {
		get_state->bo_count = state->bo_count;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return 0;
	}

	vc4->hang_state = NULL;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Save the user's BO pointer, so we don't stomp it with the memcpy. */
	state->bo = get_state->bo;
	memcpy(get_state, state, sizeof(*state));

	bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL);
	if (!bo_state) {
		ret = -ENOMEM;
		goto err_free;
	}

	for (i = 0; i < state->bo_count; i++) {
		struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]);
		u32 handle;

		ret = drm_gem_handle_create(file_priv, kernel_state->bo[i],
					    &handle);

		if (ret) {
			state->bo_count = i;
			goto err_delete_handle;
		}
		bo_state[i].handle = handle;
		bo_state[i].paddr = vc4_bo->base.paddr;
		bo_state[i].size = vc4_bo->base.base.size;
	}

	if (copy_to_user(u64_to_user_ptr(get_state->bo),
			 bo_state,
			 state->bo_count * sizeof(*bo_state)))
		ret = -EFAULT;

err_delete_handle:
	if (ret) {
		for (i = 0; i < state->bo_count; i++)
			drm_gem_handle_delete(file_priv, bo_state[i].handle);
	}

err_free:
	vc4_free_hang_state(dev, kernel_state);
	kfree(bo_state);

	return ret;
}

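/* Captures the state of the hung GPU for later retrieval through the
 * VC4_GET_HANG_STATE ioctl: references to the BOs of the in-flight bin
 * and render jobs, plus a snapshot of the V3D registers.
 */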
static void
vc4_save_hang_state(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_get_hang_state *state;
	struct vc4_hang_state *kernel_state;
	struct vc4_exec_info *exec[2];
	struct vc4_bo *bo;
	unsigned long irqflags;
	unsigned int i, j, k, unref_list_count;

	kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
	if (!kernel_state)
		return;

	state = &kernel_state->user_state;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	exec[0] = vc4_first_bin_job(vc4);
	exec[1] = vc4_first_render_job(vc4);
	if (!exec[0] && !exec[1]) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	/* Get the bos from both binner and renderer into hang state. */
	state->bo_count = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		unref_list_count = 0;
		list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
			unref_list_count++;
		state->bo_count += exec[i]->bo_count + unref_list_count;
	}

	kernel_state->bo = kcalloc(state->bo_count,
				   sizeof(*kernel_state->bo), GFP_ATOMIC);

	if (!kernel_state->bo) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	k = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		for (j = 0; j < exec[i]->bo_count; j++) {
			bo = to_vc4_bo(&exec[i]->bo[j]->base);

			/* Retain BOs just in case they were marked purgeable.
			 * This prevents the BO from being purged before
			 * someone had a chance to dump the hang state.
			 */
			WARN_ON(!refcount_read(&bo->usecnt));
			refcount_inc(&bo->usecnt);
			drm_gem_object_get(&exec[i]->bo[j]->base);
			kernel_state->bo[k++] = &exec[i]->bo[j]->base;
		}

		list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
			/* No need to retain BOs coming from the ->unref_list
			 * because they are naturally unpurgeable.
			 */
			drm_gem_object_get(&bo->base.base);
			kernel_state->bo[k++] = &bo->base.base;
		}
	}

	WARN_ON_ONCE(k != state->bo_count);

	if (exec[0])
		state->start_bin = exec[0]->ct0ca;
	if (exec[1])
		state->start_render = exec[1]->ct1ca;

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	state->ct0ca = V3D_READ(V3D_CTNCA(0));
	state->ct0ea = V3D_READ(V3D_CTNEA(0));

	state->ct1ca = V3D_READ(V3D_CTNCA(1));
	state->ct1ea = V3D_READ(V3D_CTNEA(1));

	state->ct0cs = V3D_READ(V3D_CTNCS(0));
	state->ct1cs = V3D_READ(V3D_CTNCS(1));

	state->ct0ra0 = V3D_READ(V3D_CT00RA0);
	state->ct1ra0 = V3D_READ(V3D_CT01RA0);

	state->bpca = V3D_READ(V3D_BPCA);
	state->bpcs = V3D_READ(V3D_BPCS);
	state->bpoa = V3D_READ(V3D_BPOA);
	state->bpos = V3D_READ(V3D_BPOS);

	state->vpmbase = V3D_READ(V3D_VPMBASE);

	state->dbge = V3D_READ(V3D_DBGE);
	state->fdbgo = V3D_READ(V3D_FDBGO);
	state->fdbgb = V3D_READ(V3D_FDBGB);
	state->fdbgr = V3D_READ(V3D_FDBGR);
	state->fdbgs = V3D_READ(V3D_FDBGS);
	state->errstat = V3D_READ(V3D_ERRSTAT);

	/* We need to turn purgeable BOs into unpurgeable ones so that
	 * userspace has a chance to dump the hang state before the kernel
	 * decides to purge those BOs.
	 * Note that BO consistency at dump time cannot be guaranteed. For
	 * example, if the owner of these BOs decides to re-use them or mark
	 * them purgeable again there's nothing we can do to prevent it.
	 */
	for (i = 0; i < kernel_state->user_state.bo_count; i++) {
		struct vc4_bo *bo = to_vc4_bo(kernel_state->bo[i]);

		if (bo->madv == __VC4_MADV_NOTSUPP)
			continue;

		mutex_lock(&bo->madv_lock);
		if (!WARN_ON(bo->madv == __VC4_MADV_PURGED))
			bo->madv = VC4_MADV_WILLNEED;
		refcount_dec(&bo->usecnt);
		mutex_unlock(&bo->madv_lock);
	}

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (vc4->hang_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_free_hang_state(dev, kernel_state);
	} else {
		vc4->hang_state = kernel_state;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
	}
}

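/* Resets the GPU after a hang by cycling its runtime-PM reference (if
 * it is currently powered), then resets the V3D interrupt state (which
 * restarts any queued jobs) and rearms the hangcheck timer.
 */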
static void
vc4_reset(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	DRM_INFO("Resetting GPU.\n");

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount) {
		/* Power the device off and back on by dropping the
		 * reference on runtime PM.
		 */
		pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
		pm_runtime_get_sync(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	vc4_irq_reset(dev);

	/* Rearm the hangcheck -- another job might have been waiting
	 * for our hung one to get kicked off, and vc4_irq_reset()
	 * would have started it.
	 */
	vc4_queue_hangcheck(dev);
}

static void
vc4_reset_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, hangcheck.reset_work);

	vc4_save_hang_state(&vc4->base);

	vc4_reset(&vc4->base);
}

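/* Hangcheck timer callback: if neither the bin nor the render thread
 * has advanced since the last check, schedule a GPU reset from the
 * reset work item.
 */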
static void
vc4_hangcheck_elapsed(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
	struct drm_device *dev = &vc4->base;
	uint32_t ct0ca, ct1ca;
	unsigned long irqflags;
	struct vc4_exec_info *bin_exec, *render_exec;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	bin_exec = vc4_first_bin_job(vc4);
	render_exec = vc4_first_render_job(vc4);

	/* If idle, we can stop watching for hangs. */
	if (!bin_exec && !render_exec) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	ct0ca = V3D_READ(V3D_CTNCA(0));
	ct1ca = V3D_READ(V3D_CTNCA(1));

	/* If we've made any progress in execution, rearm the timer
	 * and wait.
	 */
	if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
	    (render_exec && ct1ca != render_exec->last_ct1ca)) {
		if (bin_exec)
			bin_exec->last_ct0ca = ct0ca;
		if (render_exec)
			render_exec->last_ct1ca = ct1ca;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_queue_hangcheck(dev);
		return;
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* We've gone too long with no progress, reset.  This has to
	 * be done from a work struct, since resetting can sleep and
	 * this timer hook isn't allowed to.
	 */
	schedule_work(&vc4->hangcheck.reset_work);
}

static void
submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	V3D_WRITE(V3D_CTNCA(thread), start);
	V3D_WRITE(V3D_CTNEA(thread), end);
}

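/* Waits for the given seqno to be signalled as finished.  Returns 0 on
 * success, -ETIME when the timeout expires, or -ERESTARTSYS if a signal
 * is received during an interruptible wait.  A timeout of ~0ull means
 * wait forever.
 */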
int
vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
		   bool interruptible)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long timeout_expire;
	DEFINE_WAIT(wait);

	if (vc4->finished_seqno >= seqno)
		return 0;

	if (timeout_ns == 0)
		return -ETIME;

	timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);

	trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
	for (;;) {
		prepare_to_wait(&vc4->job_wait_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE :
				TASK_UNINTERRUPTIBLE);

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (vc4->finished_seqno >= seqno)
			break;

		if (timeout_ns != ~0ull) {
			if (time_after_eq(jiffies, timeout_expire)) {
				ret = -ETIME;
				break;
			}
			schedule_timeout(timeout_expire - jiffies);
		} else {
			schedule();
		}
	}

	finish_wait(&vc4->job_wait_queue, &wait);
	trace_vc4_wait_for_seqno_end(dev, seqno);

	return ret;
}

static void
vc4_flush_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Flush the GPU L2 caches.  These caches sit on top of system
	 * L3 (the 128kb or so shared with the CPU), and are
	 * non-allocating in the L3.
	 */
	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

static void
vc4_flush_texture_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
}

/* Sets the registers for the next job to actually be executed in
 * the hardware.
 *
 * The job_lock should be held during this.
 */
void
vc4_submit_next_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec;

again:
	exec = vc4_first_bin_job(vc4);
	if (!exec)
		return;

	vc4_flush_caches(dev);

	/* Only start the perfmon if it was not already started by a previous
	 * job.
	 */
	if (exec->perfmon && vc4->active_perfmon != exec->perfmon)
		vc4_perfmon_start(vc4, exec->perfmon);

	/* Either put the job in the binner if it uses the binner, or
	 * immediately move it to the to-be-rendered queue.
	 */
	if (exec->ct0ca != exec->ct0ea) {
		submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
	} else {
		struct vc4_exec_info *next;

		vc4_move_job_to_render(dev, exec);
		next = vc4_first_bin_job(vc4);

		/* We can't start the next bin job if the previous job had a
		 * different perfmon instance attached to it. The same goes
		 * if one of them had a perfmon attached to it and the other
		 * one doesn't.
		 */
		if (next && next->perfmon == exec->perfmon)
			goto again;
	}
}

void
vc4_submit_next_render_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_render_job(vc4);

	if (!exec)
		return;

	/* A previous RCL may have written to one of our textures, and
	 * our full cache flush at bin time may have occurred before
	 * that RCL completed.  Flush the texture cache now, but not
	 * the instructions or uniforms (since we don't write those
	 * from an RCL).
	 */
	vc4_flush_texture_caches(dev);

	submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}

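/* Moves a job from the binner queue over to the render queue, and kicks
 * the render thread off if it was idle.
 */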
void
vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	bool was_empty = list_empty(&vc4->render_job_list);

	list_move_tail(&exec->head, &vc4->render_job_list);
	if (was_empty)
		vc4_submit_next_render_job(dev);
}

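/* Stamps every BO referenced by the job with the job's seqno and
 * attaches the job's fence to their reservation objects: shared for
 * plain references, exclusive for RCL write targets.
 */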
static void
vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
{
	struct vc4_bo *bo;
	unsigned i;

	for (i = 0; i < exec->bo_count; i++) {
		bo = to_vc4_bo(&exec->bo[i]->base);
		bo->seqno = seqno;

		dma_resv_add_shared_fence(bo->base.base.resv, exec->fence);
	}

	list_for_each_entry(bo, &exec->unref_list, unref_head) {
		bo->seqno = seqno;
	}

	for (i = 0; i < exec->rcl_write_bo_count; i++) {
		bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
		bo->write_seqno = seqno;

		dma_resv_add_excl_fence(bo->base.base.resv, exec->fence);
	}
}

static void
vc4_unlock_bo_reservations(struct drm_device *dev,
			   struct vc4_exec_info *exec,
			   struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = &exec->bo[i]->base;

		dma_resv_unlock(bo->resv);
	}

	ww_acquire_fini(acquire_ctx);
}

/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on exec->unref_list).  They're entirely private
 * to vc4, so we don't attach dma-buf fences to them.
 */
static int
vc4_lock_bo_reservations(struct drm_device *dev,
			 struct vc4_exec_info *exec,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int contended_lock = -1;
	int i, ret;
	struct drm_gem_object *bo;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended_lock != -1) {
		bo = &exec->bo[contended_lock]->base;
		ret = dma_resv_lock_slow_interruptible(bo->resv, acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < exec->bo_count; i++) {
		if (i == contended_lock)
			continue;

		bo = &exec->bo[i]->base;

		ret = dma_resv_lock_interruptible(bo->resv, acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++) {
				bo = &exec->bo[j]->base;
				dma_resv_unlock(bo->resv);
			}

			if (contended_lock != -1 && contended_lock >= i) {
				bo = &exec->bo[contended_lock]->base;

				dma_resv_unlock(bo->resv);
			}

			if (ret == -EDEADLK) {
				contended_lock = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	/* Reserve space for our shared (read-only) fence references,
	 * before we commit the CL to the hardware.
	 */
	for (i = 0; i < exec->bo_count; i++) {
		bo = &exec->bo[i]->base;

		ret = dma_resv_reserve_shared(bo->resv, 1);
		if (ret) {
			vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
			return ret;
		}
	}

	return 0;
}

/* Queues a struct vc4_exec_info for execution.  If no job is
 * currently executing, then submits it.
 *
 * Unlike most GPUs, our hardware only handles one command list at a
 * time.  To queue multiple jobs at once, we'd need to edit the
 * previous command list to have a jump to the new one at the end, and
 * then bump the end address.  That's a change for a later date,
 * though.
 */
static int
vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
		 struct ww_acquire_ctx *acquire_ctx,
		 struct drm_syncobj *out_sync)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *renderjob;
	uint64_t seqno;
	unsigned long irqflags;
	struct vc4_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;
	fence->dev = dev;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	seqno = ++vc4->emit_seqno;
	exec->seqno = seqno;

	dma_fence_init(&fence->base, &vc4_fence_ops, &vc4->job_lock,
		       vc4->dma_fence_context, exec->seqno);
	fence->seqno = exec->seqno;
	exec->fence = &fence->base;

	if (out_sync)
		drm_syncobj_replace_fence(out_sync, exec->fence);

	vc4_update_bo_seqnos(exec, seqno);

	vc4_unlock_bo_reservations(dev, exec, acquire_ctx);

	list_add_tail(&exec->head, &vc4->bin_job_list);

	/* If no bin job was executing and if the render job (if any) has the
	 * same perfmon as our job attached to it (or if both jobs don't have
	 * perfmon activated), then kick ours off.  Otherwise, it'll get
	 * started when the previous job's flush/render done interrupt occurs.
	 */
	renderjob = vc4_first_render_job(vc4);
	if (vc4_first_bin_job(vc4) == exec &&
	    (!renderjob || renderjob->perfmon == exec->perfmon)) {
		vc4_submit_next_bin_job(dev);
		vc4_queue_hangcheck(dev);
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return 0;
}

/**
 * vc4_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @exec: V3D job being set up
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list.  This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 */
static int
vc4_cl_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	uint32_t *handles;
	int ret = 0;
	int i;

	exec->bo_count = args->bo_handle_count;

	if (!exec->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		DRM_DEBUG("Rendering requires BOs to validate\n");
		return -EINVAL;
	}

	exec->bo = kvmalloc_array(exec->bo_count,
				  sizeof(struct drm_gem_cma_object *),
				  GFP_KERNEL | __GFP_ZERO);
	if (!exec->bo) {
		DRM_ERROR("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

	handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_ERROR("Failed to allocate incoming GEM handles\n");
		goto fail;
	}

	if (copy_from_user(handles, u64_to_user_ptr(args->bo_handles),
			   exec->bo_count * sizeof(uint32_t))) {
		ret = -EFAULT;
		DRM_ERROR("Failed to copy in GEM handles\n");
		goto fail;
	}

	spin_lock(&file_priv->table_lock);
	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  i, handles[i]);
			ret = -EINVAL;
			break;
		}

		drm_gem_object_get(bo);
		exec->bo[i] = (struct drm_gem_cma_object *)bo;
	}
	spin_unlock(&file_priv->table_lock);

	if (ret)
		goto fail_put_bo;

	for (i = 0; i < exec->bo_count; i++) {
		ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base));
		if (ret)
			goto fail_dec_usecnt;
	}

	kvfree(handles);
	return 0;

fail_dec_usecnt:
	/* Decrease usecnt on acquired objects.
	 * We cannot rely on vc4_complete_exec() to release resources here,
	 * because vc4_complete_exec() has no information about which BO has
	 * had its ->usecnt incremented.
	 * To make things easier we just free everything explicitly and set
	 * exec->bo to NULL so that vc4_complete_exec() skips the 'BO release'
	 * step.
	 */
	for (i-- ; i >= 0; i--)
		vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base));

fail_put_bo:
	/* Release any reference to acquired objects. */
	for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
		drm_gem_object_put(&exec->bo[i]->base);

fail:
	kvfree(handles);
	kvfree(exec->bo);
	exec->bo = NULL;
	return ret;
}

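/* Copies the binner command list, shader records and uniforms in from
 * userspace, allocates a BO to hold the validated copies, and runs the
 * bin CL and shader record validators over them.
 */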
static int
vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	void *temp = NULL;
	void *bin;
	int ret = 0;
	uint32_t bin_offset = 0;
	uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
					     16);
	uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
	uint32_t exec_size = uniforms_offset + args->uniforms_size;
	uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
					  args->shader_rec_count);
	struct vc4_bo *bo;

	if (shader_rec_offset < args->bin_cl_size ||
	    uniforms_offset < shader_rec_offset ||
	    exec_size < uniforms_offset ||
	    args->shader_rec_count >= (UINT_MAX /
					  sizeof(struct vc4_shader_state)) ||
	    temp_size < exec_size) {
		DRM_DEBUG("overflow in exec arguments\n");
		ret = -EINVAL;
		goto fail;
	}

	/* Allocate space where we'll store the copied in user command lists
	 * and shader records.
	 *
	 * We don't just copy directly into the BOs because we need to
	 * read the contents back for validation, and I think the
	 * bo->vaddr is uncached access.
	 */
	temp = kvmalloc_array(temp_size, 1, GFP_KERNEL);
	if (!temp) {
		DRM_ERROR("Failed to allocate storage for copying "
			  "in bin/render CLs.\n");
		ret = -ENOMEM;
		goto fail;
	}
	bin = temp + bin_offset;
	exec->shader_rec_u = temp + shader_rec_offset;
	exec->uniforms_u = temp + uniforms_offset;
	exec->shader_state = temp + exec_size;
	exec->shader_state_size = args->shader_rec_count;

	if (copy_from_user(bin,
			   u64_to_user_ptr(args->bin_cl),
			   args->bin_cl_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->shader_rec_u,
			   u64_to_user_ptr(args->shader_rec),
			   args->shader_rec_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->uniforms_u,
			   u64_to_user_ptr(args->uniforms),
			   args->uniforms_size)) {
		ret = -EFAULT;
		goto fail;
	}

	bo = vc4_bo_create(dev, exec_size, true, VC4_BO_TYPE_BCL);
	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate BO for binning\n");
		ret = PTR_ERR(bo);
		goto fail;
	}
	exec->exec_bo = &bo->base;

	list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
		      &exec->unref_list);

	exec->ct0ca = exec->exec_bo->paddr + bin_offset;

	exec->bin_u = bin;

	exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
	exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
	exec->shader_rec_size = args->shader_rec_size;

	exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
	exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
	exec->uniforms_size = args->uniforms_size;

	ret = vc4_validate_bin_cl(dev,
				  exec->exec_bo->vaddr + bin_offset,
				  bin,
				  exec);
	if (ret)
		goto fail;

	ret = vc4_validate_shader_recs(dev, exec);
	if (ret)
		goto fail;

	if (exec->found_tile_binning_mode_config_packet) {
		ret = vc4_v3d_bin_bo_get(vc4, &exec->bin_bo_used);
		if (ret)
			goto fail;
	}

	/* Block waiting on any previous rendering into the CS's VBO,
	 * IB, or textures, so that pixels are actually written by the
	 * time we try to read them.
	 */
	ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);

fail:
	kvfree(temp);
	return ret;
}

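/* Final cleanup of a job: signals its fence if it hasn't been signalled
 * yet, drops the references taken on its BOs, releases the bin slots,
 * binner BO, perfmon and power references, and frees the exec struct.
 */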
static void
vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	unsigned i;

	/* If we got force-completed because of GPU reset rather than
	 * through our IRQ handler, signal the fence now.
	 */
	if (exec->fence) {
		dma_fence_signal(exec->fence);
		dma_fence_put(exec->fence);
	}

	if (exec->bo) {
		for (i = 0; i < exec->bo_count; i++) {
			struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);

			vc4_bo_dec_usecnt(bo);
			drm_gem_object_put(&exec->bo[i]->base);
		}
		kvfree(exec->bo);
	}

	while (!list_empty(&exec->unref_list)) {
		struct vc4_bo *bo = list_first_entry(&exec->unref_list,
						     struct vc4_bo, unref_head);
		list_del(&bo->unref_head);
		drm_gem_object_put(&bo->base.base);
	}

	/* Free up the allocation of any bin slots we used. */
	spin_lock_irqsave(&vc4->job_lock, irqflags);
	vc4->bin_alloc_used &= ~exec->bin_slots;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Release the reference on the binner BO if needed. */
	if (exec->bin_bo_used)
		vc4_v3d_bin_bo_put(vc4);

	/* Release the reference we had on the perf monitor. */
	vc4_perfmon_put(exec->perfmon);

	vc4_v3d_pm_put(vc4);

	kfree(exec);
}

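/* Retires all jobs on the done list and kicks off any seqno callbacks
 * whose seqno has now been reached.
 */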
void
vc4_job_handle_completed(struct vc4_dev *vc4)
{
	unsigned long irqflags;
	struct vc4_seqno_cb *cb, *cb_temp;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	while (!list_empty(&vc4->job_done_list)) {
		struct vc4_exec_info *exec =
			list_first_entry(&vc4->job_done_list,
					 struct vc4_exec_info, head);
		list_del(&exec->head);

		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_complete_exec(&vc4->base, exec);
		spin_lock_irqsave(&vc4->job_lock, irqflags);
	}

	list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
		if (cb->seqno <= vc4->finished_seqno) {
			list_del_init(&cb->work.entry);
			schedule_work(&cb->work);
		}
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}

static void vc4_seqno_cb_work(struct work_struct *work)
{
	struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);

	cb->func(cb);
}

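/* Registers a callback to be run from the workqueue once the given
 * seqno has finished; if it already has, the work is scheduled
 * immediately.
 */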
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb))
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long irqflags;

	cb->func = func;
	INIT_WORK(&cb->work, vc4_seqno_cb_work);

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (seqno > vc4->finished_seqno) {
		cb->seqno = seqno;
		list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
	} else {
		schedule_work(&cb->work);
	}
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return ret;
}

/* Scheduled when any job has been completed, this walks the list of
 * jobs that had completed and unrefs their BOs and frees their exec
 * structs.
 */
static void
vc4_job_done_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, job_done_work);

	vc4_job_handle_completed(vc4);
}

static int
vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
				uint64_t seqno,
				uint64_t *timeout_ns)
{
	unsigned long start = jiffies;
	int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);

	if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
		uint64_t delta = jiffies_to_nsecs(jiffies - start);

		if (*timeout_ns >= delta)
			*timeout_ns -= delta;
	}

	return ret;
}

int
vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_vc4_wait_seqno *args = data;

	return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
					       &args->timeout_ns);
}

int
vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_vc4_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}
	bo = to_vc4_bo(gem_obj);

	ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
					      &args->timeout_ns);

	drm_gem_object_put(gem_obj);
	return ret;
}

/**
 * vc4_submit_cl_ioctl() - Submits a job (frame) to the VC4.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU.  Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_file *vc4file = file_priv->driver_priv;
	struct drm_vc4_submit_cl *args = data;
	struct drm_syncobj *out_sync = NULL;
	struct vc4_exec_info *exec;
	struct ww_acquire_ctx acquire_ctx;
	struct dma_fence *in_fence;
	int ret = 0;

	if (!vc4->v3d) {
		DRM_DEBUG("VC4_SUBMIT_CL with no VC4 V3D probed\n");
		return -ENODEV;
	}

	if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR |
			     VC4_SUBMIT_CL_FIXED_RCL_ORDER |
			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y)) != 0) {
		DRM_DEBUG("Unknown flags: 0x%02x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad2 != 0) {
		DRM_DEBUG("Invalid pad: 0x%08x\n", args->pad2);
		return -EINVAL;
	}

	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
	if (!exec) {
		DRM_ERROR("malloc failure on exec struct\n");
		return -ENOMEM;
	}

	ret = vc4_v3d_pm_get(vc4);
	if (ret) {
		kfree(exec);
		return ret;
	}

	exec->args = args;
	INIT_LIST_HEAD(&exec->unref_list);

	ret = vc4_cl_lookup_bos(dev, file_priv, exec);
	if (ret)
		goto fail;

	if (args->perfmonid) {
		exec->perfmon = vc4_perfmon_find(vc4file,
						 args->perfmonid);
		if (!exec->perfmon) {
			ret = -ENOENT;
			goto fail;
		}
	}

	if (args->in_sync) {
		ret = drm_syncobj_find_fence(file_priv, args->in_sync,
					     0, 0, &in_fence);
		if (ret)
			goto fail;

		/* When the fence (or fence array) is exclusively from our
		 * context we can skip the wait since jobs are executed in
		 * order of their submission through this ioctl and this can
		 * only have fences from a prior job.
		 */
		if (!dma_fence_match_context(in_fence,
					     vc4->dma_fence_context)) {
			ret = dma_fence_wait(in_fence, true);
			if (ret) {
				dma_fence_put(in_fence);
				goto fail;
			}
		}

		dma_fence_put(in_fence);
	}

	if (exec->args->bin_cl_size != 0) {
		ret = vc4_get_bcl(dev, exec);
		if (ret)
			goto fail;
	} else {
		exec->ct0ca = 0;
		exec->ct0ea = 0;
	}

	ret = vc4_get_rcl(dev, exec);
	if (ret)
		goto fail;

	ret = vc4_lock_bo_reservations(dev, exec, &acquire_ctx);
	if (ret)
		goto fail;

	if (args->out_sync) {
		out_sync = drm_syncobj_find(file_priv, args->out_sync);
		if (!out_sync) {
			ret = -EINVAL;
			goto fail;
		}
1231*4882a593Smuzhiyun 
1232*4882a593Smuzhiyun 		/* We replace the fence in out_sync in vc4_queue_submit since
1233*4882a593Smuzhiyun 		 * the render job could execute immediately after that call.
1234*4882a593Smuzhiyun 		 * If it finishes before our ioctl processing resumes, the
1235*4882a593Smuzhiyun 		 * render job fence could already have been freed.
1236*4882a593Smuzhiyun 		 */
1237*4882a593Smuzhiyun 	}
1238*4882a593Smuzhiyun 
1239*4882a593Smuzhiyun 	/* Clear this out of the struct we'll be putting in the queue,
1240*4882a593Smuzhiyun 	 * since it's part of our stack.
1241*4882a593Smuzhiyun 	 */
1242*4882a593Smuzhiyun 	exec->args = NULL;
1243*4882a593Smuzhiyun 
1244*4882a593Smuzhiyun 	ret = vc4_queue_submit(dev, exec, &acquire_ctx, out_sync);
1245*4882a593Smuzhiyun 
1246*4882a593Smuzhiyun 	/* The syncobj isn't part of the exec data and we need to free our
1247*4882a593Smuzhiyun 	 * reference even if job submission failed.
1248*4882a593Smuzhiyun 	 */
1249*4882a593Smuzhiyun 	if (out_sync)
1250*4882a593Smuzhiyun 		drm_syncobj_put(out_sync);
1251*4882a593Smuzhiyun 
1252*4882a593Smuzhiyun 	if (ret)
1253*4882a593Smuzhiyun 		goto fail;
1254*4882a593Smuzhiyun 
1255*4882a593Smuzhiyun 	/* Return the seqno for our job. */
1256*4882a593Smuzhiyun 	args->seqno = vc4->emit_seqno;
1257*4882a593Smuzhiyun 
1258*4882a593Smuzhiyun 	return 0;
1259*4882a593Smuzhiyun 
1260*4882a593Smuzhiyun fail:
1261*4882a593Smuzhiyun 	vc4_complete_exec(&vc4->base, exec);
1262*4882a593Smuzhiyun 
1263*4882a593Smuzhiyun 	return ret;
1264*4882a593Smuzhiyun }
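
/*
 * Illustrative only, not part of the driver: a minimal sketch of how a
 * userspace client might drive the submit path above through libdrm's
 * drmIoctl() wrapper, using the DRM_IOCTL_VC4_SUBMIT_CL and
 * DRM_IOCTL_VC4_WAIT_SEQNO definitions from vc4_drm.h.  The command-list
 * pointers, sizes and BO handle array are placeholders supplied by the
 * caller, and most framebuffer-setup fields are elided.
 *
 *	struct drm_vc4_submit_cl submit = {
 *		.bin_cl = (uintptr_t)bin_cl,
 *		.bin_cl_size = bin_cl_size,
 *		.shader_rec = (uintptr_t)shader_rec,
 *		.shader_rec_size = shader_rec_size,
 *		.shader_rec_count = shader_rec_count,
 *		.bo_handles = (uintptr_t)bo_handles,
 *		.bo_handle_count = bo_handle_count,
 *		(width, height, tile bounds and render-target fields go here)
 *	};
 *	int ret = drmIoctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit);
 *
 *	if (ret == 0) {
 *		struct drm_vc4_wait_seqno wait = {
 *			.seqno = submit.seqno,
 *			.timeout_ns = ~0ULL,
 *		};
 *		ret = drmIoctl(fd, DRM_IOCTL_VC4_WAIT_SEQNO, &wait);
 *	}
 */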
1265*4882a593Smuzhiyun 
1266*4882a593Smuzhiyun static void vc4_gem_destroy(struct drm_device *dev, void *unused);
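/*
 * vc4_gem_init() - Sets up the per-device GEM execution state: the DMA
 * fence context, the binner/render/done job lists and seqno callback list,
 * the hangcheck timer and reset work, the job-done work item, and the
 * power and purgeable-pool locks.  vc4_gem_destroy() is registered as a
 * DRM-managed action so this state is torn down with the drm_device.
 */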
1267*4882a593Smuzhiyun int vc4_gem_init(struct drm_device *dev)
1268*4882a593Smuzhiyun {
1269*4882a593Smuzhiyun 	struct vc4_dev *vc4 = to_vc4_dev(dev);
1270*4882a593Smuzhiyun 
1271*4882a593Smuzhiyun 	vc4->dma_fence_context = dma_fence_context_alloc(1);
1272*4882a593Smuzhiyun 
1273*4882a593Smuzhiyun 	INIT_LIST_HEAD(&vc4->bin_job_list);
1274*4882a593Smuzhiyun 	INIT_LIST_HEAD(&vc4->render_job_list);
1275*4882a593Smuzhiyun 	INIT_LIST_HEAD(&vc4->job_done_list);
1276*4882a593Smuzhiyun 	INIT_LIST_HEAD(&vc4->seqno_cb_list);
1277*4882a593Smuzhiyun 	spin_lock_init(&vc4->job_lock);
1278*4882a593Smuzhiyun 
1279*4882a593Smuzhiyun 	INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
1280*4882a593Smuzhiyun 	timer_setup(&vc4->hangcheck.timer, vc4_hangcheck_elapsed, 0);
1281*4882a593Smuzhiyun 
1282*4882a593Smuzhiyun 	INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun 	mutex_init(&vc4->power_lock);
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun 	INIT_LIST_HEAD(&vc4->purgeable.list);
1287*4882a593Smuzhiyun 	mutex_init(&vc4->purgeable.lock);
1288*4882a593Smuzhiyun 
1289*4882a593Smuzhiyun 	return drmm_add_action_or_reset(dev, vc4_gem_destroy, NULL);
1290*4882a593Smuzhiyun }
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun static void vc4_gem_destroy(struct drm_device *dev, void *unused)
1293*4882a593Smuzhiyun {
1294*4882a593Smuzhiyun 	struct vc4_dev *vc4 = to_vc4_dev(dev);
1295*4882a593Smuzhiyun 
1296*4882a593Smuzhiyun 	/* Waiting for exec to finish would need to be done before
1297*4882a593Smuzhiyun 	 * unregistering V3D.
1298*4882a593Smuzhiyun 	 */
1299*4882a593Smuzhiyun 	WARN_ON(vc4->emit_seqno != vc4->finished_seqno);
1300*4882a593Smuzhiyun 
1301*4882a593Smuzhiyun 	/* V3D should already have disabled its interrupt and cleared
1302*4882a593Smuzhiyun 	 * the overflow allocation registers.  Now free the object.
1303*4882a593Smuzhiyun 	 */
1304*4882a593Smuzhiyun 	if (vc4->bin_bo) {
1305*4882a593Smuzhiyun 		drm_gem_object_put(&vc4->bin_bo->base.base);
1306*4882a593Smuzhiyun 		vc4->bin_bo = NULL;
1307*4882a593Smuzhiyun 	}
1308*4882a593Smuzhiyun 
1309*4882a593Smuzhiyun 	if (vc4->hang_state)
1310*4882a593Smuzhiyun 		vc4_free_hang_state(dev, vc4->hang_state);
1311*4882a593Smuzhiyun }
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
1314*4882a593Smuzhiyun 			  struct drm_file *file_priv)
1315*4882a593Smuzhiyun {
1316*4882a593Smuzhiyun 	struct drm_vc4_gem_madvise *args = data;
1317*4882a593Smuzhiyun 	struct drm_gem_object *gem_obj;
1318*4882a593Smuzhiyun 	struct vc4_bo *bo;
1319*4882a593Smuzhiyun 	int ret;
1320*4882a593Smuzhiyun 
1321*4882a593Smuzhiyun 	switch (args->madv) {
1322*4882a593Smuzhiyun 	case VC4_MADV_DONTNEED:
1323*4882a593Smuzhiyun 	case VC4_MADV_WILLNEED:
1324*4882a593Smuzhiyun 		break;
1325*4882a593Smuzhiyun 	default:
1326*4882a593Smuzhiyun 		return -EINVAL;
1327*4882a593Smuzhiyun 	}
1328*4882a593Smuzhiyun 
1329*4882a593Smuzhiyun 	if (args->pad != 0)
1330*4882a593Smuzhiyun 		return -EINVAL;
1331*4882a593Smuzhiyun 
1332*4882a593Smuzhiyun 	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
1333*4882a593Smuzhiyun 	if (!gem_obj) {
1334*4882a593Smuzhiyun 		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
1335*4882a593Smuzhiyun 		return -ENOENT;
1336*4882a593Smuzhiyun 	}
1337*4882a593Smuzhiyun 
1338*4882a593Smuzhiyun 	bo = to_vc4_bo(gem_obj);
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun 	/* Only BOs exposed to userspace can be purged. */
1341*4882a593Smuzhiyun 	if (bo->madv == __VC4_MADV_NOTSUPP) {
1342*4882a593Smuzhiyun 		DRM_DEBUG("madvise not supported on this BO\n");
1343*4882a593Smuzhiyun 		ret = -EINVAL;
1344*4882a593Smuzhiyun 		goto out_put_gem;
1345*4882a593Smuzhiyun 	}
1346*4882a593Smuzhiyun 
1347*4882a593Smuzhiyun 	/* Not sure it's safe to purge imported BOs. Let's just assume it's
1348*4882a593Smuzhiyun 	 * not until proven otherwise.
1349*4882a593Smuzhiyun 	 */
1350*4882a593Smuzhiyun 	if (gem_obj->import_attach) {
1351*4882a593Smuzhiyun 		DRM_DEBUG("madvise not supported on imported BOs\n");
1352*4882a593Smuzhiyun 		ret = -EINVAL;
1353*4882a593Smuzhiyun 		goto out_put_gem;
1354*4882a593Smuzhiyun 	}
1355*4882a593Smuzhiyun 
1356*4882a593Smuzhiyun 	mutex_lock(&bo->madv_lock);
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun 	if (args->madv == VC4_MADV_DONTNEED && bo->madv == VC4_MADV_WILLNEED &&
1359*4882a593Smuzhiyun 	    !refcount_read(&bo->usecnt)) {
1360*4882a593Smuzhiyun 		/* If the BO is about to be marked as purgeable, is not used
1361*4882a593Smuzhiyun 		 * and is not already purgeable or purged, add it to the
1362*4882a593Smuzhiyun 		 * purgeable list.
1363*4882a593Smuzhiyun 		 */
1364*4882a593Smuzhiyun 		vc4_bo_add_to_purgeable_pool(bo);
1365*4882a593Smuzhiyun 	} else if (args->madv == VC4_MADV_WILLNEED &&
1366*4882a593Smuzhiyun 		   bo->madv == VC4_MADV_DONTNEED &&
1367*4882a593Smuzhiyun 		   !refcount_read(&bo->usecnt)) {
1368*4882a593Smuzhiyun 		/* The BO has not been purged yet, just remove it from
1369*4882a593Smuzhiyun 		 * the purgeable list.
1370*4882a593Smuzhiyun 		 */
1371*4882a593Smuzhiyun 		vc4_bo_remove_from_purgeable_pool(bo);
1372*4882a593Smuzhiyun 	}
1373*4882a593Smuzhiyun 
1374*4882a593Smuzhiyun 	/* Report to userspace whether the BO was retained (not purged). */
1375*4882a593Smuzhiyun 	args->retained = bo->madv != __VC4_MADV_PURGED;
1376*4882a593Smuzhiyun 
1377*4882a593Smuzhiyun 	/* Update internal madv state only if the bo was not purged. */
1378*4882a593Smuzhiyun 	if (bo->madv != __VC4_MADV_PURGED)
1379*4882a593Smuzhiyun 		bo->madv = args->madv;
1380*4882a593Smuzhiyun 
1381*4882a593Smuzhiyun 	mutex_unlock(&bo->madv_lock);
1382*4882a593Smuzhiyun 
1383*4882a593Smuzhiyun 	ret = 0;
1384*4882a593Smuzhiyun 
1385*4882a593Smuzhiyun out_put_gem:
1386*4882a593Smuzhiyun 	drm_gem_object_put(gem_obj);
1387*4882a593Smuzhiyun 
1388*4882a593Smuzhiyun 	return ret;
1389*4882a593Smuzhiyun }
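
/*
 * Illustrative only, not part of the driver: a minimal userspace sketch of
 * the madvise protocol handled above, assuming libdrm's drmIoctl() wrapper,
 * the DRM_IOCTL_VC4_GEM_MADVISE definition from vc4_drm.h, and an existing
 * BO handle.  A client marks an idle BO DONTNEED when its contents are
 * expendable, and checks "retained" when marking it WILLNEED again: if the
 * kernel purged the BO in the meantime, its contents must be regenerated.
 *
 *	struct drm_vc4_gem_madvise madv = {
 *		.handle = bo_handle,
 *		.madv = VC4_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &madv);
 *
 *	(... later, when the BO is needed again ...)
 *
 *	madv.madv = VC4_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		regenerate_bo_contents(bo_handle);  (hypothetical helper)
 */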
1390