xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/nouveau/nouveau_gem.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>
#include <nvif/push206e.h>

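/*
 * Final teardown of a GEM buffer object, reached once the last reference
 * has been dropped (this is wired up as the driver's GEM free callback
 * elsewhere).  The GPU is woken via runtime PM first, since destroying
 * the BO (and any PRIME import state) may need to touch the hardware.
 */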
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (WARN_ON(ret < 0 && ret != -EACCES)) {
		pm_runtime_put_autosuspend(dev);
		return;
	}

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	ttm_bo_put(&nvbo->bo);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

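/*
 * Per-client open callback: when a DRM client gains a handle to this
 * object, create (or take another reference on) its mapping in that
 * client's VMM.  Pre-NV50 VMMs have no per-client address space, so
 * there is nothing to do there.
 */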
63*4882a593Smuzhiyun int
nouveau_gem_object_open(struct drm_gem_object * gem,struct drm_file * file_priv)64*4882a593Smuzhiyun nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
65*4882a593Smuzhiyun {
66*4882a593Smuzhiyun 	struct nouveau_cli *cli = nouveau_cli(file_priv);
67*4882a593Smuzhiyun 	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
68*4882a593Smuzhiyun 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
69*4882a593Smuzhiyun 	struct device *dev = drm->dev->dev;
70*4882a593Smuzhiyun 	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
71*4882a593Smuzhiyun 	struct nouveau_vma *vma;
72*4882a593Smuzhiyun 	int ret;
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun 	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
75*4882a593Smuzhiyun 		return 0;
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun 	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
78*4882a593Smuzhiyun 	if (ret)
79*4882a593Smuzhiyun 		return ret;
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun 	ret = pm_runtime_get_sync(dev);
82*4882a593Smuzhiyun 	if (ret < 0 && ret != -EACCES) {
83*4882a593Smuzhiyun 		pm_runtime_put_autosuspend(dev);
84*4882a593Smuzhiyun 		goto out;
85*4882a593Smuzhiyun 	}
86*4882a593Smuzhiyun 
87*4882a593Smuzhiyun 	ret = nouveau_vma_new(nvbo, vmm, &vma);
88*4882a593Smuzhiyun 	pm_runtime_mark_last_busy(dev);
89*4882a593Smuzhiyun 	pm_runtime_put_autosuspend(dev);
90*4882a593Smuzhiyun out:
91*4882a593Smuzhiyun 	ttm_bo_unreserve(&nvbo->bo);
92*4882a593Smuzhiyun 	return ret;
93*4882a593Smuzhiyun }
94*4882a593Smuzhiyun 
struct nouveau_gem_object_unmap {
	struct nouveau_cli_work work;
	struct nouveau_vma *vma;
};

static void
nouveau_gem_object_delete(struct nouveau_vma *vma)
{
	nouveau_fence_unref(&vma->fence);
	nouveau_vma_del(&vma);
}

static void
nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
{
	struct nouveau_gem_object_unmap *work =
		container_of(w, typeof(*work), work);
	nouveau_gem_object_delete(work->vma);
	kfree(work);
}

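/*
 * Tear down a VMA whose mapping may still be in use by the GPU.  If a
 * fence is attached, deletion is deferred to the client workqueue and
 * only runs once the fence signals.  If the work item can't be
 * allocated, fall back to a bounded synchronous wait (2 seconds) and
 * delete immediately.
 */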
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
	struct nouveau_gem_object_unmap *work;

	list_del_init(&vma->head);

	if (!fence) {
		nouveau_gem_object_delete(vma);
		return;
	}

	if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
		nouveau_gem_object_delete(vma);
		return;
	}

	work->work.func = nouveau_gem_object_delete_work;
	work->vma = vma;
	nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}

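/*
 * Per-client close callback: drop the client's reference on its VMA for
 * this object, and unmap it (possibly deferred on a fence, see above)
 * once the last reference goes away.
 */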
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;
	int ret;

	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_vma_find(nvbo, vmm);
	if (vma) {
		if (--vma->refs == 0) {
			ret = pm_runtime_get_sync(dev);
			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
				nouveau_gem_object_unmap(nvbo, vma);
				pm_runtime_mark_last_busy(dev);
			}
			pm_runtime_put_autosuspend(dev);
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}

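/*
 * Allocate a new buffer object and initialise the GEM object embedded in
 * it.  On success the caller receives a single GEM reference to the BO
 * rather than a raw TTM reference.
 */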
int
nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
	int ret;

	if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
		domain |= NOUVEAU_GEM_DOMAIN_CPU;

	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
				tile_flags);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
	if (ret) {
		drm_gem_object_release(&nvbo->bo.base);
		kfree(nvbo);
		return ret;
	}

	ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
	if (ret)
		return ret;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		nvbo->valid_domains &= domain;

	nvbo->bo.persistent_swap_storage = nvbo->bo.base.filp;
	*pnvbo = nvbo;
	return 0;
}

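/*
 * Fill in the drm_nouveau_gem_info reply for a buffer: current domain,
 * GPU offset (the client's VMA address on NV50+), size, mmap offset and
 * the per-generation kind/compression encoding of tile_flags.
 */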
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;

	if (is_power_of_2(nvbo->valid_domains))
		rep->domain = nvbo->valid_domains;
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
	rep->offset = nvbo->offset;
	if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
		vma = nouveau_vma_find(nvbo, vmm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->addr;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
	rep->tile_mode = nvbo->mode;
	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
		rep->tile_flags |= nvbo->kind << 8;
	else
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
	else
		rep->tile_flags |= nvbo->zeta;
	return 0;
}

int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	ret = nouveau_gem_new(cli, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
				    &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&nvbo->bo.base);
	return ret;
}

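/*
 * Choose a TTM placement for a buffer from the domains the submission
 * asked for.  The buffer's current location is preferred when it is an
 * acceptable domain, to avoid a needless migration; otherwise VRAM is
 * tried before GART.
 */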
282*4882a593Smuzhiyun static int
nouveau_gem_set_domain(struct drm_gem_object * gem,uint32_t read_domains,uint32_t write_domains,uint32_t valid_domains)283*4882a593Smuzhiyun nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
284*4882a593Smuzhiyun 		       uint32_t write_domains, uint32_t valid_domains)
285*4882a593Smuzhiyun {
286*4882a593Smuzhiyun 	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
287*4882a593Smuzhiyun 	struct ttm_buffer_object *bo = &nvbo->bo;
288*4882a593Smuzhiyun 	uint32_t domains = valid_domains & nvbo->valid_domains &
289*4882a593Smuzhiyun 		(write_domains ? write_domains : read_domains);
290*4882a593Smuzhiyun 	uint32_t pref_domains = 0;;
291*4882a593Smuzhiyun 
292*4882a593Smuzhiyun 	if (!domains)
293*4882a593Smuzhiyun 		return -EINVAL;
294*4882a593Smuzhiyun 
295*4882a593Smuzhiyun 	valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);
296*4882a593Smuzhiyun 
297*4882a593Smuzhiyun 	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
298*4882a593Smuzhiyun 	    bo->mem.mem_type == TTM_PL_VRAM)
299*4882a593Smuzhiyun 		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
300*4882a593Smuzhiyun 
301*4882a593Smuzhiyun 	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
302*4882a593Smuzhiyun 		 bo->mem.mem_type == TTM_PL_TT)
303*4882a593Smuzhiyun 		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
304*4882a593Smuzhiyun 
305*4882a593Smuzhiyun 	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
306*4882a593Smuzhiyun 		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
307*4882a593Smuzhiyun 
308*4882a593Smuzhiyun 	else
309*4882a593Smuzhiyun 		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
310*4882a593Smuzhiyun 
311*4882a593Smuzhiyun 	nouveau_bo_placement_set(nvbo, pref_domains, valid_domains);
312*4882a593Smuzhiyun 
313*4882a593Smuzhiyun 	return 0;
314*4882a593Smuzhiyun }
315*4882a593Smuzhiyun 
struct validate_op {
	struct list_head list;
	struct ww_acquire_ctx ticket;
};

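/*
 * Undo a validation pass: for every buffer still on the list, attach the
 * submission fence (if any) to its reservation object and per-channel
 * VMA, release any kmap taken for relocations, then unreserve the buffer
 * and drop the lookup reference.
 */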
static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
			struct nouveau_fence *fence,
			struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_bo *nvbo;
	struct drm_nouveau_gem_pushbuf_bo *b;

	while (!list_empty(&op->list)) {
		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
		b = &pbbo[nvbo->pbbo_index];

		if (likely(fence)) {
			nouveau_bo_fence(nvbo, fence, !!b->write_domains);

			if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
				struct nouveau_vma *vma =
					(void *)(unsigned long)b->user_priv;
				nouveau_fence_unref(&vma->fence);
				dma_fence_get(&fence->base);
				vma->fence = fence;
			}
		}

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_put(&nvbo->bo.base);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_channel *chan,
	      struct nouveau_fence *fence,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	validate_fini_no_ticket(op, chan, fence, pbbo);
	ww_acquire_fini(&op->ticket);
}

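/*
 * Reserve every buffer named in the pushbuf under a single ww_mutex
 * acquire ticket.  On -EDEADLK the whole set is backed off, the
 * contended buffer is re-reserved via the slowpath, and the loop
 * restarts: the standard wound/wait dance that keeps multi-BO locking
 * deadlock-free.  Each buffer's user_priv is pointed at its per-channel
 * VMA (or at the BO itself on pre-NV50) for the later stages.
 */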
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int trycnt = 0;
	int ret = -EINVAL, i;
	struct nouveau_bo *res_bo = NULL;
	LIST_HEAD(gart_list);
	LIST_HEAD(vram_list);
	LIST_HEAD(both_list);

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(file_priv, b->handle);
		if (!gem) {
			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
			ret = -ENOENT;
			break;
		}
		nvbo = nouveau_gem_object(gem);
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_put(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_put(gem);
			ret = -EINVAL;
			break;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
		if (ret) {
			list_splice_tail_init(&vram_list, &op->list);
			list_splice_tail_init(&gart_list, &op->list);
			list_splice_tail_init(&both_list, &op->list);
			validate_fini_no_ticket(op, chan, NULL, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_PRINTK(err, cli, "fail reserve\n");
				break;
			}
		}

		if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
			struct nouveau_vmm *vmm = chan->vmm;
			struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
			if (!vma) {
				NV_PRINTK(err, cli, "vma not found!\n");
				ret = -EINVAL;
				break;
			}

			b->user_priv = (uint64_t)(unsigned long)vma;
		} else {
			b->user_priv = (uint64_t)(unsigned long)nvbo;
		}

		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &gart_list);
		else {
			NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &both_list);
			ret = -EINVAL;
			break;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	list_splice_tail(&vram_list, &op->list);
	list_splice_tail(&gart_list, &op->list);
	list_splice_tail(&both_list, &op->list);
	if (ret)
		validate_fini(op, chan, NULL, NULL);
	return ret;
}

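/*
 * Validate each reserved buffer: apply the requested placement, let TTM
 * move the buffer if needed, then synchronise it against the channel.
 * On pre-Tesla chips the presumed offset/domain recorded by userspace is
 * refreshed, and the return value counts how many entries went stale,
 * i.e. how many relocations will have to be applied.
 */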
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_drm *drm = chan->drm;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_PRINTK(err, cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail post-validate sync\n");
			return ret;
		}

		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
			if (nvbo->offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->offset;
			b->presumed.valid = 0;
			relocs++;
		}
	}

	return relocs;
}

static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     int nr_buffers,
			     struct validate_op *op, bool *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret;

	INIT_LIST_HEAD(&op->list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, cli, &op->list, pbbo);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validating bo list\n");
		validate_fini(op, chan, NULL, NULL);
		return ret;
	} else if (ret > 0) {
		*apply_relocs = true;
	}

	return 0;
}

static inline void
u_free(void *addr)
{
	kvfree(addr);
}

static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	size *= nmemb;

	mem = kvmalloc(size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, userptr, size)) {
		u_free(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

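/*
 * Patch relocations into the pushbuf.  The reloc and bo arrays were
 * copied in via u_memcpya(), with their counts already bounded against
 * NOUVEAU_GEM_MAX_RELOCS/NOUVEAU_GEM_MAX_BUFFERS by the caller.  For
 * each entry whose presumed address went stale, the target word is
 * rewritten with the low or high 32 bits of the new offset, optionally
 * OR'ing in domain-dependent bits (vor for VRAM, tor for GART).
 */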
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_reloc *reloc,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	int ret = 0;
	unsigned i;

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_PRINTK(err, cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_PRINTK(err, cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		ret = ttm_bo_wait(&nvbo->bo, false, false);
		if (ret) {
			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	return ret;
}

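/*
 * Main command submission ioctl.  In outline: find the target channel,
 * copy in the push and buffer arrays (counts bounded first), reserve and
 * validate every buffer, apply relocations if any presumed address went
 * stale (dropping the reservations and re-validating once the reloc
 * array has been copied in), then emit the pushes (via the IB ring, a
 * CALL method on chipset >= 0x25, or a JUMP with a patched return) and
 * fence the submission.  The suffix0/suffix1 values handed back tell
 * userspace how the ring was terminated.
 */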
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0;
	bool do_reloc = false, sync = false;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->chid == req->channel) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);
	if (unlikely(atomic_read(&chan->killed)))
		return nouveau_abi16_put(abi16, -ENODEV);

	sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		u_free(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
revalidate:
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		if (!reloc) {
			validate_fini(&op, chan, NULL, bo);
			reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
			if (IS_ERR(reloc)) {
				ret = PTR_ERR(reloc);
				goto out_prevalid;
			}

			goto revalidate;
		}

		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo);
		if (ret) {
			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_vma *vma = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, vma->addr + push[i].offset,
				      push[i].length);
		}
	} else
	if (drm->client.device.info.chipset >= 0x25) {
		ret = PUSH_WAIT(chan->chan.push, req->nr_push * 2);
		if (ret) {
			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			PUSH_CALL(chan->chan.push, nvbo->offset + push[i].offset);
			PUSH_DATA(chan->chan.push, 0);
		}
	} else {
		ret = PUSH_WAIT(chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			PUSH_JUMP(chan->chan.push, nvbo->offset + push[i].offset);
			PUSH_DATA(chan->chan.push, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				PUSH_DATA(chan->chan.push, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

	if (sync) {
		if (!(ret = nouveau_fence_wait(fence, false, false))) {
			if ((ret = dma_fence_get_status(&fence->base)) == 1)
				ret = 0;
		}
	}

out:
	validate_fini(&op, chan, fence, bo);
	nouveau_fence_unref(&fence);

	if (do_reloc) {
		struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
			u64_to_user_ptr(req->buffers);

		for (i = 0; i < req->nr_buffers; i++) {
			if (bo[i].presumed.valid)
				continue;

			if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed,
					 sizeof(bo[i].presumed))) {
				ret = -EFAULT;
				break;
			}
		}
	}
out_prevalid:
	if (!IS_ERR(reloc))
		u_free(reloc);
	u_free(bo);
	u_free(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (drm->client.device.info.chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.addr + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}

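/*
 * Prepare a buffer for CPU access: wait on its reservation-object fences
 * (all fences for a write, the exclusive fence only otherwise) with a
 * 30-second timeout, or just poll when NOWAIT is set, then make the
 * backing pages coherent for the CPU.
 */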
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
	long lret;
	int ret;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
					 no_wait ? 0 : 30 * HZ);
	if (!lret)
		ret = -EBUSY;
	else if (lret > 0)
		ret = 0;
	else
		ret = lret;

	nouveau_bo_sync_for_cpu(nvbo);
	drm_gem_object_put(gem);

	return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_fini *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	nouveau_bo_sync_for_device(nvbo);
	drm_gem_object_put(gem);
	return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_put(gem);
	return ret;
}