xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/i915/i915_vma.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"

#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"

static struct i915_global_vma {
	struct i915_global base;
	struct kmem_cache *slab_vmas;
} global;

struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
}

void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(global.slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	unsigned long *entries;
	unsigned int nr_entries;
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	nr_entries = stack_depot_fetch(vma->node.stack, &entries);
	stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}

__i915_active_call
static void __i915_vma_retire(struct i915_active *ref)
{
	i915_vma_put(active_to_vma(ref));
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *pos = ERR_PTR(-E2BIG);
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&vma->ref);
	mutex_init(&vma->pages_mutex);
	vma->vm = i915_vm_get(vm);
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->resv = obj->base.resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);

	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}

	INIT_LIST_HEAD(&vma->closed_link);

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	spin_lock(&obj->vma.lock);

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_unlock;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_unlock;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
	}

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp < 0)
			p = &rb->rb_right;
		else if (cmp > 0)
			p = &rb->rb_left;
		else
			goto err_unlock;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	return vma;

err_unlock:
	spin_unlock(&obj->vma.lock);
err_vma:
	i915_vm_put(vm);
	i915_vma_free(vma);
	return pos;
}

static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(!atomic_read(&vm->open));

	spin_lock(&obj->vma.lock);
	vma = vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}
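
/*
 * Typical usage, as an illustrative sketch only: a caller looks up the
 * singleton VMA for an object in an address space and pins it before use.
 * The obj/ggtt variables and the simplified error handling below are
 * placeholders for the example, not code from this driver path:
 *
 *	struct i915_vma *vma;
 *	int err;
 *
 *	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *	if (err)
 *		return err;
 *
 *	... use i915_ggtt_offset(vma) while the pin is held ...
 *
 *	i915_vma_unpin(vma);
 */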

struct i915_vma_work {
	struct dma_fence_work base;
	struct i915_address_space *vm;
	struct i915_vm_pt_stash stash;
	struct i915_vma *vma;
	struct drm_i915_gem_object *pinned;
	struct i915_sw_dma_fence_cb cb;
	enum i915_cache_level cache_level;
	unsigned int flags;
};

static int __vma_bind(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
	struct i915_vma *vma = vw->vma;

	vma->ops->bind_vma(vw->vm, &vw->stash,
			   vma, vw->cache_level, vw->flags);
	return 0;
}

static void __vma_release(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

	if (vw->pinned) {
		__i915_gem_object_unpin_pages(vw->pinned);
		i915_gem_object_put(vw->pinned);
	}

	i915_vm_free_pt_stash(vw->vm, &vw->stash);
	i915_vm_put(vw->vm);
}

static const struct dma_fence_work_ops bind_ops = {
	.name = "bind",
	.work = __vma_bind,
	.release = __vma_release,
};

struct i915_vma_work *i915_vma_work(void)
{
	struct i915_vma_work *vw;

	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
	if (!vw)
		return NULL;

	dma_fence_work_init(&vw->base, &bind_ops);
	vw->base.dma.error = -EAGAIN; /* disable the worker by default */

	return vw;
}

int i915_vma_wait_for_bind(struct i915_vma *vma)
{
	int err = 0;

	if (rcu_access_pointer(vma->active.excl.fence)) {
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT);
			dma_fence_put(fence);
		}
	}

	return err;
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
		  enum i915_cache_level cache_level,
		  u32 flags,
		  struct i915_vma_work *work)
{
	u32 bind_flags;
	u32 vma_flags;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = flags;
	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	vma_flags = atomic_read(&vma->flags);
	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	if (work && bind_flags & vma->vm->bind_async_flags) {
		struct dma_fence *prev;

		work->vma = vma;
		work->cache_level = cache_level;
		work->flags = bind_flags;

		/*
		 * Note we only want to chain up to the migration fence on
		 * the pages (not the object itself). As we don't track that,
		 * yet, we have to use the exclusive fence instead.
		 *
		 * Also note that we do not want to track the async vma as
		 * part of the obj->resv->excl_fence as it only affects
		 * execution and not content or object's backing store lifetime.
		 */
		prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
		if (prev) {
			__i915_sw_fence_await_dma_fence(&work->base.chain,
							prev,
							&work->cb);
			dma_fence_put(prev);
		}

		work->base.dma.error = 0; /* enable the queue_work() */

		if (vma->obj) {
			__i915_gem_object_pin_pages(vma->obj);
			work->pinned = i915_gem_object_get(vma->obj);
		}
	} else {
		vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
	}

	if (vma->obj)
		set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);

	atomic_or(bind_flags, &vma->flags);
	return 0;
}
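
/*
 * Illustrative sketch only: when the address space advertises
 * bind_async_flags, the caller preallocates a bind worker, hands it to
 * i915_vma_bind() and then commits the fence work. This mirrors what
 * i915_vma_pin_ww() does further down in this file; cache_level and flags
 * are placeholders for the example:
 *
 *	struct i915_vma_work *work;
 *
 *	work = i915_vma_work();
 *	if (!work)
 *		return -ENOMEM;
 *	work->vm = i915_vm_get(vma->vm);
 *
 *	err = i915_vma_bind(vma, cache_level, flags, work);
 *
 *	dma_fence_work_commit_imm(&work->base);
 */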

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
		err = -ENODEV;
		goto err;
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));

	ptr = READ_ONCE(vma->iomap);
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
			io_mapping_unmap(ptr);
			ptr = vma->iomap;
		}
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);

	/* NB Access through the GTT requires the device to be awake. */
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (i915_vma_unset_ggtt_write(vma))
		intel_gt_flush_ggtt_writes(vma->vm->gt);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}
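
/*
 * Illustrative sketch only: once a vma has been pinned into the mappable
 * GGTT aperture (e.g. with PIN_GLOBAL | PIN_MAPPABLE), the WC iomap can be
 * used for CPU writes through the aperture. The offset/src/len names below
 * are placeholders for the example:
 *
 *	void __iomem *ptr;
 *
 *	ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *
 *	memcpy_toio(ptr + offset, src, len);
 *
 *	i915_vma_unpin_iomap(vma);
 */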

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
		return true;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	else
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (!i915_vm_has_cache_coloring(vma->vm))
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(node))
		return false;

	return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	unsigned long color;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	color = 0;
	if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
		color = vma->obj->cache_level;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end))
			return -EINVAL;

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, color,
					   flags);
		if (ret)
			return ret;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that GGTT are limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, color,
					  start, end, flags);
		if (ret)
			return ret;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

	list_add_tail(&vma->vm_link, &vma->vm->bound_list);

	return 0;
}

static void
i915_vma_detach(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	/*
	 * And finally now the object is completely decoupled from this
	 * vma, we can drop its hold on the backing storage and allow
	 * it to be reaped by the shrinker.
	 */
	list_del(&vma->vm_link);
}

static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
	unsigned int bound;
	bool pinned = true;

	bound = atomic_read(&vma->flags);
	do {
		if (unlikely(flags & ~bound))
			return false;

		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
			return false;

		if (!(bound & I915_VMA_PIN_MASK))
			goto unpinned;

		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

	return true;

unpinned:
	/*
	 * If pin_count==0, but we are bound, check under the lock to avoid
	 * racing with a concurrent i915_vma_unbind().
	 */
	mutex_lock(&vma->vm->mutex);
	do {
		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
			pinned = false;
			break;
		}

		if (unlikely(flags & ~bound)) {
			pinned = false;
			break;
		}
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
	mutex_unlock(&vma->vm->mutex);

	return pinned;
}

static int vma_get_pages(struct i915_vma *vma)
{
	int err = 0;

	if (atomic_add_unless(&vma->pages_count, 1, 0))
		return 0;

	/* Allocations ahoy! */
	if (mutex_lock_interruptible(&vma->pages_mutex))
		return -EINTR;

	if (!atomic_read(&vma->pages_count)) {
		if (vma->obj) {
			err = i915_gem_object_pin_pages(vma->obj);
			if (err)
				goto unlock;
		}

		err = vma->ops->set_pages(vma);
		if (err) {
			if (vma->obj)
				i915_gem_object_unpin_pages(vma->obj);
			goto unlock;
		}
	}
	atomic_inc(&vma->pages_count);

unlock:
	mutex_unlock(&vma->pages_mutex);

	return err;
}

static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
	/* We allocate under vma_get_pages, so beware the shrinker */
	mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
	if (atomic_sub_return(count, &vma->pages_count) == 0) {
		vma->ops->clear_pages(vma);
		GEM_BUG_ON(vma->pages);
		if (vma->obj)
			i915_gem_object_unpin_pages(vma->obj);
	}
	mutex_unlock(&vma->pages_mutex);
}

static void vma_put_pages(struct i915_vma *vma)
{
	if (atomic_add_unless(&vma->pages_count, -1, 1))
		return;

	__vma_put_pages(vma, 1);
}

static void vma_unbind_pages(struct i915_vma *vma)
{
	unsigned int count;

	lockdep_assert_held(&vma->vm->mutex);

	/* The upper portion of pages_count is the number of bindings */
	count = atomic_read(&vma->pages_count);
	count >>= I915_VMA_PAGES_BIAS;
	GEM_BUG_ON(!count);

	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
}

int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		    u64 size, u64 alignment, u64 flags)
{
	struct i915_vma_work *work = NULL;
	intel_wakeref_t wakeref = 0;
	unsigned int bound;
	int err;

#ifdef CONFIG_PROVE_LOCKING
	if (debug_locks && lockdep_is_held(&vma->vm->i915->drm.struct_mutex))
		WARN_ON(!ww);
#endif

	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

	/* First try and grab the pin without rebinding the vma */
	if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
		return 0;

	err = vma_get_pages(vma);
	if (err)
		return err;

	if (flags & PIN_GLOBAL)
		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

	if (flags & vma->vm->bind_async_flags) {
		work = i915_vma_work();
		if (!work) {
			err = -ENOMEM;
			goto err_rpm;
		}

		work->vm = i915_vm_get(vma->vm);

		/* Allocate enough page directories to cover the used PTEs */
		if (vma->vm->allocate_va_range) {
			err = i915_vm_alloc_pt_stash(vma->vm,
						     &work->stash,
						     vma->size);
			if (err)
				goto err_fence;

			err = i915_vm_pin_pt_stash(vma->vm,
						   &work->stash);
			if (err)
				goto err_fence;
		}
	}

	/*
	 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
	 *
	 * We conflate the Global GTT with the user's vma when using the
	 * aliasing-ppgtt, but it is still vitally important to try and
	 * keep the use cases distinct. For example, userptr objects are
	 * not allowed inside the Global GTT as that will cause lock
	 * inversions when we have to evict them from the mmu_notifier
	 * callbacks - but they are allowed to be part of the user ppGTT
	 * which can never be mapped. As such we try to give the distinct
	 * users of the same mutex, distinct lockclasses [equivalent to how
	 * we keep i915_ggtt and i915_ppgtt separate].
	 *
	 * NB this may cause us to mask real lock inversions -- while the
	 * code is safe today, lockdep may not be able to spot future
	 * transgressions.
	 */
	err = mutex_lock_interruptible_nested(&vma->vm->mutex,
					      !(flags & PIN_GLOBAL));
	if (err)
		goto err_fence;

	/* No more allocations allowed now we hold vm->mutex */

	if (unlikely(i915_vma_is_closed(vma))) {
		err = -ENOENT;
		goto err_unlock;
	}

	bound = atomic_read(&vma->flags);
	if (unlikely(bound & I915_VMA_ERROR)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
		err = -EAGAIN; /* pins are meant to be fairly temporary */
		goto err_unlock;
	}

	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
		__i915_vma_pin(vma);
		goto err_unlock;
	}

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unlock;

	if (!(bound & I915_VMA_BIND_MASK)) {
		err = i915_vma_insert(vma, size, alignment, flags);
		if (err)
			goto err_active;

		if (i915_is_ggtt(vma->vm))
			__i915_vma_set_map_and_fenceable(vma);
	}

	GEM_BUG_ON(!vma->pages);
	err = i915_vma_bind(vma,
			    vma->obj ? vma->obj->cache_level : 0,
			    flags, work);
	if (err)
		goto err_remove;

	/* There should only be at most 2 active bindings (user, global) */
	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);

	__i915_vma_pin(vma);
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

err_remove:
	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
		i915_vma_detach(vma);
		drm_mm_remove_node(&vma->node);
	}
err_active:
	i915_active_release(&vma->active);
err_unlock:
	mutex_unlock(&vma->vm->mutex);
err_fence:
	if (work)
		dma_fence_work_commit_imm(&work->base);
err_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
	vma_put_pages(vma);
	return err;
}

static void flush_idle_contexts(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_engine_flush_barriers(engine);

	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}

int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		  u32 align, unsigned int flags)
{
	struct i915_address_space *vm = vma->vm;
	int err;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));

	do {
		err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
		if (err != -ENOSPC) {
			if (!err) {
				err = i915_vma_wait_for_bind(vma);
				if (err)
					i915_vma_unpin(vma);
			}
			return err;
		}

		/* Unlike i915_vma_pin, we don't take no for an answer! */
		flush_idle_contexts(vm->gt);
		if (mutex_lock_interruptible(&vm->mutex) == 0) {
			i915_gem_evict_vm(vm);
			mutex_unlock(&vm->mutex);
		}
	} while (1);
}

static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
{
	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	GEM_BUG_ON(i915_vma_is_closed(vma));
	list_add(&vma->closed_link, &gt->closed_vma);
}

void i915_vma_close(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;
	unsigned long flags;

	if (i915_vma_is_ggtt(vma))
		return;

	GEM_BUG_ON(!atomic_read(&vma->open_count));
	if (atomic_dec_and_lock_irqsave(&vma->open_count,
					&gt->closed_lock,
					flags)) {
		__vma_close(vma, gt);
		spin_unlock_irqrestore(&gt->closed_lock, flags);
	}
}

static void __i915_vma_remove_closed(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;

	spin_lock_irq(&gt->closed_lock);
	list_del_init(&vma->closed_link);
	spin_unlock_irq(&gt->closed_lock);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	if (i915_vma_is_closed(vma))
		__i915_vma_remove_closed(vma);
}

i915_vma_release(struct kref * ref)1096*4882a593Smuzhiyun void i915_vma_release(struct kref *ref)
1097*4882a593Smuzhiyun {
1098*4882a593Smuzhiyun 	struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
1099*4882a593Smuzhiyun 
1100*4882a593Smuzhiyun 	if (drm_mm_node_allocated(&vma->node)) {
1101*4882a593Smuzhiyun 		mutex_lock(&vma->vm->mutex);
1102*4882a593Smuzhiyun 		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
1103*4882a593Smuzhiyun 		WARN_ON(__i915_vma_unbind(vma));
1104*4882a593Smuzhiyun 		mutex_unlock(&vma->vm->mutex);
1105*4882a593Smuzhiyun 		GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1106*4882a593Smuzhiyun 	}
1107*4882a593Smuzhiyun 	GEM_BUG_ON(i915_vma_is_active(vma));
1108*4882a593Smuzhiyun 
1109*4882a593Smuzhiyun 	if (vma->obj) {
1110*4882a593Smuzhiyun 		struct drm_i915_gem_object *obj = vma->obj;
1111*4882a593Smuzhiyun 
1112*4882a593Smuzhiyun 		spin_lock(&obj->vma.lock);
1113*4882a593Smuzhiyun 		list_del(&vma->obj_link);
1114*4882a593Smuzhiyun 		if (!RB_EMPTY_NODE(&vma->obj_node))
1115*4882a593Smuzhiyun 			rb_erase(&vma->obj_node, &obj->vma.tree);
1116*4882a593Smuzhiyun 		spin_unlock(&obj->vma.lock);
1117*4882a593Smuzhiyun 	}
1118*4882a593Smuzhiyun 
1119*4882a593Smuzhiyun 	__i915_vma_remove_closed(vma);
1120*4882a593Smuzhiyun 	i915_vm_put(vma->vm);
1121*4882a593Smuzhiyun 
1122*4882a593Smuzhiyun 	i915_active_fini(&vma->active);
1123*4882a593Smuzhiyun 	i915_vma_free(vma);
1124*4882a593Smuzhiyun }
1125*4882a593Smuzhiyun 
i915_vma_parked(struct intel_gt * gt)1126*4882a593Smuzhiyun void i915_vma_parked(struct intel_gt *gt)
1127*4882a593Smuzhiyun {
1128*4882a593Smuzhiyun 	struct i915_vma *vma, *next;
1129*4882a593Smuzhiyun 	LIST_HEAD(closed);
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun 	spin_lock_irq(&gt->closed_lock);
1132*4882a593Smuzhiyun 	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
1133*4882a593Smuzhiyun 		struct drm_i915_gem_object *obj = vma->obj;
1134*4882a593Smuzhiyun 		struct i915_address_space *vm = vma->vm;
1135*4882a593Smuzhiyun 
1136*4882a593Smuzhiyun 		/* XXX All to avoid keeping a reference on i915_vma itself */
1137*4882a593Smuzhiyun 
1138*4882a593Smuzhiyun 		if (!kref_get_unless_zero(&obj->base.refcount))
1139*4882a593Smuzhiyun 			continue;
1140*4882a593Smuzhiyun 
1141*4882a593Smuzhiyun 		if (!i915_vm_tryopen(vm)) {
1142*4882a593Smuzhiyun 			i915_gem_object_put(obj);
1143*4882a593Smuzhiyun 			continue;
1144*4882a593Smuzhiyun 		}
1145*4882a593Smuzhiyun 
1146*4882a593Smuzhiyun 		list_move(&vma->closed_link, &closed);
1147*4882a593Smuzhiyun 	}
1148*4882a593Smuzhiyun 	spin_unlock_irq(&gt->closed_lock);
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun 	/* As the GT is held idle, no vma can be reopened as we destroy them */
1151*4882a593Smuzhiyun 	list_for_each_entry_safe(vma, next, &closed, closed_link) {
1152*4882a593Smuzhiyun 		struct drm_i915_gem_object *obj = vma->obj;
1153*4882a593Smuzhiyun 		struct i915_address_space *vm = vma->vm;
1154*4882a593Smuzhiyun 
1155*4882a593Smuzhiyun 		INIT_LIST_HEAD(&vma->closed_link);
1156*4882a593Smuzhiyun 		__i915_vma_put(vma);
1157*4882a593Smuzhiyun 
1158*4882a593Smuzhiyun 		i915_gem_object_put(obj);
1159*4882a593Smuzhiyun 		i915_vm_close(vm);
1160*4882a593Smuzhiyun 	}
1161*4882a593Smuzhiyun }
1162*4882a593Smuzhiyun 
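/* Tear down the vma's io_mapping, if one was created; a no-op otherwise. */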
1163*4882a593Smuzhiyun static void __i915_vma_iounmap(struct i915_vma *vma)
1164*4882a593Smuzhiyun {
1165*4882a593Smuzhiyun 	GEM_BUG_ON(i915_vma_is_pinned(vma));
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun 	if (vma->iomap == NULL)
1168*4882a593Smuzhiyun 		return;
1169*4882a593Smuzhiyun 
1170*4882a593Smuzhiyun 	io_mapping_unmap(vma->iomap);
1171*4882a593Smuzhiyun 	vma->iomap = NULL;
1172*4882a593Smuzhiyun }
1173*4882a593Smuzhiyun 
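/*
 * Zap the userspace PTEs for any mmap of this vma through the GGTT
 * aperture so the next CPU access refaults, and drop the object's
 * userfault accounting. Only map-and-fenceable (GGTT) vmas can carry a
 * userfault, as the assertions below require.
 */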
1174*4882a593Smuzhiyun void i915_vma_revoke_mmap(struct i915_vma *vma)
1175*4882a593Smuzhiyun {
1176*4882a593Smuzhiyun 	struct drm_vma_offset_node *node;
1177*4882a593Smuzhiyun 	u64 vma_offset;
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun 	if (!i915_vma_has_userfault(vma))
1180*4882a593Smuzhiyun 		return;
1181*4882a593Smuzhiyun 
1182*4882a593Smuzhiyun 	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
1183*4882a593Smuzhiyun 	GEM_BUG_ON(!vma->obj->userfault_count);
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 	node = &vma->mmo->vma_node;
1186*4882a593Smuzhiyun 	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
1187*4882a593Smuzhiyun 	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
1188*4882a593Smuzhiyun 			    drm_vma_node_offset_addr(node) + vma_offset,
1189*4882a593Smuzhiyun 			    vma->size,
1190*4882a593Smuzhiyun 			    1);
1191*4882a593Smuzhiyun 
1192*4882a593Smuzhiyun 	i915_vma_unset_userfault(vma);
1193*4882a593Smuzhiyun 	if (!--vma->obj->userfault_count)
1194*4882a593Smuzhiyun 		list_del(&vma->obj->userfault_link);
1195*4882a593Smuzhiyun }
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun static int
1198*4882a593Smuzhiyun __i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
1199*4882a593Smuzhiyun {
1200*4882a593Smuzhiyun 	return __i915_request_await_exclusive(rq, &vma->active);
1201*4882a593Smuzhiyun }
1202*4882a593Smuzhiyun 
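/*
 * Queue the request behind any asynchronous bind still in flight for the
 * vma (the exclusive fence on vma->active), then track the request so the
 * vma is kept active until the request completes.
 */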
1203*4882a593Smuzhiyun int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
1204*4882a593Smuzhiyun {
1205*4882a593Smuzhiyun 	int err;
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun 	GEM_BUG_ON(!i915_vma_is_pinned(vma));
1208*4882a593Smuzhiyun 
1209*4882a593Smuzhiyun 	/* Wait for the vma to be bound before we start! */
1210*4882a593Smuzhiyun 	err = __i915_request_await_bind(rq, vma);
1211*4882a593Smuzhiyun 	if (err)
1212*4882a593Smuzhiyun 		return err;
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun 	return i915_active_add_request(&vma->active, rq);
1215*4882a593Smuzhiyun }
1216*4882a593Smuzhiyun 
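/*
 * Track a request on a pinned vma and update the object's dma_resv fences
 * and domain state. A rough, illustrative sketch of a submission path
 * (error handling elided; assumes the object lock is already held per
 * assert_object_held() and that rq is an allocated i915_request):
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_USER);
 *	if (err == 0) {
 *		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 *		i915_vma_unpin(vma);
 *	}
 */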
1217*4882a593Smuzhiyun int i915_vma_move_to_active(struct i915_vma *vma,
1218*4882a593Smuzhiyun 			    struct i915_request *rq,
1219*4882a593Smuzhiyun 			    unsigned int flags)
1220*4882a593Smuzhiyun {
1221*4882a593Smuzhiyun 	struct drm_i915_gem_object *obj = vma->obj;
1222*4882a593Smuzhiyun 	int err;
1223*4882a593Smuzhiyun 
1224*4882a593Smuzhiyun 	assert_object_held(obj);
1225*4882a593Smuzhiyun 
1226*4882a593Smuzhiyun 	err = __i915_vma_move_to_active(vma, rq);
1227*4882a593Smuzhiyun 	if (unlikely(err))
1228*4882a593Smuzhiyun 		return err;
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun 	if (flags & EXEC_OBJECT_WRITE) {
1231*4882a593Smuzhiyun 		struct intel_frontbuffer *front;
1232*4882a593Smuzhiyun 
1233*4882a593Smuzhiyun 		front = __intel_frontbuffer_get(obj);
1234*4882a593Smuzhiyun 		if (unlikely(front)) {
1235*4882a593Smuzhiyun 			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
1236*4882a593Smuzhiyun 				i915_active_add_request(&front->write, rq);
1237*4882a593Smuzhiyun 			intel_frontbuffer_put(front);
1238*4882a593Smuzhiyun 		}
1239*4882a593Smuzhiyun 
1240*4882a593Smuzhiyun 		dma_resv_add_excl_fence(vma->resv, &rq->fence);
1241*4882a593Smuzhiyun 		obj->write_domain = I915_GEM_DOMAIN_RENDER;
1242*4882a593Smuzhiyun 		obj->read_domains = 0;
1243*4882a593Smuzhiyun 	} else {
1244*4882a593Smuzhiyun 		err = dma_resv_reserve_shared(vma->resv, 1);
1245*4882a593Smuzhiyun 		if (unlikely(err))
1246*4882a593Smuzhiyun 			return err;
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun 		dma_resv_add_shared_fence(vma->resv, &rq->fence);
1249*4882a593Smuzhiyun 		obj->write_domain = 0;
1250*4882a593Smuzhiyun 	}
1251*4882a593Smuzhiyun 
1252*4882a593Smuzhiyun 	if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
1253*4882a593Smuzhiyun 		i915_active_add_request(&vma->fence->active, rq);
1254*4882a593Smuzhiyun 
1255*4882a593Smuzhiyun 	obj->read_domains |= I915_GEM_GPU_DOMAINS;
1256*4882a593Smuzhiyun 	obj->mm.dirty = true;
1257*4882a593Smuzhiyun 
1258*4882a593Smuzhiyun 	GEM_BUG_ON(!i915_vma_is_active(vma));
1259*4882a593Smuzhiyun 	return 0;
1260*4882a593Smuzhiyun }
1261*4882a593Smuzhiyun 
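/*
 * Drop the hardware binding for an unpinned vma: revoke CPU mmaps, flush
 * GGTT writes and release the fence for map-and-fenceable (GGTT) vmas,
 * call the address space's unbind hook while the vm is still open, then
 * detach from the object and release the bound pages. The drm_mm node
 * itself is left for the caller to remove.
 */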
1262*4882a593Smuzhiyun void __i915_vma_evict(struct i915_vma *vma)
1263*4882a593Smuzhiyun {
1264*4882a593Smuzhiyun 	GEM_BUG_ON(i915_vma_is_pinned(vma));
1265*4882a593Smuzhiyun 
1266*4882a593Smuzhiyun 	if (i915_vma_is_map_and_fenceable(vma)) {
1267*4882a593Smuzhiyun 		/* Force a pagefault for domain tracking on next user access */
1268*4882a593Smuzhiyun 		i915_vma_revoke_mmap(vma);
1269*4882a593Smuzhiyun 
1270*4882a593Smuzhiyun 		/*
1271*4882a593Smuzhiyun 		 * Check that we have flushed all writes through the GGTT
1272*4882a593Smuzhiyun 		 * before the unbind, other due to non-strict nature of those
1273*4882a593Smuzhiyun 		 * before the unbind; otherwise, due to the non-strict nature of
1274*4882a593Smuzhiyun 		 * those indirect writes, they may end up referencing the GGTT
1275*4882a593Smuzhiyun 		 * PTE after the unbind.
1276*4882a593Smuzhiyun 		 * Note that we may be concurrently poking at the GGTT_WRITE
1277*4882a593Smuzhiyun 		 * bit from set-domain, as we mark all GGTT vma associated
1278*4882a593Smuzhiyun 		 * with an object. We know this is for another vma, as we
1279*4882a593Smuzhiyun 		 * are currently unbinding this one -- so if this vma will be
1280*4882a593Smuzhiyun 		 * reused, it will be refaulted and have its dirty bit set
1281*4882a593Smuzhiyun 		 * before the next write.
1282*4882a593Smuzhiyun 		 */
1283*4882a593Smuzhiyun 		i915_vma_flush_writes(vma);
1284*4882a593Smuzhiyun 
1285*4882a593Smuzhiyun 		/* release the fence reg _after_ flushing */
1286*4882a593Smuzhiyun 		i915_vma_revoke_fence(vma);
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun 		__i915_vma_iounmap(vma);
1289*4882a593Smuzhiyun 		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
1290*4882a593Smuzhiyun 	}
1291*4882a593Smuzhiyun 	GEM_BUG_ON(vma->fence);
1292*4882a593Smuzhiyun 	GEM_BUG_ON(i915_vma_has_userfault(vma));
1293*4882a593Smuzhiyun 
1294*4882a593Smuzhiyun 	if (likely(atomic_read(&vma->vm->open))) {
1295*4882a593Smuzhiyun 		trace_i915_vma_unbind(vma);
1296*4882a593Smuzhiyun 		vma->ops->unbind_vma(vma->vm, vma);
1297*4882a593Smuzhiyun 	}
1298*4882a593Smuzhiyun 	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
1299*4882a593Smuzhiyun 		   &vma->flags);
1300*4882a593Smuzhiyun 
1301*4882a593Smuzhiyun 	i915_vma_detach(vma);
1302*4882a593Smuzhiyun 	vma_unbind_pages(vma);
1303*4882a593Smuzhiyun }
1304*4882a593Smuzhiyun 
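/*
 * Unbind with vm->mutex already held: fails with -EAGAIN while the vma is
 * pinned, waits for outstanding activity, then evicts the binding and
 * removes the drm_mm node.
 */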
1305*4882a593Smuzhiyun int __i915_vma_unbind(struct i915_vma *vma)
1306*4882a593Smuzhiyun {
1307*4882a593Smuzhiyun 	int ret;
1308*4882a593Smuzhiyun 
1309*4882a593Smuzhiyun 	lockdep_assert_held(&vma->vm->mutex);
1310*4882a593Smuzhiyun 
1311*4882a593Smuzhiyun 	if (!drm_mm_node_allocated(&vma->node))
1312*4882a593Smuzhiyun 		return 0;
1313*4882a593Smuzhiyun 
1314*4882a593Smuzhiyun 	if (i915_vma_is_pinned(vma)) {
1315*4882a593Smuzhiyun 		vma_print_allocator(vma, "is pinned");
1316*4882a593Smuzhiyun 		return -EAGAIN;
1317*4882a593Smuzhiyun 	}
1318*4882a593Smuzhiyun 
1319*4882a593Smuzhiyun 	/*
1320*4882a593Smuzhiyun 	 * After confirming that no one else is pinning this vma, wait for
1321*4882a593Smuzhiyun 	 * any laggards who may have crept in during the wait (through
1322*4882a593Smuzhiyun 	 * a residual pin skipping the vm->mutex) to complete.
1323*4882a593Smuzhiyun 	 */
1324*4882a593Smuzhiyun 	ret = i915_vma_sync(vma);
1325*4882a593Smuzhiyun 	if (ret)
1326*4882a593Smuzhiyun 		return ret;
1327*4882a593Smuzhiyun 
1328*4882a593Smuzhiyun 	GEM_BUG_ON(i915_vma_is_active(vma));
1329*4882a593Smuzhiyun 	__i915_vma_evict(vma);
1330*4882a593Smuzhiyun 
1331*4882a593Smuzhiyun 	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
1332*4882a593Smuzhiyun 	return 0;
1333*4882a593Smuzhiyun }
1334*4882a593Smuzhiyun 
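/*
 * Locked wrapper around __i915_vma_unbind(): wait optimistically before
 * taking vm->mutex, and hold a runtime-pm wakeref while a global (GGTT)
 * binding may need its PTEs cleared in hardware.
 */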
1335*4882a593Smuzhiyun int i915_vma_unbind(struct i915_vma *vma)
1336*4882a593Smuzhiyun {
1337*4882a593Smuzhiyun 	struct i915_address_space *vm = vma->vm;
1338*4882a593Smuzhiyun 	intel_wakeref_t wakeref = 0;
1339*4882a593Smuzhiyun 	int err;
1340*4882a593Smuzhiyun 
1341*4882a593Smuzhiyun 	/* Optimistic wait before taking the mutex */
1342*4882a593Smuzhiyun 	err = i915_vma_sync(vma);
1343*4882a593Smuzhiyun 	if (err)
1344*4882a593Smuzhiyun 		return err;
1345*4882a593Smuzhiyun 
1346*4882a593Smuzhiyun 	if (!drm_mm_node_allocated(&vma->node))
1347*4882a593Smuzhiyun 		return 0;
1348*4882a593Smuzhiyun 
1349*4882a593Smuzhiyun 	if (i915_vma_is_pinned(vma)) {
1350*4882a593Smuzhiyun 		vma_print_allocator(vma, "is pinned");
1351*4882a593Smuzhiyun 		return -EAGAIN;
1352*4882a593Smuzhiyun 	}
1353*4882a593Smuzhiyun 
1354*4882a593Smuzhiyun 	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
1355*4882a593Smuzhiyun 		/* XXX not always required: nop_clear_range */
1356*4882a593Smuzhiyun 		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun 	err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
1359*4882a593Smuzhiyun 	if (err)
1360*4882a593Smuzhiyun 		goto out_rpm;
1361*4882a593Smuzhiyun 
1362*4882a593Smuzhiyun 	err = __i915_vma_unbind(vma);
1363*4882a593Smuzhiyun 	mutex_unlock(&vm->mutex);
1364*4882a593Smuzhiyun 
1365*4882a593Smuzhiyun out_rpm:
1366*4882a593Smuzhiyun 	if (wakeref)
1367*4882a593Smuzhiyun 		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
1368*4882a593Smuzhiyun 	return err;
1369*4882a593Smuzhiyun }
1370*4882a593Smuzhiyun 
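/*
 * The shrinker hints below simply forward to the backing GEM object;
 * i915_vma_make_unshrinkable() also returns the vma for call chaining.
 */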
1371*4882a593Smuzhiyun struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
1372*4882a593Smuzhiyun {
1373*4882a593Smuzhiyun 	i915_gem_object_make_unshrinkable(vma->obj);
1374*4882a593Smuzhiyun 	return vma;
1375*4882a593Smuzhiyun }
1376*4882a593Smuzhiyun 
1377*4882a593Smuzhiyun void i915_vma_make_shrinkable(struct i915_vma *vma)
1378*4882a593Smuzhiyun {
1379*4882a593Smuzhiyun 	i915_gem_object_make_shrinkable(vma->obj);
1380*4882a593Smuzhiyun }
1381*4882a593Smuzhiyun 
1382*4882a593Smuzhiyun void i915_vma_make_purgeable(struct i915_vma *vma)
1383*4882a593Smuzhiyun {
1384*4882a593Smuzhiyun 	i915_gem_object_make_purgeable(vma->obj);
1385*4882a593Smuzhiyun }
1386*4882a593Smuzhiyun 
1387*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1388*4882a593Smuzhiyun #include "selftests/i915_vma.c"
1389*4882a593Smuzhiyun #endif
1390*4882a593Smuzhiyun 
1391*4882a593Smuzhiyun static void i915_global_vma_shrink(void)
1392*4882a593Smuzhiyun {
1393*4882a593Smuzhiyun 	kmem_cache_shrink(global.slab_vmas);
1394*4882a593Smuzhiyun }
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun static void i915_global_vma_exit(void)
1397*4882a593Smuzhiyun {
1398*4882a593Smuzhiyun 	kmem_cache_destroy(global.slab_vmas);
1399*4882a593Smuzhiyun }
1400*4882a593Smuzhiyun 
1401*4882a593Smuzhiyun static struct i915_global_vma global = { {
1402*4882a593Smuzhiyun 	.shrink = i915_global_vma_shrink,
1403*4882a593Smuzhiyun 	.exit = i915_global_vma_exit,
1404*4882a593Smuzhiyun } };
1405*4882a593Smuzhiyun 
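/*
 * Create the shared slab cache backing i915_vma_alloc()/i915_vma_free()
 * and register the shrink/exit hooks above with the i915 globals table.
 */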
1406*4882a593Smuzhiyun int __init i915_global_vma_init(void)
1407*4882a593Smuzhiyun {
1408*4882a593Smuzhiyun 	global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
1409*4882a593Smuzhiyun 	if (!global.slab_vmas)
1410*4882a593Smuzhiyun 		return -ENOMEM;
1411*4882a593Smuzhiyun 
1412*4882a593Smuzhiyun 	i915_global_register(&global.base);
1413*4882a593Smuzhiyun 	return 0;
1414*4882a593Smuzhiyun }
1415