Lines matching refs: vma (drivers/gpu/drm/i915/i915_vma.c). Each entry shows the source line number, the matching line, the enclosing function, and whether vma is referenced there as a function argument, a local variable, or a struct member.

51 void i915_vma_free(struct i915_vma *vma)  in i915_vma_free()  argument
53 return kmem_cache_free(global.slab_vmas, vma); in i915_vma_free()
60 static void vma_print_allocator(struct i915_vma *vma, const char *reason) in vma_print_allocator() argument
66 if (!vma->node.stack) { in vma_print_allocator()
68 vma->node.start, vma->node.size, reason); in vma_print_allocator()
72 nr_entries = stack_depot_fetch(vma->node.stack, &entries); in vma_print_allocator()
75 vma->node.start, vma->node.size, reason, buf); in vma_print_allocator()
80 static void vma_print_allocator(struct i915_vma *vma, const char *reason) in vma_print_allocator() argument
108 struct i915_vma *vma; in vma_create() local
114 vma = i915_vma_alloc(); in vma_create()
115 if (vma == NULL) in vma_create()
118 kref_init(&vma->ref); in vma_create()
119 mutex_init(&vma->pages_mutex); in vma_create()
120 vma->vm = i915_vm_get(vm); in vma_create()
121 vma->ops = &vm->vma_ops; in vma_create()
122 vma->obj = obj; in vma_create()
123 vma->resv = obj->base.resv; in vma_create()
124 vma->size = obj->base.size; in vma_create()
125 vma->display_alignment = I915_GTT_MIN_ALIGNMENT; in vma_create()
127 i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire); in vma_create()
132 might_lock(&vma->active.mutex); in vma_create()
136 INIT_LIST_HEAD(&vma->closed_link); in vma_create()
139 vma->ggtt_view = *view; in vma_create()
145 vma->size = view->partial.size; in vma_create()
146 vma->size <<= PAGE_SHIFT; in vma_create()
147 GEM_BUG_ON(vma->size > obj->base.size); in vma_create()
149 vma->size = intel_rotation_info_size(&view->rotated); in vma_create()
150 vma->size <<= PAGE_SHIFT; in vma_create()
152 vma->size = intel_remapped_info_size(&view->remapped); in vma_create()
153 vma->size <<= PAGE_SHIFT; in vma_create()
157 if (unlikely(vma->size > vm->total)) in vma_create()
160 GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE)); in vma_create()
162 spin_lock(&obj->vma.lock); in vma_create()
165 if (unlikely(overflows_type(vma->size, u32))) in vma_create()
168 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size, in vma_create()
171 if (unlikely(vma->fence_size < vma->size || /* overflow */ in vma_create()
172 vma->fence_size > vm->total)) in vma_create()
175 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT)); in vma_create()
177 vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size, in vma_create()
180 GEM_BUG_ON(!is_power_of_2(vma->fence_alignment)); in vma_create()
182 __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma)); in vma_create()
186 p = &obj->vma.tree.rb_node; in vma_create()
206 rb_link_node(&vma->obj_node, rb, p); in vma_create()
207 rb_insert_color(&vma->obj_node, &obj->vma.tree); in vma_create()
209 if (i915_vma_is_ggtt(vma)) in vma_create()
216 list_add(&vma->obj_link, &obj->vma.list); in vma_create()
218 list_add_tail(&vma->obj_link, &obj->vma.list); in vma_create()
220 spin_unlock(&obj->vma.lock); in vma_create()
222 return vma; in vma_create()
225 spin_unlock(&obj->vma.lock); in vma_create()
228 i915_vma_free(vma); in vma_create()
239 rb = obj->vma.tree.rb_node; in vma_lookup()
241 struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node); in vma_lookup() local
244 cmp = i915_vma_compare(vma, vm, view); in vma_lookup()
246 return vma; in vma_lookup()
275 struct i915_vma *vma; in i915_vma_instance() local
280 spin_lock(&obj->vma.lock); in i915_vma_instance()
281 vma = vma_lookup(obj, vm, view); in i915_vma_instance()
282 spin_unlock(&obj->vma.lock); in i915_vma_instance()
285 if (unlikely(!vma)) in i915_vma_instance()
286 vma = vma_create(obj, vm, view); in i915_vma_instance()
288 GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view)); in i915_vma_instance()
289 return vma; in i915_vma_instance()
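
Illustrative aside, not taken from the listing above: a minimal sketch of how a caller obtains the vma for an object via i915_vma_instance(). The helper name sketch_use_object is hypothetical, and obj and vm are assumed to be a valid, referenced GEM object and address space.

	static int sketch_use_object(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm)
	{
		struct i915_vma *vma;

		/* NULL view: map the whole object, not a partial/rotated/remapped view */
		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);	/* e.g. -ENOMEM from vma_create() */

		/* ... pin/bind the vma before use (see i915_vma_pin_ww further down) ... */
		return 0;
	}
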
296 struct i915_vma *vma; member
306 struct i915_vma *vma = vw->vma; in __vma_bind() local
308 vma->ops->bind_vma(vw->vm, &vw->stash, in __vma_bind()
309 vma, vw->cache_level, vw->flags); in __vma_bind()
346 int i915_vma_wait_for_bind(struct i915_vma *vma) in i915_vma_wait_for_bind() argument
350 if (rcu_access_pointer(vma->active.excl.fence)) { in i915_vma_wait_for_bind()
354 fence = dma_fence_get_rcu_safe(&vma->active.excl.fence); in i915_vma_wait_for_bind()
376 int i915_vma_bind(struct i915_vma *vma, in i915_vma_bind() argument
384 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); in i915_vma_bind()
385 GEM_BUG_ON(vma->size > vma->node.size); in i915_vma_bind()
387 if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start, in i915_vma_bind()
388 vma->node.size, in i915_vma_bind()
389 vma->vm->total))) in i915_vma_bind()
398 vma_flags = atomic_read(&vma->flags); in i915_vma_bind()
405 GEM_BUG_ON(!vma->pages); in i915_vma_bind()
407 trace_i915_vma_bind(vma, bind_flags); in i915_vma_bind()
408 if (work && bind_flags & vma->vm->bind_async_flags) { in i915_vma_bind()
411 work->vma = vma; in i915_vma_bind()
424 prev = i915_active_set_exclusive(&vma->active, &work->base.dma); in i915_vma_bind()
434 if (vma->obj) { in i915_vma_bind()
435 __i915_gem_object_pin_pages(vma->obj); in i915_vma_bind()
436 work->pinned = i915_gem_object_get(vma->obj); in i915_vma_bind()
439 vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags); in i915_vma_bind()
442 if (vma->obj) in i915_vma_bind()
443 set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags); in i915_vma_bind()
445 atomic_or(bind_flags, &vma->flags); in i915_vma_bind()
449 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) in i915_vma_pin_iomap() argument
454 if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) { in i915_vma_pin_iomap()
459 GEM_BUG_ON(!i915_vma_is_ggtt(vma)); in i915_vma_pin_iomap()
460 GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)); in i915_vma_pin_iomap()
462 ptr = READ_ONCE(vma->iomap); in i915_vma_pin_iomap()
464 ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap, in i915_vma_pin_iomap()
465 vma->node.start, in i915_vma_pin_iomap()
466 vma->node.size); in i915_vma_pin_iomap()
472 if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) { in i915_vma_pin_iomap()
474 ptr = vma->iomap; in i915_vma_pin_iomap()
478 __i915_vma_pin(vma); in i915_vma_pin_iomap()
480 err = i915_vma_pin_fence(vma); in i915_vma_pin_iomap()
484 i915_vma_set_ggtt_write(vma); in i915_vma_pin_iomap()
490 __i915_vma_unpin(vma); in i915_vma_pin_iomap()
495 void i915_vma_flush_writes(struct i915_vma *vma) in i915_vma_flush_writes() argument
497 if (i915_vma_unset_ggtt_write(vma)) in i915_vma_flush_writes()
498 intel_gt_flush_ggtt_writes(vma->vm->gt); in i915_vma_flush_writes()
501 void i915_vma_unpin_iomap(struct i915_vma *vma) in i915_vma_unpin_iomap() argument
503 GEM_BUG_ON(vma->iomap == NULL); in i915_vma_unpin_iomap()
505 i915_vma_flush_writes(vma); in i915_vma_unpin_iomap()
507 i915_vma_unpin_fence(vma); in i915_vma_unpin_iomap()
508 i915_vma_unpin(vma); in i915_vma_unpin_iomap()
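
Illustrative aside: a sketch of the pin_iomap/unpin_iomap pairing for CPU access through the GGTT aperture. sketch_poke_through_aperture, offset and value are hypothetical names; the vma is assumed to be a GGTT vma that is map-and-fenceable and already bound.

	static int sketch_poke_through_aperture(struct i915_vma *vma,
						unsigned long offset, u32 value)
	{
		void __iomem *ptr;

		ptr = i915_vma_pin_iomap(vma);	/* pins the vma, returns a WC mapping */
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);

		writel(value, ptr + offset);	/* placeholder CPU write via the aperture */

		i915_vma_unpin_iomap(vma);	/* flushes GGTT writes, drops fence and pin */
		return 0;
	}
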
513 struct i915_vma *vma; in i915_vma_unpin_and_release() local
516 vma = fetch_and_zero(p_vma); in i915_vma_unpin_and_release()
517 if (!vma) in i915_vma_unpin_and_release()
520 obj = vma->obj; in i915_vma_unpin_and_release()
523 i915_vma_unpin(vma); in i915_vma_unpin_and_release()
531 bool i915_vma_misplaced(const struct i915_vma *vma, in i915_vma_misplaced() argument
534 if (!drm_mm_node_allocated(&vma->node)) in i915_vma_misplaced()
537 if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma))) in i915_vma_misplaced()
540 if (vma->node.size < size) in i915_vma_misplaced()
544 if (alignment && !IS_ALIGNED(vma->node.start, alignment)) in i915_vma_misplaced()
547 if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma)) in i915_vma_misplaced()
551 vma->node.start < (flags & PIN_OFFSET_MASK)) in i915_vma_misplaced()
555 vma->node.start != (flags & PIN_OFFSET_MASK)) in i915_vma_misplaced()
561 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) in __i915_vma_set_map_and_fenceable() argument
565 GEM_BUG_ON(!i915_vma_is_ggtt(vma)); in __i915_vma_set_map_and_fenceable()
566 GEM_BUG_ON(!vma->fence_size); in __i915_vma_set_map_and_fenceable()
568 fenceable = (vma->node.size >= vma->fence_size && in __i915_vma_set_map_and_fenceable()
569 IS_ALIGNED(vma->node.start, vma->fence_alignment)); in __i915_vma_set_map_and_fenceable()
571 mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end; in __i915_vma_set_map_and_fenceable()
574 set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma)); in __i915_vma_set_map_and_fenceable()
576 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma)); in __i915_vma_set_map_and_fenceable()
579 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color) in i915_gem_valid_gtt_space() argument
581 struct drm_mm_node *node = &vma->node; in i915_gem_valid_gtt_space()
591 if (!i915_vm_has_cache_coloring(vma->vm)) in i915_gem_valid_gtt_space()
626 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) in i915_vma_insert() argument
632 GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); in i915_vma_insert()
633 GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); in i915_vma_insert()
635 size = max(size, vma->size); in i915_vma_insert()
636 alignment = max(alignment, vma->display_alignment); in i915_vma_insert()
638 size = max_t(typeof(size), size, vma->fence_size); in i915_vma_insert()
640 alignment, vma->fence_alignment); in i915_vma_insert()
650 end = vma->vm->total; in i915_vma_insert()
652 end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end); in i915_vma_insert()
669 if (vma->obj && i915_vm_has_cache_coloring(vma->vm)) in i915_vma_insert()
670 color = vma->obj->cache_level; in i915_vma_insert()
678 ret = i915_gem_gtt_reserve(vma->vm, &vma->node, in i915_vma_insert()
693 vma->page_sizes.sg > I915_GTT_PAGE_SIZE) { in i915_vma_insert()
701 rounddown_pow_of_two(vma->page_sizes.sg | in i915_vma_insert()
709 GEM_BUG_ON(i915_vma_is_ggtt(vma)); in i915_vma_insert()
713 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) in i915_vma_insert()
717 ret = i915_gem_gtt_insert(vma->vm, &vma->node, in i915_vma_insert()
723 GEM_BUG_ON(vma->node.start < start); in i915_vma_insert()
724 GEM_BUG_ON(vma->node.start + vma->node.size > end); in i915_vma_insert()
726 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); in i915_vma_insert()
727 GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color)); in i915_vma_insert()
729 list_add_tail(&vma->vm_link, &vma->vm->bound_list); in i915_vma_insert()
735 i915_vma_detach(struct i915_vma *vma) in i915_vma_detach() argument
737 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); in i915_vma_detach()
738 GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); in i915_vma_detach()
745 list_del(&vma->vm_link); in i915_vma_detach()
748 static bool try_qad_pin(struct i915_vma *vma, unsigned int flags) in try_qad_pin() argument
753 bound = atomic_read(&vma->flags); in try_qad_pin()
765 } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1)); in try_qad_pin()
774 mutex_lock(&vma->vm->mutex); in try_qad_pin()
785 } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1)); in try_qad_pin()
786 mutex_unlock(&vma->vm->mutex); in try_qad_pin()
791 static int vma_get_pages(struct i915_vma *vma) in vma_get_pages() argument
795 if (atomic_add_unless(&vma->pages_count, 1, 0)) in vma_get_pages()
799 if (mutex_lock_interruptible(&vma->pages_mutex)) in vma_get_pages()
802 if (!atomic_read(&vma->pages_count)) { in vma_get_pages()
803 if (vma->obj) { in vma_get_pages()
804 err = i915_gem_object_pin_pages(vma->obj); in vma_get_pages()
809 err = vma->ops->set_pages(vma); in vma_get_pages()
811 if (vma->obj) in vma_get_pages()
812 i915_gem_object_unpin_pages(vma->obj); in vma_get_pages()
816 atomic_inc(&vma->pages_count); in vma_get_pages()
819 mutex_unlock(&vma->pages_mutex); in vma_get_pages()
824 static void __vma_put_pages(struct i915_vma *vma, unsigned int count) in __vma_put_pages() argument
827 mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING); in __vma_put_pages()
828 GEM_BUG_ON(atomic_read(&vma->pages_count) < count); in __vma_put_pages()
829 if (atomic_sub_return(count, &vma->pages_count) == 0) { in __vma_put_pages()
830 vma->ops->clear_pages(vma); in __vma_put_pages()
831 GEM_BUG_ON(vma->pages); in __vma_put_pages()
832 if (vma->obj) in __vma_put_pages()
833 i915_gem_object_unpin_pages(vma->obj); in __vma_put_pages()
835 mutex_unlock(&vma->pages_mutex); in __vma_put_pages()
838 static void vma_put_pages(struct i915_vma *vma) in vma_put_pages() argument
840 if (atomic_add_unless(&vma->pages_count, -1, 1)) in vma_put_pages()
843 __vma_put_pages(vma, 1); in vma_put_pages()
846 static void vma_unbind_pages(struct i915_vma *vma) in vma_unbind_pages() argument
850 lockdep_assert_held(&vma->vm->mutex); in vma_unbind_pages()
853 count = atomic_read(&vma->pages_count); in vma_unbind_pages()
857 __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS); in vma_unbind_pages()
860 int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, in i915_vma_pin_ww() argument
869 if (debug_locks && lockdep_is_held(&vma->vm->i915->drm.struct_mutex)) in i915_vma_pin_ww()
879 if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK)) in i915_vma_pin_ww()
882 err = vma_get_pages(vma); in i915_vma_pin_ww()
887 wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm); in i915_vma_pin_ww()
889 if (flags & vma->vm->bind_async_flags) { in i915_vma_pin_ww()
896 work->vm = i915_vm_get(vma->vm); in i915_vma_pin_ww()
899 if (vma->vm->allocate_va_range) { in i915_vma_pin_ww()
900 err = i915_vm_alloc_pt_stash(vma->vm, in i915_vma_pin_ww()
902 vma->size); in i915_vma_pin_ww()
906 err = i915_vm_pin_pt_stash(vma->vm, in i915_vma_pin_ww()
930 err = mutex_lock_interruptible_nested(&vma->vm->mutex, in i915_vma_pin_ww()
937 if (unlikely(i915_vma_is_closed(vma))) { in i915_vma_pin_ww()
942 bound = atomic_read(&vma->flags); in i915_vma_pin_ww()
954 __i915_vma_pin(vma); in i915_vma_pin_ww()
958 err = i915_active_acquire(&vma->active); in i915_vma_pin_ww()
963 err = i915_vma_insert(vma, size, alignment, flags); in i915_vma_pin_ww()
967 if (i915_is_ggtt(vma->vm)) in i915_vma_pin_ww()
968 __i915_vma_set_map_and_fenceable(vma); in i915_vma_pin_ww()
971 GEM_BUG_ON(!vma->pages); in i915_vma_pin_ww()
972 err = i915_vma_bind(vma, in i915_vma_pin_ww()
973 vma->obj ? vma->obj->cache_level : 0, in i915_vma_pin_ww()
980 atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count); in i915_vma_pin_ww()
981 list_move_tail(&vma->vm_link, &vma->vm->bound_list); in i915_vma_pin_ww()
983 __i915_vma_pin(vma); in i915_vma_pin_ww()
984 GEM_BUG_ON(!i915_vma_is_pinned(vma)); in i915_vma_pin_ww()
985 GEM_BUG_ON(!i915_vma_is_bound(vma, flags)); in i915_vma_pin_ww()
986 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); in i915_vma_pin_ww()
989 if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) { in i915_vma_pin_ww()
990 i915_vma_detach(vma); in i915_vma_pin_ww()
991 drm_mm_remove_node(&vma->node); in i915_vma_pin_ww()
994 i915_active_release(&vma->active); in i915_vma_pin_ww()
996 mutex_unlock(&vma->vm->mutex); in i915_vma_pin_ww()
1002 intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref); in i915_vma_pin_ww()
1003 vma_put_pages(vma); in i915_vma_pin_ww()
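
Illustrative aside: a sketch of pinning a vma under a ww acquire context with the usual -EDEADLK backoff loop, assuming obj and vma already exist. sketch_pin_vma is a hypothetical name and PIN_USER is just one possible flag choice.

	static int sketch_pin_vma(struct drm_i915_gem_object *obj, struct i915_vma *vma)
	{
		struct i915_gem_ww_ctx ww;
		int err;

		i915_gem_ww_ctx_init(&ww, true);
	retry:
		err = i915_gem_object_lock(obj, &ww);
		if (!err)
			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);

		/* on success the caller owns one pin; drop it with i915_vma_unpin() */
		return err;
	}
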
1018 int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, in i915_ggtt_pin() argument
1021 struct i915_address_space *vm = vma->vm; in i915_ggtt_pin()
1024 GEM_BUG_ON(!i915_vma_is_ggtt(vma)); in i915_ggtt_pin()
1027 err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL); in i915_ggtt_pin()
1030 err = i915_vma_wait_for_bind(vma); in i915_ggtt_pin()
1032 i915_vma_unpin(vma); in i915_ggtt_pin()
1046 static void __vma_close(struct i915_vma *vma, struct intel_gt *gt) in __vma_close() argument
1060 GEM_BUG_ON(i915_vma_is_closed(vma)); in __vma_close()
1061 list_add(&vma->closed_link, &gt->closed_vma); in __vma_close()
1064 void i915_vma_close(struct i915_vma *vma) in i915_vma_close() argument
1066 struct intel_gt *gt = vma->vm->gt; in i915_vma_close()
1069 if (i915_vma_is_ggtt(vma)) in i915_vma_close()
1072 GEM_BUG_ON(!atomic_read(&vma->open_count)); in i915_vma_close()
1073 if (atomic_dec_and_lock_irqsave(&vma->open_count, in i915_vma_close()
1076 __vma_close(vma, gt); in i915_vma_close()
1081 static void __i915_vma_remove_closed(struct i915_vma *vma) in __i915_vma_remove_closed() argument
1083 struct intel_gt *gt = vma->vm->gt; in __i915_vma_remove_closed()
1086 list_del_init(&vma->closed_link); in __i915_vma_remove_closed()
1090 void i915_vma_reopen(struct i915_vma *vma) in i915_vma_reopen() argument
1092 if (i915_vma_is_closed(vma)) in i915_vma_reopen()
1093 __i915_vma_remove_closed(vma); in i915_vma_reopen()
1098 struct i915_vma *vma = container_of(ref, typeof(*vma), ref); in i915_vma_release() local
1100 if (drm_mm_node_allocated(&vma->node)) { in i915_vma_release()
1101 mutex_lock(&vma->vm->mutex); in i915_vma_release()
1102 atomic_and(~I915_VMA_PIN_MASK, &vma->flags); in i915_vma_release()
1103 WARN_ON(__i915_vma_unbind(vma)); in i915_vma_release()
1104 mutex_unlock(&vma->vm->mutex); in i915_vma_release()
1105 GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); in i915_vma_release()
1107 GEM_BUG_ON(i915_vma_is_active(vma)); in i915_vma_release()
1109 if (vma->obj) { in i915_vma_release()
1110 struct drm_i915_gem_object *obj = vma->obj; in i915_vma_release()
1112 spin_lock(&obj->vma.lock); in i915_vma_release()
1113 list_del(&vma->obj_link); in i915_vma_release()
1114 if (!RB_EMPTY_NODE(&vma->obj_node)) in i915_vma_release()
1115 rb_erase(&vma->obj_node, &obj->vma.tree); in i915_vma_release()
1116 spin_unlock(&obj->vma.lock); in i915_vma_release()
1119 __i915_vma_remove_closed(vma); in i915_vma_release()
1120 i915_vm_put(vma->vm); in i915_vma_release()
1122 i915_active_fini(&vma->active); in i915_vma_release()
1123 i915_vma_free(vma); in i915_vma_release()
1128 struct i915_vma *vma, *next; in i915_vma_parked() local
1132 list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) { in i915_vma_parked()
1133 struct drm_i915_gem_object *obj = vma->obj; in i915_vma_parked()
1134 struct i915_address_space *vm = vma->vm; in i915_vma_parked()
1146 list_move(&vma->closed_link, &closed); in i915_vma_parked()
1151 list_for_each_entry_safe(vma, next, &closed, closed_link) { in i915_vma_parked()
1152 struct drm_i915_gem_object *obj = vma->obj; in i915_vma_parked()
1153 struct i915_address_space *vm = vma->vm; in i915_vma_parked()
1155 INIT_LIST_HEAD(&vma->closed_link); in i915_vma_parked()
1156 __i915_vma_put(vma); in i915_vma_parked()
1163 static void __i915_vma_iounmap(struct i915_vma *vma) in __i915_vma_iounmap() argument
1165 GEM_BUG_ON(i915_vma_is_pinned(vma)); in __i915_vma_iounmap()
1167 if (vma->iomap == NULL) in __i915_vma_iounmap()
1170 io_mapping_unmap(vma->iomap); in __i915_vma_iounmap()
1171 vma->iomap = NULL; in __i915_vma_iounmap()
1174 void i915_vma_revoke_mmap(struct i915_vma *vma) in i915_vma_revoke_mmap() argument
1179 if (!i915_vma_has_userfault(vma)) in i915_vma_revoke_mmap()
1182 GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma)); in i915_vma_revoke_mmap()
1183 GEM_BUG_ON(!vma->obj->userfault_count); in i915_vma_revoke_mmap()
1185 node = &vma->mmo->vma_node; in i915_vma_revoke_mmap()
1186 vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT; in i915_vma_revoke_mmap()
1187 unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping, in i915_vma_revoke_mmap()
1189 vma->size, in i915_vma_revoke_mmap()
1192 i915_vma_unset_userfault(vma); in i915_vma_revoke_mmap()
1193 if (!--vma->obj->userfault_count) in i915_vma_revoke_mmap()
1194 list_del(&vma->obj->userfault_link); in i915_vma_revoke_mmap()
1198 __i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma) in __i915_request_await_bind() argument
1200 return __i915_request_await_exclusive(rq, &vma->active); in __i915_request_await_bind()
1203 int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq) in __i915_vma_move_to_active() argument
1207 GEM_BUG_ON(!i915_vma_is_pinned(vma)); in __i915_vma_move_to_active()
1210 err = __i915_request_await_bind(rq, vma); in __i915_vma_move_to_active()
1214 return i915_active_add_request(&vma->active, rq); in __i915_vma_move_to_active()
1217 int i915_vma_move_to_active(struct i915_vma *vma, in i915_vma_move_to_active() argument
1221 struct drm_i915_gem_object *obj = vma->obj; in i915_vma_move_to_active()
1226 err = __i915_vma_move_to_active(vma, rq); in i915_vma_move_to_active()
1240 dma_resv_add_excl_fence(vma->resv, &rq->fence); in i915_vma_move_to_active()
1244 err = dma_resv_reserve_shared(vma->resv, 1); in i915_vma_move_to_active()
1248 dma_resv_add_shared_fence(vma->resv, &rq->fence); in i915_vma_move_to_active()
1252 if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence) in i915_vma_move_to_active()
1253 i915_active_add_request(&vma->fence->active, rq); in i915_vma_move_to_active()
1258 GEM_BUG_ON(!i915_vma_is_active(vma)); in i915_vma_move_to_active()
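
Illustrative aside: a sketch of how an execbuf-style caller publishes a request's write to a vma. sketch_track_write and rq are assumptions (rq being an i915_request under construction); the vma must be pinned, and EXEC_OBJECT_WRITE selects the exclusive-fence path shown in the entries above.

	static int sketch_track_write(struct i915_vma *vma, struct i915_request *rq)
	{
		int err;

		/* vma must stay pinned (and its object locked) across the call */
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
		if (err)
			return err;

		return 0;
	}
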
1262 void __i915_vma_evict(struct i915_vma *vma) in __i915_vma_evict() argument
1264 GEM_BUG_ON(i915_vma_is_pinned(vma)); in __i915_vma_evict()
1266 if (i915_vma_is_map_and_fenceable(vma)) { in __i915_vma_evict()
1268 i915_vma_revoke_mmap(vma); in __i915_vma_evict()
1283 i915_vma_flush_writes(vma); in __i915_vma_evict()
1286 i915_vma_revoke_fence(vma); in __i915_vma_evict()
1288 __i915_vma_iounmap(vma); in __i915_vma_evict()
1289 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma)); in __i915_vma_evict()
1291 GEM_BUG_ON(vma->fence); in __i915_vma_evict()
1292 GEM_BUG_ON(i915_vma_has_userfault(vma)); in __i915_vma_evict()
1294 if (likely(atomic_read(&vma->vm->open))) { in __i915_vma_evict()
1295 trace_i915_vma_unbind(vma); in __i915_vma_evict()
1296 vma->ops->unbind_vma(vma->vm, vma); in __i915_vma_evict()
1299 &vma->flags); in __i915_vma_evict()
1301 i915_vma_detach(vma); in __i915_vma_evict()
1302 vma_unbind_pages(vma); in __i915_vma_evict()
1305 int __i915_vma_unbind(struct i915_vma *vma) in __i915_vma_unbind() argument
1309 lockdep_assert_held(&vma->vm->mutex); in __i915_vma_unbind()
1311 if (!drm_mm_node_allocated(&vma->node)) in __i915_vma_unbind()
1314 if (i915_vma_is_pinned(vma)) { in __i915_vma_unbind()
1315 vma_print_allocator(vma, "is pinned"); in __i915_vma_unbind()
1324 ret = i915_vma_sync(vma); in __i915_vma_unbind()
1328 GEM_BUG_ON(i915_vma_is_active(vma)); in __i915_vma_unbind()
1329 __i915_vma_evict(vma); in __i915_vma_unbind()
1331 drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */ in __i915_vma_unbind()
1335 int i915_vma_unbind(struct i915_vma *vma) in i915_vma_unbind() argument
1337 struct i915_address_space *vm = vma->vm; in i915_vma_unbind()
1342 err = i915_vma_sync(vma); in i915_vma_unbind()
1346 if (!drm_mm_node_allocated(&vma->node)) in i915_vma_unbind()
1349 if (i915_vma_is_pinned(vma)) { in i915_vma_unbind()
1350 vma_print_allocator(vma, "is pinned"); in i915_vma_unbind()
1354 if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) in i915_vma_unbind()
1358 err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref); in i915_vma_unbind()
1362 err = __i915_vma_unbind(vma); in i915_vma_unbind()
1371 struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma) in i915_vma_make_unshrinkable() argument
1373 i915_gem_object_make_unshrinkable(vma->obj); in i915_vma_make_unshrinkable()
1374 return vma; in i915_vma_make_unshrinkable()
1377 void i915_vma_make_shrinkable(struct i915_vma *vma) in i915_vma_make_shrinkable() argument
1379 i915_gem_object_make_shrinkable(vma->obj); in i915_vma_make_shrinkable()
1382 void i915_vma_make_purgeable(struct i915_vma *vma) in i915_vma_make_purgeable() argument
1384 i915_gem_object_make_purgeable(vma->obj); in i915_vma_make_purgeable()
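
Illustrative aside: a sketch of dropping a binding once it is no longer needed. sketch_release_binding is a hypothetical name; i915_vma_unbind() takes vm->mutex itself and may wait for outstanding activity, while __i915_vma_unbind() above is the variant for callers already holding that mutex.

	static int sketch_release_binding(struct i915_vma *vma)
	{
		i915_vma_unpin(vma);		/* drop the pin taken when the vma was pinned */

		return i915_vma_unbind(vma);	/* takes vm->mutex, syncs, evicts and unbinds */
	}
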