Lines Matching refs:bo — cross-reference listing for the identifier 'bo' in the TTM buffer-object core (drivers/gpu/drm/ttm/ttm_bo.c, circa Linux v5.10). The leading number on each line is that line's position within the file; the trailing 'argument'/'local' annotation records whether 'bo' is a function parameter or a local variable at that site.

62 static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)  in ttm_bo_default_destroy()  argument
64 kfree(bo); in ttm_bo_default_destroy()
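
ttm_bo_default_destroy() above is the fallback installed when a driver passes no destroy callback to ttm_bo_init() (see the assignment at line 1158 below). Drivers that embed the TTM object in a larger structure supply their own callback; a minimal sketch of that common pattern, with the mydrv_* names hypothetical:

struct mydrv_bo {
        struct ttm_buffer_object tbo;
        /* driver-private state ... */
};

static void mydrv_bo_destroy(struct ttm_buffer_object *tbo)
{
        /* recover the containing driver object, then free the whole thing */
        struct mydrv_bo *mbo = container_of(tbo, struct mydrv_bo, tbo);

        kfree(mbo);
}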
67 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, in ttm_bo_mem_space_debug() argument
75 bo, bo->mem.num_pages, bo->mem.size >> 10, in ttm_bo_mem_space_debug()
76 bo->mem.size >> 20); in ttm_bo_mem_space_debug()
81 man = ttm_manager_type(bo->bdev, mem_type); in ttm_bo_mem_space_debug()
112 static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo, in ttm_bo_add_mem_to_lru() argument
115 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_add_mem_to_lru()
118 if (!list_empty(&bo->lru)) in ttm_bo_add_mem_to_lru()
125 list_add_tail(&bo->lru, &man->lru[bo->priority]); in ttm_bo_add_mem_to_lru()
127 if (man->use_tt && bo->ttm && in ttm_bo_add_mem_to_lru()
128 !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG | in ttm_bo_add_mem_to_lru()
130 list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]); in ttm_bo_add_mem_to_lru()
134 static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo) in ttm_bo_del_from_lru() argument
136 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_del_from_lru()
139 if (!list_empty(&bo->swap)) { in ttm_bo_del_from_lru()
140 list_del_init(&bo->swap); in ttm_bo_del_from_lru()
143 if (!list_empty(&bo->lru)) { in ttm_bo_del_from_lru()
144 list_del_init(&bo->lru); in ttm_bo_del_from_lru()
149 bdev->driver->del_from_lru_notify(bo); in ttm_bo_del_from_lru()
153 struct ttm_buffer_object *bo) in ttm_bo_bulk_move_set_pos() argument
156 pos->first = bo; in ttm_bo_bulk_move_set_pos()
157 pos->last = bo; in ttm_bo_bulk_move_set_pos()
160 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, in ttm_bo_move_to_lru_tail() argument
163 dma_resv_assert_held(bo->base.resv); in ttm_bo_move_to_lru_tail()
165 ttm_bo_del_from_lru(bo); in ttm_bo_move_to_lru_tail()
166 ttm_bo_add_mem_to_lru(bo, &bo->mem); in ttm_bo_move_to_lru_tail()
168 if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { in ttm_bo_move_to_lru_tail()
169 switch (bo->mem.mem_type) { in ttm_bo_move_to_lru_tail()
171 ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo); in ttm_bo_move_to_lru_tail()
175 ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo); in ttm_bo_move_to_lru_tail()
178 if (bo->ttm && !(bo->ttm->page_flags & in ttm_bo_move_to_lru_tail()
180 ttm_bo_bulk_move_set_pos(&bulk->swap[bo->priority], bo); in ttm_bo_move_to_lru_tail()
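
ttm_bo_move_to_lru_tail() requires the caller to hold both the BO's reservation object (asserted at line 163 above) and the global LRU spinlock; the ttm_bo_move_to_lru_tail_unlocked() calls seen later in this listing wrap exactly this pattern. A minimal sketch, assuming the v5.10 ttm_bo_glob.lru_lock convention:

static void mydrv_touch_bo(struct ttm_buffer_object *bo)
{
        dma_resv_assert_held(bo->base.resv);

        spin_lock(&ttm_bo_glob.lru_lock);
        ttm_bo_move_to_lru_tail(bo, NULL);      /* NULL: no bulk-move batching */
        spin_unlock(&ttm_bo_glob.lru_lock);
}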
235 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, in ttm_bo_handle_move_mem() argument
239 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_handle_move_mem()
240 struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type); in ttm_bo_handle_move_mem()
244 ttm_bo_unmap_virtual(bo); in ttm_bo_handle_move_mem()
254 ret = ttm_tt_create(bo, old_man->use_tt); in ttm_bo_handle_move_mem()
258 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); in ttm_bo_handle_move_mem()
263 ret = ttm_tt_populate(bdev, bo->ttm, ctx); in ttm_bo_handle_move_mem()
267 ret = ttm_bo_tt_bind(bo, mem); in ttm_bo_handle_move_mem()
272 if (bo->mem.mem_type == TTM_PL_SYSTEM) { in ttm_bo_handle_move_mem()
274 bdev->driver->move_notify(bo, evict, mem); in ttm_bo_handle_move_mem()
275 bo->mem = *mem; in ttm_bo_handle_move_mem()
281 bdev->driver->move_notify(bo, evict, mem); in ttm_bo_handle_move_mem()
284 ret = ttm_bo_move_ttm(bo, ctx, mem); in ttm_bo_handle_move_mem()
286 ret = bdev->driver->move(bo, evict, ctx, mem); in ttm_bo_handle_move_mem()
288 ret = ttm_bo_move_memcpy(bo, ctx, mem); in ttm_bo_handle_move_mem()
292 swap(*mem, bo->mem); in ttm_bo_handle_move_mem()
293 bdev->driver->move_notify(bo, false, mem); in ttm_bo_handle_move_mem()
294 swap(*mem, bo->mem); in ttm_bo_handle_move_mem()
301 ctx->bytes_moved += bo->num_pages << PAGE_SHIFT; in ttm_bo_handle_move_mem()
305 new_man = ttm_manager_type(bdev, bo->mem.mem_type); in ttm_bo_handle_move_mem()
307 ttm_bo_tt_destroy(bo); in ttm_bo_handle_move_mem()
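
The actual move (lines 284-288 above) falls back in order: ttm_bo_move_ttm() when both placements are TT-backed, the driver's move hook when one is provided, and finally the generic CPU copy. A hypothetical, heavily simplified driver hook that tries an (assumed) DMA-engine helper before deferring to the memcpy path:

static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
                         struct ttm_operation_ctx *ctx,
                         struct ttm_resource *new_mem)
{
        /* mydrv_dma_copy() is an assumed helper that also fences and
         * retires the old resource, as a real blit path must. */
        if (mydrv_dma_copy(bo, ctx, new_mem) == 0)
                return 0;

        return ttm_bo_move_memcpy(bo, ctx, new_mem);
}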
320 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) in ttm_bo_cleanup_memtype_use() argument
322 if (bo->bdev->driver->move_notify) in ttm_bo_cleanup_memtype_use()
323 bo->bdev->driver->move_notify(bo, false, NULL); in ttm_bo_cleanup_memtype_use()
325 ttm_bo_tt_destroy(bo); in ttm_bo_cleanup_memtype_use()
326 ttm_resource_free(bo, &bo->mem); in ttm_bo_cleanup_memtype_use()
329 static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo) in ttm_bo_individualize_resv() argument
333 if (bo->base.resv == &bo->base._resv) in ttm_bo_individualize_resv()
336 BUG_ON(!dma_resv_trylock(&bo->base._resv)); in ttm_bo_individualize_resv()
338 r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv); in ttm_bo_individualize_resv()
339 dma_resv_unlock(&bo->base._resv); in ttm_bo_individualize_resv()
343 if (bo->type != ttm_bo_type_sg) { in ttm_bo_individualize_resv()
349 bo->base.resv = &bo->base._resv; in ttm_bo_individualize_resv()
356 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) in ttm_bo_flush_all_fences() argument
358 struct dma_resv *resv = &bo->base._resv; in ttm_bo_flush_all_fences()
391 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, in ttm_bo_cleanup_refs() argument
395 struct dma_resv *resv = &bo->base._resv; in ttm_bo_cleanup_refs()
407 dma_resv_unlock(bo->base.resv); in ttm_bo_cleanup_refs()
419 if (unlock_resv && !dma_resv_trylock(bo->base.resv)) { in ttm_bo_cleanup_refs()
434 if (ret || unlikely(list_empty(&bo->ddestroy))) { in ttm_bo_cleanup_refs()
436 dma_resv_unlock(bo->base.resv); in ttm_bo_cleanup_refs()
441 ttm_bo_del_from_lru(bo); in ttm_bo_cleanup_refs()
442 list_del_init(&bo->ddestroy); in ttm_bo_cleanup_refs()
444 ttm_bo_cleanup_memtype_use(bo); in ttm_bo_cleanup_refs()
447 dma_resv_unlock(bo->base.resv); in ttm_bo_cleanup_refs()
449 ttm_bo_put(bo); in ttm_bo_cleanup_refs()
468 struct ttm_buffer_object *bo; in ttm_bo_delayed_delete() local
470 bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object, in ttm_bo_delayed_delete()
472 list_move_tail(&bo->ddestroy, &removed); in ttm_bo_delayed_delete()
473 if (!ttm_bo_get_unless_zero(bo)) in ttm_bo_delayed_delete()
476 if (remove_all || bo->base.resv != &bo->base._resv) { in ttm_bo_delayed_delete()
478 dma_resv_lock(bo->base.resv, NULL); in ttm_bo_delayed_delete()
481 ttm_bo_cleanup_refs(bo, false, !remove_all, true); in ttm_bo_delayed_delete()
483 } else if (dma_resv_trylock(bo->base.resv)) { in ttm_bo_delayed_delete()
484 ttm_bo_cleanup_refs(bo, false, !remove_all, true); in ttm_bo_delayed_delete()
489 ttm_bo_put(bo); in ttm_bo_delayed_delete()
511 struct ttm_buffer_object *bo = in ttm_bo_release() local
513 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_release()
514 size_t acc_size = bo->acc_size; in ttm_bo_release()
517 if (!bo->deleted) { in ttm_bo_release()
518 ret = ttm_bo_individualize_resv(bo); in ttm_bo_release()
523 dma_resv_wait_timeout_rcu(bo->base.resv, true, false, in ttm_bo_release()
527 if (bo->bdev->driver->release_notify) in ttm_bo_release()
528 bo->bdev->driver->release_notify(bo); in ttm_bo_release()
530 drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node); in ttm_bo_release()
531 ttm_mem_io_free(bdev, &bo->mem); in ttm_bo_release()
534 if (!dma_resv_test_signaled_rcu(bo->base.resv, true) || in ttm_bo_release()
535 !dma_resv_trylock(bo->base.resv)) { in ttm_bo_release()
537 ttm_bo_flush_all_fences(bo); in ttm_bo_release()
538 bo->deleted = true; in ttm_bo_release()
547 if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) { in ttm_bo_release()
548 bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT; in ttm_bo_release()
549 ttm_bo_del_from_lru(bo); in ttm_bo_release()
550 ttm_bo_add_mem_to_lru(bo, &bo->mem); in ttm_bo_release()
553 kref_init(&bo->kref); in ttm_bo_release()
554 list_add_tail(&bo->ddestroy, &bdev->ddestroy); in ttm_bo_release()
563 ttm_bo_del_from_lru(bo); in ttm_bo_release()
564 list_del(&bo->ddestroy); in ttm_bo_release()
567 ttm_bo_cleanup_memtype_use(bo); in ttm_bo_release()
568 dma_resv_unlock(bo->base.resv); in ttm_bo_release()
571 dma_fence_put(bo->moving); in ttm_bo_release()
572 if (!ttm_bo_uses_embedded_gem_object(bo)) in ttm_bo_release()
573 dma_resv_fini(&bo->base._resv); in ttm_bo_release()
574 bo->destroy(bo); in ttm_bo_release()
578 void ttm_bo_put(struct ttm_buffer_object *bo) in ttm_bo_put() argument
580 kref_put(&bo->kref, ttm_bo_release); in ttm_bo_put()
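
ttm_bo_put() drops a reference and funnels the final one into ttm_bo_release(). Its counterpart ttm_bo_get_unless_zero(), used at lines 473 and 755 of this listing, is the safe way to grab a BO discovered on an LRU walk. A sketch (mydrv_process_bo() is hypothetical):

static void mydrv_process_lru_entry(struct ttm_buffer_object *bo)
{
        if (!ttm_bo_get_unless_zero(bo))
                return;                 /* already headed for destruction */

        mydrv_process_bo(bo);           /* assumed driver work */

        ttm_bo_put(bo);                 /* may invoke ttm_bo_release() */
}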
598 static int ttm_bo_evict(struct ttm_buffer_object *bo, in ttm_bo_evict() argument
601 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_evict()
606 dma_resv_assert_held(bo->base.resv); in ttm_bo_evict()
610 bdev->driver->evict_flags(bo, &placement); in ttm_bo_evict()
613 ttm_bo_wait(bo, false, false); in ttm_bo_evict()
615 ttm_bo_cleanup_memtype_use(bo); in ttm_bo_evict()
616 return ttm_tt_create(bo, false); in ttm_bo_evict()
619 evict_mem = bo->mem; in ttm_bo_evict()
624 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx); in ttm_bo_evict()
628 bo); in ttm_bo_evict()
629 ttm_bo_mem_space_debug(bo, &placement); in ttm_bo_evict()
634 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ctx); in ttm_bo_evict()
638 ttm_resource_free(bo, &evict_mem); in ttm_bo_evict()
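
Eviction asks the driver where displaced contents should go via the evict_flags hook (line 610 above). A minimal hook in the style of drivers of this vintage, steering everything to cacheable system memory (mydrv_* names hypothetical; ttm_place fields per the v5.10 API):

static const struct ttm_place mydrv_sys_place = {
        .fpfn = 0,
        .lpfn = 0,
        .mem_type = TTM_PL_SYSTEM,
        .flags = TTM_PL_MASK_CACHING,
};

static void mydrv_evict_flags(struct ttm_buffer_object *bo,
                              struct ttm_placement *placement)
{
        placement->num_placement = 1;
        placement->placement = &mydrv_sys_place;
        placement->num_busy_placement = 1;
        placement->busy_placement = &mydrv_sys_place;
}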
644 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, in ttm_bo_eviction_valuable() argument
650 if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) || in ttm_bo_eviction_valuable()
651 (place->lpfn && place->lpfn <= bo->mem.start)) in ttm_bo_eviction_valuable()
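
The default ttm_bo_eviction_valuable() only rejects BOs that cannot free any of the requested page range. A driver override can add its own policy while still delegating that range test, e.g.:

/* Hypothetical override: keep driver-pinned objects resident, otherwise
 * defer to the default range check. 'always_resident' is an assumed
 * driver flag on the mydrv_bo wrapper sketched earlier. */
static bool mydrv_eviction_valuable(struct ttm_buffer_object *bo,
                                    const struct ttm_place *place)
{
        struct mydrv_bo *mbo = container_of(bo, struct mydrv_bo, tbo);

        if (mbo->always_resident)
                return false;

        return ttm_bo_eviction_valuable(bo, place);
}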
668 static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo, in ttm_bo_evict_swapout_allowable() argument
673 if (bo->base.resv == ctx->resv) { in ttm_bo_evict_swapout_allowable()
674 dma_resv_assert_held(bo->base.resv); in ttm_bo_evict_swapout_allowable()
681 ret = dma_resv_trylock(bo->base.resv); in ttm_bo_evict_swapout_allowable()
731 struct ttm_buffer_object *bo = NULL, *busy_bo = NULL; in ttm_mem_evict_first() local
738 list_for_each_entry(bo, &man->lru[i], lru) { in ttm_mem_evict_first()
741 if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked, in ttm_mem_evict_first()
744 dma_resv_locking_ctx(bo->base.resv)) in ttm_mem_evict_first()
745 busy_bo = bo; in ttm_mem_evict_first()
749 if (place && !bdev->driver->eviction_valuable(bo, in ttm_mem_evict_first()
752 dma_resv_unlock(bo->base.resv); in ttm_mem_evict_first()
755 if (!ttm_bo_get_unless_zero(bo)) { in ttm_mem_evict_first()
757 dma_resv_unlock(bo->base.resv); in ttm_mem_evict_first()
764 if (&bo->lru != &man->lru[i]) in ttm_mem_evict_first()
767 bo = NULL; in ttm_mem_evict_first()
770 if (!bo) { in ttm_mem_evict_first()
780 if (bo->deleted) { in ttm_mem_evict_first()
781 ret = ttm_bo_cleanup_refs(bo, ctx->interruptible, in ttm_mem_evict_first()
783 ttm_bo_put(bo); in ttm_mem_evict_first()
789 ret = ttm_bo_evict(bo, ctx); in ttm_mem_evict_first()
791 ttm_bo_unreserve(bo); in ttm_mem_evict_first()
793 ttm_bo_move_to_lru_tail_unlocked(bo); in ttm_mem_evict_first()
795 ttm_bo_put(bo); in ttm_mem_evict_first()
802 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, in ttm_bo_add_move_fence() argument
822 dma_resv_add_shared_fence(bo->base.resv, fence); in ttm_bo_add_move_fence()
824 ret = dma_resv_reserve_shared(bo->base.resv, 1); in ttm_bo_add_move_fence()
830 dma_fence_put(bo->moving); in ttm_bo_add_move_fence()
831 bo->moving = fence; in ttm_bo_add_move_fence()
839 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, in ttm_bo_mem_force_space() argument
844 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_mem_force_space()
849 ticket = dma_resv_locking_ctx(bo->base.resv); in ttm_bo_mem_force_space()
851 ret = ttm_resource_alloc(bo, place, mem); in ttm_bo_mem_force_space()
862 return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); in ttm_bo_mem_force_space()
899 static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, in ttm_bo_mem_placement() argument
904 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_mem_placement()
912 cur_flags = ttm_bo_select_caching(man, bo->mem.placement, in ttm_bo_mem_placement()
920 ttm_bo_del_from_lru(bo); in ttm_bo_mem_placement()
921 ttm_bo_add_mem_to_lru(bo, mem); in ttm_bo_mem_placement()
935 int ttm_bo_mem_space(struct ttm_buffer_object *bo, in ttm_bo_mem_space() argument
940 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_mem_space()
944 ret = dma_resv_reserve_shared(bo->base.resv, 1); in ttm_bo_mem_space()
952 ret = ttm_bo_mem_placement(bo, place, mem, ctx); in ttm_bo_mem_space()
957 ret = ttm_resource_alloc(bo, place, mem); in ttm_bo_mem_space()
964 ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); in ttm_bo_mem_space()
966 ttm_resource_free(bo, mem); in ttm_bo_mem_space()
978 ret = ttm_bo_mem_placement(bo, place, mem, ctx); in ttm_bo_mem_space()
983 ret = ttm_bo_mem_force_space(bo, place, mem, ctx); in ttm_bo_mem_space()
998 if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) { in ttm_bo_mem_space()
999 ttm_bo_move_to_lru_tail_unlocked(bo); in ttm_bo_mem_space()
1006 static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, in ttm_bo_move_buffer() argument
1013 dma_resv_assert_held(bo->base.resv); in ttm_bo_move_buffer()
1015 mem.num_pages = bo->num_pages; in ttm_bo_move_buffer()
1017 mem.page_alignment = bo->mem.page_alignment; in ttm_bo_move_buffer()
1025 ret = ttm_bo_mem_space(bo, placement, &mem, ctx); in ttm_bo_move_buffer()
1028 ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx); in ttm_bo_move_buffer()
1031 ttm_resource_free(bo, &mem); in ttm_bo_move_buffer()
1078 int ttm_bo_validate(struct ttm_buffer_object *bo, in ttm_bo_validate() argument
1085 dma_resv_assert_held(bo->base.resv); in ttm_bo_validate()
1091 ret = ttm_bo_pipeline_gutting(bo); in ttm_bo_validate()
1095 return ttm_tt_create(bo, false); in ttm_bo_validate()
1101 if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) { in ttm_bo_validate()
1102 ret = ttm_bo_move_buffer(bo, placement, ctx); in ttm_bo_validate()
1106 bo->mem.placement &= TTM_PL_MASK_CACHING; in ttm_bo_validate()
1107 bo->mem.placement |= new_flags & ~TTM_PL_MASK_CACHING; in ttm_bo_validate()
1112 if (bo->mem.mem_type == TTM_PL_SYSTEM) { in ttm_bo_validate()
1113 ret = ttm_tt_create(bo, true); in ttm_bo_validate()
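
ttm_bo_validate() is the driver-facing entry point for (re)placing a reserved BO. A sketch of validating into system memory, assuming the v5.10 ttm_place/ttm_placement layout:

static int mydrv_validate_to_system(struct ttm_buffer_object *bo)
{
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false,
        };
        struct ttm_place place = {
                .mem_type = TTM_PL_SYSTEM,
                .flags = TTM_PL_MASK_CACHING,
        };
        struct ttm_placement placement = {
                .num_placement = 1,
                .placement = &place,
                .num_busy_placement = 1,
                .busy_placement = &place,
        };

        dma_resv_assert_held(bo->base.resv);    /* caller must have reserved bo */
        return ttm_bo_validate(bo, &placement, &ctx);
}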
1122 struct ttm_buffer_object *bo, in ttm_bo_init_reserved() argument
1142 (*destroy)(bo); in ttm_bo_init_reserved()
1144 kfree(bo); in ttm_bo_init_reserved()
1152 (*destroy)(bo); in ttm_bo_init_reserved()
1154 kfree(bo); in ttm_bo_init_reserved()
1158 bo->destroy = destroy ? destroy : ttm_bo_default_destroy; in ttm_bo_init_reserved()
1160 kref_init(&bo->kref); in ttm_bo_init_reserved()
1161 INIT_LIST_HEAD(&bo->lru); in ttm_bo_init_reserved()
1162 INIT_LIST_HEAD(&bo->ddestroy); in ttm_bo_init_reserved()
1163 INIT_LIST_HEAD(&bo->swap); in ttm_bo_init_reserved()
1164 bo->bdev = bdev; in ttm_bo_init_reserved()
1165 bo->type = type; in ttm_bo_init_reserved()
1166 bo->num_pages = num_pages; in ttm_bo_init_reserved()
1167 bo->mem.size = num_pages << PAGE_SHIFT; in ttm_bo_init_reserved()
1168 bo->mem.mem_type = TTM_PL_SYSTEM; in ttm_bo_init_reserved()
1169 bo->mem.num_pages = bo->num_pages; in ttm_bo_init_reserved()
1170 bo->mem.mm_node = NULL; in ttm_bo_init_reserved()
1171 bo->mem.page_alignment = page_alignment; in ttm_bo_init_reserved()
1172 bo->mem.bus.offset = 0; in ttm_bo_init_reserved()
1173 bo->mem.bus.addr = NULL; in ttm_bo_init_reserved()
1174 bo->moving = NULL; in ttm_bo_init_reserved()
1175 bo->mem.placement = TTM_PL_FLAG_CACHED; in ttm_bo_init_reserved()
1176 bo->acc_size = acc_size; in ttm_bo_init_reserved()
1177 bo->sg = sg; in ttm_bo_init_reserved()
1179 bo->base.resv = resv; in ttm_bo_init_reserved()
1180 dma_resv_assert_held(bo->base.resv); in ttm_bo_init_reserved()
1182 bo->base.resv = &bo->base._resv; in ttm_bo_init_reserved()
1184 if (!ttm_bo_uses_embedded_gem_object(bo)) { in ttm_bo_init_reserved()
1189 dma_resv_init(&bo->base._resv); in ttm_bo_init_reserved()
1190 drm_vma_node_reset(&bo->base.vma_node); in ttm_bo_init_reserved()
1198 if (bo->type == ttm_bo_type_device || in ttm_bo_init_reserved()
1199 bo->type == ttm_bo_type_sg) in ttm_bo_init_reserved()
1200 ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node, in ttm_bo_init_reserved()
1201 bo->mem.num_pages); in ttm_bo_init_reserved()
1207 locked = dma_resv_trylock(bo->base.resv); in ttm_bo_init_reserved()
1212 ret = ttm_bo_validate(bo, placement, ctx); in ttm_bo_init_reserved()
1216 ttm_bo_unreserve(bo); in ttm_bo_init_reserved()
1218 ttm_bo_put(bo); in ttm_bo_init_reserved()
1222 ttm_bo_move_to_lru_tail_unlocked(bo); in ttm_bo_init_reserved()
1229 struct ttm_buffer_object *bo, in ttm_bo_init() argument
1243 ret = ttm_bo_init_reserved(bdev, bo, size, type, placement, in ttm_bo_init()
1250 ttm_bo_unreserve(bo); in ttm_bo_init()
1291 struct ttm_buffer_object *bo; in ttm_bo_create() local
1295 bo = kzalloc(sizeof(*bo), GFP_KERNEL); in ttm_bo_create()
1296 if (unlikely(bo == NULL)) in ttm_bo_create()
1300 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, in ttm_bo_create()
1304 *p_bo = bo; in ttm_bo_create()
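
ttm_bo_create() (lines 1291-1304) is itself the canonical ttm_bo_init() usage: kzalloc() the object, then initialize. Note the error convention visible at lines 1142-1154: on failure, ttm_bo_init_reserved() has already invoked the destroy callback (or kfree()), so the caller must not free the BO again. A condensed sketch, assuming the v5.10 ttm_bo_acc_size()/ttm_bo_init() signatures:

static int mydrv_bo_alloc(struct ttm_bo_device *bdev, size_t size,
                          struct ttm_placement *placement,
                          struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo;
        size_t acc_size;
        int ret;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return -ENOMEM;

        acc_size = ttm_bo_acc_size(bdev, size, sizeof(*bo));
        ret = ttm_bo_init(bdev, bo, size, ttm_bo_type_kernel, placement,
                          1 /* page_alignment, in pages */,
                          true /* interruptible */, acc_size,
                          NULL /* sg */, NULL /* resv */, NULL /* destroy */);
        if (ret)
                return ret;     /* bo was already freed on this path */

        *p_bo = bo;
        return 0;
}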
1473 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) in ttm_bo_unmap_virtual() argument
1475 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_unmap_virtual()
1477 drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping); in ttm_bo_unmap_virtual()
1478 ttm_mem_io_free(bdev, &bo->mem); in ttm_bo_unmap_virtual()
1482 int ttm_bo_wait(struct ttm_buffer_object *bo, in ttm_bo_wait() argument
1488 if (dma_resv_test_signaled_rcu(bo->base.resv, true)) in ttm_bo_wait()
1494 timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true, in ttm_bo_wait()
1502 dma_resv_add_excl_fence(bo->base.resv, NULL); in ttm_bo_wait()
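
ttm_bo_wait() blocks on every fence attached to the BO's reservation object, or returns -EBUSY immediately when no_wait is set. A typical call before touching the backing store with the CPU:

static int mydrv_wait_idle(struct ttm_buffer_object *bo)
{
        int ret;

        ret = ttm_bo_wait(bo, true /* interruptible */, false /* no_wait */);
        if (ret)
                return ret;     /* -ERESTARTSYS on signal */

        /* all fences signaled; CPU access is now safe */
        return 0;
}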
1513 struct ttm_buffer_object *bo; in ttm_bo_swapout() local
1520 list_for_each_entry(bo, &glob->swap_lru[i], swap) { in ttm_bo_swapout()
1521 if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked, in ttm_bo_swapout()
1525 if (!ttm_bo_get_unless_zero(bo)) { in ttm_bo_swapout()
1527 dma_resv_unlock(bo->base.resv); in ttm_bo_swapout()
1543 if (bo->deleted) { in ttm_bo_swapout()
1544 ret = ttm_bo_cleanup_refs(bo, false, false, locked); in ttm_bo_swapout()
1545 ttm_bo_put(bo); in ttm_bo_swapout()
1549 ttm_bo_del_from_lru(bo); in ttm_bo_swapout()
1556 if (bo->mem.mem_type != TTM_PL_SYSTEM || in ttm_bo_swapout()
1557 bo->ttm->caching_state != tt_cached) { in ttm_bo_swapout()
1561 evict_mem = bo->mem; in ttm_bo_swapout()
1566 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx); in ttm_bo_swapout()
1575 ret = ttm_bo_wait(bo, false, false); in ttm_bo_swapout()
1579 ttm_bo_unmap_virtual(bo); in ttm_bo_swapout()
1586 if (bo->bdev->driver->swap_notify) in ttm_bo_swapout()
1587 bo->bdev->driver->swap_notify(bo); in ttm_bo_swapout()
1589 ret = ttm_tt_swapout(bo->bdev, bo->ttm, bo->persistent_swap_storage); in ttm_bo_swapout()
1598 dma_resv_unlock(bo->base.resv); in ttm_bo_swapout()
1599 ttm_bo_put(bo); in ttm_bo_swapout()
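
ttm_bo_swapout() is driven from TTM's memory-accounting shrink path when system memory runs low; each call tries to push the least recently used swappable BO out to shmem. A sketch of one shrink pass, assuming the v5.10 signature that takes the global LRU state:

static void mydrv_shrink_one(void)
{
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false,
        };

        /* returns 0 if a BO was swapped out, nonzero (e.g. -EBUSY)
         * when nothing on the swap LRU could be swapped */
        if (ttm_bo_swapout(&ttm_bo_glob, &ctx))
                pr_debug("ttm: nothing left to swap out\n");
}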
1615 void ttm_bo_tt_destroy(struct ttm_buffer_object *bo) in ttm_bo_tt_destroy() argument
1617 if (bo->ttm == NULL) in ttm_bo_tt_destroy()
1620 ttm_tt_destroy(bo->bdev, bo->ttm); in ttm_bo_tt_destroy()
1621 bo->ttm = NULL; in ttm_bo_tt_destroy()
1624 int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem) in ttm_bo_tt_bind() argument
1626 return bo->bdev->driver->ttm_tt_bind(bo->bdev, bo->ttm, mem); in ttm_bo_tt_bind()
1629 void ttm_bo_tt_unbind(struct ttm_buffer_object *bo) in ttm_bo_tt_unbind() argument
1631 bo->bdev->driver->ttm_tt_unbind(bo->bdev, bo->ttm); in ttm_bo_tt_unbind()
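
ttm_bo_tt_bind()/ttm_bo_tt_unbind() are thin wrappers around the driver's GART hooks (a v5.10-era interface; later kernels moved binding into the ttm_tt code itself). A hypothetical pair, with the mydrv_gart_* helpers assumed:

static int mydrv_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
                             struct ttm_resource *mem)
{
        /* map ttm->pages into the GPU aperture starting at mem->start */
        return mydrv_gart_bind(bdev, ttm, mem->start);
}

static void mydrv_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
        mydrv_gart_unbind(bdev, ttm);
}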