/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

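/*
 * Unwind the reservations taken so far: walk @list backwards, starting from
 * the entry before @entry, and unlock each buffer's reservation object.
 */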
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					       struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_unlock(bo->base.resv);
	}
}

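/*
 * ttm_eu_backoff_reservation - undo a successful ttm_eu_reserve_buffers()
 * call: move every buffer to the tail of its LRU list, unlock its
 * reservation object and, if a ww_acquire ticket was used, finish it.
 */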
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	spin_lock(&ttm_bo_glob.lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_move_to_lru_tail(bo, NULL);
		dma_resv_unlock(bo->base.resv);
	}
	spin_unlock(&ttm_bo_glob.lru_lock);

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

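/*
 * Typical caller-side flow, as an illustrative sketch only (not lifted from
 * an in-tree driver; the validate_bo() step and the error handling are
 * placeholders for driver-specific code):
 *
 *	struct ww_acquire_ctx ticket;
 *	LIST_HEAD(dups);
 *	int ret;
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &list, true, &dups);
 *	if (ret)
 *		return ret;
 *
 *	ret = validate_bo(...);		(driver-specific validation)
 *	if (ret) {
 *		ttm_eu_backoff_reservation(&ticket, &list);
 *		return ret;
 *	}
 *
 *	ttm_eu_fence_buffer_objects(&ticket, &list, fence);
 *
 * On failure ttm_eu_reserve_buffers() backs off the reservations it took
 * itself, so the caller only calls ttm_eu_backoff_reservation() when its own
 * validation fails. On success, ttm_eu_fence_buffer_objects() both attaches
 * the fence and drops every reservation.
 */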
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
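		/*
		 * -EALREADY means this buffer is a duplicate already reserved
		 * under the same ticket; move it over to @dups and resume the
		 * walk from the previous entry.
		 */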
		if (ret == -EALREADY && dups) {
			struct ttm_validate_buffer *safe = entry;
			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		if (!ret) {
			if (!entry->num_shared)
				continue;

			ret = dma_resv_reserve_shared(bo->base.resv,
						      entry->num_shared);
			if (!ret)
				continue;
		}

		/* uh oh, we lost out, drop every reservation and try
		 * to only reserve this buffer, then start over if
		 * this succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK) {
			ret = ttm_bo_reserve_slowpath(bo, intr, ticket);
		}

		if (!ret && entry->num_shared)
			ret = dma_resv_reserve_shared(bo->base.resv,
						      entry->num_shared);

		if (unlikely(ret != 0)) {
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* move this item to the front of the list,
		 * forces correct iteration of the loop without keeping track
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

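/*
 * ttm_eu_fence_buffer_objects - add @fence to each buffer's reservation
 * object (as a shared fence when entry->num_shared is non-zero, otherwise as
 * the exclusive fence), move the buffers to the tail of their LRU lists and
 * drop all reservations taken by ttm_eu_reserve_buffers().
 */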
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	spin_lock(&ttm_bo_glob.lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->num_shared)
			dma_resv_add_shared_fence(bo->base.resv, fence);
		else
			dma_resv_add_excl_fence(bo->base.resv, fence);
		ttm_bo_move_to_lru_tail(bo, NULL);
		dma_resv_unlock(bo->base.resv);
	}
	spin_unlock(&ttm_bo_glob.lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);