/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */

#include <linux/io.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <drm/drm_cache.h>
#include <drm/drm_prime.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_trace.h"

int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions are calling it.
 */

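/*
 * Bookkeeping helper: add (@sign > 0) or subtract the BO's size to/from
 * the device-wide GTT or VRAM usage counter matching @mem_type; the
 * VRAM counter also drives the move threshold heuristic below.
 */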
static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(size, &rdev->gtt_usage);
		else
			atomic64_sub(size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(size, &rdev->vram_usage);
		else
			atomic64_sub(size, &rdev->vram_usage);
		break;
	}
}

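/*
 * Final TTM destroy callback: undo the usage accounting, unlink the BO
 * from the GEM objects list, release its surface register and free it.
 */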
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	WARN_ON_ONCE(!list_empty(&bo->va));
	if (bo->tbo.base.import_attach)
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);
	kfree(bo);
}

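/* A TTM BO belongs to radeon iff it uses our destroy callback. */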
bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

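/**
 * radeon_ttm_placement_from_domain - build TTM placements from a domain mask
 *
 * @rbo: BO to fill the placement list for
 * @domain: mask of RADEON_GEM_DOMAIN_VRAM/_GTT/_CPU bits
 *
 * Placements are tried by TTM in array order, so the more restrictive
 * variants (e.g. VRAM beyond the CPU-visible window for NO_CPU_ACCESS
 * BOs) come first. An empty mask falls back to cached system memory.
 */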
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM) {
		/* Try placing BOs which don't need CPU access outside of the
		 * CPU accessible part of VRAM
		 */
		if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
			rbo->placements[c].fpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			rbo->placements[c].mem_type = TTM_PL_VRAM;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
						     TTM_PL_FLAG_UNCACHED;
		}

		rbo->placements[c].fpfn = 0;
		rbo->placements[c].mem_type = TTM_PL_VRAM;
		rbo->placements[c++].flags = TTM_PL_FLAG_WC |
					     TTM_PL_FLAG_UNCACHED;
	}

	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c].mem_type = TTM_PL_TT;
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c].mem_type = TTM_PL_TT;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		} else {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c].mem_type = TTM_PL_TT;
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED;
		}
	}

	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c].mem_type = TTM_PL_SYSTEM;
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
		    rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c].mem_type = TTM_PL_SYSTEM;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		} else {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c].mem_type = TTM_PL_SYSTEM;
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED;
		}
	}
	if (!c) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c].mem_type = TTM_PL_SYSTEM;
		rbo->placements[c++].flags = TTM_PL_MASK_CACHING;
	}

	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
		    (rbo->placements[i].mem_type == TTM_PL_VRAM) &&
		    !rbo->placements[i].fpfn)
			rbo->placements[i].lpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			rbo->placements[i].lpfn = 0;
	}
}

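/**
 * radeon_bo_create - allocate and initialize a radeon buffer object
 *
 * @rdev: radeon device
 * @size: size in bytes, rounded up to page granularity
 * @byte_align: byte alignment of the buffer
 * @kernel: true for kernel allocations (ttm_bo_type_kernel, validated
 *	    uninterruptibly)
 * @domain: initial RADEON_GEM_DOMAIN_* placement mask
 * @flags: RADEON_GEM_* caching/access flags
 * @sg: optional scatter/gather table for imported buffers
 * @resv: optional reservation object to share
 * @bo_ptr: where to return the new BO
 *
 * Caching flags that cannot work on the given hardware or kernel config
 * (non-PCIe GART, RV6xx GTT write-combining, 32-bit x86, x86 without
 * PAT, architectures without WC support) are masked out before the BO
 * is validated into @domain.
 */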
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel,
		     u32 domain, u32 flags, struct sg_table *sg,
		     struct dma_resv *resv,
		     struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(rdev->ddev, &bo->tbo.base, size);
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
				       RADEON_GEM_DOMAIN_GTT |
				       RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;
	/* PCI GART is always snooped */
	if (!(rdev->flags & RADEON_IS_PCIE))
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

	/* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
	 */
	if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */
#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo->flags & RADEON_GEM_GTT_WC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~RADEON_GEM_GTT_WC;
#endif

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, acc_size,
			sg, resv, &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}

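/*
 * Map the whole BO into the kernel address space, caching the result in
 * bo->kptr so that repeated calls are cheap. The mapping is returned
 * through @ptr when it is non-NULL.
 */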
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

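/* Drop the kernel mapping set up by radeon_bo_kmap(), if any. */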
void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_get(&bo->tbo);
	return bo;
}

void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;
	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}

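/**
 * radeon_bo_pin_restricted - pin a BO into a domain below an offset limit
 *
 * @bo: BO to pin
 * @domain: RADEON_GEM_DOMAIN_* to pin into
 * @max_offset: highest acceptable GPU offset (0 means no restriction)
 * @gpu_addr: optional return of the BO's GPU offset
 *
 * Pinning is refcounted; a BO that is already pinned only gets its
 * pin_count raised. Userptr BOs cannot be pinned at all, and BOs shared
 * as dma-buf cannot be pinned into VRAM.
 */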
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm))
		return -EPERM;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	if (bo->prime_shared_count && domain == RADEON_GEM_DOMAIN_VRAM) {
		/* A BO shared as a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}

	radeon_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].mem_type == TTM_PL_VRAM) &&
		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
			bo->placements[i].lpfn =
				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;

		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		if (domain == RADEON_GEM_DOMAIN_VRAM)
			bo->rdev->vram_pin_size += radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size += radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

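/*
 * Drop one pin reference. Once pin_count reaches zero the NO_EVICT
 * restriction is lifted and the BO is re-validated so TTM may move it
 * again.
 */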
int radeon_bo_unpin(struct radeon_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
#ifndef CONFIG_HIBERNATION
	if (rdev->flags & RADEON_IS_IGP) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
#endif
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
			*((unsigned long *)&bo->tbo.base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_put(&bo->tbo.base);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(rdev->mc.aper_base,
				   rdev->mc.aper_size);

	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		rdev->mc.mc_vram_size >> 20,
		(unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
	arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&rdev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *         __________________
	 * 1/4 of -|\               |
	 * VRAM    | \              |
	 *         |  \             |
	 *         |   \            |
	 *         |    \           |
	 *         |     \          |
	 *         |      \         |
	 *         |       \________|1 MB
	 *         |----------------|
	 *    VRAM 0 %             100 %
	 *         used            used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
	 */

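	/* For example, with 1 GiB of VRAM: with nothing resident the
	 * threshold is 512 MiB / 2 = 256 MiB (1/4 of VRAM), at 256 MiB
	 * used it drops to 128 MiB, and at or above 512 MiB used it
	 * bottoms out at the 1 MiB floor enforced below.
	 */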
	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}

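/*
 * Reserve all BOs on @head and validate each unpinned one into its
 * preferred domain, retrying with the allowed domains on failure. A
 * buffer whose move would exceed the threshold above is left in its
 * current domain instead, and UVD ring buffers are additionally forced
 * into the UVD segment.
 */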
int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_bo_list *lobj;
	struct list_head duplicates;
	int r;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

	INIT_LIST_HEAD(&duplicates);
	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, head, tv.head) {
		struct radeon_bo *bo = lobj->robj;
		if (!bo->pin_count) {
			u32 domain = lobj->preferred_domains;
			u32 allowed = lobj->allowed_domains;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if ((allowed & current_domain) != 0 &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo, allowed);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS &&
				    domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				ttm_eu_backoff_reservation(ticket, head);
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}

	list_for_each_entry(lobj, &duplicates, tv.head) {
		lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
		lobj->tiling_flags = lobj->robj->tiling_flags;
	}

	return 0;
}

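/*
 * Assign one of the RADEON_GEM_MAX_SURFACES surface registers to @bo
 * for tiling, stealing a register from an unpinned BO when all of them
 * are taken, and program it for the BO's current placement.
 */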
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	dma_resv_assert_held(bo->tbo.base.resv);

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

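/*
 * Validate and store tiling parameters for @bo. On CHIP_CEDAR and newer
 * the bank width/height, macro tile aspect and (stencil) tile split
 * fields encoded in @tiling_flags are range-checked first.
 */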
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
				uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	dma_resv_assert_held(bo->tbo.base.resv);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

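/*
 * Keep the BO's surface register consistent with its tiling flags and
 * placement: only RADEON_TILING_SURFACE BOs resident in VRAM hold a
 * register; it is dropped on eviction (or when @force_drop is set) and
 * re-acquired after a move back into VRAM.
 */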
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
				bool force_drop)
{
	if (!force_drop)
		dma_resv_assert_held(bo->tbo.base.resv);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

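/*
 * TTM move notify callback: drop the surface register, invalidate VM
 * mappings pointing at the BO and shift its size between the per-domain
 * usage counters.
 */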
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_resource *new_mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

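/*
 * CPU page fault callback: when the faulting BO sits in VRAM beyond the
 * CPU-visible window, try to move it into visible VRAM, falling back to
 * GTT if that fails with -ENOMEM.
 */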
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (rbo->pin_count > 0)
		return -EINVAL;

	/* hurrah the memory is not visible ! */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < rbo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((rbo->placements[i].mem_type == TTM_PL_VRAM) &&
		    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
			rbo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &rbo->placement, &ctx);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &rbo->placement, &ctx);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > rdev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
	if (unlikely(r != 0))
		return r;
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;

	r = ttm_bo_wait(&bo->tbo, true, no_wait);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}

/**
 * radeon_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
		     bool shared)
{
	struct dma_resv *resv = bo->tbo.base.resv;

	if (shared)
		dma_resv_add_shared_fence(resv, &fence->base);
	else
		dma_resv_add_excl_fence(resv, &fence->base);
}