// SPDX-License-Identifier: MIT
/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <asm/set_memory.h>
#include <asm/smp.h>

#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	do {
		if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
				     pages->sgl, pages->nents,
				     PCI_DMA_BIDIRECTIONAL,
				     DMA_ATTR_SKIP_CPU_SYNC |
				     DMA_ATTR_NO_KERNEL_MAPPING |
				     DMA_ATTR_NO_WARN))
			return 0;

		/*
		 * If the DMA remap fails, one cause can be that we have
		 * too many objects pinned in a small remapping table,
		 * such as swiotlb. Incrementally purge all other objects and
		 * try again - if there are no more pages to remove from
		 * the DMA remapper, i915_gem_shrink will return 0.
		 */
		GEM_BUG_ON(obj->mm.pages == pages);
	} while (i915_gem_shrink(to_i915(obj->base.dev),
				 obj->base.size >> PAGE_SHIFT, NULL,
				 I915_SHRINK_BOUND |
				 I915_SHRINK_UNBOUND));

	return -ENOSPC;
}

void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct device *kdev = &dev_priv->drm.pdev->dev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	if (unlikely(ggtt->do_idle_maps)) {
		/* XXX This does not prevent more requests being submitted! */
		if (intel_gt_retire_requests_timeout(ggtt->vm.gt,
						     -MAX_SCHEDULE_TIMEOUT)) {
			drm_err(&dev_priv->drm,
				"Failed to wait for idle; VT'd may hang.\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
}
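
/*
 * Pairing sketch (illustrative only, not driver code): the two helpers
 * above bracket the DMA-mapped lifetime of an object's backing store. A
 * hypothetical caller that has just populated @pages would do roughly:
 *
 *	err = i915_gem_gtt_prepare_pages(obj, pages);
 *	if (err)
 *		return err;	(-ENOSPC: even after shrinking, no space
 *				 could be freed in the DMA remapper)
 *
 *	... bind the pages into a GTT and use them ...
 *
 *	i915_gem_gtt_finish_pages(obj, pages);
 */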

/**
 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @offset: where to insert inside the GTT,
 *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
 *          (@offset + @size) must fit within the address space
 * @color: color to apply to node, if this node is not from a VMA,
 *         color must be #I915_COLOR_UNEVICTABLE
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
 * the address space (using @size and @color). If the @node does not fit, it
 * tries to evict any overlapping nodes from the GTT, including any
 * neighbouring nodes if the colors do not match (to ensure guard pages between
 * differing domains). See i915_gem_evict_for_node() for the gory details
 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
 * evicting active overlapping objects, and any overlapping node that is pinned
 * or marked as unevictable will also result in failure. An illustrative usage
 * sketch follows the function body below.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_reserve(struct i915_address_space *vm,
			 struct drm_mm_node *node,
			 u64 size, u64 offset, unsigned long color,
			 unsigned int flags)
{
	int err;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(range_overflows(offset, size, vm->total));
	GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	node->size = size;
	node->start = offset;
	node->color = color;

	err = drm_mm_reserve_node(&vm->mm, node);
	if (err != -ENOSPC)
		return err;

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	err = i915_gem_evict_for_node(vm, node, flags);
	if (err == 0)
		err = drm_mm_reserve_node(&vm->mm, node);

	return err;
}
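
/*
 * Usage sketch for i915_gem_gtt_reserve() (illustrative only, not part of
 * the driver; the ggtt pointer, size and offset are hypothetical).
 * Reserving 64 KiB at a fixed 1 MiB offset in the GGTT, refusing eviction:
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_gtt_reserve(&ggtt->vm, &node,
 *				   SZ_64K, SZ_1M,
 *				   I915_COLOR_UNEVICTABLE,
 *				   PIN_NOEVICT);
 *	if (err)
 *		return err;
 *
 * -ENOSPC here means the range is occupied by a pinned or otherwise
 * unevictable node. On success the caller owns the node until it is
 * released with drm_mm_remove_node(). Callers are expected to hold
 * vm->mutex, as asserted by i915_gem_gtt_insert() below.
 */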

/*
 * random_offset - returns a random, @align-aligned offset such that
 * [offset, offset + len) still lies inside [start, end). Used by
 * i915_gem_gtt_insert() to pick a random placement to evict when no
 * suitable hole is available.
 */
static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
{
	u64 range, addr;

	GEM_BUG_ON(range_overflows(start, len, end));
	GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));

	range = round_down(end - len, align) - round_up(start, align);
	if (range) {
		if (sizeof(unsigned long) == sizeof(u64)) {
			addr = get_random_long();
		} else {
			addr = get_random_int();
			if (range > U32_MAX) {
				addr <<= 32;
				addr |= get_random_int();
			}
		}
		div64_u64_rem(addr, range, &addr);
		start += addr;
	}

	return round_up(start, align);
}

/**
 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @alignment: required alignment of starting offset, may be 0 but
 *             if specified, this must be a power-of-two and at least
 *             #I915_GTT_MIN_ALIGNMENT
 * @color: color to apply to node
 * @start: start of any range restriction inside GTT (0 for all),
 *         must be #I915_GTT_PAGE_SIZE aligned
 * @end: end of any range restriction inside GTT (U64_MAX for all),
 *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_insert() first searches for an available hole into which
 * it can insert the node. The hole address is aligned to @alignment and
 * its @size must then fit entirely within the [@start, @end] bounds. The
 * nodes on either side of the hole must match @color, or else a guard page
 * will be inserted between the two nodes (or the node evicted). If no
 * suitable hole is found, first a victim is randomly selected and tested
 * for eviction, and only then is the LRU list of objects within the GTT
 * scanned to find the first set of replacement nodes to create the hole.
 * Those old overlapping nodes are evicted from the GTT (and so must be
 * rebound before any future use). Any node that is currently pinned cannot
 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
 * active and #PIN_NONBLOCK is specified, that node is also skipped when
 * searching for an eviction candidate. See i915_gem_evict_something() for
 * the gory details on the eviction algorithm. An illustrative usage sketch
 * follows the function body below.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_insert(struct i915_address_space *vm,
			struct drm_mm_node *node,
			u64 size, u64 alignment, unsigned long color,
			u64 start, u64 end, unsigned int flags)
{
	enum drm_mm_insert_mode mode;
	u64 offset;
	int err;

	lockdep_assert_held(&vm->mutex);

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(start >= end);
	GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	if (unlikely(range_overflows(start, size, end)))
		return -ENOSPC;

	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
		return -ENOSPC;

	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGHEST;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;

	/*
	 * We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
	 * so we know that we always have a minimum alignment of 4096.
	 * The drm_mm range manager is optimised to return results
	 * with zero alignment, so where possible use the optimal
	 * path.
	 */
	BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
	if (alignment <= I915_GTT_MIN_ALIGNMENT)
		alignment = 0;

	err = drm_mm_insert_node_in_range(&vm->mm, node,
					  size, alignment, color,
					  start, end, mode);
	if (err != -ENOSPC)
		return err;

	if (mode & DRM_MM_INSERT_ONCE) {
		err = drm_mm_insert_node_in_range(&vm->mm, node,
						  size, alignment, color,
						  start, end,
						  DRM_MM_INSERT_BEST);
		if (err != -ENOSPC)
			return err;
	}

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	/*
	 * No free space, pick a slot at random.
	 *
	 * There is a pathological case here using a GTT shared between
	 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
	 *
	 *    |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
	 *         (64k objects)             (448k objects)
	 *
	 * Now imagine that the eviction LRU is ordered top-down (just because
	 * pathology meets real life), and that we need to evict an object to
	 * make room inside the aperture. The eviction scan then has to walk
	 * the 448k list before it finds one within range. And now imagine that
	 * it has to search for a new hole between every byte inside the memcpy,
	 * for several simultaneous clients.
	 *
	 * On a full-ppgtt system, if we have run out of available space, there
	 * will be lots and lots of objects in the eviction list! Again,
	 * searching that LRU list may be slow if we are also applying any
	 * range restrictions (e.g. restriction to low 4GiB) and so, for
	 * simplicity and similarity between the different GTTs, try the single
	 * random replacement first.
	 */
	offset = random_offset(start, end,
			       size, alignment ?: I915_GTT_MIN_ALIGNMENT);
	err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
	if (err != -ENOSPC)
		return err;

	if (flags & PIN_NOSEARCH)
		return -ENOSPC;

	/* Randomly selected placement is pinned, do a search */
	err = i915_gem_evict_something(vm, size, alignment, color,
				       start, end, flags);
	if (err)
		return err;

	return drm_mm_insert_node_in_range(&vm->mm, node,
					   size, alignment, color,
					   start, end, DRM_MM_INSERT_EVICT);
}
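
/*
 * Usage sketch for i915_gem_gtt_insert() (illustrative only; the vm
 * pointer, size and color are hypothetical). Allocating 2 MiB anywhere
 * in the low 4 GiB of the address space, preferring the top of that range:
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_gtt_insert(vm, &node,
 *				  SZ_2M, 0, I915_COLOR_UNEVICTABLE,
 *				  0, SZ_4G, PIN_HIGH);
 *	if (err)
 *		return err;
 *
 * With @alignment == 0 the node is only page aligned; PIN_NOEVICT and/or
 * PIN_NOSEARCH may be added to @flags to stop before the eviction passes
 * above. The caller must hold vm->mutex, as asserted at function entry.
 */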

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_gtt.c"
#endif