/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_object.h"
#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_engine.h"
#include "intel_ring.h"
#include "intel_timeline.h"

unsigned int intel_ring_update_space(struct intel_ring *ring)
{
	unsigned int space;

	space = __intel_ring_space(ring->head, ring->emit, ring->size);

	ring->space = space;
	return space;
}
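
/*
 * __intel_ring_space() is a helper in intel_ring.h; in essence it returns
 * the free bytes between the emit offset and the hardware head, modulo the
 * (power-of-two) ring size, keeping a cacheline of slack so that head and
 * tail never land on the same cacheline:
 *
 *	space = (head - tail - CACHELINE_BYTES) & (size - 1);
 */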

void __intel_ring_pin(struct intel_ring *ring)
{
	GEM_BUG_ON(!atomic_read(&ring->pin_count));
	atomic_inc(&ring->pin_count);
}

int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
{
	struct i915_vma *vma = ring->vma;
	unsigned int flags;
	void *addr;
	int ret;

	if (atomic_fetch_inc(&ring->pin_count))
		return 0;

	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
	flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);

	if (vma->obj->stolen)
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;

	ret = i915_ggtt_pin(vma, ww, 0, flags);
	if (unlikely(ret))
		goto err_unpin;

	if (i915_vma_is_map_and_fenceable(vma))
		addr = (void __force *)i915_vma_pin_iomap(vma);
	else
		addr = i915_gem_object_pin_map(vma->obj,
					       i915_coherent_map_type(vma->vm->i915));
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto err_ring;
	}

	i915_vma_make_unshrinkable(vma);

	/* Discard any unused bytes beyond that submitted to hw. */
	intel_ring_reset(ring, ring->emit);

	ring->vaddr = addr;
	return 0;

err_ring:
	i915_vma_unpin(vma);
err_unpin:
	atomic_dec(&ring->pin_count);
	return ret;
}
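
/*
 * Pinning is reference counted: only the first intel_ring_pin() binds the
 * ring into the GGTT and maps it for the CPU; nested calls merely bump
 * ring->pin_count. A minimal caller sketch (error handling elided):
 *
 *	err = intel_ring_pin(ring, ww);
 *	if (err)
 *		return err;
 *	... write commands through ring->vaddr ...
 *	intel_ring_unpin(ring);
 */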

void intel_ring_reset(struct intel_ring *ring, u32 tail)
{
	tail = intel_ring_wrap(ring, tail);
	ring->tail = tail;
	ring->head = tail;
	ring->emit = tail;
	intel_ring_update_space(ring);
}

void intel_ring_unpin(struct intel_ring *ring)
{
	struct i915_vma *vma = ring->vma;

	if (!atomic_dec_and_test(&ring->pin_count))
		return;

	i915_vma_unset_ggtt_write(vma);
	if (i915_vma_is_map_and_fenceable(vma))
		i915_vma_unpin_iomap(vma);
	else
		i915_gem_object_unpin_map(vma->obj);

	i915_vma_make_purgeable(vma);
	i915_vma_unpin(vma);
}

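/*
 * Back the ring with stolen memory when the GGTT has a mappable aperture
 * (so the iomap path in intel_ring_pin() can be used), falling back to a
 * plain internal object otherwise.
 */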
static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
{
	struct i915_address_space *vm = &ggtt->vm;
	struct drm_i915_private *i915 = vm->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = ERR_PTR(-ENODEV);
	if (i915_ggtt_has_aperture(ggtt))
		obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Mark ring buffers as read-only from the GPU side (so no stray
	 * overwrites), if supported by the platform's GGTT.
	 */
	if (vm->has_read_only)
		i915_gem_object_set_readonly(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_ring *ring;
	struct i915_vma *vma;

	GEM_BUG_ON(!is_power_of_2(size));
	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return ERR_PTR(-ENOMEM);

	kref_init(&ring->ref);
	ring->size = size;
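	/*
	 * ring->wrap is the left shift that moves a ring offset's top bit
	 * into the sign bit of a u32: intel_ring_direction() (see
	 * intel_ring.h) uses it to compare two wrapping offsets by the sign
	 * of (next - prev) << ring->wrap.
	 */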
	ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size);

	/*
	 * Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = size;
	if (IS_I830(i915) || IS_I845G(i915))
		ring->effective_size -= 2 * CACHELINE_BYTES;

	intel_ring_update_space(ring);

	vma = create_ring_vma(engine->gt->ggtt, size);
	if (IS_ERR(vma)) {
		kfree(ring);
		return ERR_CAST(vma);
	}
	ring->vma = vma;

	return ring;
}

void intel_ring_free(struct kref *ref)
{
	struct intel_ring *ring = container_of(ref, typeof(*ring), ref);

	i915_vma_put(ring->vma);
	kfree(ring);
}

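/*
 * Find the oldest request on @tl whose retirement would release at least
 * @bytes of ring space, wait for it to complete, then retire up to it.
 * This is the slow path for intel_ring_begin(); rings are sized so that
 * waiting here is rare, hence the noinline.
 */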
static noinline int
wait_for_space(struct intel_ring *ring,
	       struct intel_timeline *tl,
	       unsigned int bytes)
{
	struct i915_request *target;
	long timeout;

	if (intel_ring_update_space(ring) >= bytes)
		return 0;

	GEM_BUG_ON(list_empty(&tl->requests));
	list_for_each_entry(target, &tl->requests, link) {
		if (target->ring != ring)
			continue;

		/* Would completion of this request free enough space? */
		if (bytes <= __intel_ring_space(target->postfix,
						ring->emit, ring->size))
			break;
	}

	if (GEM_WARN_ON(&target->link == &tl->requests))
		return -ENOSPC;

	timeout = i915_request_wait(target,
				    I915_WAIT_INTERRUPTIBLE,
				    MAX_SCHEDULE_TIMEOUT);
	if (timeout < 0)
		return timeout;

	i915_request_retire_upto(target);

	intel_ring_update_space(ring);
	GEM_BUG_ON(ring->space < bytes);
	return 0;
}

u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
{
	struct intel_ring *ring = rq->ring;
	const unsigned int remain_usable = ring->effective_size - ring->emit;
	const unsigned int bytes = num_dwords * sizeof(u32);
	unsigned int need_wrap = 0;
	unsigned int total_bytes;
	u32 *cs;

	/* Packets must be qword aligned. */
	GEM_BUG_ON(num_dwords & 1);

	total_bytes = bytes + rq->reserved_space;
	GEM_BUG_ON(total_bytes > ring->effective_size);

	if (unlikely(total_bytes > remain_usable)) {
		const int remain_actual = ring->size - ring->emit;

		if (bytes > remain_usable) {
			/*
			 * Not enough space for the basic request. So need to
			 * flush out the remainder and then wait for
			 * base + reserved.
			 */
			total_bytes += remain_actual;
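			/*
			 * Packets are qword aligned, so bit 0 of the wrap
			 * length is free to serve as the "wrap needed" flag
			 * (keeping need_wrap non-zero even for a zero
			 * remainder); it is cleared again before use.
			 */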
			need_wrap = remain_actual | 1;
		} else {
			/*
			 * The base request will fit but the reserved space
			 * falls off the end. So we don't need an immediate
			 * wrap and only need to effectively wait for the
			 * reserved size from the start of ringbuffer.
			 */
			total_bytes = rq->reserved_space + remain_actual;
		}
	}

	if (unlikely(total_bytes > ring->space)) {
		int ret;

		/*
		 * Space is reserved in the ringbuffer for finalising the
		 * request, as that cannot be allowed to fail. During request
		 * finalisation, reserved_space is set to 0 to stop the
		 * overallocation and the assumption is that then we never need
		 * to wait (which has the risk of failing with EINTR).
		 *
		 * See also i915_request_alloc() and i915_request_add().
		 */
		GEM_BUG_ON(!rq->reserved_space);

		ret = wait_for_space(ring,
				     i915_request_timeline(rq),
				     total_bytes);
		if (unlikely(ret))
			return ERR_PTR(ret);
	}

	if (unlikely(need_wrap)) {
		need_wrap &= ~1;
		GEM_BUG_ON(need_wrap > ring->space);
		GEM_BUG_ON(ring->emit + need_wrap > ring->size);
		GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));

		/* Fill the tail with MI_NOOP (MI_NOOP == 0, so zero-fill) */
		memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
		ring->space -= need_wrap;
		ring->emit = 0;
	}

	GEM_BUG_ON(ring->emit > ring->size - bytes);
	GEM_BUG_ON(ring->space < bytes);
	cs = ring->vaddr + ring->emit;
	GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
	ring->emit += bytes;
	ring->space -= bytes;

	return cs;
}
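
/*
 * Callers pair intel_ring_begin() with intel_ring_advance(); a minimal
 * emission sketch:
 *
 *	cs = intel_ring_begin(rq, 2);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	intel_ring_advance(rq, cs);
 */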

/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct i915_request *rq)
{
	int num_dwords;
	void *cs;

	num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_DWORDS - num_dwords;
	GEM_BUG_ON(num_dwords & 1);

	cs = intel_ring_begin(rq, num_dwords);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
	intel_ring_advance(rq, cs + num_dwords);

	GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring.c"
#endif