xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/radeon/r600_dma.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"

u32 r600_gpu_check_soft_reset(struct radeon_device *rdev);

/*
 * DMA
 * Starting with R600, the GPU has an asynchronous
 * DMA engine.  The programming model is very similar
 * to the 3D engine (ring buffer, IBs, etc.), but the
 * DMA controller has its own packet format that is
 * different from the PM4 format used by the 3D engine.
 * It supports copying data, writing embedded data,
 * solid fills, and a number of other things.  It also
 * has support for tiling/detiling of buffers.
 */

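/*
 * For orientation: each async DMA packet begins with a single header
 * dword.  A rough sketch of the layout, going by the DMA_PACKET()
 * macro in r600d.h (a reader's note, not authoritative hardware
 * documentation):
 *
 *   DMA_PACKET(cmd, t, s, n)
 *     bits 31:28  cmd - packet type (WRITE, COPY, FENCE, TRAP, ...)
 *     bit  23     t   - tiling select for copy packets
 *     bit  22     s   - sub-opcode flag (e.g. semaphore signal vs. wait)
 *     bits 15:0   n   - count, typically in dwords
 *
 * For example, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1), used by the ring
 * and IB tests below, encodes a write of a single dword.
 */
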
/**
 * r600_dma_get_rptr - get the current read pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current rptr from the hardware (r6xx+).
 */
uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = rdev->wb.wb[ring->rptr_offs/4];
	else
		rptr = RREG32(DMA_RB_RPTR);

	return (rptr & 0x3fffc) >> 2;
}

/**
 * r600_dma_get_wptr - get the current write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current wptr from the hardware (r6xx+).
 */
uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	return (RREG32(DMA_RB_WPTR) & 0x3fffc) >> 2;
}

/**
 * r600_dma_set_wptr - commit the write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Write the wptr back to the hardware (r6xx+).
 */
void r600_dma_set_wptr(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	WREG32(DMA_RB_WPTR, (ring->wptr << 2) & 0x3fffc);
}

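/*
 * A note on the masking in the helpers above: DMA_RB_RPTR and
 * DMA_RB_WPTR hold dword-aligned byte offsets into the ring buffer,
 * while radeon_ring tracks its pointers in dwords, hence the
 * conversions in both directions:
 *
 *   register -> ring:  (reg & 0x3fffc) >> 2   (bytes to dwords)
 *   ring -> register:  (wptr << 2) & 0x3fffc  (dwords to bytes)
 *
 * e.g. a ring wptr of 16 dwords is committed as byte offset 64.
 */
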
/**
 * r600_dma_stop - stop the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engine (r6xx-evergreen).
 */
void r600_dma_stop(struct radeon_device *rdev)
{
	u32 rb_cntl = RREG32(DMA_RB_CNTL);

	if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	rb_cntl &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL, rb_cntl);

	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
}

/**
 * r600_dma_resume - setup and start the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA ring buffer and enable it (r6xx-evergreen).
 * Returns 0 for success, error for failure.
 */
int r600_dma_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	u32 rb_cntl, dma_cntl, ib_cntl;
	u32 rb_bufsz;
	int r;

	WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
	WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);

	/* Set ring buffer size in dwords */
	rb_bufsz = order_base_2(ring->ring_size / 4);
	rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
	rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
	WREG32(DMA_RB_CNTL, rb_cntl);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(DMA_RB_RPTR, 0);
	WREG32(DMA_RB_WPTR, 0);

	/* set the wb address whether it's enabled or not */
	WREG32(DMA_RB_RPTR_ADDR_HI,
	       upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
	WREG32(DMA_RB_RPTR_ADDR_LO,
	       ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));

	if (rdev->wb.enabled)
		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

	WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);

	/* enable DMA IBs */
	ib_cntl = DMA_IB_ENABLE;
#ifdef __BIG_ENDIAN
	ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
	WREG32(DMA_IB_CNTL, ib_cntl);

	dma_cntl = RREG32(DMA_CNTL);
	dma_cntl &= ~CTXEMPTY_INT_ENABLE;
	WREG32(DMA_CNTL, dma_cntl);

	if (rdev->family >= CHIP_RV770)
		WREG32(DMA_MODE, 1);

	ring->wptr = 0;
	WREG32(DMA_RB_WPTR, ring->wptr << 2);

	WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);

	ring->ready = true;

	r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}

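/*
 * Worked example for the ring sizing in r600_dma_resume() (a sketch,
 * assuming the ring-size field of DMA_RB_CNTL starts at bit 1, as the
 * "rb_bufsz << 1" shift implies): a 64KB ring holds 65536 / 4 = 16384
 * dwords, order_base_2(16384) = 14, so rb_cntl starts out as
 * 14 << 1 = 0x1c before the swap, write-back and enable bits are
 * OR'ed in.
 */
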
/**
 * r600_dma_fini - tear down the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engine and free the ring (r6xx-evergreen).
 */
void r600_dma_fini(struct radeon_device *rdev)
{
	r600_dma_stop(rdev);
	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
}

/**
 * r600_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = r600_gpu_check_soft_reset(rdev);

	if (!(reset_mask & RADEON_RESET_DMA)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}

/**
 * r600_dma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value
 * to memory (r6xx-SI).
 * Returns 0 for success, error for failure.
 */
int r600_dma_ring_test(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	unsigned i;
	int r;
	unsigned index;
	u32 tmp;
	u64 gpu_addr;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		index = R600_WB_DMA_RING_TEST_OFFSET;
	else
		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

	gpu_addr = rdev->wb.gpu_addr + index;

	tmp = 0xCAFEDEAD;
	rdev->wb.wb[index/4] = cpu_to_le32(tmp);

	r = radeon_ring_lock(rdev, ring, 4);
	if (r) {
		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
	radeon_ring_write(ring, lower_32_bits(gpu_addr));
	radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring, false);

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * r600_dma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number, and a DMA trap packet to generate
 * an interrupt if needed (r6xx-r7xx).
 */
void r600_dma_fence_ring_emit(struct radeon_device *rdev,
			      struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* write the fence */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
	radeon_ring_write(ring, lower_32_bits(fence->seq));
	/* generate an interrupt */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
}

/**
 * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @semaphore: radeon semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (r6xx-SI).
 */
bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
				  struct radeon_ring *ring,
				  struct radeon_semaphore *semaphore,
				  bool emit_wait)
{
	u64 addr = semaphore->gpu_addr;
	u32 s = emit_wait ? 0 : 1;

	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, upper_32_bits(addr) & 0xff);

	return true;
}

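/*
 * In the semaphore packet above, the 's' field of the header selects
 * the sub-operation; going by the emit_wait handling, 1 appears to
 * mean signal and 0 wait.  The second and third dwords carry the
 * 40-bit GPU address of the semaphore location.
 */
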
/**
 * r600_dma_ib_test - test an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (r6xx-SI).
 * Returns 0 on success, error on failure.
 */
int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp = 0;
	u64 gpu_addr;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		index = R600_WB_DMA_RING_TEST_OFFSET;
	else
		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

	gpu_addr = rdev->wb.gpu_addr + index;

	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
	ib.ptr[3] = 0xDEADBEEF;
	ib.length_dw = 4;

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
		RADEON_USEC_IB_TEST_TIMEOUT));
	if (r < 0) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	} else if (r == 0) {
		DRM_ERROR("radeon: fence wait timed out.\n");
		return -ETIMEDOUT;
	}
	r = 0;
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
	radeon_ib_free(rdev, &ib);
	return r;
}

/**
 * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (r6xx-r7xx).
 */
void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	if (rdev->wb.enabled) {
		u32 next_rptr = ring->wptr + 4;
		while ((next_rptr & 7) != 5)
			next_rptr++;
		next_rptr += 3;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
		radeon_ring_write(ring, next_rptr);
	}

	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad as necessary with NOPs.
	 */
	while ((ring->wptr & 7) != 5)
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}

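/*
 * A quick check of the alignment math in r600_dma_ring_ib_execute():
 * the INDIRECT_BUFFER packet is 3 dwords long, so starting it at an
 * offset of 5 (mod 8) makes it end exactly on an 8 DW boundary
 * (5 + 3 = 8), which is why the padding loop spins until
 * (ring->wptr & 7) == 5.  The next_rptr computation uses the same
 * trick: wptr + 4 skips the WRITE packet itself, the round-up finds
 * where the IB packet will start, and + 3 lands just past it, which
 * is where the rptr will be once the IB packet has been consumed.
 */
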
/**
 * r600_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU pages using the DMA engine (r6xx).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
				   uint64_t src_offset, uint64_t dst_offset,
				   unsigned num_gpu_pages,
				   struct dma_resv *resv)
{
	struct radeon_fence *fence;
	struct radeon_sync sync;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_dw, cur_size_in_dw;
	int i, num_loops;
	int r = 0;

	radeon_sync_create(&sync);

	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
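	/*
	 * Sizing note (a reader's sketch based on the values used here):
	 * each COPY packet moves at most 0xFFFE dwords and occupies 4
	 * ring dwords, so a single 4KB GPU page (1024 dwords) fits in
	 * one packet, and the reservation below of num_loops * 4 + 8
	 * dwords leaves headroom for the fence emitted at the end.
	 */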
	r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_sync_resv(rdev, &sync, resv, false);
	radeon_sync_rings(rdev, &sync, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_dw = size_in_dw;
		if (cur_size_in_dw > 0xFFFE)
			cur_size_in_dw = 0xFFFE;
		size_in_dw -= cur_size_in_dw;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
		radeon_ring_write(ring, src_offset & 0xfffffffc);
		radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
					 (upper_32_bits(src_offset) & 0xff)));
		src_offset += cur_size_in_dw * 4;
		dst_offset += cur_size_in_dw * 4;
	}

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_sync_free(rdev, &sync, fence);

	return fence;
}
497