xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/radeon/evergreen_dma.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "radeon.h"
#include "radeon_asic.h"
#include "evergreend.h"

u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev);

/**
 * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and a DMA trap packet to generate
 * an interrupt if needed (evergreen-SI).
 */
void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
				   struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
	/* write the fence */
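	/* the packet below carries a 40-bit, dword-aligned address:
	 * the low 32 bits first, then the top 8 bits
	 */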
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
	radeon_ring_write(ring, fence->seq);
	/* generate an interrupt */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
	/* flush HDP */
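	/* SRBM write payload: the 0xf << 16 field is presumably the byte-enable
	 * mask, with the low bits selecting the register in dword units
	 */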
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);
}

/**
 * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (evergreen).
 */
void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
				   struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	if (rdev->wb.enabled) {
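		/* next_rptr is the ring offset just past the IB packet emitted
		 * below: 4 dwords for this WRITE packet, NOP padding until the
		 * IB packet can start at offset 5 mod 8, then 3 dwords for the
		 * IB packet itself
		 */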
		u32 next_rptr = ring->wptr + 4;
		while ((next_rptr & 7) != 5)
			next_rptr++;
		next_rptr += 3;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
		radeon_ring_write(ring, next_rptr);
	}

	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad as necessary with NOPs.
	 */
	while ((ring->wptr & 7) != 5)
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}

/**
 * evergreen_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU pages using the DMA engine (evergreen-cayman).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
					uint64_t src_offset,
					uint64_t dst_offset,
					unsigned num_gpu_pages,
					struct dma_resv *resv)
{
	struct radeon_fence *fence;
	struct radeon_sync sync;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_dw, cur_size_in_dw;
	int i, num_loops;
	int r = 0;

	radeon_sync_create(&sync);

	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
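	/* a single DMA_PACKET_COPY moves at most 0xfffff dwords (the count
	 * capped in the loop below), so split the request into that many passes
	 */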
	num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
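	/* 5 dwords per copy packet per pass; the extra 11 is presumably
	 * headroom for the sync, fence and padding packets
	 */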
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_sync_resv(rdev, &sync, resv, false);
	radeon_sync_rings(rdev, &sync, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_dw = size_in_dw;
		if (cur_size_in_dw > 0xFFFFF)
			cur_size_in_dw = 0xFFFFF;
		size_in_dw -= cur_size_in_dw;
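		/* copy packet: header with the dword count, then the low 32 bits
		 * of the dst and src addresses, then their top 8 bits
		 */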
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
		radeon_ring_write(ring, src_offset & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
		src_offset += cur_size_in_dw * 4;
		dst_offset += cur_size_in_dw * 4;
	}

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_sync_free(rdev, &sync, fence);

	return fence;
}

/**
 * evergreen_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);

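	/* if the soft-reset status doesn't implicate the DMA block, treat the
	 * ring as making progress: refresh the lockup tracker and report no hang
	 */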
	if (!(reset_mask & RADEON_RESET_DMA)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}