xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/radeon/ni_dma.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "nid.h"

u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev);

/*
 * DMA
 * Starting with R600, the GPU has an asynchronous
 * DMA engine.  The programming model is very similar
 * to the 3D engine (ring buffer, IBs, etc.), but the
 * DMA controller has its own packet format that is
 * different from the PM4 format used by the 3D engine.
 * It supports copying data, writing embedded data,
 * solid fills, and a number of other things.  It also
 * has support for tiling/detiling of buffers.
 * Cayman and newer support two asynchronous DMA engines.
 */
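
/*
 * As a rough orientation for the packets built below (the authoritative
 * encoding lives in the DMA_PACKET()/DMA_IB_PACKET() macros in nid.h;
 * the field names here are informal), a DMA packet header looks like:
 *
 *   [31:28] opcode - COPY, WRITE, INDIRECT_BUFFER, NOP, SRBM_WRITE, ...
 *   [23]    t      - tiling select for copy packets
 *   [22]    s      - sub-opcode select
 *   [19:0]  n      - count field (meaning depends on the opcode)
 *
 * e.g. DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0) emits a one-dword NOP.
 */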

/**
 * cayman_dma_get_rptr - get the current read pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current rptr from the hardware (cayman+).
 */
uint32_t cayman_dma_get_rptr(struct radeon_device *rdev,
			     struct radeon_ring *ring)
{
	u32 rptr, reg;

	if (rdev->wb.enabled) {
		rptr = rdev->wb.wb[ring->rptr_offs/4];
	} else {
		if (ring->idx == R600_RING_TYPE_DMA_INDEX)
			reg = DMA_RB_RPTR + DMA0_REGISTER_OFFSET;
		else
			reg = DMA_RB_RPTR + DMA1_REGISTER_OFFSET;

		rptr = RREG32(reg);
	}

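	/* the hardware tracks the rptr as a byte offset into the ring;
	 * mask it to the ring range and convert it to a dword index */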
	return (rptr & 0x3fffc) >> 2;
}

/**
 * cayman_dma_get_wptr - get the current write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current wptr from the hardware (cayman+).
 */
uint32_t cayman_dma_get_wptr(struct radeon_device *rdev,
			     struct radeon_ring *ring)
{
	u32 reg;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		reg = DMA_RB_WPTR + DMA0_REGISTER_OFFSET;
	else
		reg = DMA_RB_WPTR + DMA1_REGISTER_OFFSET;

	return (RREG32(reg) & 0x3fffc) >> 2;
}

/**
 * cayman_dma_set_wptr - commit the write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Write the wptr back to the hardware (cayman+).
 */
void cayman_dma_set_wptr(struct radeon_device *rdev,
			 struct radeon_ring *ring)
{
	u32 reg;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		reg = DMA_RB_WPTR + DMA0_REGISTER_OFFSET;
	else
		reg = DMA_RB_WPTR + DMA1_REGISTER_OFFSET;

	WREG32(reg, (ring->wptr << 2) & 0x3fffc);
}

/**
 * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (cayman-SI).
 */
void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
				struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;

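	/* When writeback is enabled, predict where the rptr will land once
	 * this submission completes: 4 dwords for the WRITE packet below,
	 * NOP padding up to (wptr & 7) == 5, then the 3-dword IB packet,
	 * and stash that value in the rptr writeback slot.
	 */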
	if (rdev->wb.enabled) {
		u32 next_rptr = ring->wptr + 4;
		while ((next_rptr & 7) != 5)
			next_rptr++;
		next_rptr += 3;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
		radeon_ring_write(ring, next_rptr);
	}

	/* The indirect buffer packet must end on an 8 DW boundary in the
	 * DMA ring.  Pad as necessary with NOPs.
	 */
	while ((ring->wptr & 7) != 5)
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
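	/* 3-dword IB packet: header, 32-byte-aligned base address low bits,
	 * then (length in dwords << 12) | upper address byte */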
	radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}

/**
 * cayman_dma_stop - stop the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines (cayman-SI).
 */
void cayman_dma_stop(struct radeon_device *rdev)
{
	u32 rb_cntl;

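	/* If a DMA engine backs TTM buffer moves, restrict TTM to
	 * CPU-visible VRAM while the engine is down, since any fallback
	 * copies have to go through the CPU. */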
	if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
	    (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	/* dma0 */
	rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
	rb_cntl &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);

	/* dma1 */
	rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
	rb_cntl &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);

	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
}

/**
 * cayman_dma_resume - set up and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA ring buffers and enable them (cayman-SI).
 * Returns 0 for success, error for failure.
 */
int cayman_dma_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 rb_cntl, dma_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 reg_offset, wb_offset;
	int i, r;

	for (i = 0; i < 2; i++) {
		if (i == 0) {
			ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
			reg_offset = DMA0_REGISTER_OFFSET;
			wb_offset = R600_WB_DMA_RPTR_OFFSET;
		} else {
			ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
			reg_offset = DMA1_REGISTER_OFFSET;
			wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
		}

		WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
		WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);

		/* Set the ring buffer size: the size field holds the log2
		 * of the ring size in dwords (ring_size is in bytes) */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
		WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(DMA_RB_RPTR + reg_offset, 0);
		WREG32(DMA_RB_WPTR + reg_offset, 0);

		/* set the wb address whether it's enabled or not */
		WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
		       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
		WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
		       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

		if (rdev->wb.enabled)
			rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

		WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);

		/* enable DMA IBs */
		ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
#ifdef __BIG_ENDIAN
		ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
		WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);

		dma_cntl = RREG32(DMA_CNTL + reg_offset);
		dma_cntl &= ~CTXEMPTY_INT_ENABLE;
		WREG32(DMA_CNTL + reg_offset, dma_cntl);

		ring->wptr = 0;
		WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);

		WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);

		ring->ready = true;

		r = radeon_ring_test(rdev, ring->idx, ring);
		if (r) {
			ring->ready = false;
			return r;
		}
	}

	if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
	    (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}

/**
 * cayman_dma_fini - tear down the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines and free the rings (cayman-SI).
 */
void cayman_dma_fini(struct radeon_device *rdev)
{
	cayman_dma_stop(rdev);
	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
}

/**
 * cayman_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
	u32 mask;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		mask = RADEON_RESET_DMA;
	else
		mask = RADEON_RESET_DMA1;

	if (!(reset_mask & mask)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}

/**
 * cayman_dma_vm_copy_pages - update PTEs by copying them from the GART
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr where to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using the DMA (cayman/TN).
 */
void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
			      struct radeon_ib *ib,
			      uint64_t pe, uint64_t src,
			      unsigned count)
{
	unsigned ndw;

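	/* Each 64-bit PTE is two dwords, and a single COPY packet can move
	 * at most 0xFFFFE dwords, so large updates are split into chunks. */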
	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
						      0, 0, ndw);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = lower_32_bits(src);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;

		pe += ndw * 4;
		src += ndw * 4;
		count -= ndw / 2;
	}
}

/**
 * cayman_dma_vm_write_pages - update PTEs by writing them manually
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Update PTEs by writing them manually using the DMA (cayman/TN).
 */
void cayman_dma_vm_write_pages(struct radeon_device *rdev,
			       struct radeon_ib *ib,
			       uint64_t pe,
			       uint64_t addr, unsigned count,
			       uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		/* for non-physically contiguous pages (system) */
		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE,
						      0, 0, ndw);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			if (flags & R600_PTE_SYSTEM) {
				value = radeon_vm_map_gart(rdev, addr);
			} else if (flags & R600_PTE_VALID) {
				value = addr;
			} else {
				value = 0;
			}
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = value;
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
		}
	}
}

/**
 * cayman_dma_vm_set_pages - update the page tables using the DMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Update the page tables using the DMA (cayman/TN).
 */
void cayman_dma_vm_set_pages(struct radeon_device *rdev,
			     struct radeon_ib *ib,
			     uint64_t pe,
			     uint64_t addr, unsigned count,
			     uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

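	/* The PTE_PDE packet has the hardware generate the entries itself
	 * from a base value, a mask and a per-entry increment, so one
	 * fixed 9-dword packet covers a whole run of contiguous pages. */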
	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		if (flags & R600_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;

		pe += ndw * 4;
		addr += (ndw / 2) * incr;
		count -= ndw / 2;
	}
}

/**
 * cayman_dma_vm_pad_ib - pad the IB to the required number of dw
 *
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a multiple of 8 dwords (cayman/TN).
 */
void cayman_dma_vm_pad_ib(struct radeon_ib *ib)
{
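	/* the async DMA engines require submissions to be 8-dword aligned,
	 * matching the ring-level padding in cayman_dma_ring_ib_execute() */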
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
}

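/**
 * cayman_dma_vm_flush - flush the TLB via the DMA ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @vm_id: vm instance to flush
 * @pd_addr: address of the page directory
 *
 * Update the page table base for the given VM and flush its TLB
 * using the DMA ring (cayman/TN).
 */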
void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
			 unsigned vm_id, uint64_t pd_addr)
{
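	/* SRBM_WRITE second dword: the low 16 bits hold the register dword
	 * address; the 0xf in bits [19:16] is presumably the byte-enable
	 * mask (all four bytes enabled), mirroring the SRBM write encoding
	 * used on later asics. */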
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
	radeon_ring_write(ring, pd_addr >> 12);

	/* flush hdp cache */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);

	/* bits 0-7 are the VM contexts 0-7 */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
	radeon_ring_write(ring, 1 << vm_id);

	/* wait for invalidate to complete */
	radeon_ring_write(ring, DMA_SRBM_READ_PACKET);
	radeon_ring_write(ring, (0xff << 20) | (VM_INVALIDATE_REQUEST >> 2));
	radeon_ring_write(ring, 0); /* mask */
	radeon_ring_write(ring, 0); /* value */
}
473