/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "si.h"
#include "sid.h"

const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	DMA0_REGISTER_OFFSET,
	DMA1_REGISTER_OFFSET
};

static void si_dma_set_ring_funcs(struct amdgpu_device *adev);
static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
static void si_dma_set_irq_funcs(struct amdgpu_device *adev);

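/**
 * si_dma_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the writeback slot; the engine copies
 * it there because rptr writeback is enabled in si_dma_start() (SI).
 */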
static uint64_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs>>2];
}

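/**
 * si_dma_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Read the current wptr from the DMA_RB_WPTR register and convert
 * it from bytes back to dwords (SI).
 */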
static uint64_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
}

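/**
 * si_dma_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the dword wptr, converted to bytes, to the DMA_RB_WPTR
 * register so the engine starts fetching the new commands (SI).
 */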
static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	WREG32(DMA_RB_WPTR + sdma_offsets[me],
	       (lower_32_bits(ring->wptr) << 2) & 0x3fffc);
}

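/**
 * si_dma_ring_emit_ib - schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve vmid from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an indirect buffer in the DMA ring (SI).
 */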
static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
				struct amdgpu_job *job,
				struct amdgpu_ib *ib,
				uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad as necessary with NOPs.
	 */
	while ((lower_32_bits(ring->wptr) & 7) != 5)
		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
	amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vmid, 0));
	amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}

/**
 * si_dma_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: fence sequence number
 * @flags: fence related flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (SI).
 */
static void si_dma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				   unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;

	/* write the fence */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
	amdgpu_ring_write(ring, seq);
	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
		amdgpu_ring_write(ring, addr & 0xfffffffc);
		amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}
	/* generate an interrupt */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0, 0));
}

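/**
 * si_dma_stop - stop the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the async dma engines by disabling their ring buffers (SI).
 */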
static void si_dma_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl;
	unsigned i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
		rb_cntl &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_buffer_funcs_status(adev, false);
	}
}

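/**
 * si_dma_start - set up the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Program the ring buffers, enable rptr writeback and IBs,
 * then enable and test each engine (SI).
 * Returns 0 for success, error for failure.
 */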
static int si_dma_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, dma_cntl, ib_cntl, rb_bufsz;
	int i, r;
	uint64_t rptr_addr;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;

		WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
		WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
		WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);

		rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);

		WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
		WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);

		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

		WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);

		/* enable DMA IBs */
		ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
#ifdef __BIG_ENDIAN
		ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
		WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);

		dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
		dma_cntl &= ~CTXEMPTY_INT_ENABLE;
		WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);

		ring->wptr = 0;
		WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);

		ring->sched.ready = true;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_buffer_funcs_status(adev, true);
	}

	return 0;
}

/**
 * si_dma_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (SI).
 * Returns 0 for success, error for failure.
 */
static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 4);
	if (r)
		goto error_free_wb;

	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

error_free_wb:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/**
 * si_dma_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring (SI).
 * Returns 0 on success, error on failure.
 */
static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	u32 tmp = 0;
	u64 gpu_addr;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256,
			  AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		goto err0;

	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
	ib.ptr[3] = 0xDEADBEEF;
	ib.length_dw = 4;
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/**
 * si_dma_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using DMA (SI).
 */
static void si_dma_vm_copy_pte(struct amdgpu_ib *ib,
			       uint64_t pe, uint64_t src,
			       unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
					      1, 0, 0, bytes);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
	ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
}

/**
 * si_dma_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using DMA (SI).
 */
static void si_dma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				uint64_t value, unsigned count,
				uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

/**
 * si_dma_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (SI).
 */
static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib,
				  uint64_t pe,
				  uint64_t addr, unsigned count,
				  uint32_t incr, uint64_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		if (flags & AMDGPU_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
		ib->ptr[ib->length_dw++] = upper_32_bits(flags);
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		pe += ndw * 4;
		addr += (ndw / 2) * incr;
		count -= ndw / 2;
	}
}

/**
 * si_dma_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a multiple of 8 dwords (SI).
 */
static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
}

/**
 * si_dma_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (SI).
 */
static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0) |
			  (1 << 27)); /* Poll memory */
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, (0xff << 16) | upper_32_bits(addr)); /* retry, addr_hi */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, seq); /* value */
	amdgpu_ring_write(ring, (3 << 28) | 0x20); /* func(equal) | poll interval */
}

/**
 * si_dma_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: address of the page directory
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (SI).
 */
static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
				      unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for invalidate to complete */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
	amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 0xff << 16); /* retry */
	amdgpu_ring_write(ring, 1 << vmid); /* mask */
	amdgpu_ring_write(ring, 0); /* value */
	amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}

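/**
 * si_dma_ring_emit_wreg - emit a register write from the ring
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to write
 * @val: value to write
 *
 * Emit an SRBM write packet so the DMA engine programs the
 * register on our behalf (SI).
 */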
static void si_dma_ring_emit_wreg(struct amdgpu_ring *ring,
				  uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	amdgpu_ring_write(ring, (0xf << 16) | reg);
	amdgpu_ring_write(ring, val);
}

static int si_dma_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->sdma.num_instances = 2;

	si_dma_set_ring_funcs(adev);
	si_dma_set_buffer_funcs(adev);
	si_dma_set_vm_pte_funcs(adev);
	si_dma_set_irq_funcs(adev);

	return 0;
}

static int si_dma_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* DMA0 trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* DMA1 trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 244,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = false;
		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_INSTANCE0 :
				     AMDGPU_SDMA_IRQ_INSTANCE1,
				     AMDGPU_RING_PRIO_DEFAULT);
		if (r)
			return r;
	}

	return r;
}

static int si_dma_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	return 0;
}

static int si_dma_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_start(adev);
}

static int si_dma_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_dma_stop(adev);

	return 0;
}

static int si_dma_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_hw_fini(adev);
}

static int si_dma_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_hw_init(adev);
}

static bool si_dma_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(SRBM_STATUS2);

	if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
		return false;

	return true;
}

static int si_dma_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (si_dma_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int si_dma_soft_reset(void *handle)
{
	DRM_INFO("si_dma_soft_reset --- not implemented !!!!!!!\n");
	return 0;
}

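/**
 * si_dma_set_trap_irq_state - enable/disable the trap interrupt
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source structure
 * @type: SDMA instance the state applies to
 * @state: requested interrupt state
 *
 * Toggle the TRAP_ENABLE bit in DMA_CNTL for the selected engine (SI).
 */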
static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *src,
				     unsigned type,
				     enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_INSTANCE0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
			sdma_cntl &= ~TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
			sdma_cntl |= TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_INSTANCE1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
			sdma_cntl &= ~TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
			sdma_cntl |= TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}

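/**
 * si_dma_process_trap_irq - process a trap interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source structure
 * @entry: interrupt vector entry
 *
 * Route the trap to the fence processing of the signalling engine:
 * src_id 224 is DMA0, anything else (244 is registered) is DMA1 (SI).
 */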
static int si_dma_process_trap_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	if (entry->src_id == 224)
		amdgpu_fence_process(&adev->sdma.instance[0].ring);
	else
		amdgpu_fence_process(&adev->sdma.instance[1].ring);
	return 0;
}

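/*
 * Clock gating control: when gating is requested and SDMA MGCG is
 * supported, clear MEM_POWER_OVERRIDE and write 0x00000100 to
 * DMA_CLK_CTRL; otherwise set the override and write 0xff000000,
 * which appears to disable the medium-grain clock gating.
 */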
static int si_dma_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	u32 orig, data, offset;
	int i;
	bool enable;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	enable = (state == AMD_CG_STATE_GATE);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			if (i == 0)
				offset = DMA0_REGISTER_OFFSET;
			else
				offset = DMA1_REGISTER_OFFSET;
			orig = data = RREG32(DMA_POWER_CNTL + offset);
			data &= ~MEM_POWER_OVERRIDE;
			if (data != orig)
				WREG32(DMA_POWER_CNTL + offset, data);
			WREG32(DMA_CLK_CTRL + offset, 0x00000100);
		}
	} else {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			if (i == 0)
				offset = DMA0_REGISTER_OFFSET;
			else
				offset = DMA1_REGISTER_OFFSET;
			orig = data = RREG32(DMA_POWER_CNTL + offset);
			data |= MEM_POWER_OVERRIDE;
			if (data != orig)
				WREG32(DMA_POWER_CNTL + offset, data);

			orig = data = RREG32(DMA_CLK_CTRL + offset);
			data = 0xff000000;
			if (data != orig)
				WREG32(DMA_CLK_CTRL + offset, data);
		}
	}

	return 0;
}

static int si_dma_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	u32 tmp;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	WREG32(DMA_PGFSM_WRITE,  0x00002000);
	WREG32(DMA_PGFSM_CONFIG, 0x100010ff);

	for (tmp = 0; tmp < 5; tmp++)
		WREG32(DMA_PGFSM_WRITE, 0);

	return 0;
}

static const struct amd_ip_funcs si_dma_ip_funcs = {
	.name = "si_dma",
	.early_init = si_dma_early_init,
	.late_init = NULL,
	.sw_init = si_dma_sw_init,
	.sw_fini = si_dma_sw_fini,
	.hw_init = si_dma_hw_init,
	.hw_fini = si_dma_hw_fini,
	.suspend = si_dma_suspend,
	.resume = si_dma_resume,
	.is_idle = si_dma_is_idle,
	.wait_for_idle = si_dma_wait_for_idle,
	.soft_reset = si_dma_soft_reset,
	.set_clockgating_state = si_dma_set_clockgating_state,
	.set_powergating_state = si_dma_set_powergating_state,
};

static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0),
	.support_64bit_ptrs = false,
	.get_rptr = si_dma_ring_get_rptr,
	.get_wptr = si_dma_ring_get_wptr,
	.set_wptr = si_dma_ring_set_wptr,
	.emit_frame_size =
		3 + 3 + /* hdp flush / invalidate */
		6 + /* si_dma_ring_emit_pipeline_sync */
		SI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* si_dma_ring_emit_vm_flush */
		9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
	.emit_ib = si_dma_ring_emit_ib,
	.emit_fence = si_dma_ring_emit_fence,
	.emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
	.emit_vm_flush = si_dma_ring_emit_vm_flush,
	.test_ring = si_dma_ring_test_ring,
	.test_ib = si_dma_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = si_dma_ring_pad_ib,
	.emit_wreg = si_dma_ring_emit_wreg,
};

static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->sdma.instance[i].ring.funcs = &si_dma_ring_funcs;
}

static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
	.set = si_dma_set_trap_irq_state,
	.process = si_dma_process_trap_irq,
};

static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
}

/**
 * si_dma_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 * @tmz: is this a secure operation
 *
 * Copy GPU buffers using the DMA engine (SI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
				    uint64_t src_offset,
				    uint64_t dst_offset,
				    uint32_t byte_count,
				    bool tmz)
{
	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
					      1, 0, 0, byte_count);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) & 0xff;
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset) & 0xff;
}

/**
 * si_dma_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (SI).
 */
static void si_dma_emit_fill_buffer(struct amdgpu_ib *ib,
				    uint32_t src_data,
				    uint64_t dst_offset,
				    uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_CONSTANT_FILL,
					      0, 0, 0, byte_count / 4);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) << 16;
}

static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = {
	.copy_max_bytes = 0xffff8,
	.copy_num_dw = 5,
	.emit_copy_buffer = si_dma_emit_copy_buffer,

	.fill_max_bytes = 0xffff8,
	.fill_num_dw = 4,
	.emit_fill_buffer = si_dma_emit_fill_buffer,
};

static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
{
	adev->mman.buffer_funcs = &si_dma_buffer_funcs;
	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}

static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
	.copy_pte_num_dw = 5,
	.copy_pte = si_dma_vm_copy_pte,

	.write_pte = si_dma_vm_write_pte,
	.set_pte_pde = si_dma_vm_set_pte_pde,
};

static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->vm_manager.vm_pte_scheds[i] =
			&adev->sdma.instance[i].ring.sched;
	}
	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}

const struct amdgpu_ip_block_version si_dma_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &si_dma_ip_funcs,
};