/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "sid.h"

u32 si_gpu_check_soft_reset(struct radeon_device *rdev);

/**
 * si_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = si_gpu_check_soft_reset(rdev);
	u32 mask;

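	/* each async DMA engine (DMA0/DMA1) has its own soft reset flag */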
	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		mask = RADEON_RESET_DMA;
	else
		mask = RADEON_RESET_DMA1;

	if (!(reset_mask & mask)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}

/**
 * si_dma_vm_copy_pages - update PTEs by copying them from the GART
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr where to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using the DMA (SI).
 */
void si_dma_vm_copy_pages(struct radeon_device *rdev,
			  struct radeon_ib *ib,
			  uint64_t pe, uint64_t src,
			  unsigned count)
{
	while (count) {
		unsigned bytes = count * 8;
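		/* a single copy packet moves at most 0xFFFFF bytes, so clamp
		 * to the largest 8-byte (one PTE) multiple below that limit
		 */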
		if (bytes > 0xFFFF8)
			bytes = 0xFFFF8;

		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
						      1, 0, 0, bytes);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = lower_32_bits(src);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;

		pe += bytes;
		src += bytes;
		count -= bytes / 8;
	}
}

/**
 * si_dma_vm_write_pages - update PTEs by writing them manually
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using the DMA (SI).
 */
void si_dma_vm_write_pages(struct radeon_device *rdev,
			   struct radeon_ib *ib,
			   uint64_t pe,
			   uint64_t addr, unsigned count,
			   uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
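		/* clamp the payload below the write packet limit and keep it
		 * even so no 64-bit PTE is split across two packets
		 */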
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		/* for non-physically contiguous pages (system) */
		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
		ib->ptr[ib->length_dw++] = pe;
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
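		/* emit one 64-bit PTE (two dwords) per iteration */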
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			if (flags & R600_PTE_SYSTEM) {
				value = radeon_vm_map_gart(rdev, addr);
			} else if (flags & R600_PTE_VALID) {
				value = addr;
			} else {
				value = 0;
			}
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = value;
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
		}
	}
}

/**
 * si_dma_vm_set_pages - update the page tables using the DMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using the DMA (SI).
 */
void si_dma_vm_set_pages(struct radeon_device *rdev,
			 struct radeon_ib *ib,
			 uint64_t pe,
			 uint64_t addr, unsigned count,
			 uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
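		/* the PTE/PDE packet counts its entries in dwords, two per
		 * 64-bit entry; clamp and keep it even so no entry is split
		 */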
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		if (flags & R600_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		pe += ndw * 4;
		addr += (ndw / 2) * incr;
		count -= ndw / 2;
	}
}

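/**
 * si_dma_vm_flush - flush the TLB for a given VM using the DMA ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @vm_id: vm instance to flush
 * @pd_addr: address of the page directory
 *
 * Update the page table base address for @vm_id, flush the HDP cache,
 * then request and wait for a TLB invalidation of that VM context (SI).
 */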
void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
		     unsigned vm_id, uint64_t pd_addr)
{
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	if (vm_id < 8) {
		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
	} else {
		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2));
	}
	radeon_ring_write(ring, pd_addr >> 12);

	/* flush hdp cache */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);

	/* bits 0-7 are the VM contexts0-7 */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
	radeon_ring_write(ring, 1 << vm_id);

	/* wait for invalidate to complete */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST);
	radeon_ring_write(ring, 0xff << 16); /* retry */
	radeon_ring_write(ring, 1 << vm_id); /* mask */
	radeon_ring_write(ring, 0); /* value */
	radeon_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}

/**
 * si_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU pages using the DMA engine (SI).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
				 uint64_t src_offset, uint64_t dst_offset,
				 unsigned num_gpu_pages,
				 struct dma_resv *resv)
{
	struct radeon_fence *fence;
	struct radeon_sync sync;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes;
	int i, num_loops;
	int r = 0;

	radeon_sync_create(&sync);

	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
	num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
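	/* each loop emits one 5-dword copy packet; the extra 11 dwords
	 * leave room for the ring sync and fence emission
	 */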
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_sync_resv(rdev, &sync, resv, false);
	radeon_sync_rings(rdev, &sync, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_bytes = size_in_bytes;
		if (cur_size_in_bytes > 0xFFFFF)
			cur_size_in_bytes = 0xFFFFF;
		size_in_bytes -= cur_size_in_bytes;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
		radeon_ring_write(ring, lower_32_bits(dst_offset));
		radeon_ring_write(ring, lower_32_bits(src_offset));
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
	}

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_sync_free(rdev, &sync, fence);

	return fence;
}