/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>

#include "radeon.h"

/*
 * Rings
 * Most engines on the GPU are fed via ring buffers. Ring
 * buffers are areas of GPU accessible memory that the host
 * writes commands into and the GPU reads commands out of.
 * There is a rptr (read pointer) that determines where the
 * GPU is currently reading, and a wptr (write pointer)
 * which determines where the host has written. When the
 * pointers are equal, the ring is idle. When the host
 * writes commands to the ring buffer, it increments the
 * wptr. The GPU then starts fetching commands and executes
 * them until the pointers are equal again.
 */
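/*
 * Illustrative example (numbers are made up, not tied to any asic): on a
 * 16 dw ring, rptr == wptr == 0 means idle. The host writes 4 dw of
 * commands and bumps wptr to 4; the GPU fetches dw 0-3, advancing rptr
 * until rptr == wptr == 4 and the ring is idle again. Both pointers wrap
 * modulo the ring size.
 */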
static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);

/**
 * radeon_ring_supports_scratch_reg - check if the ring supports
 * writing to scratch registers
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if a specific ring supports writing to scratch registers (all asics).
 * Returns true if the ring supports writing to scratch regs, false if not.
 */
bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
				      struct radeon_ring *ring)
{
	switch (ring->idx) {
	case RADEON_RING_TYPE_GFX_INDEX:
	case CAYMAN_RING_TYPE_CP1_INDEX:
	case CAYMAN_RING_TYPE_CP2_INDEX:
		return true;
	default:
		return false;
	}
}

/**
 * radeon_ring_free_size - update the free size
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Update the free dw slots in the ring buffer (all asics).
 */
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t rptr = radeon_ring_get_rptr(rdev, ring);

	/* This works because ring_size is a power of 2 */
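	/* e.g. on a 16 dw ring with rptr == 4 and wptr == 12 this yields
	 * (4 + 16 - 12) & 15 == 8 free dw (illustrative numbers only)
	 */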
	ring->ring_free_dw = rptr + (ring->ring_size / 4);
	ring->ring_free_dw -= ring->wptr;
	ring->ring_free_dw &= ring->ptr_mask;
	if (!ring->ring_free_dw) {
		/* this is an empty ring */
		ring->ring_free_dw = ring->ring_size / 4;
		/* update lockup info to avoid false positive */
		radeon_ring_lockup_update(rdev, ring);
	}
}

/**
 * radeon_ring_alloc - allocate space on the ring buffer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	/* make sure we aren't trying to allocate more space than there is on the ring */
	if (ndw > (ring->ring_size / 4))
		return -ENOMEM;
	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	radeon_ring_free_size(rdev, ring);
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
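	/* one dw stays reserved so a full ring (wptr just behind rptr) is
	 * never mistaken for an empty one (wptr == rptr)
	 */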
	while (ndw > (ring->ring_free_dw - 1)) {
		radeon_ring_free_size(rdev, ring);
		if (ndw < ring->ring_free_dw) {
			break;
		}
		r = radeon_fence_wait_next(rdev, ring->idx);
		if (r)
			return r;
	}
	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}

/**
 * radeon_ring_lock - lock the ring and allocate space on it
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Lock the ring and allocate @ndw dwords in the ring buffer
 * (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	mutex_lock(&rdev->ring_lock);
	r = radeon_ring_alloc(rdev, ring, ndw);
	if (r) {
		mutex_unlock(&rdev->ring_lock);
		return r;
	}
	return 0;
}

/**
 * radeon_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @hdp_flush: Whether or not to perform an HDP cache flush
 *
 * Update the wptr (write pointer) to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring,
			bool hdp_flush)
{
	/* If we are emitting the HDP flush via the ring buffer, we need to
	 * do it before padding.
	 */
	if (hdp_flush && rdev->asic->ring[ring->idx]->hdp_flush)
		rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);
	/* We pad to match fetch size */
	while (ring->wptr & ring->align_mask) {
		radeon_ring_write(ring, ring->nop);
	}
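	/* make sure all our CPU writes to the ring have landed before the
	 * new wptr is handed to the GPU (assumed intent of this barrier)
	 */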
	mb();
	/* If we are emitting the HDP flush via MMIO, we need to do it after
	 * all CPU writes to VRAM finished.
	 */
	if (hdp_flush && rdev->asic->mmio_hdp_flush)
		rdev->asic->mmio_hdp_flush(rdev);
	radeon_ring_set_wptr(rdev, ring);
}

/**
 * radeon_ring_unlock_commit - tell the GPU to execute the new
 * commands on the ring buffer and unlock it
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @hdp_flush: Whether or not to perform an HDP cache flush
 *
 * Call radeon_ring_commit() then unlock the ring (all asics).
 */
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring,
			       bool hdp_flush)
{
	radeon_ring_commit(rdev, ring, hdp_flush);
	mutex_unlock(&rdev->ring_lock);
}
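
/*
 * Typical caller pattern, as an illustrative sketch only (the PACKET0
 * register write is an assumed example; real packets depend on the
 * engine being fed):
 *
 *	r = radeon_ring_lock(rdev, ring, 64);
 *	if (r)
 *		return r;
 *	radeon_ring_write(ring, PACKET0(reg, 0));
 *	radeon_ring_write(ring, value);
 *	radeon_ring_unlock_commit(rdev, ring, false);
 *
 * If something fails after locking, radeon_ring_unlock_undo() below
 * restores the old wptr and drops the lock instead.
 */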

/**
 * radeon_ring_undo - reset the wptr
 *
 * @ring: radeon_ring structure holding ring information
 *
 * Reset the driver's copy of the wptr (all asics).
 */
void radeon_ring_undo(struct radeon_ring *ring)
{
	ring->wptr = ring->wptr_old;
}

/**
 * radeon_ring_unlock_undo - reset the wptr and unlock the ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Call radeon_ring_undo() then unlock the ring (all asics).
 */
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_undo(ring);
	mutex_unlock(&rdev->ring_lock);
}

/**
 * radeon_ring_lockup_update - update lockup variables
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Update the last rptr value and timestamp (all asics).
 */
void radeon_ring_lockup_update(struct radeon_device *rdev,
			       struct radeon_ring *ring)
{
	atomic_set(&ring->last_rptr, radeon_ring_get_rptr(rdev, ring));
	atomic64_set(&ring->last_activity, jiffies_64);
}

/**
 * radeon_ring_test_lockup() - check if a ring is locked up by comparing
 * the recorded rptr and activity information
 * @rdev: radeon device structure
 * @ring: radeon_ring structure holding ring information
 *
 * Returns true if the ring appears to be locked up, false if not.
 */
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t rptr = radeon_ring_get_rptr(rdev, ring);
	uint64_t last = atomic64_read(&ring->last_activity);
	uint64_t elapsed;

	if (rptr != atomic_read(&ring->last_rptr)) {
		/* ring is still working, no lockup */
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}

	elapsed = jiffies_to_msecs(jiffies_64 - last);
	if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
		dev_err(rdev->dev, "ring %d stalled for more than %llumsec\n",
			ring->idx, elapsed);
		return true;
	}
	/* give a chance to the GPU ... */
	return false;
}

/**
 * radeon_ring_backup - Back up the content of a ring
 *
 * @rdev: radeon_device pointer
 * @ring: the ring we want to back up
 * @data: placeholder for the returned commit data
 *
 * Saves all unprocessed commits from a ring, returns the number of dwords saved.
 */
unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
			    uint32_t **data)
{
	unsigned size, ptr, i;

	/* just in case lock the ring */
	mutex_lock(&rdev->ring_lock);
	*data = NULL;

	if (ring->ring_obj == NULL) {
		mutex_unlock(&rdev->ring_lock);
		return 0;
	}

	/* it doesn't make sense to save anything if all fences are signaled */
	if (!radeon_fence_count_emitted(rdev, ring->idx)) {
		mutex_unlock(&rdev->ring_lock);
		return 0;
	}

	/* calculate the number of dw on the ring */
	if (ring->rptr_save_reg)
		ptr = RREG32(ring->rptr_save_reg);
	else if (rdev->wb.enabled)
		ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
	else {
		/* no way to read back the next rptr */
		mutex_unlock(&rdev->ring_lock);
		return 0;
	}

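	/* dw count from the last fetched position forward to wptr, modulo
	 * the ring size (same power-of-two wrap trick as
	 * radeon_ring_free_size())
	 */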
	size = ring->wptr + (ring->ring_size / 4);
	size -= ptr;
	size &= ring->ptr_mask;
	if (size == 0) {
		mutex_unlock(&rdev->ring_lock);
		return 0;
	}

	/* and then save the content of the ring */
	*data = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
	if (!*data) {
		mutex_unlock(&rdev->ring_lock);
		return 0;
	}
	for (i = 0; i < size; ++i) {
		(*data)[i] = ring->ring[ptr++];
		ptr &= ring->ptr_mask;
	}

	mutex_unlock(&rdev->ring_lock);
	return size;
}

/**
 * radeon_ring_restore - append saved commands to the ring again
 *
 * @rdev: radeon_device pointer
 * @ring: ring to append commands to
 * @size: number of dwords we want to write
 * @data: saved commands
 *
 * Allocates space on the ring and restores the previously saved commands.
 */
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
			unsigned size, uint32_t *data)
{
	int i, r;

	if (!size || !data)
		return 0;

	/* restore the saved ring content */
	r = radeon_ring_lock(rdev, ring, size);
	if (r)
		return r;

	for (i = 0; i < size; ++i) {
		radeon_ring_write(ring, data[i]);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	kvfree(data);
	return 0;
}

/**
 * radeon_ring_init - init driver ring struct.
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ring_size: size of the ring
 * @rptr_offs: offset of the rptr writeback location in the WB buffer
 * @nop: nop packet for this ring
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
		     unsigned rptr_offs, u32 nop)
{
	int r;

	ring->ring_size = ring_size;
	ring->rptr_offs = rptr_offs;
	ring->nop = nop;
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL,
				     NULL, &ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
				  &ring->gpu_addr);
		if (r) {
			radeon_bo_unreserve(ring->ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(ring->ring_obj,
				   (void **)&ring->ring);
		radeon_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
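	/* ring_size is a power of two (see radeon_ring_free_size()), so
	 * the dw count minus one is a valid wrap-around mask
	 */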
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->ring_free_dw = ring->ring_size / 4;
	if (rdev->wb.enabled) {
		u32 index = RADEON_WB_RING0_NEXT_RPTR + (ring->idx * 4);
		ring->next_rptr_gpu_addr = rdev->wb.gpu_addr + index;
		ring->next_rptr_cpu_addr = &rdev->wb.wb[index/4];
	}
	if (radeon_debugfs_ring_init(rdev, ring)) {
		DRM_ERROR("Failed to register debugfs file for rings!\n");
	}
	radeon_ring_lockup_update(rdev, ring);
	return 0;
}

/**
 * radeon_ring_fini - tear down the driver ring struct.
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;
	struct radeon_bo *ring_obj;

	mutex_lock(&rdev->ring_lock);
	ring_obj = ring->ring_obj;
	ring->ready = false;
	ring->ring = NULL;
	ring->ring_obj = NULL;
	mutex_unlock(&rdev->ring_lock);

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ridx = *(int*)node->info_ent->data;
	struct radeon_ring *ring = &rdev->ring[ridx];

	uint32_t rptr, wptr, rptr_next;
	unsigned count, i, j;

	radeon_ring_free_size(rdev, ring);
	count = (ring->ring_size / 4) - ring->ring_free_dw;

	wptr = radeon_ring_get_wptr(rdev, ring);
	seq_printf(m, "wptr: 0x%08x [%5d]\n",
		   wptr, wptr);

	rptr = radeon_ring_get_rptr(rdev, ring);
	seq_printf(m, "rptr: 0x%08x [%5d]\n",
		   rptr, rptr);

	if (ring->rptr_save_reg) {
		rptr_next = RREG32(ring->rptr_save_reg);
		seq_printf(m, "rptr next(0x%04x): 0x%08x [%5d]\n",
			   ring->rptr_save_reg, rptr_next, rptr_next);
	} else
		rptr_next = ~0;

	seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
		   ring->wptr, ring->wptr);
	seq_printf(m, "last semaphore signal addr : 0x%016llx\n",
		   ring->last_semaphore_signal_addr);
	seq_printf(m, "last semaphore wait addr : 0x%016llx\n",
		   ring->last_semaphore_wait_addr);
	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);

	if (!ring->ring)
		return 0;

	/* print 32 dw before the current rptr, as it is often the last
	 * executed packets that are the root of the problem
	 */
	i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
	for (j = 0; j <= (count + 32); j++) {
		seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
		if (rptr == i)
			seq_puts(m, " *");
		if (rptr_next == i)
			seq_puts(m, " #");
		seq_puts(m, "\n");
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}

static int radeon_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
static int radeon_dma1_index = R600_RING_TYPE_DMA_INDEX;
static int radeon_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
static int r600_uvd_index = R600_RING_TYPE_UVD_INDEX;
static int si_vce1_index = TN_RING_TYPE_VCE1_INDEX;
static int si_vce2_index = TN_RING_TYPE_VCE2_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
	{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_gfx_index},
	{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_cp1_index},
	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_cp2_index},
	{"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_dma1_index},
	{"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_dma2_index},
	{"radeon_ring_uvd", radeon_debugfs_ring_info, 0, &r600_uvd_index},
	{"radeon_ring_vce1", radeon_debugfs_ring_info, 0, &si_vce1_index},
	{"radeon_ring_vce2", radeon_debugfs_ring_info, 0, &si_vce2_index},
};

#endif

static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
		struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
		int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
		unsigned r;

		if (&rdev->ring[ridx] != ring)
			continue;

		r = radeon_debugfs_add_files(rdev, info, 1);
		if (r)
			return r;
	}
#endif
	return 0;
}