1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Copyright 2008 Advanced Micro Devices, Inc.
3*4882a593Smuzhiyun * Copyright 2008 Red Hat Inc.
4*4882a593Smuzhiyun * Copyright 2009 Jerome Glisse.
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * Permission is hereby granted, free of charge, to any person obtaining a
7*4882a593Smuzhiyun * copy of this software and associated documentation files (the "Software"),
8*4882a593Smuzhiyun * to deal in the Software without restriction, including without limitation
9*4882a593Smuzhiyun * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10*4882a593Smuzhiyun * and/or sell copies of the Software, and to permit persons to whom the
11*4882a593Smuzhiyun * Software is furnished to do so, subject to the following conditions:
12*4882a593Smuzhiyun *
13*4882a593Smuzhiyun * The above copyright notice and this permission notice shall be included in
14*4882a593Smuzhiyun * all copies or substantial portions of the Software.
15*4882a593Smuzhiyun *
16*4882a593Smuzhiyun * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17*4882a593Smuzhiyun * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18*4882a593Smuzhiyun * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19*4882a593Smuzhiyun * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20*4882a593Smuzhiyun * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21*4882a593Smuzhiyun * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22*4882a593Smuzhiyun * OTHER DEALINGS IN THE SOFTWARE.
23*4882a593Smuzhiyun *
24*4882a593Smuzhiyun * Authors: Dave Airlie
25*4882a593Smuzhiyun * Alex Deucher
26*4882a593Smuzhiyun * Jerome Glisse
27*4882a593Smuzhiyun */
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun #include <linux/seq_file.h>
30*4882a593Smuzhiyun #include <linux/slab.h>
31*4882a593Smuzhiyun
32*4882a593Smuzhiyun #include <drm/drm_debugfs.h>
33*4882a593Smuzhiyun #include <drm/drm_device.h>
34*4882a593Smuzhiyun #include <drm/drm_file.h>
35*4882a593Smuzhiyun
36*4882a593Smuzhiyun #include "atom.h"
37*4882a593Smuzhiyun #include "radeon.h"
38*4882a593Smuzhiyun #include "radeon_asic.h"
39*4882a593Smuzhiyun #include "rv515_reg_safe.h"
40*4882a593Smuzhiyun #include "rv515d.h"
41*4882a593Smuzhiyun
/* This file gathers functions specific to the rv515 ASIC. */
43*4882a593Smuzhiyun static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
44*4882a593Smuzhiyun static int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
45*4882a593Smuzhiyun static void rv515_gpu_init(struct radeon_device *rdev);
46*4882a593Smuzhiyun int rv515_mc_wait_for_idle(struct radeon_device *rdev);
47*4882a593Smuzhiyun
/* Register offset delta from CRTC1 to CRTC2; index by crtc id to address
 * either controller's AVIVO_D1* register block. */
static const u32 crtc_offsets[2] =
{
	0,
	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};
53*4882a593Smuzhiyun
/**
 * rv515_debugfs - register the rv515 debugfs files (RBBM, pipes, GA).
 * @rdev: radeon device
 *
 * Registration failures are only logged; debugfs files are optional and
 * do not affect device bring-up.
 */
void rv515_debugfs(struct radeon_device *rdev)
{
	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}
	if (rv515_debugfs_pipes_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for pipes !\n");
	}
	if (rv515_debugfs_ga_info_init(rdev)) {
		/* original message was a copy/paste of the "pipes" error;
		 * report the GA file that actually failed */
		DRM_ERROR("Failed to register debugfs file for GA !\n");
	}
}
66*4882a593Smuzhiyun
/**
 * rv515_ring_start - emit the initial 3D engine setup on the CP ring.
 * @rdev: radeon device
 * @ring: GFX ring to write the setup packets to
 *
 * Programs engine sync, pipe routing, cache flushes and default
 * multisample positions.  Returns silently if the ring cannot be locked.
 */
void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;

	/* reserve enough dwords for all the packets emitted below */
	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		return;
	}
	/* synchronize the 2D and 3D engines with each other and the GUI */
	radeon_ring_write(ring, PACKET0(ISYNC_CNTL, 0));
	radeon_ring_write(ring,
			  ISYNC_ANY2D_IDLE3D |
			  ISYNC_ANY3D_IDLE2D |
			  ISYNC_WAIT_IDLEGUI |
			  ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
	radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
	/* let the hardware auto-configure the destination pipe routing */
	radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
	radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
	radeon_ring_write(ring, PACKET0(GB_SELECT, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(GB_ENABLE, 0));
	radeon_ring_write(ring, 0);
	/* enable register writes to every present GB pipe */
	radeon_ring_write(ring, PACKET0(R500_SU_REG_DEST, 0));
	radeon_ring_write(ring, (1 << rdev->num_gb_pipes) - 1);
	radeon_ring_write(ring, PACKET0(VAP_INDEX_OFFSET, 0));
	radeon_ring_write(ring, 0);
	/* flush/free the destination and Z caches, then wait idle */
	radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
	radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
	radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
	radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
	radeon_ring_write(ring, PACKET0(GB_AA_CONFIG, 0));
	radeon_ring_write(ring, 0);
	/* flush the caches again after touching the AA config */
	radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
	radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
	/* default multisample positions: every coordinate set to 6 */
	radeon_ring_write(ring, PACKET0(GB_MSPOS0, 0));
	radeon_ring_write(ring,
			  ((6 << MS_X0_SHIFT) |
			   (6 << MS_Y0_SHIFT) |
			   (6 << MS_X1_SHIFT) |
			   (6 << MS_Y1_SHIFT) |
			   (6 << MS_X2_SHIFT) |
			   (6 << MS_Y2_SHIFT) |
			   (6 << MSBD0_Y_SHIFT) |
			   (6 << MSBD0_X_SHIFT)));
	radeon_ring_write(ring, PACKET0(GB_MSPOS1, 0));
	radeon_ring_write(ring,
			  ((6 << MS_X3_SHIFT) |
			   (6 << MS_Y3_SHIFT) |
			   (6 << MS_X4_SHIFT) |
			   (6 << MS_Y4_SHIFT) |
			   (6 << MS_X5_SHIFT) |
			   (6 << MS_Y5_SHIFT) |
			   (6 << MSBD1_SHIFT)));
	radeon_ring_write(ring, PACKET0(GA_ENHANCE, 0));
	radeon_ring_write(ring, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
	radeon_ring_write(ring, PACKET0(GA_POLY_MODE, 0));
	radeon_ring_write(ring, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
	radeon_ring_write(ring, PACKET0(GA_ROUND_MODE, 0));
	radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
	radeon_ring_write(ring, PACKET0(0x20C8, 0));
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring, false);
}
134*4882a593Smuzhiyun
rv515_mc_wait_for_idle(struct radeon_device * rdev)135*4882a593Smuzhiyun int rv515_mc_wait_for_idle(struct radeon_device *rdev)
136*4882a593Smuzhiyun {
137*4882a593Smuzhiyun unsigned i;
138*4882a593Smuzhiyun uint32_t tmp;
139*4882a593Smuzhiyun
140*4882a593Smuzhiyun for (i = 0; i < rdev->usec_timeout; i++) {
141*4882a593Smuzhiyun /* read MC_STATUS */
142*4882a593Smuzhiyun tmp = RREG32_MC(MC_STATUS);
143*4882a593Smuzhiyun if (tmp & MC_STATUS_IDLE) {
144*4882a593Smuzhiyun return 0;
145*4882a593Smuzhiyun }
146*4882a593Smuzhiyun udelay(1);
147*4882a593Smuzhiyun }
148*4882a593Smuzhiyun return -1;
149*4882a593Smuzhiyun }
150*4882a593Smuzhiyun
/**
 * rv515_vga_render_disable - stop the VGA engine from rendering.
 * @rdev: radeon device
 *
 * Clears the VGA_VSTATUS_CNTL field of VGA_RENDER_CONTROL so VGA
 * emulation no longer drives the display pipeline.
 */
void rv515_vga_render_disable(struct radeon_device *rdev)
{
	WREG32(R_000300_VGA_RENDER_CONTROL,
	       RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
}
156*4882a593Smuzhiyun
/**
 * rv515_gpu_init - initialize the GPU render pipe configuration.
 * @rdev: radeon device
 *
 * Disables VGA rendering, programs the render pipes and mirrors the
 * resulting pipe routing into PLL-indexed register 0x000D.  Idle-wait
 * failures are only warned about.
 */
static void rv515_gpu_init(struct radeon_device *rdev)
{
	unsigned pipe_select_current, gb_pipe_select, tmp;

	if (r100_gui_wait_for_idle(rdev)) {
		pr_warn("Failed to wait GUI idle while resetting GPU. Bad things might happen.\n");
	}
	rv515_vga_render_disable(rdev);
	r420_pipes_init(rdev);
	gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
	tmp = RREG32(R300_DST_PIPE_CONFIG);
	/* bits 3:2 of DST_PIPE_CONFIG hold the currently selected pipe */
	pipe_select_current = (tmp >> 2) & 3;
	/* combine the current pipe with the GB pipe-select field (bits 11:8) */
	tmp = (1 << pipe_select_current) |
	      (((gb_pipe_select >> 8) & 0xF) << 4);
	WREG32_PLL(0x000D, tmp);
	if (r100_gui_wait_for_idle(rdev)) {
		pr_warn("Failed to wait GUI idle while resetting GPU. Bad things might happen.\n");
	}
	if (rv515_mc_wait_for_idle(rdev)) {
		pr_warn("Failed to wait MC idle while programming pipes. Bad things might happen.\n");
	}
}
179*4882a593Smuzhiyun
rv515_vram_get_type(struct radeon_device * rdev)180*4882a593Smuzhiyun static void rv515_vram_get_type(struct radeon_device *rdev)
181*4882a593Smuzhiyun {
182*4882a593Smuzhiyun uint32_t tmp;
183*4882a593Smuzhiyun
184*4882a593Smuzhiyun rdev->mc.vram_width = 128;
185*4882a593Smuzhiyun rdev->mc.vram_is_ddr = true;
186*4882a593Smuzhiyun tmp = RREG32_MC(RV515_MC_CNTL) & MEM_NUM_CHANNELS_MASK;
187*4882a593Smuzhiyun switch (tmp) {
188*4882a593Smuzhiyun case 0:
189*4882a593Smuzhiyun rdev->mc.vram_width = 64;
190*4882a593Smuzhiyun break;
191*4882a593Smuzhiyun case 1:
192*4882a593Smuzhiyun rdev->mc.vram_width = 128;
193*4882a593Smuzhiyun break;
194*4882a593Smuzhiyun default:
195*4882a593Smuzhiyun rdev->mc.vram_width = 128;
196*4882a593Smuzhiyun break;
197*4882a593Smuzhiyun }
198*4882a593Smuzhiyun }
199*4882a593Smuzhiyun
/**
 * rv515_mc_init - probe VRAM and place VRAM/GTT in the MC address space.
 * @rdev: radeon device
 */
static void rv515_mc_init(struct radeon_device *rdev)
{

	rv515_vram_get_type(rdev);
	r100_vram_init_sizes(rdev);
	radeon_vram_location(rdev, &rdev->mc, 0);
	rdev->mc.gtt_base_align = 0;
	/* on AGP the aperture location comes from the bridge instead */
	if (!(rdev->flags & RADEON_IS_AGP))
		radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}
211*4882a593Smuzhiyun
/**
 * rv515_mc_rreg - read an indirect memory-controller register.
 * @rdev: radeon device
 * @reg: MC register offset
 *
 * MC registers are reached through the shared MC_IND_INDEX/MC_IND_DATA
 * window, so the access is serialized with mc_idx_lock.
 *
 * Returns the register value.
 */
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	unsigned long flags;
	uint32_t r;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
	r = RREG32(MC_IND_DATA);
	/* restore the index so a stale selection can't leak out */
	WREG32(MC_IND_INDEX, 0);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);

	return r;
}
225*4882a593Smuzhiyun
/**
 * rv515_mc_wreg - write an indirect memory-controller register.
 * @rdev: radeon device
 * @reg: MC register offset
 * @v: value to write
 *
 * Uses the same MC_IND_INDEX/MC_IND_DATA window as rv515_mc_rreg();
 * 0xff0000 (vs 0x7f0000 for reads) additionally sets the write-enable
 * bit in the index register.
 */
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
	WREG32(MC_IND_DATA, (v));
	WREG32(MC_IND_INDEX, 0);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
236*4882a593Smuzhiyun
237*4882a593Smuzhiyun #if defined(CONFIG_DEBUG_FS)
/* debugfs show callback: dump the pipe configuration registers */
static int rv515_debugfs_pipes_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32(GB_PIPE_SELECT);
	seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
	tmp = RREG32(SU_REG_DEST);
	seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp);
	tmp = RREG32(GB_TILE_CONFIG);
	seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
	tmp = RREG32(DST_PIPE_CONFIG);
	seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
	return 0;
}
255*4882a593Smuzhiyun
/* debugfs show callback: dump VAP status and GA idle state.
 * NOTE(review): this resets the ASIC between the two reads — presumably
 * so GA_IDLE reflects a quiescent engine; reading this file is therefore
 * destructive to in-flight GPU work. */
static int rv515_debugfs_ga_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32(0x2140);	/* VAP_CNTL_STATUS */
	seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
	radeon_asic_reset(rdev);
	tmp = RREG32(0x425C);	/* GA_IDLE */
	seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
	return 0;
}
270*4882a593Smuzhiyun
/* debugfs file tables, registered by the *_init helpers below */
static struct drm_info_list rv515_pipes_info_list[] = {
	{"rv515_pipes_info", rv515_debugfs_pipes_info, 0, NULL},
};

static struct drm_info_list rv515_ga_info_list[] = {
	{"rv515_ga_info", rv515_debugfs_ga_info, 0, NULL},
};
278*4882a593Smuzhiyun #endif
279*4882a593Smuzhiyun
/* register the pipes debugfs file; no-op success without CONFIG_DEBUG_FS */
static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rv515_pipes_info_list, 1);
#else
	return 0;
#endif
}
288*4882a593Smuzhiyun
/* register the GA debugfs file; no-op success without CONFIG_DEBUG_FS */
static int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rv515_ga_info_list, 1);
#else
	return 0;
#endif
}
297*4882a593Smuzhiyun
/**
 * rv515_mc_stop - stop all memory-controller clients before reprogramming.
 * @rdev: radeon device
 * @save: scratch area for display state restored by rv515_mc_resume()
 *
 * Disables VGA rendering, shuts down every enabled CRTC (waiting a
 * frame for the change to take effect), blacks out the MC on R600+ and
 * locks the double-buffered display registers.  rv515_mc_resume()
 * undoes everything using @save.
 */
void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
{
	u32 crtc_enabled, tmp, frame_count, blackout;
	int i, j;

	/* remember VGA state so rv515_mc_resume() can restore it */
	save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);

	/* disable VGA render */
	WREG32(R_000300_VGA_RENDER_CONTROL, 0);
	/* blank the display controllers */
	for (i = 0; i < rdev->num_crtc; i++) {
		crtc_enabled = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN;
		if (crtc_enabled) {
			save->crtc_enabled[i] = true;
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
			if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) {
				/* stop the CRTC issuing memory read requests,
				 * under the CRTC update lock */
				radeon_wait_for_vblank(rdev, i);
				WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
				WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
			/* wait for the next frame */
			frame_count = radeon_get_vblank_counter(rdev, i);
			for (j = 0; j < rdev->usec_timeout; j++) {
				if (radeon_get_vblank_counter(rdev, i) != frame_count)
					break;
				udelay(1);
			}

			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
			WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
			tmp &= ~AVIVO_CRTC_EN;
			WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
			WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			/* CRTC fully disabled: mark it off so resume leaves it so */
			save->crtc_enabled[i] = false;
			/* ***** */
		} else {
			save->crtc_enabled[i] = false;
		}
	}

	radeon_mc_wait_for_idle(rdev);

	/* on R600+ also black out the MC and block CPU framebuffer access */
	if (rdev->family >= CHIP_R600) {
		if (rdev->family >= CHIP_RV770)
			blackout = RREG32(R700_MC_CITF_CNTL);
		else
			blackout = RREG32(R600_CITF_CNTL);
		if ((blackout & R600_BLACKOUT_MASK) != R600_BLACKOUT_MASK) {
			/* Block CPU access */
			WREG32(R600_BIF_FB_EN, 0);
			/* blackout the MC */
			blackout |= R600_BLACKOUT_MASK;
			if (rdev->family >= CHIP_RV770)
				WREG32(R700_MC_CITF_CNTL, blackout);
			else
				WREG32(R600_CITF_CNTL, blackout);
		}
	}
	/* wait for the MC to settle */
	udelay(100);

	/* lock double buffered regs */
	for (i = 0; i < rdev->num_crtc; i++) {
		if (save->crtc_enabled[i]) {
			tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
			if (!(tmp & AVIVO_D1GRPH_UPDATE_LOCK)) {
				tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
				WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (!(tmp & 1)) {
				tmp |= 1;
				WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
		}
	}
}
379*4882a593Smuzhiyun
/**
 * rv515_mc_resume - restart the display after MC reprogramming.
 * @rdev: radeon device
 * @save: display state captured by rv515_mc_stop()
 *
 * Reprograms the CRTC and VGA surface base addresses to the (possibly
 * relocated) VRAM start, unlocks the double-buffered registers, lifts
 * the R600+ MC blackout and re-enables memory requests for every CRTC
 * marked enabled in @save.
 */
void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
{
	u32 tmp, frame_count;
	int i, j;

	/* update crtc base addresses */
	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->family >= CHIP_RV770) {
			/* RV770+ have 64-bit surface addresses; program the
			 * high half for the per-crtc registers */
			if (i == 0) {
				WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
				       upper_32_bits(rdev->mc.vram_start));
				WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
				       upper_32_bits(rdev->mc.vram_start));
			} else {
				WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
				       upper_32_bits(rdev->mc.vram_start));
				WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
				       upper_32_bits(rdev->mc.vram_start));
			}
		}
		WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)rdev->mc.vram_start);
		WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)rdev->mc.vram_start);
	}
	WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);

	/* unlock regs and wait for update */
	for (i = 0; i < rdev->num_crtc; i++) {
		if (save->crtc_enabled[i]) {
			/* force MASTER_UPDATE_MODE to 3 so updates latch */
			tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
			if ((tmp & 0x7) != 3) {
				tmp &= ~0x7;
				tmp |= 0x3;
				WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
			if (tmp & AVIVO_D1GRPH_UPDATE_LOCK) {
				tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
				WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (tmp & 1) {
				tmp &= ~1;
				WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
			/* wait for the surface update to complete */
			for (j = 0; j < rdev->usec_timeout; j++) {
				tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
				if ((tmp & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING) == 0)
					break;
				udelay(1);
			}
		}
	}

	if (rdev->family >= CHIP_R600) {
		/* unblackout the MC */
		if (rdev->family >= CHIP_RV770)
			tmp = RREG32(R700_MC_CITF_CNTL);
		else
			tmp = RREG32(R600_CITF_CNTL);
		tmp &= ~R600_BLACKOUT_MASK;
		if (rdev->family >= CHIP_RV770)
			WREG32(R700_MC_CITF_CNTL, tmp);
		else
			WREG32(R600_CITF_CNTL, tmp);
		/* allow CPU access */
		WREG32(R600_BIF_FB_EN, R600_FB_READ_EN | R600_FB_WRITE_EN);
	}

	/* re-enable display read requests and wait a frame per CRTC */
	for (i = 0; i < rdev->num_crtc; i++) {
		if (save->crtc_enabled[i]) {
			tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
			tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
			/* wait for the next frame */
			frame_count = radeon_get_vblank_counter(rdev, i);
			for (j = 0; j < rdev->usec_timeout; j++) {
				if (radeon_get_vblank_counter(rdev, i) != frame_count)
					break;
				udelay(1);
			}
		}
	}
	/* Unlock vga access */
	WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
}
469*4882a593Smuzhiyun
/**
 * rv515_mc_program - program the memory-controller address ranges.
 * @rdev: radeon device
 *
 * Stops all MC clients, programs the FB/AGP apertures into the MC and
 * HDP, then resumes the display.  The MC aperture is a 32-bit limited
 * address space programmed in 64KB units (hence the >> 16 shifts).
 */
static void rv515_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;

	/* Stops all mc clients */
	rv515_mc_stop(rdev, &save);

	/* Wait for mc idle */
	if (rv515_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
	/* Write VRAM size in case we are limiting it */
	WREG32(R_0000F8_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
	/* Program MC, should be a 32bits limited address space */
	WREG32_MC(R_000001_MC_FB_LOCATION,
		  S_000001_MC_FB_START(rdev->mc.vram_start >> 16) |
		  S_000001_MC_FB_TOP(rdev->mc.vram_end >> 16));
	WREG32(R_000134_HDP_FB_LOCATION,
	       S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32_MC(R_000002_MC_AGP_LOCATION,
			  S_000002_MC_AGP_START(rdev->mc.gtt_start >> 16) |
			  S_000002_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
		WREG32_MC(R_000003_MC_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
		WREG32_MC(R_000004_MC_AGP_BASE_2,
			  S_000004_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base)));
	} else {
		/* no AGP: park the aperture out of the way */
		WREG32_MC(R_000002_MC_AGP_LOCATION, 0xFFFFFFFF);
		WREG32_MC(R_000003_MC_AGP_BASE, 0);
		WREG32_MC(R_000004_MC_AGP_BASE_2, 0);
	}

	rv515_mc_resume(rdev, &save);
}
503*4882a593Smuzhiyun
/**
 * rv515_clock_startup - enable clocks needed for engine bring-up.
 * @rdev: radeon device
 *
 * Optionally enables dynamic clock gating (radeon_dynclks module
 * parameter), then forces the CP, E2 and IDCT blocks on.
 */
void rv515_clock_startup(struct radeon_device *rdev)
{
	if (radeon_dynclks != -1 && radeon_dynclks)
		radeon_atom_set_clock_gating(rdev, 1);
	/* We need to force on some of the block */
	WREG32_PLL(R_00000F_CP_DYN_CNTL,
		   RREG32_PLL(R_00000F_CP_DYN_CNTL) | S_00000F_CP_FORCEON(1));
	WREG32_PLL(R_000011_E2_DYN_CNTL,
		   RREG32_PLL(R_000011_E2_DYN_CNTL) | S_000011_E2_FORCEON(1));
	WREG32_PLL(R_000013_IDCT_DYN_CNTL,
		   RREG32_PLL(R_000013_IDCT_DYN_CNTL) | S_000013_IDCT_FORCEON(1));
}
516*4882a593Smuzhiyun
/**
 * rv515_startup - bring the GPU to an operational state.
 * @rdev: radeon device
 *
 * Programs the MC, clocks, pipes, GART, writeback, fences, IRQs, the
 * CP ring and the IB pool — in that order.  Called from both init and
 * resume paths.
 *
 * Returns 0 on success, negative error code on the first failure.
 */
static int rv515_startup(struct radeon_device *rdev)
{
	int r;

	rv515_mc_program(rdev);
	/* Resume clock */
	rv515_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	rv515_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_enable(rdev);
		if (r)
			return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	rs600_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	return 0;
}
569*4882a593Smuzhiyun
/**
 * rv515_resume - resume the GPU after suspend.
 * @rdev: radeon device
 *
 * Disables GART, resets the chip, re-posts it via the ATOM BIOS and
 * restarts all engines through rv515_startup().
 *
 * Returns 0 on success, negative error code on failure (in which case
 * accel_working is left false).
 */
int rv515_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure GART is not working */
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	/* Resume clock before doing reset */
	rv515_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Resume clock after posting */
	rv515_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = rv515_startup(rdev);
	if (r) {
		rdev->accel_working = false;
	}
	return r;
}
599*4882a593Smuzhiyun
/**
 * rv515_suspend - quiesce the GPU before system suspend.
 * @rdev: radeon device
 *
 * Stops power management, the CP, writeback and interrupts, then
 * disables the PCIe GART.  Always returns 0.
 */
int rv515_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	rs600_irq_disable(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	return 0;
}
610*4882a593Smuzhiyun
/**
 * rv515_set_safe_registers - install the rv515 CS register whitelist.
 * @rdev: radeon device
 *
 * Points the command-stream checker at the generated rv515 safe-register
 * bitmap (rv515_reg_safe.h).
 */
void rv515_set_safe_registers(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = rv515_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rv515_reg_safe_bm);
}
616*4882a593Smuzhiyun
/**
 * rv515_fini - full driver teardown for RV515-class chips
 * @rdev: radeon device
 *
 * Releases every subsystem set up by rv515_init(), in a fixed order
 * (acceleration first, memory/bo management and BIOS state last).
 * Do not reorder these calls.
 */
void rv515_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	rv370_pcie_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	/* Free the BIOS copy and clear the pointer so nothing can use it stale */
	kfree(rdev->bios);
	rdev->bios = NULL;
}
633*4882a593Smuzhiyun
/**
 * rv515_init - one-time driver initialization for RV515-class chips
 * @rdev: radeon device
 *
 * Brings the ASIC up step by step: scratch/surface registers, BIOS
 * fetch and ATOM init, pre-POST reset, clocks, AGP, memory controller,
 * fence driver, buffer manager, GART and power management, then starts
 * acceleration via rv515_startup().
 *
 * Returns 0 on success or when only acceleration failed (the driver
 * still loads with accel disabled), negative errno on a fatal error.
 */
int rv515_init(struct radeon_device *rdev)
{
	int r;

	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* restore some register to sane defaults */
	r100_restore_sanity(rdev);
	/* BIOS - fatal only on AVIVO parts; others may continue without it */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* RV515 is ATOM-BIOS only; a combios here is an error */
	if (rdev->is_atom_bios) {
		r = radeon_atombios_init(rdev);
		if (r)
			return r;
	} else {
		dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
		return -EINVAL;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize AGP; failure is non-fatal, fall back to non-AGP mode */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			radeon_agp_disable(rdev);
		}
	}
	/* initialize memory controller */
	rv515_mc_init(rdev);
	rv515_debugfs(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	r = rv370_pcie_gart_init(rdev);
	if (r)
		return r;
	rv515_set_safe_registers(rdev);

	/* Initialize power management */
	radeon_pm_init(rdev);

	rdev->accel_working = true;
	r = rv515_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, so stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rv370_pcie_gart_fini(rdev);
		radeon_agp_fini(rdev);
		rdev->accel_working = false;
	}
	/* Accel failure is deliberately not fatal: modesetting still works */
	return 0;
}
711*4882a593Smuzhiyun
atom_rv515_force_tv_scaler(struct radeon_device * rdev,struct radeon_crtc * crtc)712*4882a593Smuzhiyun void atom_rv515_force_tv_scaler(struct radeon_device *rdev, struct radeon_crtc *crtc)
713*4882a593Smuzhiyun {
714*4882a593Smuzhiyun int index_reg = 0x6578 + crtc->crtc_offset;
715*4882a593Smuzhiyun int data_reg = 0x657c + crtc->crtc_offset;
716*4882a593Smuzhiyun
717*4882a593Smuzhiyun WREG32(0x659C + crtc->crtc_offset, 0x0);
718*4882a593Smuzhiyun WREG32(0x6594 + crtc->crtc_offset, 0x705);
719*4882a593Smuzhiyun WREG32(0x65A4 + crtc->crtc_offset, 0x10001);
720*4882a593Smuzhiyun WREG32(0x65D8 + crtc->crtc_offset, 0x0);
721*4882a593Smuzhiyun WREG32(0x65B0 + crtc->crtc_offset, 0x0);
722*4882a593Smuzhiyun WREG32(0x65C0 + crtc->crtc_offset, 0x0);
723*4882a593Smuzhiyun WREG32(0x65D4 + crtc->crtc_offset, 0x0);
724*4882a593Smuzhiyun WREG32(index_reg, 0x0);
725*4882a593Smuzhiyun WREG32(data_reg, 0x841880A8);
726*4882a593Smuzhiyun WREG32(index_reg, 0x1);
727*4882a593Smuzhiyun WREG32(data_reg, 0x84208680);
728*4882a593Smuzhiyun WREG32(index_reg, 0x2);
729*4882a593Smuzhiyun WREG32(data_reg, 0xBFF880B0);
730*4882a593Smuzhiyun WREG32(index_reg, 0x100);
731*4882a593Smuzhiyun WREG32(data_reg, 0x83D88088);
732*4882a593Smuzhiyun WREG32(index_reg, 0x101);
733*4882a593Smuzhiyun WREG32(data_reg, 0x84608680);
734*4882a593Smuzhiyun WREG32(index_reg, 0x102);
735*4882a593Smuzhiyun WREG32(data_reg, 0xBFF080D0);
736*4882a593Smuzhiyun WREG32(index_reg, 0x200);
737*4882a593Smuzhiyun WREG32(data_reg, 0x83988068);
738*4882a593Smuzhiyun WREG32(index_reg, 0x201);
739*4882a593Smuzhiyun WREG32(data_reg, 0x84A08680);
740*4882a593Smuzhiyun WREG32(index_reg, 0x202);
741*4882a593Smuzhiyun WREG32(data_reg, 0xBFF080F8);
742*4882a593Smuzhiyun WREG32(index_reg, 0x300);
743*4882a593Smuzhiyun WREG32(data_reg, 0x83588058);
744*4882a593Smuzhiyun WREG32(index_reg, 0x301);
745*4882a593Smuzhiyun WREG32(data_reg, 0x84E08660);
746*4882a593Smuzhiyun WREG32(index_reg, 0x302);
747*4882a593Smuzhiyun WREG32(data_reg, 0xBFF88120);
748*4882a593Smuzhiyun WREG32(index_reg, 0x400);
749*4882a593Smuzhiyun WREG32(data_reg, 0x83188040);
750*4882a593Smuzhiyun WREG32(index_reg, 0x401);
751*4882a593Smuzhiyun WREG32(data_reg, 0x85008660);
752*4882a593Smuzhiyun WREG32(index_reg, 0x402);
753*4882a593Smuzhiyun WREG32(data_reg, 0xBFF88150);
754*4882a593Smuzhiyun WREG32(index_reg, 0x500);
755*4882a593Smuzhiyun WREG32(data_reg, 0x82D88030);
756*4882a593Smuzhiyun WREG32(index_reg, 0x501);
757*4882a593Smuzhiyun WREG32(data_reg, 0x85408640);
758*4882a593Smuzhiyun WREG32(index_reg, 0x502);
759*4882a593Smuzhiyun WREG32(data_reg, 0xBFF88180);
760*4882a593Smuzhiyun WREG32(index_reg, 0x600);
761*4882a593Smuzhiyun WREG32(data_reg, 0x82A08018);
762*4882a593Smuzhiyun WREG32(index_reg, 0x601);
763*4882a593Smuzhiyun WREG32(data_reg, 0x85808620);
764*4882a593Smuzhiyun WREG32(index_reg, 0x602);
765*4882a593Smuzhiyun WREG32(data_reg, 0xBFF081B8);
766*4882a593Smuzhiyun WREG32(index_reg, 0x700);
767*4882a593Smuzhiyun WREG32(data_reg, 0x82608010);
768*4882a593Smuzhiyun WREG32(index_reg, 0x701);
769*4882a593Smuzhiyun WREG32(data_reg, 0x85A08600);
770*4882a593Smuzhiyun WREG32(index_reg, 0x702);
771*4882a593Smuzhiyun WREG32(data_reg, 0x800081F0);
772*4882a593Smuzhiyun WREG32(index_reg, 0x800);
773*4882a593Smuzhiyun WREG32(data_reg, 0x8228BFF8);
774*4882a593Smuzhiyun WREG32(index_reg, 0x801);
775*4882a593Smuzhiyun WREG32(data_reg, 0x85E085E0);
776*4882a593Smuzhiyun WREG32(index_reg, 0x802);
777*4882a593Smuzhiyun WREG32(data_reg, 0xBFF88228);
778*4882a593Smuzhiyun WREG32(index_reg, 0x10000);
779*4882a593Smuzhiyun WREG32(data_reg, 0x82A8BF00);
780*4882a593Smuzhiyun WREG32(index_reg, 0x10001);
781*4882a593Smuzhiyun WREG32(data_reg, 0x82A08CC0);
782*4882a593Smuzhiyun WREG32(index_reg, 0x10002);
783*4882a593Smuzhiyun WREG32(data_reg, 0x8008BEF8);
784*4882a593Smuzhiyun WREG32(index_reg, 0x10100);
785*4882a593Smuzhiyun WREG32(data_reg, 0x81F0BF28);
786*4882a593Smuzhiyun WREG32(index_reg, 0x10101);
787*4882a593Smuzhiyun WREG32(data_reg, 0x83608CA0);
788*4882a593Smuzhiyun WREG32(index_reg, 0x10102);
789*4882a593Smuzhiyun WREG32(data_reg, 0x8018BED0);
790*4882a593Smuzhiyun WREG32(index_reg, 0x10200);
791*4882a593Smuzhiyun WREG32(data_reg, 0x8148BF38);
792*4882a593Smuzhiyun WREG32(index_reg, 0x10201);
793*4882a593Smuzhiyun WREG32(data_reg, 0x84408C80);
794*4882a593Smuzhiyun WREG32(index_reg, 0x10202);
795*4882a593Smuzhiyun WREG32(data_reg, 0x8008BEB8);
796*4882a593Smuzhiyun WREG32(index_reg, 0x10300);
797*4882a593Smuzhiyun WREG32(data_reg, 0x80B0BF78);
798*4882a593Smuzhiyun WREG32(index_reg, 0x10301);
799*4882a593Smuzhiyun WREG32(data_reg, 0x85008C20);
800*4882a593Smuzhiyun WREG32(index_reg, 0x10302);
801*4882a593Smuzhiyun WREG32(data_reg, 0x8020BEA0);
802*4882a593Smuzhiyun WREG32(index_reg, 0x10400);
803*4882a593Smuzhiyun WREG32(data_reg, 0x8028BF90);
804*4882a593Smuzhiyun WREG32(index_reg, 0x10401);
805*4882a593Smuzhiyun WREG32(data_reg, 0x85E08BC0);
806*4882a593Smuzhiyun WREG32(index_reg, 0x10402);
807*4882a593Smuzhiyun WREG32(data_reg, 0x8018BE90);
808*4882a593Smuzhiyun WREG32(index_reg, 0x10500);
809*4882a593Smuzhiyun WREG32(data_reg, 0xBFB8BFB0);
810*4882a593Smuzhiyun WREG32(index_reg, 0x10501);
811*4882a593Smuzhiyun WREG32(data_reg, 0x86C08B40);
812*4882a593Smuzhiyun WREG32(index_reg, 0x10502);
813*4882a593Smuzhiyun WREG32(data_reg, 0x8010BE90);
814*4882a593Smuzhiyun WREG32(index_reg, 0x10600);
815*4882a593Smuzhiyun WREG32(data_reg, 0xBF58BFC8);
816*4882a593Smuzhiyun WREG32(index_reg, 0x10601);
817*4882a593Smuzhiyun WREG32(data_reg, 0x87A08AA0);
818*4882a593Smuzhiyun WREG32(index_reg, 0x10602);
819*4882a593Smuzhiyun WREG32(data_reg, 0x8010BE98);
820*4882a593Smuzhiyun WREG32(index_reg, 0x10700);
821*4882a593Smuzhiyun WREG32(data_reg, 0xBF10BFF0);
822*4882a593Smuzhiyun WREG32(index_reg, 0x10701);
823*4882a593Smuzhiyun WREG32(data_reg, 0x886089E0);
824*4882a593Smuzhiyun WREG32(index_reg, 0x10702);
825*4882a593Smuzhiyun WREG32(data_reg, 0x8018BEB0);
826*4882a593Smuzhiyun WREG32(index_reg, 0x10800);
827*4882a593Smuzhiyun WREG32(data_reg, 0xBED8BFE8);
828*4882a593Smuzhiyun WREG32(index_reg, 0x10801);
829*4882a593Smuzhiyun WREG32(data_reg, 0x89408940);
830*4882a593Smuzhiyun WREG32(index_reg, 0x10802);
831*4882a593Smuzhiyun WREG32(data_reg, 0xBFE8BED8);
832*4882a593Smuzhiyun WREG32(index_reg, 0x20000);
833*4882a593Smuzhiyun WREG32(data_reg, 0x80008000);
834*4882a593Smuzhiyun WREG32(index_reg, 0x20001);
835*4882a593Smuzhiyun WREG32(data_reg, 0x90008000);
836*4882a593Smuzhiyun WREG32(index_reg, 0x20002);
837*4882a593Smuzhiyun WREG32(data_reg, 0x80008000);
838*4882a593Smuzhiyun WREG32(index_reg, 0x20003);
839*4882a593Smuzhiyun WREG32(data_reg, 0x80008000);
840*4882a593Smuzhiyun WREG32(index_reg, 0x20100);
841*4882a593Smuzhiyun WREG32(data_reg, 0x80108000);
842*4882a593Smuzhiyun WREG32(index_reg, 0x20101);
843*4882a593Smuzhiyun WREG32(data_reg, 0x8FE0BF70);
844*4882a593Smuzhiyun WREG32(index_reg, 0x20102);
845*4882a593Smuzhiyun WREG32(data_reg, 0xBFE880C0);
846*4882a593Smuzhiyun WREG32(index_reg, 0x20103);
847*4882a593Smuzhiyun WREG32(data_reg, 0x80008000);
848*4882a593Smuzhiyun WREG32(index_reg, 0x20200);
849*4882a593Smuzhiyun WREG32(data_reg, 0x8018BFF8);
850*4882a593Smuzhiyun WREG32(index_reg, 0x20201);
851*4882a593Smuzhiyun WREG32(data_reg, 0x8F80BF08);
852*4882a593Smuzhiyun WREG32(index_reg, 0x20202);
853*4882a593Smuzhiyun WREG32(data_reg, 0xBFD081A0);
854*4882a593Smuzhiyun WREG32(index_reg, 0x20203);
855*4882a593Smuzhiyun WREG32(data_reg, 0xBFF88000);
856*4882a593Smuzhiyun WREG32(index_reg, 0x20300);
857*4882a593Smuzhiyun WREG32(data_reg, 0x80188000);
858*4882a593Smuzhiyun WREG32(index_reg, 0x20301);
859*4882a593Smuzhiyun WREG32(data_reg, 0x8EE0BEC0);
860*4882a593Smuzhiyun WREG32(index_reg, 0x20302);
861*4882a593Smuzhiyun WREG32(data_reg, 0xBFB082A0);
862*4882a593Smuzhiyun WREG32(index_reg, 0x20303);
863*4882a593Smuzhiyun WREG32(data_reg, 0x80008000);
864*4882a593Smuzhiyun WREG32(index_reg, 0x20400);
865*4882a593Smuzhiyun WREG32(data_reg, 0x80188000);
866*4882a593Smuzhiyun WREG32(index_reg, 0x20401);
867*4882a593Smuzhiyun WREG32(data_reg, 0x8E00BEA0);
868*4882a593Smuzhiyun WREG32(index_reg, 0x20402);
869*4882a593Smuzhiyun WREG32(data_reg, 0xBF8883C0);
870*4882a593Smuzhiyun WREG32(index_reg, 0x20403);
871*4882a593Smuzhiyun WREG32(data_reg, 0x80008000);
872*4882a593Smuzhiyun WREG32(index_reg, 0x20500);
873*4882a593Smuzhiyun WREG32(data_reg, 0x80188000);
874*4882a593Smuzhiyun WREG32(index_reg, 0x20501);
875*4882a593Smuzhiyun WREG32(data_reg, 0x8D00BE90);
876*4882a593Smuzhiyun WREG32(index_reg, 0x20502);
877*4882a593Smuzhiyun WREG32(data_reg, 0xBF588500);
878*4882a593Smuzhiyun WREG32(index_reg, 0x20503);
879*4882a593Smuzhiyun WREG32(data_reg, 0x80008008);
880*4882a593Smuzhiyun WREG32(index_reg, 0x20600);
881*4882a593Smuzhiyun WREG32(data_reg, 0x80188000);
882*4882a593Smuzhiyun WREG32(index_reg, 0x20601);
883*4882a593Smuzhiyun WREG32(data_reg, 0x8BC0BE98);
884*4882a593Smuzhiyun WREG32(index_reg, 0x20602);
885*4882a593Smuzhiyun WREG32(data_reg, 0xBF308660);
886*4882a593Smuzhiyun WREG32(index_reg, 0x20603);
887*4882a593Smuzhiyun WREG32(data_reg, 0x80008008);
888*4882a593Smuzhiyun WREG32(index_reg, 0x20700);
889*4882a593Smuzhiyun WREG32(data_reg, 0x80108000);
890*4882a593Smuzhiyun WREG32(index_reg, 0x20701);
891*4882a593Smuzhiyun WREG32(data_reg, 0x8A80BEB0);
892*4882a593Smuzhiyun WREG32(index_reg, 0x20702);
893*4882a593Smuzhiyun WREG32(data_reg, 0xBF0087C0);
894*4882a593Smuzhiyun WREG32(index_reg, 0x20703);
895*4882a593Smuzhiyun WREG32(data_reg, 0x80008008);
896*4882a593Smuzhiyun WREG32(index_reg, 0x20800);
897*4882a593Smuzhiyun WREG32(data_reg, 0x80108000);
898*4882a593Smuzhiyun WREG32(index_reg, 0x20801);
899*4882a593Smuzhiyun WREG32(data_reg, 0x8920BED0);
900*4882a593Smuzhiyun WREG32(index_reg, 0x20802);
901*4882a593Smuzhiyun WREG32(data_reg, 0xBED08920);
902*4882a593Smuzhiyun WREG32(index_reg, 0x20803);
903*4882a593Smuzhiyun WREG32(data_reg, 0x80008010);
904*4882a593Smuzhiyun WREG32(index_reg, 0x30000);
905*4882a593Smuzhiyun WREG32(data_reg, 0x90008000);
906*4882a593Smuzhiyun WREG32(index_reg, 0x30001);
907*4882a593Smuzhiyun WREG32(data_reg, 0x80008000);
908*4882a593Smuzhiyun WREG32(index_reg, 0x30100);
909*4882a593Smuzhiyun WREG32(data_reg, 0x8FE0BF90);
910*4882a593Smuzhiyun WREG32(index_reg, 0x30101);
911*4882a593Smuzhiyun WREG32(data_reg, 0xBFF880A0);
912*4882a593Smuzhiyun WREG32(index_reg, 0x30200);
913*4882a593Smuzhiyun WREG32(data_reg, 0x8F60BF40);
914*4882a593Smuzhiyun WREG32(index_reg, 0x30201);
915*4882a593Smuzhiyun WREG32(data_reg, 0xBFE88180);
916*4882a593Smuzhiyun WREG32(index_reg, 0x30300);
917*4882a593Smuzhiyun WREG32(data_reg, 0x8EC0BF00);
918*4882a593Smuzhiyun WREG32(index_reg, 0x30301);
919*4882a593Smuzhiyun WREG32(data_reg, 0xBFC88280);
920*4882a593Smuzhiyun WREG32(index_reg, 0x30400);
921*4882a593Smuzhiyun WREG32(data_reg, 0x8DE0BEE0);
922*4882a593Smuzhiyun WREG32(index_reg, 0x30401);
923*4882a593Smuzhiyun WREG32(data_reg, 0xBFA083A0);
924*4882a593Smuzhiyun WREG32(index_reg, 0x30500);
925*4882a593Smuzhiyun WREG32(data_reg, 0x8CE0BED0);
926*4882a593Smuzhiyun WREG32(index_reg, 0x30501);
927*4882a593Smuzhiyun WREG32(data_reg, 0xBF7884E0);
928*4882a593Smuzhiyun WREG32(index_reg, 0x30600);
929*4882a593Smuzhiyun WREG32(data_reg, 0x8BA0BED8);
930*4882a593Smuzhiyun WREG32(index_reg, 0x30601);
931*4882a593Smuzhiyun WREG32(data_reg, 0xBF508640);
932*4882a593Smuzhiyun WREG32(index_reg, 0x30700);
933*4882a593Smuzhiyun WREG32(data_reg, 0x8A60BEE8);
934*4882a593Smuzhiyun WREG32(index_reg, 0x30701);
935*4882a593Smuzhiyun WREG32(data_reg, 0xBF2087A0);
936*4882a593Smuzhiyun WREG32(index_reg, 0x30800);
937*4882a593Smuzhiyun WREG32(data_reg, 0x8900BF00);
938*4882a593Smuzhiyun WREG32(index_reg, 0x30801);
939*4882a593Smuzhiyun WREG32(data_reg, 0xBF008900);
940*4882a593Smuzhiyun }
941*4882a593Smuzhiyun
/* Per-CRTC display watermark state, filled by rv515_crtc_bandwidth_compute()
 * and consumed by rv515_compute_mode_priority().  All fixed20_12 members
 * are 20.12 fixed-point values. */
struct rv515_watermark {
	u32 lb_request_fifo_depth;	/* line buffer request FIFO depth (chunks) */
	fixed20_12 num_line_pair;	/* line pairs per request (1 = 2 lines, 2 = 4 lines) */
	fixed20_12 estimated_width;	/* NOTE(review): never written in the visible code — confirm use */
	fixed20_12 worst_case_latency;	/* worst-case urgent-to-data latency */
	fixed20_12 consumption_rate;	/* pixel data consumption rate */
	fixed20_12 active_time;		/* active display time per line */
	fixed20_12 dbpp;		/* display bits per pixel (worst case 32bpp assumed) */
	fixed20_12 priority_mark_max;	/* upper bound for the priority mark */
	fixed20_12 priority_mark;	/* computed priority mark for this CRTC */
	fixed20_12 sclk;		/* system clock — NOTE(review): not assigned in visible code */
};
954*4882a593Smuzhiyun
/**
 * rv515_crtc_bandwidth_compute - compute display watermarks for one CRTC
 * @rdev: radeon device
 * @crtc: CRTC to compute watermarks for
 * @wm: watermark result structure to fill
 * @low: select the low sclk level when DPM is active
 *
 * Derives the line-buffer request FIFO depth, consumption rate, latencies
 * and priority marks for @crtc from its current mode, using 20.12
 * fixed-point arithmetic throughout.  The statement order matters: many
 * temporaries (a, b, c) are reused between steps.
 */
static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
					 struct radeon_crtc *crtc,
					 struct rv515_watermark *wm,
					 bool low)
{
	struct drm_display_mode *mode = &crtc->base.mode;
	fixed20_12 a, b, c;
	fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
	fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
	fixed20_12 sclk;
	u32 selected_sclk;

	if (!crtc->base.enabled) {
		/* FIXME: wouldn't it better to set priority mark to maximum */
		wm->lb_request_fifo_depth = 4;
		return;
	}

	/* rv6xx, rv7xx: prefer the DPM-selected sclk level when available */
	if ((rdev->family >= CHIP_RV610) &&
	    (rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		selected_sclk = radeon_dpm_get_sclk(rdev, low);
	else
		selected_sclk = rdev->pm.current_sclk;

	/* sclk in Mhz */
	a.full = dfixed_const(100);
	sclk.full = dfixed_const(selected_sclk);
	sclk.full = dfixed_div(sclk, a);

	/* vsc > 2 means downscaling steep enough to need two line pairs */
	if (crtc->vsc.full > dfixed_const(2))
		wm->num_line_pair.full = dfixed_const(2);
	else
		wm->num_line_pair.full = dfixed_const(1);

	/* FIFO depth = ceil(hdisplay / 256) * num_line_pair, minimum 4 */
	b.full = dfixed_const(mode->crtc_hdisplay);
	c.full = dfixed_const(256);
	a.full = dfixed_div(b, c);
	request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
	request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
	/* NOTE(review): the minimum test uses the un-scaled hdisplay/256
	 * value (a), not the line-pair-scaled depth — confirm intentional. */
	if (a.full < dfixed_const(4)) {
		wm->lb_request_fifo_depth = 4;
	} else {
		wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
	}

	/* Determine consumption rate
	 * pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000)
	 * vtaps = number of vertical taps,
	 * vsc = vertical scaling ratio, defined as source/destination
	 * hsc = horizontal scaling ration, defined as source/destination
	 */
	a.full = dfixed_const(mode->clock);
	b.full = dfixed_const(1000);
	a.full = dfixed_div(a, b);
	pclk.full = dfixed_div(b, a);
	if (crtc->rmx_type != RMX_OFF) {
		/* Scaling active: consumption time = pclk / (max(vsc,2)*hsc/2) */
		b.full = dfixed_const(2);
		if (crtc->vsc.full > b.full)
			b.full = crtc->vsc.full;
		b.full = dfixed_mul(b, crtc->hsc);
		c.full = dfixed_const(2);
		b.full = dfixed_div(b, c);
		consumption_time.full = dfixed_div(pclk, b);
	} else {
		consumption_time.full = pclk.full;
	}
	a.full = dfixed_const(1);
	wm->consumption_rate.full = dfixed_div(a, consumption_time);


	/* Determine line time
	 * LineTime = total time for one line of display
	 * htotal = total number of horizontal pixels
	 * pclk = pixel clock period(ns)
	 */
	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
	line_time.full = dfixed_mul(a, pclk);

	/* Determine active time
	 * ActiveTime = time of active region of display within one line,
	 * hactive = total number of horizontal active pixels
	 * htotal = total number of horizontal pixels
	 */
	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
	b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
	wm->active_time.full = dfixed_mul(line_time, b);
	wm->active_time.full = dfixed_div(wm->active_time, a);

	/* Determine chunk time
	 * ChunkTime = the time it takes the DCP to send one chunk of data
	 * to the LB which consists of pipeline delay and inter chunk gap
	 * sclk = system clock(Mhz)
	 */
	a.full = dfixed_const(600 * 1000);
	chunk_time.full = dfixed_div(a, sclk);
	read_delay_latency.full = dfixed_const(1000);

	/* Determine the worst case latency
	 * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
	 * WorstCaseLatency = worst case time from urgent to when the MC starts
	 * to return data
	 * READ_DELAY_IDLE_MAX = constant of 1us
	 * ChunkTime = time it takes the DCP to send one chunk of data to the LB
	 * which consists of pipeline delay and inter chunk gap
	 */
	if (dfixed_trunc(wm->num_line_pair) > 1) {
		a.full = dfixed_const(3);
		wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
		wm->worst_case_latency.full += read_delay_latency.full;
	} else {
		wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full;
	}

	/* Determine the tolerable latency
	 * TolerableLatency = Any given request has only 1 line time
	 * for the data to be returned
	 * LBRequestFifoDepth = Number of chunk requests the LB can
	 * put into the request FIFO for a display
	 * LineTime = total time for one line of display
	 * ChunkTime = the time it takes the DCP to send one chunk
	 * of data to the LB which consists of
	 * pipeline delay and inter chunk gap
	 */
	if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
		tolerable_latency.full = line_time.full;
	} else {
		tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
		tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
		tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
		tolerable_latency.full = line_time.full - tolerable_latency.full;
	}
	/* We assume worst case 32bits (4 bytes) */
	wm->dbpp.full = dfixed_const(2 * 16);

	/* Determine the maximum priority mark
	 * width = viewport width in pixels
	 */
	a.full = dfixed_const(16);
	wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
	wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
	wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);

	/* Determine estimated width */
	estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
	estimated_width.full = dfixed_div(estimated_width, consumption_time);
	if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
		wm->priority_mark.full = wm->priority_mark_max.full;
	} else {
		a.full = dfixed_const(16);
		wm->priority_mark.full = dfixed_div(estimated_width, a);
		wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
		wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
	}
}
1110*4882a593Smuzhiyun
/**
 * rv515_compute_mode_priority - derive display priority marks for two CRTCs
 * @rdev: radeon device
 * @wm0: watermarks for CRTC 0
 * @wm1: watermarks for CRTC 1
 * @mode0: mode on CRTC 0, or NULL if disabled
 * @mode1: mode on CRTC 1, or NULL if disabled
 * @d1mode_priority_a_cnt: result for CRTC 0's priority register
 * @d2mode_priority_a_cnt: result for CRTC 1's priority register
 *
 * Computes the MODE_PRIORITY values to program per CRTC.  Three cases:
 * both CRTCs active (fill rate is shared), only CRTC 0, or only CRTC 1.
 * In each case the mark is max(computed mark, per-CRTC priority_mark,
 * priority_mark_max), with MODE_PRIORITY_ALWAYS_ON forced when
 * disp_priority == 2.  Disabled CRTCs keep MODE_PRIORITY_OFF.
 */
static void rv515_compute_mode_priority(struct radeon_device *rdev,
					struct rv515_watermark *wm0,
					struct rv515_watermark *wm1,
					struct drm_display_mode *mode0,
					struct drm_display_mode *mode1,
					u32 *d1mode_priority_a_cnt,
					u32 *d2mode_priority_a_cnt)
{
	fixed20_12 priority_mark02, priority_mark12, fill_rate;
	fixed20_12 a, b;

	*d1mode_priority_a_cnt = MODE_PRIORITY_OFF;
	*d2mode_priority_a_cnt = MODE_PRIORITY_OFF;

	if (mode0 && mode1) {
		/* Both heads active: fill rate is divided between them */
		if (dfixed_trunc(wm0->dbpp) > 64)
			a.full = dfixed_div(wm0->dbpp, wm0->num_line_pair);
		else
			a.full = wm0->num_line_pair.full;
		if (dfixed_trunc(wm1->dbpp) > 64)
			b.full = dfixed_div(wm1->dbpp, wm1->num_line_pair);
		else
			b.full = wm1->num_line_pair.full;
		a.full += b.full;
		fill_rate.full = dfixed_div(wm0->sclk, a);
		if (wm0->consumption_rate.full > fill_rate.full) {
			/* Consuming faster than filling: add the deficit term */
			b.full = wm0->consumption_rate.full - fill_rate.full;
			b.full = dfixed_mul(b, wm0->active_time);
			a.full = dfixed_const(16);
			b.full = dfixed_div(b, a);
			a.full = dfixed_mul(wm0->worst_case_latency,
						wm0->consumption_rate);
			priority_mark02.full = a.full + b.full;
		} else {
			a.full = dfixed_mul(wm0->worst_case_latency,
						wm0->consumption_rate);
			b.full = dfixed_const(16 * 1000);
			priority_mark02.full = dfixed_div(a, b);
		}
		if (wm1->consumption_rate.full > fill_rate.full) {
			b.full = wm1->consumption_rate.full - fill_rate.full;
			b.full = dfixed_mul(b, wm1->active_time);
			a.full = dfixed_const(16);
			b.full = dfixed_div(b, a);
			a.full = dfixed_mul(wm1->worst_case_latency,
						wm1->consumption_rate);
			priority_mark12.full = a.full + b.full;
		} else {
			a.full = dfixed_mul(wm1->worst_case_latency,
						wm1->consumption_rate);
			b.full = dfixed_const(16 * 1000);
			priority_mark12.full = dfixed_div(a, b);
		}
		/* Clamp both marks from below by the per-CRTC watermarks */
		if (wm0->priority_mark.full > priority_mark02.full)
			priority_mark02.full = wm0->priority_mark.full;
		if (wm0->priority_mark_max.full > priority_mark02.full)
			priority_mark02.full = wm0->priority_mark_max.full;
		if (wm1->priority_mark.full > priority_mark12.full)
			priority_mark12.full = wm1->priority_mark.full;
		if (wm1->priority_mark_max.full > priority_mark12.full)
			priority_mark12.full = wm1->priority_mark_max.full;
		*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
		*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
		if (rdev->disp_priority == 2) {
			*d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
			*d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
		}
	} else if (mode0) {
		/* Only CRTC 0 active: it gets the full fill rate */
		if (dfixed_trunc(wm0->dbpp) > 64)
			a.full = dfixed_div(wm0->dbpp, wm0->num_line_pair);
		else
			a.full = wm0->num_line_pair.full;
		fill_rate.full = dfixed_div(wm0->sclk, a);
		if (wm0->consumption_rate.full > fill_rate.full) {
			b.full = wm0->consumption_rate.full - fill_rate.full;
			b.full = dfixed_mul(b, wm0->active_time);
			a.full = dfixed_const(16);
			b.full = dfixed_div(b, a);
			a.full = dfixed_mul(wm0->worst_case_latency,
						wm0->consumption_rate);
			priority_mark02.full = a.full + b.full;
		} else {
			a.full = dfixed_mul(wm0->worst_case_latency,
						wm0->consumption_rate);
			/* NOTE(review): the other branches divide by
			 * dfixed_const(16 * 1000) here — confirm the bare
			 * 16 is intentional and not a missing factor. */
			b.full = dfixed_const(16);
			priority_mark02.full = dfixed_div(a, b);
		}
		if (wm0->priority_mark.full > priority_mark02.full)
			priority_mark02.full = wm0->priority_mark.full;
		if (wm0->priority_mark_max.full > priority_mark02.full)
			priority_mark02.full = wm0->priority_mark_max.full;
		*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
		if (rdev->disp_priority == 2)
			*d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
	} else if (mode1) {
		/* Only CRTC 1 active: it gets the full fill rate */
		if (dfixed_trunc(wm1->dbpp) > 64)
			a.full = dfixed_div(wm1->dbpp, wm1->num_line_pair);
		else
			a.full = wm1->num_line_pair.full;
		fill_rate.full = dfixed_div(wm1->sclk, a);
		if (wm1->consumption_rate.full > fill_rate.full) {
			b.full = wm1->consumption_rate.full - fill_rate.full;
			b.full = dfixed_mul(b, wm1->active_time);
			a.full = dfixed_const(16);
			b.full = dfixed_div(b, a);
			a.full = dfixed_mul(wm1->worst_case_latency,
						wm1->consumption_rate);
			priority_mark12.full = a.full + b.full;
		} else {
			a.full = dfixed_mul(wm1->worst_case_latency,
						wm1->consumption_rate);
			b.full = dfixed_const(16 * 1000);
			priority_mark12.full = dfixed_div(a, b);
		}
		if (wm1->priority_mark.full > priority_mark12.full)
			priority_mark12.full = wm1->priority_mark.full;
		if (wm1->priority_mark_max.full > priority_mark12.full)
			priority_mark12.full = wm1->priority_mark_max.full;
		*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
		if (rdev->disp_priority == 2)
			*d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
	}
}
1234*4882a593Smuzhiyun
rv515_bandwidth_avivo_update(struct radeon_device * rdev)1235*4882a593Smuzhiyun void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
1236*4882a593Smuzhiyun {
1237*4882a593Smuzhiyun struct drm_display_mode *mode0 = NULL;
1238*4882a593Smuzhiyun struct drm_display_mode *mode1 = NULL;
1239*4882a593Smuzhiyun struct rv515_watermark wm0_high, wm0_low;
1240*4882a593Smuzhiyun struct rv515_watermark wm1_high, wm1_low;
1241*4882a593Smuzhiyun u32 tmp;
1242*4882a593Smuzhiyun u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
1243*4882a593Smuzhiyun u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;
1244*4882a593Smuzhiyun
1245*4882a593Smuzhiyun if (rdev->mode_info.crtcs[0]->base.enabled)
1246*4882a593Smuzhiyun mode0 = &rdev->mode_info.crtcs[0]->base.mode;
1247*4882a593Smuzhiyun if (rdev->mode_info.crtcs[1]->base.enabled)
1248*4882a593Smuzhiyun mode1 = &rdev->mode_info.crtcs[1]->base.mode;
1249*4882a593Smuzhiyun rs690_line_buffer_adjust(rdev, mode0, mode1);
1250*4882a593Smuzhiyun
1251*4882a593Smuzhiyun rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_high, false);
1252*4882a593Smuzhiyun rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_high, false);
1253*4882a593Smuzhiyun
1254*4882a593Smuzhiyun rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_low, false);
1255*4882a593Smuzhiyun rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_low, false);
1256*4882a593Smuzhiyun
1257*4882a593Smuzhiyun tmp = wm0_high.lb_request_fifo_depth;
1258*4882a593Smuzhiyun tmp |= wm1_high.lb_request_fifo_depth << 16;
1259*4882a593Smuzhiyun WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
1260*4882a593Smuzhiyun
1261*4882a593Smuzhiyun rv515_compute_mode_priority(rdev,
1262*4882a593Smuzhiyun &wm0_high, &wm1_high,
1263*4882a593Smuzhiyun mode0, mode1,
1264*4882a593Smuzhiyun &d1mode_priority_a_cnt, &d2mode_priority_a_cnt);
1265*4882a593Smuzhiyun rv515_compute_mode_priority(rdev,
1266*4882a593Smuzhiyun &wm0_low, &wm1_low,
1267*4882a593Smuzhiyun mode0, mode1,
1268*4882a593Smuzhiyun &d1mode_priority_b_cnt, &d2mode_priority_b_cnt);
1269*4882a593Smuzhiyun
1270*4882a593Smuzhiyun WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
1271*4882a593Smuzhiyun WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_b_cnt);
1272*4882a593Smuzhiyun WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
1273*4882a593Smuzhiyun WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_b_cnt);
1274*4882a593Smuzhiyun }
1275*4882a593Smuzhiyun
rv515_bandwidth_update(struct radeon_device * rdev)1276*4882a593Smuzhiyun void rv515_bandwidth_update(struct radeon_device *rdev)
1277*4882a593Smuzhiyun {
1278*4882a593Smuzhiyun uint32_t tmp;
1279*4882a593Smuzhiyun struct drm_display_mode *mode0 = NULL;
1280*4882a593Smuzhiyun struct drm_display_mode *mode1 = NULL;
1281*4882a593Smuzhiyun
1282*4882a593Smuzhiyun if (!rdev->mode_info.mode_config_initialized)
1283*4882a593Smuzhiyun return;
1284*4882a593Smuzhiyun
1285*4882a593Smuzhiyun radeon_update_display_priority(rdev);
1286*4882a593Smuzhiyun
1287*4882a593Smuzhiyun if (rdev->mode_info.crtcs[0]->base.enabled)
1288*4882a593Smuzhiyun mode0 = &rdev->mode_info.crtcs[0]->base.mode;
1289*4882a593Smuzhiyun if (rdev->mode_info.crtcs[1]->base.enabled)
1290*4882a593Smuzhiyun mode1 = &rdev->mode_info.crtcs[1]->base.mode;
1291*4882a593Smuzhiyun /*
1292*4882a593Smuzhiyun * Set display0/1 priority up in the memory controller for
1293*4882a593Smuzhiyun * modes if the user specifies HIGH for displaypriority
1294*4882a593Smuzhiyun * option.
1295*4882a593Smuzhiyun */
1296*4882a593Smuzhiyun if ((rdev->disp_priority == 2) &&
1297*4882a593Smuzhiyun (rdev->family == CHIP_RV515)) {
1298*4882a593Smuzhiyun tmp = RREG32_MC(MC_MISC_LAT_TIMER);
1299*4882a593Smuzhiyun tmp &= ~MC_DISP1R_INIT_LAT_MASK;
1300*4882a593Smuzhiyun tmp &= ~MC_DISP0R_INIT_LAT_MASK;
1301*4882a593Smuzhiyun if (mode1)
1302*4882a593Smuzhiyun tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT);
1303*4882a593Smuzhiyun if (mode0)
1304*4882a593Smuzhiyun tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT);
1305*4882a593Smuzhiyun WREG32_MC(MC_MISC_LAT_TIMER, tmp);
1306*4882a593Smuzhiyun }
1307*4882a593Smuzhiyun rv515_bandwidth_avivo_update(rdev);
1308*4882a593Smuzhiyun }
1309