/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/seq_file.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>

#include "atom.h"
#include "avivod.h"
#include "r600d.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_audio.h"
#include "radeon_mode.h"
#include "radeon_ucode.h"

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV770_smc.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV730_smc.bin");
MODULE_FIRMWARE("radeon/RV740_smc.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/RV710_smc.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_smc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_smc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_smc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_smc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");

static const u32 crtc_offsets[2] =
{
	0,
	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
static void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
extern int evergreen_rlc_resume(struct radeon_device *rdev);
extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);

/*
 * Indirect register accessors
 */
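/* The RCU and UVD context register blocks sit behind an index/data
 * register pair; a per-block spinlock serializes the two-step access so
 * concurrent readers and writers cannot interleave.
 */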
u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
	WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
	r = RREG32(R600_RCU_DATA);
	spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
	return r;
}

void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
	WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
	WREG32(R600_RCU_DATA, (v));
	spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
}

u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
	WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(R600_UVD_CTX_DATA);
	spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
	return r;
}

void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
	WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(R600_UVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
}

/**
 * r600_get_allowed_info_register - fetch the register for the info ioctl
 *
 * @rdev: radeon_device pointer
 * @reg: register offset in bytes
 * @val: register value
 *
 * Returns 0 for success or -EINVAL for an invalid register
 *
 */
int r600_get_allowed_info_register(struct radeon_device *rdev,
				   u32 reg, u32 *val)
{
	switch (reg) {
	case GRBM_STATUS:
	case GRBM_STATUS2:
	case R_000E50_SRBM_STATUS:
	case DMA_STATUS_REG:
	case UVD_STATUS:
		*val = RREG32(reg);
		return 0;
	default:
		return -EINVAL;
	}
}

/**
 * r600_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (r6xx, IGPs, APUs).
 */
u32 r600_get_xclk(struct radeon_device *rdev)
{
	return rdev->clock.spll.reference_freq;
}

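/**
 * r600_set_uvd_clocks - program the UPLL for the UVD clocks
 *
 * @rdev: radeon_device pointer
 * @vclk: requested UVD video clock, 0 to leave the PLL bypassed
 * @dclk: requested UVD decoder clock, 0 to leave the PLL bypassed
 *
 * Switches VCLK and DCLK to the bypass clock, recomputes the UPLL
 * feedback and post dividers for the requested frequencies, resets and
 * reprograms the PLL, then switches the clocks back to the PLL outputs.
 * Returns 0 on success or an error code from the divider calculation
 * or the PLL control requests.
 */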
int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	unsigned fb_div = 0, ref_div, vclk_div = 0, dclk_div = 0;
	int r;

	/* bypass vclk and dclk with bclk */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	/* assert BYPASS_EN, deassert UPLL_RESET, UPLL_SLEEP and UPLL_CTLREQ */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~(
		 UPLL_RESET_MASK | UPLL_SLEEP_MASK | UPLL_CTLREQ_MASK));

	if (rdev->family >= CHIP_RS780)
		WREG32_P(GFX_MACRO_BYPASS_CNTL, UPLL_BYPASS_CNTL,
			 ~UPLL_BYPASS_CNTL);

	if (!vclk || !dclk) {
		/* keep the Bypass mode, put PLL to sleep */
		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
		return 0;
	}

	if (rdev->clock.spll.reference_freq == 10000)
		ref_div = 34;
	else
		ref_div = 4;

	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
					  ref_div + 1, 0xFFF, 2, 30, ~0,
					  &fb_div, &vclk_div, &dclk_div);
	if (r)
		return r;

	if (rdev->family >= CHIP_RV670 && rdev->family < CHIP_RS780)
		fb_div >>= 1;
	else
		fb_div |= 1;

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* assert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);

	/* For RS780 we have to choose ref clk */
	if (rdev->family >= CHIP_RS780)
		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REFCLK_SRC_SEL_MASK,
			 ~UPLL_REFCLK_SRC_SEL_MASK);

	/* set the required fb, ref and post divider values */
	WREG32_P(CG_UPLL_FUNC_CNTL,
		 UPLL_FB_DIV(fb_div) |
		 UPLL_REF_DIV(ref_div),
		 ~(UPLL_FB_DIV_MASK | UPLL_REF_DIV_MASK));
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 UPLL_SW_HILEN(vclk_div >> 1) |
		 UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
		 UPLL_SW_HILEN2(dclk_div >> 1) |
		 UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)) |
		 UPLL_DIVEN_MASK | UPLL_DIVEN2_MASK,
		 ~UPLL_SW_MASK);

	/* give the PLL some time to settle */
	mdelay(15);

	/* deassert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(15);

	/* deassert BYPASS EN */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);

	if (rdev->family >= CHIP_RS780)
		WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~UPLL_BYPASS_CNTL);

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* switch VCLK and DCLK selection */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	mdelay(100);

	return 0;
}

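/**
 * dce3_program_fmt - program the FMT bit depth for an encoder
 *
 * @encoder: drm encoder
 *
 * Programs the FMT block of the CRTC feeding the encoder with the
 * truncation or spatial dither settings matching the monitor bpc
 * (DCE3).  LVDS (handled by the atom tables), analog encoders and
 * unknown bpc values are left untouched.
 */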
void dce3_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		bpc = radeon_get_monitor_bpc(connector);
		dither = radeon_connector->dither;
	}

	/* LVDS FMT is set up by atom */
	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= FMT_SPATIAL_DITHER_EN;
		else
			tmp |= FMT_TRUNCATE_EN;
		break;
	case 8:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
		else
			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
		break;
	case 10:
	default:
		/* not needed */
		break;
	}

	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
}

/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;

	if (temp & 0x100)
		actual_temp -= 256;

	return actual_temp * 1000;
}

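/**
 * r600_pm_get_dynpm_state - select the requested dynpm power state
 *
 * @rdev: radeon_device pointer
 *
 * Picks the requested power state and clock mode indices for the
 * planned dynpm action (minimum, downclock, upclock or default),
 * honoring the single-display-only flags and skipping states that
 * cannot drive the active crtcs.
 */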
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}

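/**
 * rs780_pm_init_profile - build the pm profile table (RS780/RS880)
 *
 * @rdev: radeon_device pointer
 *
 * Fills in the default/low/mid/high, single- and multi-head profile
 * entries with power state and clock mode indices appropriate for the
 * number of power states reported for the board.
 */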
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}

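/**
 * r600_pm_init_profile - build the pm profile table (r6xx/r7xx)
 *
 * @rdev: radeon_device pointer
 *
 * Fills in the default/low/mid/high, single- and multi-head profile
 * entries.  R600 itself only uses the default state; later asics pick
 * battery or performance states depending on mobility and the number
 * of power states available.
 */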
void r600_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	if (rdev->family == CHIP_R600) {
		/* XXX */
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}

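/**
 * r600_pm_misc - apply the side effects of a power state switch
 *
 * @rdev: radeon_device pointer
 *
 * Applies the voltage requested by the new power state; currently only
 * software-controlled VDDC changes are handled here.
 */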
void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}

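/**
 * r600_gui_idle - check whether the GUI (gfx pipe) is idle
 *
 * @rdev: radeon_device pointer
 *
 * Returns true if the GUI_ACTIVE bit in GRBM_STATUS is clear.
 */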
bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}

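/**
 * r600_hpd_set_polarity - set the hpd interrupt polarity
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd pin to configure
 *
 * Sets the interrupt polarity of the hpd pin based on the current
 * sense result, so the next state change (connect or disconnect)
 * generates an interrupt.
 */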
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}

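/**
 * r600_hpd_init - enable hotplug detection
 *
 * @rdev: radeon_device pointer
 *
 * Programs the hpd control registers for every connector with an hpd
 * pin (skipping eDP and LVDS), sets the interrupt polarity and enables
 * the hpd interrupts.
 */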
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS to avoid
			 * breaking the aux dp channel on iMacs; this helps
			 * (but does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 */
			continue;
		}
		if (ASIC_IS_DCE3(rdev)) {
			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
			if (ASIC_IS_DCE32(rdev))
				tmp |= DC_HPDx_EN;

			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			default:
				break;
			}
		}
		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
			enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);
}

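/**
 * r600_hpd_fini - disable hotplug detect pins (r6xx/r7xx)
 * @rdev: radeon_device pointer
 *
 * Clear the HPD control register for every connector that has a
 * hotplug pin and disable the corresponding HPD interrupts.
 */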
void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		if (ASIC_IS_DCE3(rdev)) {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				break;
			default:
				break;
			}
		}
		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
			disable |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disable);
}

/*
 * R600 PCIE GART
 */
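/**
 * r600_pcie_gart_tlb_flush - flush the PCIE GART TLB
 * @rdev: radeon_device pointer
 *
 * Flush the HDP cache so page table updates land in VRAM, then ask
 * VM context 0 to invalidate its TLB over the whole GTT range and
 * poll VM_CONTEXT0_REQUEST_RESPONSE until the request completes or
 * times out.
 */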
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->gart.ptr;
		u32 tmp;

		/* r7xx hw bug: write to HDP_DEBUG1 followed by an fb read
		 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
		 * This workaround seems to cause problems on some AGP
		 * cards, so just use the old method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* check the invalidation request response */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			pr_warn("[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

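/**
 * r600_pcie_gart_init - allocate the GART page table
 * @rdev: radeon_device pointer
 *
 * Initialize the common GART structure and allocate the page table
 * in VRAM (8 bytes per GPU page).  Returns 0 on success, error code
 * on failure.
 */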
int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

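/**
 * r600_pcie_gart_enable - set up and enable the PCIE GART
 * @rdev: radeon_device pointer
 *
 * Pin the page table in VRAM, program the VM L2 cache and the L1 TLB
 * clients, point VM context 0 at the page table and the dummy page,
 * then flush the TLB.  Returns 0 on success, error code on failure.
 */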
static int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

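/**
 * r600_pcie_gart_disable - disable the PCIE GART
 * @rdev: radeon_device pointer
 *
 * Disable all VM contexts and the VM L2 cache, put the L1 TLB clients
 * into their non-translating state, and unpin the page table.
 */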
static void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}

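/**
 * r600_pcie_gart_fini - tear down the PCIE GART
 * @rdev: radeon_device pointer
 *
 * Free the common GART structure, disable the GART, and free the page
 * table BO.
 */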
static void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

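/**
 * r600_agp_enable - set up the VM for AGP operation
 * @rdev: radeon_device pointer
 *
 * Program the VM L2 cache and L1 TLB clients the same way as for the
 * PCIE GART, but leave every VM context disabled; translation is then
 * presumably handled by the AGP aperture rather than a page table.
 */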
static void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

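/**
 * r600_mc_wait_for_idle - wait for the memory controller to go idle
 * @rdev: radeon_device pointer
 *
 * Poll the MC busy bits in SRBM_STATUS for up to rdev->usec_timeout
 * microseconds.  Returns 0 once the MC is idle, -1 on timeout.
 * Callers in this file just warn on timeout:
 *
 *	if (r600_mc_wait_for_idle(rdev))
 *		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
 */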
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* check the MC busy bits in SRBM_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

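/**
 * rs780_mc_rreg - read an indirect MC register (RS780/RS880)
 * @rdev: radeon_device pointer
 * @reg: MC register offset
 *
 * MC registers on these IGPs are reached through the MC_INDEX/MC_DATA
 * pair: program the index, read the data, and clear the index, all
 * under the MC index lock.
 */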
uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	unsigned long flags;
	uint32_t r;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
	r = RREG32(R_0028FC_MC_DATA);
	WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
	return r;
}

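/**
 * rs780_mc_wreg - write an indirect MC register (RS780/RS880)
 * @rdev: radeon_device pointer
 * @reg: MC register offset
 * @v: value to write
 *
 * Same MC_INDEX/MC_DATA sequence as rs780_mc_rreg(), with the write
 * enable bit set in the index register.
 */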
void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
		S_0028F8_MC_IND_WR_EN(1));
	WREG32(R_0028FC_MC_DATA, v);
	WREG32(R_0028F8_MC_INDEX, 0x7F);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}

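/**
 * r600_mc_program - program the memory controller address spaces
 * @rdev: radeon_device pointer
 *
 * Stop the memory clients, wait for the MC to idle, then program the
 * system aperture, FB location, and AGP aperture (or disable the
 * latter), and resume the clients.  The VGA renderer is disabled at
 * the end so it cannot scribble over driver-owned VRAM.
 */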
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place VRAM at the same address it has in the
 * CPU (PCI) address space, as some GPUs seem to have issues when it is
 * reprogrammed to a different address.
 *
 * If there is not enough space to fit the invisible VRAM after the
 * aperture, the VRAM size is limited to the aperture.
 *
 * If we are using AGP, VRAM is placed adjacent to the AGP aperture, as
 * the GPU needs to see them as one range so that it can be programmed
 * to catch accesses outside of both (weird GPU policy, see ??).
 *
 * This function never fails; in the worst case VRAM or GTT is limited.
 *
 * Note: GTT start, end, and size should be initialized before calling
 * this function on AGP platforms.
 */
static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = mc->mc_mask - mc->gtt_end;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end + 1;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP) {
			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
			base <<= 24;
		}
		radeon_vram_location(rdev, &rdev->mc, base);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}

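/**
 * r600_mc_init - gather VRAM information and set up the GPU memory map
 * @rdev: radeon_device pointer
 *
 * Determine the VRAM width from RAMCFG/CHMAP, read the VRAM size and
 * PCI aperture, place VRAM and GTT, and, on RS780/RS880, try to use
 * the K8 direct mapping for faster framebuffer access.
 */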
static int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;
	uint32_t h_addr, l_addr;
	unsigned long long k8_addr;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could the aperture size report 0? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP) {
		rs690_pm_info(rdev);
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);

		if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
			/* Use K8 direct mapping for fast fb access. */
			rdev->fastfb_working = false;
			h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
			l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
			k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
			if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
#endif
			{
				/* FastFB shall be used with UMA memory. Here it is simply
				 * disabled when sideport memory is present.
				 */
				if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
					DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
						 (unsigned long long)rdev->mc.aper_base, k8_addr);
					rdev->mc.aper_base = (resource_size_t)k8_addr;
					rdev->fastfb_working = true;
				}
			}
		}
	}

	radeon_update_bandwidth_info(rdev);
	return 0;
}

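/**
 * r600_vram_scratch_init - allocate the VRAM scratch page
 * @rdev: radeon_device pointer
 *
 * Create, pin, and map a single GPU page in VRAM;
 * r600_mc_program() points MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR at it.
 * Returns 0 on success, error code on failure.
 */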
int r600_vram_scratch_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     0, NULL, NULL, &rdev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->vram_scratch.robj,
			  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->vram_scratch.robj);
		return r;
	}
	r = radeon_bo_kmap(rdev->vram_scratch.robj,
				(void **)&rdev->vram_scratch.ptr);
	if (r)
		radeon_bo_unpin(rdev->vram_scratch.robj);
	radeon_bo_unreserve(rdev->vram_scratch.robj);

	return r;
}

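/**
 * r600_vram_scratch_fini - free the VRAM scratch page
 * @rdev: radeon_device pointer
 *
 * Unmap, unpin, and unreference the scratch BO if it exists.
 */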
void r600_vram_scratch_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		return;
	}
	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->vram_scratch.robj);
		radeon_bo_unpin(rdev->vram_scratch.robj);
		radeon_bo_unreserve(rdev->vram_scratch.robj);
	}
	radeon_bo_unref(&rdev->vram_scratch.robj);
}

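/**
 * r600_set_bios_scratch_engine_hung - flag the GUI engine state in BIOS scratch
 * @rdev: radeon_device pointer
 * @hung: true to mark the engine hung, false to clear the flag
 *
 * Set or clear ATOM_S3_ASIC_GUI_ENGINE_HUNG in the BIOS scratch
 * register, presumably so other consumers of the scratch registers
 * (e.g. the VBIOS) can tell the GUI engine is hung during a reset.
 */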
void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
{
	u32 tmp = RREG32(R600_BIOS_3_SCRATCH);

	if (hung)
		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
	else
		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

	WREG32(R600_BIOS_3_SCRATCH, tmp);
}

static void r600_print_gpu_status_regs(struct radeon_device *rdev)
{
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS      = 0x%08X\n",
		 RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2     = 0x%08X\n",
		 RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS      = 0x%08X\n",
		 RREG32(R_000E50_SRBM_STATUS));
	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
		 RREG32(CP_STALLED_STAT1));
	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
		 RREG32(CP_STALLED_STAT2));
	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
		 RREG32(CP_BUSY_STAT));
	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
		 RREG32(CP_STAT));
	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
		 RREG32(DMA_STATUS_REG));
}

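/**
 * r600_is_display_hung - check whether the display controller is hung
 * @rdev: radeon_device pointer
 *
 * Sample the HV counter of every enabled CRTC, then re-check the
 * counters ten times, 100us apart.  A CRTC whose counter changes is
 * taken off the suspect list; returns true if any enabled CRTC still
 * looks stuck after all samples, false otherwise.
 */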
static bool r600_is_display_hung(struct radeon_device *rdev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[2];
	u32 i, j, tmp;

	for (i = 0; i < rdev->num_crtc; i++) {
		if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
			crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}

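/**
 * r600_gpu_check_soft_reset - work out which blocks need a reset
 * @rdev: radeon_device pointer
 *
 * Inspect GRBM_STATUS, DMA_STATUS_REG, and SRBM_STATUS and build a
 * RADEON_RESET_* mask of the blocks that look busy or hung.  A busy
 * MC is deliberately dropped from the mask at the end, since it is
 * most likely just busy rather than hung.
 */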
u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
{
	u32 reset_mask = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(R_008010_GRBM_STATUS);
	if (rdev->family >= CHIP_RV770) {
		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
		    G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
			reset_mask |= RADEON_RESET_GFX;
	} else {
		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
		    G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
			reset_mask |= RADEON_RESET_GFX;
	}

	if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
	    G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
		reset_mask |= RADEON_RESET_CP;

	if (G_008010_GRBM_EE_BUSY(tmp))
		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;

	/* DMA_STATUS_REG */
	tmp = RREG32(DMA_STATUS_REG);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA;

	/* SRBM_STATUS */
	tmp = RREG32(R_000E50_SRBM_STATUS);
	if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
		reset_mask |= RADEON_RESET_RLC;

	if (G_000E50_IH_BUSY(tmp))
		reset_mask |= RADEON_RESET_IH;

	if (G_000E50_SEM_BUSY(tmp))
		reset_mask |= RADEON_RESET_SEM;

	if (G_000E50_GRBM_RQ_PENDING(tmp))
		reset_mask |= RADEON_RESET_GRBM;

	if (G_000E50_VMC_BUSY(tmp))
		reset_mask |= RADEON_RESET_VMC;

	if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
	    G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
	    G_000E50_MCDW_BUSY(tmp))
		reset_mask |= RADEON_RESET_MC;

	if (r600_is_display_hung(rdev))
		reset_mask |= RADEON_RESET_DISPLAY;

	/* Skip MC reset as it's most likely not hung, just busy */
	if (reset_mask & RADEON_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~RADEON_RESET_MC;
	}

	return reset_mask;
}

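/**
 * r600_gpu_soft_reset - soft-reset the requested GPU blocks
 * @rdev: radeon_device pointer
 * @reset_mask: mask of RADEON_RESET_* blocks to reset
 *
 * Halt the CP and RLC (and the DMA engine if requested), stop the MC,
 * then pulse the matching bits in GRBM_SOFT_RESET and SRBM_SOFT_RESET
 * and resume the MC.  GPU status registers are dumped before and
 * after the reset.
 */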
static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct rv515_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	r600_print_gpu_status_regs(rdev);

	/* Disable CP parsing/prefetching */
	if (rdev->family >= CHIP_RV770)
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
	else
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));

	/* disable the RLC */
	WREG32(RLC_CNTL, 0);

	if (reset_mask & RADEON_RESET_DMA) {
		/* Disable DMA */
		tmp = RREG32(DMA_RB_CNTL);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL, tmp);
	}

	mdelay(50);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}

	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
		if (rdev->family >= CHIP_RV770)
			grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
				S_008020_SOFT_RESET_CB(1) |
				S_008020_SOFT_RESET_PA(1) |
				S_008020_SOFT_RESET_SC(1) |
				S_008020_SOFT_RESET_SPI(1) |
				S_008020_SOFT_RESET_SX(1) |
				S_008020_SOFT_RESET_SH(1) |
				S_008020_SOFT_RESET_TC(1) |
				S_008020_SOFT_RESET_TA(1) |
				S_008020_SOFT_RESET_VC(1) |
				S_008020_SOFT_RESET_VGT(1);
		else
			grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
				S_008020_SOFT_RESET_DB(1) |
				S_008020_SOFT_RESET_CB(1) |
				S_008020_SOFT_RESET_PA(1) |
				S_008020_SOFT_RESET_SC(1) |
				S_008020_SOFT_RESET_SMX(1) |
				S_008020_SOFT_RESET_SPI(1) |
				S_008020_SOFT_RESET_SX(1) |
				S_008020_SOFT_RESET_SH(1) |
				S_008020_SOFT_RESET_TC(1) |
				S_008020_SOFT_RESET_TA(1) |
				S_008020_SOFT_RESET_VC(1) |
				S_008020_SOFT_RESET_VGT(1);
	}

	if (reset_mask & RADEON_RESET_CP) {
		grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
			S_008020_SOFT_RESET_VGT(1);

		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
	}

	if (reset_mask & RADEON_RESET_DMA) {
		if (rdev->family >= CHIP_RV770)
			srbm_soft_reset |= RV770_SOFT_RESET_DMA;
		else
			srbm_soft_reset |= SOFT_RESET_DMA;
	}

	if (reset_mask & RADEON_RESET_RLC)
		srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);

	if (reset_mask & RADEON_RESET_SEM)
		srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);

	if (reset_mask & RADEON_RESET_IH)
		srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);

	if (reset_mask & RADEON_RESET_GRBM)
		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);

	if (!(rdev->flags & RADEON_IS_IGP)) {
		if (reset_mask & RADEON_RESET_MC)
			srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
	}

	if (reset_mask & RADEON_RESET_VMC)
		srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);

	if (grbm_soft_reset) {
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	mdelay(1);

	rv515_mc_resume(rdev, &save);
	udelay(50);

	r600_print_gpu_status_regs(rdev);
}

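/**
 * r600_gpu_pci_config_reset - reset the GPU through PCI config space
 * @rdev: radeon_device pointer
 *
 * Hard-reset path: halt the engines, put the clocks into bypass on
 * r7xx, disable bus mastering and memory access, trigger a PCI config
 * reset, then wait for CONFIG_MEMSIZE to become readable again as a
 * sign the ASIC has come back.
 */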
static void r600_gpu_pci_config_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp, i;

	dev_info(rdev->dev, "GPU pci config reset\n");

	/* disable dpm? */

	/* Disable CP parsing/prefetching */
	if (rdev->family >= CHIP_RV770)
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
	else
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));

	/* disable the RLC */
	WREG32(RLC_CNTL, 0);

	/* Disable DMA */
	tmp = RREG32(DMA_RB_CNTL);
	tmp &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL, tmp);

	mdelay(50);

	/* set mclk/sclk to bypass */
	if (rdev->family >= CHIP_RV770)
		rv770_set_clk_bypass_mode(rdev);
	/* disable BM */
	pci_clear_master(rdev->pdev);
	/* disable mem access */
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}

	/* BIF reset workaround.  Not sure if this is needed on 6xx */
	tmp = RREG32(BUS_CNTL);
	tmp |= VGA_COHE_SPEC_TIMER_DIS;
	WREG32(BUS_CNTL, tmp);

	tmp = RREG32(BIF_SCRATCH0);

	/* reset */
	radeon_pci_config_reset(rdev);
	mdelay(1);

	/* BIF reset workaround.  Not sure if this is needed on 6xx */
	tmp = SOFT_RESET_BIF;
	WREG32(SRBM_SOFT_RESET, tmp);
	mdelay(1);
	WREG32(SRBM_SOFT_RESET, 0);

	/* wait for asic to come out of reset */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
			break;
		udelay(1);
	}
}

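/**
 * r600_asic_reset - reset the ASIC
 * @rdev: radeon_device pointer
 * @hard: force a PCI config reset instead of trying a soft reset
 *
 * Try a soft reset of the blocks that look hung; if that does not
 * clear the hang and hard resets are allowed, fall back to a PCI
 * config reset.  Always returns 0.
 */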
int r600_asic_reset(struct radeon_device *rdev, bool hard)
{
	u32 reset_mask;

	if (hard) {
		r600_gpu_pci_config_reset(rdev);
		return 0;
	}

	reset_mask = r600_gpu_check_soft_reset(rdev);

	if (reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, true);

	/* try soft reset */
	r600_gpu_soft_reset(rdev, reset_mask);

	reset_mask = r600_gpu_check_soft_reset(rdev);

	/* try pci config reset */
	if (reset_mask && radeon_hard_reset)
		r600_gpu_pci_config_reset(rdev);

	reset_mask = r600_gpu_check_soft_reset(rdev);

	if (!reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, false);

	return 0;
}

/**
 * r600_gfx_is_lockup - Check if the GFX engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the GFX engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = r600_gpu_check_soft_reset(rdev);

	if (!(reset_mask & (RADEON_RESET_GFX |
			    RADEON_RESET_COMPUTE |
			    RADEON_RESET_CP))) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}

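/**
 * r6xx_remap_render_backend - build the pipe-to-RB mapping
 * @rdev: radeon_device pointer
 * @tiling_pipe_num: log2-encoded number of rendering pipes
 * @max_rb_num: maximum render backends on this asic
 * @total_max_rb_num: maximum render backends across the family
 * @disabled_rb_mask: mask of disabled render backends
 *
 * Distribute the rendering pipes across the enabled render backends
 * as evenly as possible and pack the result into the register field
 * format (2 bits per entry on r6xx/r7xx, 4 bits on evergreen+).
 * Returns the packed backend map.
 */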
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
			      u32 tiling_pipe_num,
			      u32 max_rb_num,
			      u32 total_max_rb_num,
			      u32 disabled_rb_mask)
{
	u32 rendering_pipe_num, rb_num_width, req_rb_num;
	u32 pipe_rb_ratio, pipe_rb_remain, tmp;
	u32 data = 0, mask = 1 << (max_rb_num - 1);
	unsigned i, j;

	/* mask out the RBs that don't exist on that asic */
	tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
	/* make sure at least one RB is available */
	if ((tmp & 0xff) != 0xff)
		disabled_rb_mask = tmp;

	rendering_pipe_num = 1 << tiling_pipe_num;
	req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
	BUG_ON(rendering_pipe_num < req_rb_num);

	pipe_rb_ratio = rendering_pipe_num / req_rb_num;
	pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;

	if (rdev->family <= CHIP_RV740) {
		/* r6xx/r7xx */
		rb_num_width = 2;
	} else {
		/* eg+ */
		rb_num_width = 4;
	}

	for (i = 0; i < max_rb_num; i++) {
		if (!(mask & disabled_rb_mask)) {
			for (j = 0; j < pipe_rb_ratio; j++) {
				data <<= rb_num_width;
				data |= max_rb_num - i - 1;
			}
			if (pipe_rb_remain) {
				data <<= rb_num_width;
				data |= max_rb_num - i - 1;
				pipe_rb_remain--;
			}
		}
		mask >>= 1;
	}

	return data;
}

int r600_count_pipe_bits(uint32_t val)
{
	return hweight32(val);
}

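/**
 * r600_gpu_init - program the 3D engine initial state
 * @rdev: radeon_device pointer
 *
 * Set up the per-family hardware limits, the tiling configuration,
 * the render backend map, and the SQ/SX/CP/VGT state registers that
 * the rest of the driver expects.
 */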
r600_gpu_init(struct radeon_device * rdev)1990*4882a593Smuzhiyun static void r600_gpu_init(struct radeon_device *rdev)
1991*4882a593Smuzhiyun {
1992*4882a593Smuzhiyun u32 tiling_config;
1993*4882a593Smuzhiyun u32 ramcfg;
1994*4882a593Smuzhiyun u32 cc_gc_shader_pipe_config;
1995*4882a593Smuzhiyun u32 tmp;
1996*4882a593Smuzhiyun int i, j;
1997*4882a593Smuzhiyun u32 sq_config;
1998*4882a593Smuzhiyun u32 sq_gpr_resource_mgmt_1 = 0;
1999*4882a593Smuzhiyun u32 sq_gpr_resource_mgmt_2 = 0;
2000*4882a593Smuzhiyun u32 sq_thread_resource_mgmt = 0;
2001*4882a593Smuzhiyun u32 sq_stack_resource_mgmt_1 = 0;
2002*4882a593Smuzhiyun u32 sq_stack_resource_mgmt_2 = 0;
2003*4882a593Smuzhiyun u32 disabled_rb_mask;
2004*4882a593Smuzhiyun
2005*4882a593Smuzhiyun rdev->config.r600.tiling_group_size = 256;
2006*4882a593Smuzhiyun switch (rdev->family) {
2007*4882a593Smuzhiyun case CHIP_R600:
2008*4882a593Smuzhiyun rdev->config.r600.max_pipes = 4;
2009*4882a593Smuzhiyun rdev->config.r600.max_tile_pipes = 8;
2010*4882a593Smuzhiyun rdev->config.r600.max_simds = 4;
2011*4882a593Smuzhiyun rdev->config.r600.max_backends = 4;
2012*4882a593Smuzhiyun rdev->config.r600.max_gprs = 256;
2013*4882a593Smuzhiyun rdev->config.r600.max_threads = 192;
2014*4882a593Smuzhiyun rdev->config.r600.max_stack_entries = 256;
2015*4882a593Smuzhiyun rdev->config.r600.max_hw_contexts = 8;
2016*4882a593Smuzhiyun rdev->config.r600.max_gs_threads = 16;
2017*4882a593Smuzhiyun rdev->config.r600.sx_max_export_size = 128;
2018*4882a593Smuzhiyun rdev->config.r600.sx_max_export_pos_size = 16;
2019*4882a593Smuzhiyun rdev->config.r600.sx_max_export_smx_size = 128;
2020*4882a593Smuzhiyun rdev->config.r600.sq_num_cf_insts = 2;
2021*4882a593Smuzhiyun break;
2022*4882a593Smuzhiyun case CHIP_RV630:
2023*4882a593Smuzhiyun case CHIP_RV635:
2024*4882a593Smuzhiyun rdev->config.r600.max_pipes = 2;
2025*4882a593Smuzhiyun rdev->config.r600.max_tile_pipes = 2;
2026*4882a593Smuzhiyun rdev->config.r600.max_simds = 3;
2027*4882a593Smuzhiyun rdev->config.r600.max_backends = 1;
2028*4882a593Smuzhiyun rdev->config.r600.max_gprs = 128;
2029*4882a593Smuzhiyun rdev->config.r600.max_threads = 192;
2030*4882a593Smuzhiyun rdev->config.r600.max_stack_entries = 128;
2031*4882a593Smuzhiyun rdev->config.r600.max_hw_contexts = 8;
2032*4882a593Smuzhiyun rdev->config.r600.max_gs_threads = 4;
2033*4882a593Smuzhiyun rdev->config.r600.sx_max_export_size = 128;
2034*4882a593Smuzhiyun rdev->config.r600.sx_max_export_pos_size = 16;
2035*4882a593Smuzhiyun rdev->config.r600.sx_max_export_smx_size = 128;
2036*4882a593Smuzhiyun rdev->config.r600.sq_num_cf_insts = 2;
2037*4882a593Smuzhiyun break;
2038*4882a593Smuzhiyun case CHIP_RV610:
2039*4882a593Smuzhiyun case CHIP_RV620:
2040*4882a593Smuzhiyun case CHIP_RS780:
2041*4882a593Smuzhiyun case CHIP_RS880:
2042*4882a593Smuzhiyun rdev->config.r600.max_pipes = 1;
2043*4882a593Smuzhiyun rdev->config.r600.max_tile_pipes = 1;
2044*4882a593Smuzhiyun rdev->config.r600.max_simds = 2;
2045*4882a593Smuzhiyun rdev->config.r600.max_backends = 1;
2046*4882a593Smuzhiyun rdev->config.r600.max_gprs = 128;
2047*4882a593Smuzhiyun rdev->config.r600.max_threads = 192;
2048*4882a593Smuzhiyun rdev->config.r600.max_stack_entries = 128;
2049*4882a593Smuzhiyun rdev->config.r600.max_hw_contexts = 4;
2050*4882a593Smuzhiyun rdev->config.r600.max_gs_threads = 4;
2051*4882a593Smuzhiyun rdev->config.r600.sx_max_export_size = 128;
2052*4882a593Smuzhiyun rdev->config.r600.sx_max_export_pos_size = 16;
2053*4882a593Smuzhiyun rdev->config.r600.sx_max_export_smx_size = 128;
2054*4882a593Smuzhiyun rdev->config.r600.sq_num_cf_insts = 1;
2055*4882a593Smuzhiyun break;
2056*4882a593Smuzhiyun case CHIP_RV670:
2057*4882a593Smuzhiyun rdev->config.r600.max_pipes = 4;
2058*4882a593Smuzhiyun rdev->config.r600.max_tile_pipes = 4;
2059*4882a593Smuzhiyun rdev->config.r600.max_simds = 4;
2060*4882a593Smuzhiyun rdev->config.r600.max_backends = 4;
2061*4882a593Smuzhiyun rdev->config.r600.max_gprs = 192;
2062*4882a593Smuzhiyun rdev->config.r600.max_threads = 192;
2063*4882a593Smuzhiyun rdev->config.r600.max_stack_entries = 256;
2064*4882a593Smuzhiyun rdev->config.r600.max_hw_contexts = 8;
2065*4882a593Smuzhiyun rdev->config.r600.max_gs_threads = 16;
2066*4882a593Smuzhiyun rdev->config.r600.sx_max_export_size = 128;
2067*4882a593Smuzhiyun rdev->config.r600.sx_max_export_pos_size = 16;
2068*4882a593Smuzhiyun rdev->config.r600.sx_max_export_smx_size = 128;
2069*4882a593Smuzhiyun rdev->config.r600.sq_num_cf_insts = 2;
2070*4882a593Smuzhiyun break;
2071*4882a593Smuzhiyun default:
2072*4882a593Smuzhiyun break;
2073*4882a593Smuzhiyun }
2074*4882a593Smuzhiyun
2075*4882a593Smuzhiyun /* Initialize HDP */
2076*4882a593Smuzhiyun for (i = 0, j = 0; i < 32; i++, j += 0x18) {
2077*4882a593Smuzhiyun WREG32((0x2c14 + j), 0x00000000);
2078*4882a593Smuzhiyun WREG32((0x2c18 + j), 0x00000000);
2079*4882a593Smuzhiyun WREG32((0x2c1c + j), 0x00000000);
2080*4882a593Smuzhiyun WREG32((0x2c20 + j), 0x00000000);
2081*4882a593Smuzhiyun WREG32((0x2c24 + j), 0x00000000);
2082*4882a593Smuzhiyun }
2083*4882a593Smuzhiyun
2084*4882a593Smuzhiyun WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
2085*4882a593Smuzhiyun
2086*4882a593Smuzhiyun /* Setup tiling */
2087*4882a593Smuzhiyun tiling_config = 0;
2088*4882a593Smuzhiyun ramcfg = RREG32(RAMCFG);
2089*4882a593Smuzhiyun switch (rdev->config.r600.max_tile_pipes) {
2090*4882a593Smuzhiyun case 1:
2091*4882a593Smuzhiyun tiling_config |= PIPE_TILING(0);
2092*4882a593Smuzhiyun break;
2093*4882a593Smuzhiyun case 2:
2094*4882a593Smuzhiyun tiling_config |= PIPE_TILING(1);
2095*4882a593Smuzhiyun break;
2096*4882a593Smuzhiyun case 4:
2097*4882a593Smuzhiyun tiling_config |= PIPE_TILING(2);
2098*4882a593Smuzhiyun break;
2099*4882a593Smuzhiyun case 8:
2100*4882a593Smuzhiyun tiling_config |= PIPE_TILING(3);
2101*4882a593Smuzhiyun break;
2102*4882a593Smuzhiyun default:
2103*4882a593Smuzhiyun break;
2104*4882a593Smuzhiyun }
2105*4882a593Smuzhiyun rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
2106*4882a593Smuzhiyun rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
2107*4882a593Smuzhiyun tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
2108*4882a593Smuzhiyun tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
2109*4882a593Smuzhiyun
2110*4882a593Smuzhiyun tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
2111*4882a593Smuzhiyun if (tmp > 3) {
2112*4882a593Smuzhiyun tiling_config |= ROW_TILING(3);
2113*4882a593Smuzhiyun tiling_config |= SAMPLE_SPLIT(3);
2114*4882a593Smuzhiyun } else {
2115*4882a593Smuzhiyun tiling_config |= ROW_TILING(tmp);
2116*4882a593Smuzhiyun tiling_config |= SAMPLE_SPLIT(tmp);
2117*4882a593Smuzhiyun }
2118*4882a593Smuzhiyun tiling_config |= BANK_SWAPS(1);
2119*4882a593Smuzhiyun
2120*4882a593Smuzhiyun cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
2121*4882a593Smuzhiyun tmp = rdev->config.r600.max_simds -
2122*4882a593Smuzhiyun r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
2123*4882a593Smuzhiyun rdev->config.r600.active_simds = tmp;
2124*4882a593Smuzhiyun
2125*4882a593Smuzhiyun disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
2126*4882a593Smuzhiyun tmp = 0;
2127*4882a593Smuzhiyun for (i = 0; i < rdev->config.r600.max_backends; i++)
2128*4882a593Smuzhiyun tmp |= (1 << i);
2129*4882a593Smuzhiyun /* if all the backends are disabled, fix it up here */
2130*4882a593Smuzhiyun if ((disabled_rb_mask & tmp) == tmp) {
2131*4882a593Smuzhiyun for (i = 0; i < rdev->config.r600.max_backends; i++)
2132*4882a593Smuzhiyun disabled_rb_mask &= ~(1 << i);
2133*4882a593Smuzhiyun }
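/* remap the render backends around any fused-off units; the result feeds
 * both the tiling config (bits 16+) and the cached backend map
 */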
2134*4882a593Smuzhiyun tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
2135*4882a593Smuzhiyun tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
2136*4882a593Smuzhiyun R6XX_MAX_BACKENDS, disabled_rb_mask);
2137*4882a593Smuzhiyun tiling_config |= tmp << 16;
2138*4882a593Smuzhiyun rdev->config.r600.backend_map = tmp;
2139*4882a593Smuzhiyun
2140*4882a593Smuzhiyun rdev->config.r600.tile_config = tiling_config;
2141*4882a593Smuzhiyun WREG32(GB_TILING_CONFIG, tiling_config);
2142*4882a593Smuzhiyun WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
2143*4882a593Smuzhiyun WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
2144*4882a593Smuzhiyun WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
2145*4882a593Smuzhiyun
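/* derive the VGT dealloc distance and vertex reuse depth from the number of active quad pipes (4 entries per pipe) */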
2146*4882a593Smuzhiyun tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
2147*4882a593Smuzhiyun WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
2148*4882a593Smuzhiyun WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
2149*4882a593Smuzhiyun
2150*4882a593Smuzhiyun /* Setup some CP states */
2151*4882a593Smuzhiyun WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
2152*4882a593Smuzhiyun WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
2153*4882a593Smuzhiyun
2154*4882a593Smuzhiyun WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
2155*4882a593Smuzhiyun SYNC_WALKER | SYNC_ALIGNER));
2156*4882a593Smuzhiyun /* Setup various GPU states */
2157*4882a593Smuzhiyun if (rdev->family == CHIP_RV670)
2158*4882a593Smuzhiyun WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
2159*4882a593Smuzhiyun
2160*4882a593Smuzhiyun tmp = RREG32(SX_DEBUG_1);
2161*4882a593Smuzhiyun tmp |= SMX_EVENT_RELEASE;
2162*4882a593Smuzhiyun if (rdev->family > CHIP_R600)
2163*4882a593Smuzhiyun tmp |= ENABLE_NEW_SMX_ADDRESS;
2164*4882a593Smuzhiyun WREG32(SX_DEBUG_1, tmp);
2165*4882a593Smuzhiyun
2166*4882a593Smuzhiyun if (((rdev->family) == CHIP_R600) ||
2167*4882a593Smuzhiyun ((rdev->family) == CHIP_RV630) ||
2168*4882a593Smuzhiyun ((rdev->family) == CHIP_RV610) ||
2169*4882a593Smuzhiyun ((rdev->family) == CHIP_RV620) ||
2170*4882a593Smuzhiyun ((rdev->family) == CHIP_RS780) ||
2171*4882a593Smuzhiyun ((rdev->family) == CHIP_RS880)) {
2172*4882a593Smuzhiyun WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
2173*4882a593Smuzhiyun } else {
2174*4882a593Smuzhiyun WREG32(DB_DEBUG, 0);
2175*4882a593Smuzhiyun }
2176*4882a593Smuzhiyun WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
2177*4882a593Smuzhiyun DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
2178*4882a593Smuzhiyun
2179*4882a593Smuzhiyun WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
2180*4882a593Smuzhiyun WREG32(VGT_NUM_INSTANCES, 0);
2181*4882a593Smuzhiyun
2182*4882a593Smuzhiyun WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
2183*4882a593Smuzhiyun WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
2184*4882a593Smuzhiyun
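/* the parts without a vertex cache get fully retuned MS FIFO sizes; R600/RV630 only lower the DONE FIFO high-water mark */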
2185*4882a593Smuzhiyun tmp = RREG32(SQ_MS_FIFO_SIZES);
2186*4882a593Smuzhiyun if (((rdev->family) == CHIP_RV610) ||
2187*4882a593Smuzhiyun ((rdev->family) == CHIP_RV620) ||
2188*4882a593Smuzhiyun ((rdev->family) == CHIP_RS780) ||
2189*4882a593Smuzhiyun ((rdev->family) == CHIP_RS880)) {
2190*4882a593Smuzhiyun tmp = (CACHE_FIFO_SIZE(0xa) |
2191*4882a593Smuzhiyun FETCH_FIFO_HIWATER(0xa) |
2192*4882a593Smuzhiyun DONE_FIFO_HIWATER(0xe0) |
2193*4882a593Smuzhiyun ALU_UPDATE_FIFO_HIWATER(0x8));
2194*4882a593Smuzhiyun } else if (((rdev->family) == CHIP_R600) ||
2195*4882a593Smuzhiyun ((rdev->family) == CHIP_RV630)) {
2196*4882a593Smuzhiyun tmp &= ~DONE_FIFO_HIWATER(0xff);
2197*4882a593Smuzhiyun tmp |= DONE_FIFO_HIWATER(0x4);
2198*4882a593Smuzhiyun }
2199*4882a593Smuzhiyun WREG32(SQ_MS_FIFO_SIZES, tmp);
2200*4882a593Smuzhiyun
2201*4882a593Smuzhiyun /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
2202*4882a593Smuzhiyun * should be adjusted as needed by the 2D/3D drivers. This just sets default values
2203*4882a593Smuzhiyun */
2204*4882a593Smuzhiyun sq_config = RREG32(SQ_CONFIG);
2205*4882a593Smuzhiyun sq_config &= ~(PS_PRIO(3) |
2206*4882a593Smuzhiyun VS_PRIO(3) |
2207*4882a593Smuzhiyun GS_PRIO(3) |
2208*4882a593Smuzhiyun ES_PRIO(3));
2209*4882a593Smuzhiyun sq_config |= (DX9_CONSTS |
2210*4882a593Smuzhiyun VC_ENABLE |
2211*4882a593Smuzhiyun PS_PRIO(0) |
2212*4882a593Smuzhiyun VS_PRIO(1) |
2213*4882a593Smuzhiyun GS_PRIO(2) |
2214*4882a593Smuzhiyun ES_PRIO(3));
2215*4882a593Smuzhiyun
2216*4882a593Smuzhiyun if ((rdev->family) == CHIP_R600) {
2217*4882a593Smuzhiyun sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
2218*4882a593Smuzhiyun NUM_VS_GPRS(124) |
2219*4882a593Smuzhiyun NUM_CLAUSE_TEMP_GPRS(4));
2220*4882a593Smuzhiyun sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
2221*4882a593Smuzhiyun NUM_ES_GPRS(0));
2222*4882a593Smuzhiyun sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
2223*4882a593Smuzhiyun NUM_VS_THREADS(48) |
2224*4882a593Smuzhiyun NUM_GS_THREADS(4) |
2225*4882a593Smuzhiyun NUM_ES_THREADS(4));
2226*4882a593Smuzhiyun sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
2227*4882a593Smuzhiyun NUM_VS_STACK_ENTRIES(128));
2228*4882a593Smuzhiyun sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
2229*4882a593Smuzhiyun NUM_ES_STACK_ENTRIES(0));
2230*4882a593Smuzhiyun } else if (((rdev->family) == CHIP_RV610) ||
2231*4882a593Smuzhiyun ((rdev->family) == CHIP_RV620) ||
2232*4882a593Smuzhiyun ((rdev->family) == CHIP_RS780) ||
2233*4882a593Smuzhiyun ((rdev->family) == CHIP_RS880)) {
2234*4882a593Smuzhiyun /* no vertex cache */
2235*4882a593Smuzhiyun sq_config &= ~VC_ENABLE;
2236*4882a593Smuzhiyun
2237*4882a593Smuzhiyun sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2238*4882a593Smuzhiyun NUM_VS_GPRS(44) |
2239*4882a593Smuzhiyun NUM_CLAUSE_TEMP_GPRS(2));
2240*4882a593Smuzhiyun sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
2241*4882a593Smuzhiyun NUM_ES_GPRS(17));
2242*4882a593Smuzhiyun sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2243*4882a593Smuzhiyun NUM_VS_THREADS(78) |
2244*4882a593Smuzhiyun NUM_GS_THREADS(4) |
2245*4882a593Smuzhiyun NUM_ES_THREADS(31));
2246*4882a593Smuzhiyun sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
2247*4882a593Smuzhiyun NUM_VS_STACK_ENTRIES(40));
2248*4882a593Smuzhiyun sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
2249*4882a593Smuzhiyun NUM_ES_STACK_ENTRIES(16));
2250*4882a593Smuzhiyun } else if (((rdev->family) == CHIP_RV630) ||
2251*4882a593Smuzhiyun ((rdev->family) == CHIP_RV635)) {
2252*4882a593Smuzhiyun sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2253*4882a593Smuzhiyun NUM_VS_GPRS(44) |
2254*4882a593Smuzhiyun NUM_CLAUSE_TEMP_GPRS(2));
2255*4882a593Smuzhiyun sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
2256*4882a593Smuzhiyun NUM_ES_GPRS(18));
2257*4882a593Smuzhiyun sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2258*4882a593Smuzhiyun NUM_VS_THREADS(78) |
2259*4882a593Smuzhiyun NUM_GS_THREADS(4) |
2260*4882a593Smuzhiyun NUM_ES_THREADS(31));
2261*4882a593Smuzhiyun sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
2262*4882a593Smuzhiyun NUM_VS_STACK_ENTRIES(40));
2263*4882a593Smuzhiyun sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
2264*4882a593Smuzhiyun NUM_ES_STACK_ENTRIES(16));
2265*4882a593Smuzhiyun } else if ((rdev->family) == CHIP_RV670) {
2266*4882a593Smuzhiyun sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2267*4882a593Smuzhiyun NUM_VS_GPRS(44) |
2268*4882a593Smuzhiyun NUM_CLAUSE_TEMP_GPRS(2));
2269*4882a593Smuzhiyun sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
2270*4882a593Smuzhiyun NUM_ES_GPRS(17));
2271*4882a593Smuzhiyun sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2272*4882a593Smuzhiyun NUM_VS_THREADS(78) |
2273*4882a593Smuzhiyun NUM_GS_THREADS(4) |
2274*4882a593Smuzhiyun NUM_ES_THREADS(31));
2275*4882a593Smuzhiyun sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
2276*4882a593Smuzhiyun NUM_VS_STACK_ENTRIES(64));
2277*4882a593Smuzhiyun sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
2278*4882a593Smuzhiyun NUM_ES_STACK_ENTRIES(64));
2279*4882a593Smuzhiyun }
2280*4882a593Smuzhiyun
2281*4882a593Smuzhiyun WREG32(SQ_CONFIG, sq_config);
2282*4882a593Smuzhiyun WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
2283*4882a593Smuzhiyun WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
2284*4882a593Smuzhiyun WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
2285*4882a593Smuzhiyun WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
2286*4882a593Smuzhiyun WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
2287*4882a593Smuzhiyun
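/* the families without a vertex cache invalidate the texture cache only */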
2288*4882a593Smuzhiyun if (((rdev->family) == CHIP_RV610) ||
2289*4882a593Smuzhiyun ((rdev->family) == CHIP_RV620) ||
2290*4882a593Smuzhiyun ((rdev->family) == CHIP_RS780) ||
2291*4882a593Smuzhiyun ((rdev->family) == CHIP_RS880)) {
2292*4882a593Smuzhiyun WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
2293*4882a593Smuzhiyun } else {
2294*4882a593Smuzhiyun WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
2295*4882a593Smuzhiyun }
2296*4882a593Smuzhiyun
2297*4882a593Smuzhiyun /* More default values. 2D/3D driver should adjust as needed */
2298*4882a593Smuzhiyun WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
2299*4882a593Smuzhiyun S1_X(0x4) | S1_Y(0xc)));
2300*4882a593Smuzhiyun WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
2301*4882a593Smuzhiyun S1_X(0x2) | S1_Y(0x2) |
2302*4882a593Smuzhiyun S2_X(0xa) | S2_Y(0x6) |
2303*4882a593Smuzhiyun S3_X(0x6) | S3_Y(0xa)));
2304*4882a593Smuzhiyun WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
2305*4882a593Smuzhiyun S1_X(0x4) | S1_Y(0xc) |
2306*4882a593Smuzhiyun S2_X(0x1) | S2_Y(0x6) |
2307*4882a593Smuzhiyun S3_X(0xa) | S3_Y(0xe)));
2308*4882a593Smuzhiyun WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
2309*4882a593Smuzhiyun S5_X(0x0) | S5_Y(0x0) |
2310*4882a593Smuzhiyun S6_X(0xb) | S6_Y(0x4) |
2311*4882a593Smuzhiyun S7_X(0x7) | S7_Y(0x8)));
2312*4882a593Smuzhiyun
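/* size the GS-per-ES fan-out from the pipe count plus a per-family bonus, capped at 256 */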
2313*4882a593Smuzhiyun WREG32(VGT_STRMOUT_EN, 0);
2314*4882a593Smuzhiyun tmp = rdev->config.r600.max_pipes * 16;
2315*4882a593Smuzhiyun switch (rdev->family) {
2316*4882a593Smuzhiyun case CHIP_RV610:
2317*4882a593Smuzhiyun case CHIP_RV620:
2318*4882a593Smuzhiyun case CHIP_RS780:
2319*4882a593Smuzhiyun case CHIP_RS880:
2320*4882a593Smuzhiyun tmp += 32;
2321*4882a593Smuzhiyun break;
2322*4882a593Smuzhiyun case CHIP_RV670:
2323*4882a593Smuzhiyun tmp += 128;
2324*4882a593Smuzhiyun break;
2325*4882a593Smuzhiyun default:
2326*4882a593Smuzhiyun break;
2327*4882a593Smuzhiyun }
2328*4882a593Smuzhiyun if (tmp > 256) {
2329*4882a593Smuzhiyun tmp = 256;
2330*4882a593Smuzhiyun }
2331*4882a593Smuzhiyun WREG32(VGT_ES_PER_GS, 128);
2332*4882a593Smuzhiyun WREG32(VGT_GS_PER_ES, tmp);
2333*4882a593Smuzhiyun WREG32(VGT_GS_PER_VS, 2);
2334*4882a593Smuzhiyun WREG32(VGT_GS_VERTEX_REUSE, 16);
2335*4882a593Smuzhiyun
2336*4882a593Smuzhiyun /* more default values. 2D/3D driver should adjust as needed */
2337*4882a593Smuzhiyun WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
2338*4882a593Smuzhiyun WREG32(VGT_STRMOUT_EN, 0);
2339*4882a593Smuzhiyun WREG32(SX_MISC, 0);
2340*4882a593Smuzhiyun WREG32(PA_SC_MODE_CNTL, 0);
2341*4882a593Smuzhiyun WREG32(PA_SC_AA_CONFIG, 0);
2342*4882a593Smuzhiyun WREG32(PA_SC_LINE_STIPPLE, 0);
2343*4882a593Smuzhiyun WREG32(SPI_INPUT_Z, 0);
2344*4882a593Smuzhiyun WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
2345*4882a593Smuzhiyun WREG32(CB_COLOR7_FRAG, 0);
2346*4882a593Smuzhiyun
2347*4882a593Smuzhiyun /* Clear render buffer base addresses */
2348*4882a593Smuzhiyun WREG32(CB_COLOR0_BASE, 0);
2349*4882a593Smuzhiyun WREG32(CB_COLOR1_BASE, 0);
2350*4882a593Smuzhiyun WREG32(CB_COLOR2_BASE, 0);
2351*4882a593Smuzhiyun WREG32(CB_COLOR3_BASE, 0);
2352*4882a593Smuzhiyun WREG32(CB_COLOR4_BASE, 0);
2353*4882a593Smuzhiyun WREG32(CB_COLOR5_BASE, 0);
2354*4882a593Smuzhiyun WREG32(CB_COLOR6_BASE, 0);
2355*4882a593Smuzhiyun WREG32(CB_COLOR7_BASE, 0);
2356*4882a593Smuzhiyun WREG32(CB_COLOR7_FRAG, 0);
2357*4882a593Smuzhiyun
2358*4882a593Smuzhiyun switch (rdev->family) {
2359*4882a593Smuzhiyun case CHIP_RV610:
2360*4882a593Smuzhiyun case CHIP_RV620:
2361*4882a593Smuzhiyun case CHIP_RS780:
2362*4882a593Smuzhiyun case CHIP_RS880:
2363*4882a593Smuzhiyun tmp = TC_L2_SIZE(8);
2364*4882a593Smuzhiyun break;
2365*4882a593Smuzhiyun case CHIP_RV630:
2366*4882a593Smuzhiyun case CHIP_RV635:
2367*4882a593Smuzhiyun tmp = TC_L2_SIZE(4);
2368*4882a593Smuzhiyun break;
2369*4882a593Smuzhiyun case CHIP_R600:
2370*4882a593Smuzhiyun tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
2371*4882a593Smuzhiyun break;
2372*4882a593Smuzhiyun default:
2373*4882a593Smuzhiyun tmp = TC_L2_SIZE(0);
2374*4882a593Smuzhiyun break;
2375*4882a593Smuzhiyun }
2376*4882a593Smuzhiyun WREG32(TC_CNTL, tmp);
2377*4882a593Smuzhiyun
2378*4882a593Smuzhiyun tmp = RREG32(HDP_HOST_PATH_CNTL);
2379*4882a593Smuzhiyun WREG32(HDP_HOST_PATH_CNTL, tmp);
2380*4882a593Smuzhiyun
2381*4882a593Smuzhiyun tmp = RREG32(ARB_POP);
2382*4882a593Smuzhiyun tmp |= ENABLE_TC128;
2383*4882a593Smuzhiyun WREG32(ARB_POP, tmp);
2384*4882a593Smuzhiyun
2385*4882a593Smuzhiyun WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
2386*4882a593Smuzhiyun WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
2387*4882a593Smuzhiyun NUM_CLIP_SEQ(3)));
2388*4882a593Smuzhiyun WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
2389*4882a593Smuzhiyun WREG32(VC_ENHANCE, 0);
2390*4882a593Smuzhiyun }
2391*4882a593Smuzhiyun
2392*4882a593Smuzhiyun
2393*4882a593Smuzhiyun /*
2394*4882a593Smuzhiyun * Indirect registers accessor
2395*4882a593Smuzhiyun */
2396*4882a593Smuzhiyun u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
2397*4882a593Smuzhiyun {
2398*4882a593Smuzhiyun unsigned long flags;
2399*4882a593Smuzhiyun u32 r;
2400*4882a593Smuzhiyun
2401*4882a593Smuzhiyun spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
2402*4882a593Smuzhiyun WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2403*4882a593Smuzhiyun (void)RREG32(PCIE_PORT_INDEX);
2404*4882a593Smuzhiyun r = RREG32(PCIE_PORT_DATA);
2405*4882a593Smuzhiyun spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
2406*4882a593Smuzhiyun return r;
2407*4882a593Smuzhiyun }
2408*4882a593Smuzhiyun
2409*4882a593Smuzhiyun void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2410*4882a593Smuzhiyun {
2411*4882a593Smuzhiyun unsigned long flags;
2412*4882a593Smuzhiyun
2413*4882a593Smuzhiyun spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
2414*4882a593Smuzhiyun WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2415*4882a593Smuzhiyun (void)RREG32(PCIE_PORT_INDEX);
2416*4882a593Smuzhiyun WREG32(PCIE_PORT_DATA, (v));
2417*4882a593Smuzhiyun (void)RREG32(PCIE_PORT_DATA);
2418*4882a593Smuzhiyun spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
2419*4882a593Smuzhiyun }
2420*4882a593Smuzhiyun
2421*4882a593Smuzhiyun /*
2422*4882a593Smuzhiyun * CP & Ring
2423*4882a593Smuzhiyun */
2424*4882a593Smuzhiyun void r600_cp_stop(struct radeon_device *rdev)
2425*4882a593Smuzhiyun {
2426*4882a593Smuzhiyun if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
2427*4882a593Smuzhiyun radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
2428*4882a593Smuzhiyun WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
2429*4882a593Smuzhiyun WREG32(SCRATCH_UMSK, 0);
2430*4882a593Smuzhiyun rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
2431*4882a593Smuzhiyun }
2432*4882a593Smuzhiyun
2433*4882a593Smuzhiyun int r600_init_microcode(struct radeon_device *rdev)
2434*4882a593Smuzhiyun {
2435*4882a593Smuzhiyun const char *chip_name;
2436*4882a593Smuzhiyun const char *rlc_chip_name;
2437*4882a593Smuzhiyun const char *smc_chip_name = "RV770";
2438*4882a593Smuzhiyun size_t pfp_req_size, me_req_size, rlc_req_size, smc_req_size = 0;
2439*4882a593Smuzhiyun char fw_name[30];
2440*4882a593Smuzhiyun int err;
2441*4882a593Smuzhiyun
2442*4882a593Smuzhiyun DRM_DEBUG("\n");
2443*4882a593Smuzhiyun
2444*4882a593Smuzhiyun switch (rdev->family) {
2445*4882a593Smuzhiyun case CHIP_R600:
2446*4882a593Smuzhiyun chip_name = "R600";
2447*4882a593Smuzhiyun rlc_chip_name = "R600";
2448*4882a593Smuzhiyun break;
2449*4882a593Smuzhiyun case CHIP_RV610:
2450*4882a593Smuzhiyun chip_name = "RV610";
2451*4882a593Smuzhiyun rlc_chip_name = "R600";
2452*4882a593Smuzhiyun break;
2453*4882a593Smuzhiyun case CHIP_RV630:
2454*4882a593Smuzhiyun chip_name = "RV630";
2455*4882a593Smuzhiyun rlc_chip_name = "R600";
2456*4882a593Smuzhiyun break;
2457*4882a593Smuzhiyun case CHIP_RV620:
2458*4882a593Smuzhiyun chip_name = "RV620";
2459*4882a593Smuzhiyun rlc_chip_name = "R600";
2460*4882a593Smuzhiyun break;
2461*4882a593Smuzhiyun case CHIP_RV635:
2462*4882a593Smuzhiyun chip_name = "RV635";
2463*4882a593Smuzhiyun rlc_chip_name = "R600";
2464*4882a593Smuzhiyun break;
2465*4882a593Smuzhiyun case CHIP_RV670:
2466*4882a593Smuzhiyun chip_name = "RV670";
2467*4882a593Smuzhiyun rlc_chip_name = "R600";
2468*4882a593Smuzhiyun break;
2469*4882a593Smuzhiyun case CHIP_RS780:
2470*4882a593Smuzhiyun case CHIP_RS880:
2471*4882a593Smuzhiyun chip_name = "RS780";
2472*4882a593Smuzhiyun rlc_chip_name = "R600";
2473*4882a593Smuzhiyun break;
2474*4882a593Smuzhiyun case CHIP_RV770:
2475*4882a593Smuzhiyun chip_name = "RV770";
2476*4882a593Smuzhiyun rlc_chip_name = "R700";
2477*4882a593Smuzhiyun smc_chip_name = "RV770";
2478*4882a593Smuzhiyun smc_req_size = ALIGN(RV770_SMC_UCODE_SIZE, 4);
2479*4882a593Smuzhiyun break;
2480*4882a593Smuzhiyun case CHIP_RV730:
2481*4882a593Smuzhiyun chip_name = "RV730";
2482*4882a593Smuzhiyun rlc_chip_name = "R700";
2483*4882a593Smuzhiyun smc_chip_name = "RV730";
2484*4882a593Smuzhiyun smc_req_size = ALIGN(RV730_SMC_UCODE_SIZE, 4);
2485*4882a593Smuzhiyun break;
2486*4882a593Smuzhiyun case CHIP_RV710:
2487*4882a593Smuzhiyun chip_name = "RV710";
2488*4882a593Smuzhiyun rlc_chip_name = "R700";
2489*4882a593Smuzhiyun smc_chip_name = "RV710";
2490*4882a593Smuzhiyun smc_req_size = ALIGN(RV710_SMC_UCODE_SIZE, 4);
2491*4882a593Smuzhiyun break;
2492*4882a593Smuzhiyun case CHIP_RV740:
2493*4882a593Smuzhiyun chip_name = "RV730";
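/* RV740 uses the RV730 CP microcode */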
2494*4882a593Smuzhiyun rlc_chip_name = "R700";
2495*4882a593Smuzhiyun smc_chip_name = "RV740";
2496*4882a593Smuzhiyun smc_req_size = ALIGN(RV740_SMC_UCODE_SIZE, 4);
2497*4882a593Smuzhiyun break;
2498*4882a593Smuzhiyun case CHIP_CEDAR:
2499*4882a593Smuzhiyun chip_name = "CEDAR";
2500*4882a593Smuzhiyun rlc_chip_name = "CEDAR";
2501*4882a593Smuzhiyun smc_chip_name = "CEDAR";
2502*4882a593Smuzhiyun smc_req_size = ALIGN(CEDAR_SMC_UCODE_SIZE, 4);
2503*4882a593Smuzhiyun break;
2504*4882a593Smuzhiyun case CHIP_REDWOOD:
2505*4882a593Smuzhiyun chip_name = "REDWOOD";
2506*4882a593Smuzhiyun rlc_chip_name = "REDWOOD";
2507*4882a593Smuzhiyun smc_chip_name = "REDWOOD";
2508*4882a593Smuzhiyun smc_req_size = ALIGN(REDWOOD_SMC_UCODE_SIZE, 4);
2509*4882a593Smuzhiyun break;
2510*4882a593Smuzhiyun case CHIP_JUNIPER:
2511*4882a593Smuzhiyun chip_name = "JUNIPER";
2512*4882a593Smuzhiyun rlc_chip_name = "JUNIPER";
2513*4882a593Smuzhiyun smc_chip_name = "JUNIPER";
2514*4882a593Smuzhiyun smc_req_size = ALIGN(JUNIPER_SMC_UCODE_SIZE, 4);
2515*4882a593Smuzhiyun break;
2516*4882a593Smuzhiyun case CHIP_CYPRESS:
2517*4882a593Smuzhiyun case CHIP_HEMLOCK:
2518*4882a593Smuzhiyun chip_name = "CYPRESS";
2519*4882a593Smuzhiyun rlc_chip_name = "CYPRESS";
2520*4882a593Smuzhiyun smc_chip_name = "CYPRESS";
2521*4882a593Smuzhiyun smc_req_size = ALIGN(CYPRESS_SMC_UCODE_SIZE, 4);
2522*4882a593Smuzhiyun break;
2523*4882a593Smuzhiyun case CHIP_PALM:
2524*4882a593Smuzhiyun chip_name = "PALM";
2525*4882a593Smuzhiyun rlc_chip_name = "SUMO";
2526*4882a593Smuzhiyun break;
2527*4882a593Smuzhiyun case CHIP_SUMO:
2528*4882a593Smuzhiyun chip_name = "SUMO";
2529*4882a593Smuzhiyun rlc_chip_name = "SUMO";
2530*4882a593Smuzhiyun break;
2531*4882a593Smuzhiyun case CHIP_SUMO2:
2532*4882a593Smuzhiyun chip_name = "SUMO2";
2533*4882a593Smuzhiyun rlc_chip_name = "SUMO";
2534*4882a593Smuzhiyun break;
2535*4882a593Smuzhiyun default: BUG();
2536*4882a593Smuzhiyun }
2537*4882a593Smuzhiyun
2538*4882a593Smuzhiyun if (rdev->family >= CHIP_CEDAR) {
2539*4882a593Smuzhiyun pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
2540*4882a593Smuzhiyun me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
2541*4882a593Smuzhiyun rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
2542*4882a593Smuzhiyun } else if (rdev->family >= CHIP_RV770) {
2543*4882a593Smuzhiyun pfp_req_size = R700_PFP_UCODE_SIZE * 4;
2544*4882a593Smuzhiyun me_req_size = R700_PM4_UCODE_SIZE * 4;
2545*4882a593Smuzhiyun rlc_req_size = R700_RLC_UCODE_SIZE * 4;
2546*4882a593Smuzhiyun } else {
2547*4882a593Smuzhiyun pfp_req_size = R600_PFP_UCODE_SIZE * 4;
2548*4882a593Smuzhiyun me_req_size = R600_PM4_UCODE_SIZE * 12;
2549*4882a593Smuzhiyun rlc_req_size = R600_RLC_UCODE_SIZE * 4;
2550*4882a593Smuzhiyun }
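/* the request sizes are byte counts: ucode sizes are in dwords (hence * 4),
 * and the r6xx ME image packs three dwords per entry (hence * 12)
 */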
2551*4882a593Smuzhiyun
2552*4882a593Smuzhiyun DRM_INFO("Loading %s Microcode\n", chip_name);
2553*4882a593Smuzhiyun
2554*4882a593Smuzhiyun snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
2555*4882a593Smuzhiyun err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
2556*4882a593Smuzhiyun if (err)
2557*4882a593Smuzhiyun goto out;
2558*4882a593Smuzhiyun if (rdev->pfp_fw->size != pfp_req_size) {
2559*4882a593Smuzhiyun pr_err("r600_cp: Bogus length %zu in firmware \"%s\"\n",
2560*4882a593Smuzhiyun rdev->pfp_fw->size, fw_name);
2561*4882a593Smuzhiyun err = -EINVAL;
2562*4882a593Smuzhiyun goto out;
2563*4882a593Smuzhiyun }
2564*4882a593Smuzhiyun
2565*4882a593Smuzhiyun snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
2566*4882a593Smuzhiyun err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
2567*4882a593Smuzhiyun if (err)
2568*4882a593Smuzhiyun goto out;
2569*4882a593Smuzhiyun if (rdev->me_fw->size != me_req_size) {
2570*4882a593Smuzhiyun pr_err("r600_cp: Bogus length %zu in firmware \"%s\"\n",
2571*4882a593Smuzhiyun rdev->me_fw->size, fw_name);
2572*4882a593Smuzhiyun err = -EINVAL;
goto out;
2573*4882a593Smuzhiyun }
2574*4882a593Smuzhiyun
2575*4882a593Smuzhiyun snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
2576*4882a593Smuzhiyun err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
2577*4882a593Smuzhiyun if (err)
2578*4882a593Smuzhiyun goto out;
2579*4882a593Smuzhiyun if (rdev->rlc_fw->size != rlc_req_size) {
2580*4882a593Smuzhiyun pr_err("r600_rlc: Bogus length %zu in firmware \"%s\"\n",
2581*4882a593Smuzhiyun rdev->rlc_fw->size, fw_name);
2582*4882a593Smuzhiyun err = -EINVAL;
goto out;
2583*4882a593Smuzhiyun }
2584*4882a593Smuzhiyun
2585*4882a593Smuzhiyun if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
2586*4882a593Smuzhiyun snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
2587*4882a593Smuzhiyun err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
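/* SMC firmware is optional: a failed load is not fatal (err is cleared and
 * DPM simply won't be usable), only a size mismatch is
 */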
2588*4882a593Smuzhiyun if (err) {
2589*4882a593Smuzhiyun pr_err("smc: error loading firmware \"%s\"\n", fw_name);
2590*4882a593Smuzhiyun release_firmware(rdev->smc_fw);
2591*4882a593Smuzhiyun rdev->smc_fw = NULL;
2592*4882a593Smuzhiyun err = 0;
2593*4882a593Smuzhiyun } else if (rdev->smc_fw->size != smc_req_size) {
2594*4882a593Smuzhiyun pr_err("smc: Bogus length %zu in firmware \"%s\"\n",
2595*4882a593Smuzhiyun rdev->smc_fw->size, fw_name);
2596*4882a593Smuzhiyun err = -EINVAL;
2597*4882a593Smuzhiyun }
2598*4882a593Smuzhiyun }
2599*4882a593Smuzhiyun
2600*4882a593Smuzhiyun out:
2601*4882a593Smuzhiyun if (err) {
2602*4882a593Smuzhiyun if (err != -EINVAL)
2603*4882a593Smuzhiyun pr_err("r600_cp: Failed to load firmware \"%s\"\n",
2604*4882a593Smuzhiyun fw_name);
2605*4882a593Smuzhiyun release_firmware(rdev->pfp_fw);
2606*4882a593Smuzhiyun rdev->pfp_fw = NULL;
2607*4882a593Smuzhiyun release_firmware(rdev->me_fw);
2608*4882a593Smuzhiyun rdev->me_fw = NULL;
2609*4882a593Smuzhiyun release_firmware(rdev->rlc_fw);
2610*4882a593Smuzhiyun rdev->rlc_fw = NULL;
2611*4882a593Smuzhiyun release_firmware(rdev->smc_fw);
2612*4882a593Smuzhiyun rdev->smc_fw = NULL;
2613*4882a593Smuzhiyun }
2614*4882a593Smuzhiyun return err;
2615*4882a593Smuzhiyun }
2616*4882a593Smuzhiyun
2617*4882a593Smuzhiyun u32 r600_gfx_get_rptr(struct radeon_device *rdev,
2618*4882a593Smuzhiyun struct radeon_ring *ring)
2619*4882a593Smuzhiyun {
2620*4882a593Smuzhiyun u32 rptr;
2621*4882a593Smuzhiyun
2622*4882a593Smuzhiyun if (rdev->wb.enabled)
2623*4882a593Smuzhiyun rptr = rdev->wb.wb[ring->rptr_offs/4];
2624*4882a593Smuzhiyun else
2625*4882a593Smuzhiyun rptr = RREG32(R600_CP_RB_RPTR);
2626*4882a593Smuzhiyun
2627*4882a593Smuzhiyun return rptr;
2628*4882a593Smuzhiyun }
2629*4882a593Smuzhiyun
2630*4882a593Smuzhiyun u32 r600_gfx_get_wptr(struct radeon_device *rdev,
2631*4882a593Smuzhiyun struct radeon_ring *ring)
2632*4882a593Smuzhiyun {
2633*4882a593Smuzhiyun return RREG32(R600_CP_RB_WPTR);
2634*4882a593Smuzhiyun }
2635*4882a593Smuzhiyun
2636*4882a593Smuzhiyun void r600_gfx_set_wptr(struct radeon_device *rdev,
2637*4882a593Smuzhiyun struct radeon_ring *ring)
2638*4882a593Smuzhiyun {
2639*4882a593Smuzhiyun WREG32(R600_CP_RB_WPTR, ring->wptr);
2640*4882a593Smuzhiyun (void)RREG32(R600_CP_RB_WPTR);
2641*4882a593Smuzhiyun }
2642*4882a593Smuzhiyun
2643*4882a593Smuzhiyun static int r600_cp_load_microcode(struct radeon_device *rdev)
2644*4882a593Smuzhiyun {
2645*4882a593Smuzhiyun const __be32 *fw_data;
2646*4882a593Smuzhiyun int i;
2647*4882a593Smuzhiyun
2648*4882a593Smuzhiyun if (!rdev->me_fw || !rdev->pfp_fw)
2649*4882a593Smuzhiyun return -EINVAL;
2650*4882a593Smuzhiyun
2651*4882a593Smuzhiyun r600_cp_stop(rdev);
2652*4882a593Smuzhiyun
2653*4882a593Smuzhiyun WREG32(CP_RB_CNTL,
2654*4882a593Smuzhiyun #ifdef __BIG_ENDIAN
2655*4882a593Smuzhiyun BUF_SWAP_32BIT |
2656*4882a593Smuzhiyun #endif
2657*4882a593Smuzhiyun RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
2658*4882a593Smuzhiyun
2659*4882a593Smuzhiyun /* Reset cp */
2660*4882a593Smuzhiyun WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2661*4882a593Smuzhiyun RREG32(GRBM_SOFT_RESET);
2662*4882a593Smuzhiyun mdelay(15);
2663*4882a593Smuzhiyun WREG32(GRBM_SOFT_RESET, 0);
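/* the readback posts the reset write; the CP is held in reset for 15 ms before release */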
2664*4882a593Smuzhiyun
2665*4882a593Smuzhiyun WREG32(CP_ME_RAM_WADDR, 0);
2666*4882a593Smuzhiyun
2667*4882a593Smuzhiyun fw_data = (const __be32 *)rdev->me_fw->data;
2668*4882a593Smuzhiyun WREG32(CP_ME_RAM_WADDR, 0);
2669*4882a593Smuzhiyun for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++)
2670*4882a593Smuzhiyun WREG32(CP_ME_RAM_DATA,
2671*4882a593Smuzhiyun be32_to_cpup(fw_data++));
2672*4882a593Smuzhiyun
2673*4882a593Smuzhiyun fw_data = (const __be32 *)rdev->pfp_fw->data;
2674*4882a593Smuzhiyun WREG32(CP_PFP_UCODE_ADDR, 0);
2675*4882a593Smuzhiyun for (i = 0; i < R600_PFP_UCODE_SIZE; i++)
2676*4882a593Smuzhiyun WREG32(CP_PFP_UCODE_DATA,
2677*4882a593Smuzhiyun be32_to_cpup(fw_data++));
2678*4882a593Smuzhiyun
2679*4882a593Smuzhiyun WREG32(CP_PFP_UCODE_ADDR, 0);
2680*4882a593Smuzhiyun WREG32(CP_ME_RAM_WADDR, 0);
2681*4882a593Smuzhiyun WREG32(CP_ME_RAM_RADDR, 0);
2682*4882a593Smuzhiyun return 0;
2683*4882a593Smuzhiyun }
2684*4882a593Smuzhiyun
2685*4882a593Smuzhiyun int r600_cp_start(struct radeon_device *rdev)
2686*4882a593Smuzhiyun {
2687*4882a593Smuzhiyun struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2688*4882a593Smuzhiyun int r;
2689*4882a593Smuzhiyun uint32_t cp_me;
2690*4882a593Smuzhiyun
2691*4882a593Smuzhiyun r = radeon_ring_lock(rdev, ring, 7);
2692*4882a593Smuzhiyun if (r) {
2693*4882a593Smuzhiyun DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2694*4882a593Smuzhiyun return r;
2695*4882a593Smuzhiyun }
2696*4882a593Smuzhiyun radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
2697*4882a593Smuzhiyun radeon_ring_write(ring, 0x1);
2698*4882a593Smuzhiyun if (rdev->family >= CHIP_RV770) {
2699*4882a593Smuzhiyun radeon_ring_write(ring, 0x0);
2700*4882a593Smuzhiyun radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
2701*4882a593Smuzhiyun } else {
2702*4882a593Smuzhiyun radeon_ring_write(ring, 0x3);
2703*4882a593Smuzhiyun radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
2704*4882a593Smuzhiyun }
2705*4882a593Smuzhiyun radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2706*4882a593Smuzhiyun radeon_ring_write(ring, 0);
2707*4882a593Smuzhiyun radeon_ring_write(ring, 0);
2708*4882a593Smuzhiyun radeon_ring_unlock_commit(rdev, ring, false);
2709*4882a593Smuzhiyun
2710*4882a593Smuzhiyun cp_me = 0xff;
2711*4882a593Smuzhiyun WREG32(R_0086D8_CP_ME_CNTL, cp_me);
2712*4882a593Smuzhiyun return 0;
2713*4882a593Smuzhiyun }
2714*4882a593Smuzhiyun
2715*4882a593Smuzhiyun int r600_cp_resume(struct radeon_device *rdev)
2716*4882a593Smuzhiyun {
2717*4882a593Smuzhiyun struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2718*4882a593Smuzhiyun u32 tmp;
2719*4882a593Smuzhiyun u32 rb_bufsz;
2720*4882a593Smuzhiyun int r;
2721*4882a593Smuzhiyun
2722*4882a593Smuzhiyun /* Reset cp */
2723*4882a593Smuzhiyun WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2724*4882a593Smuzhiyun RREG32(GRBM_SOFT_RESET);
2725*4882a593Smuzhiyun mdelay(15);
2726*4882a593Smuzhiyun WREG32(GRBM_SOFT_RESET, 0);
2727*4882a593Smuzhiyun
2728*4882a593Smuzhiyun /* Set ring buffer size */
2729*4882a593Smuzhiyun rb_bufsz = order_base_2(ring->ring_size / 8);
2730*4882a593Smuzhiyun tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2731*4882a593Smuzhiyun #ifdef __BIG_ENDIAN
2732*4882a593Smuzhiyun tmp |= BUF_SWAP_32BIT;
2733*4882a593Smuzhiyun #endif
2734*4882a593Smuzhiyun WREG32(CP_RB_CNTL, tmp);
2735*4882a593Smuzhiyun WREG32(CP_SEM_WAIT_TIMER, 0x0);
2736*4882a593Smuzhiyun
2737*4882a593Smuzhiyun /* Set the write pointer delay */
2738*4882a593Smuzhiyun WREG32(CP_RB_WPTR_DELAY, 0);
2739*4882a593Smuzhiyun
2740*4882a593Smuzhiyun /* Initialize the ring buffer's read and write pointers */
2741*4882a593Smuzhiyun WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2742*4882a593Smuzhiyun WREG32(CP_RB_RPTR_WR, 0);
2743*4882a593Smuzhiyun ring->wptr = 0;
2744*4882a593Smuzhiyun WREG32(CP_RB_WPTR, ring->wptr);
2745*4882a593Smuzhiyun
2746*4882a593Smuzhiyun /* set the wb address whether it's enabled or not */
2747*4882a593Smuzhiyun WREG32(CP_RB_RPTR_ADDR,
2748*4882a593Smuzhiyun ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
2749*4882a593Smuzhiyun WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2750*4882a593Smuzhiyun WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
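/* the rptr writeback address is kept dword aligned; SCRATCH_ADDR is programmed in 256-byte units */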
2751*4882a593Smuzhiyun
2752*4882a593Smuzhiyun if (rdev->wb.enabled)
2753*4882a593Smuzhiyun WREG32(SCRATCH_UMSK, 0xff);
2754*4882a593Smuzhiyun else {
2755*4882a593Smuzhiyun tmp |= RB_NO_UPDATE;
2756*4882a593Smuzhiyun WREG32(SCRATCH_UMSK, 0);
2757*4882a593Smuzhiyun }
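/* without writeback the CP must not push rptr/scratch updates to memory,
 * so RB_NO_UPDATE stays set and all scratch writes are masked
 */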
2758*4882a593Smuzhiyun
2759*4882a593Smuzhiyun mdelay(1);
2760*4882a593Smuzhiyun WREG32(CP_RB_CNTL, tmp);
2761*4882a593Smuzhiyun
2762*4882a593Smuzhiyun WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
2763*4882a593Smuzhiyun WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2764*4882a593Smuzhiyun
2765*4882a593Smuzhiyun r600_cp_start(rdev);
2766*4882a593Smuzhiyun ring->ready = true;
2767*4882a593Smuzhiyun r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
2768*4882a593Smuzhiyun if (r) {
2769*4882a593Smuzhiyun ring->ready = false;
2770*4882a593Smuzhiyun return r;
2771*4882a593Smuzhiyun }
2772*4882a593Smuzhiyun
2773*4882a593Smuzhiyun if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
2774*4882a593Smuzhiyun radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
2775*4882a593Smuzhiyun
2776*4882a593Smuzhiyun return 0;
2777*4882a593Smuzhiyun }
2778*4882a593Smuzhiyun
2779*4882a593Smuzhiyun void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
2780*4882a593Smuzhiyun {
2781*4882a593Smuzhiyun u32 rb_bufsz;
2782*4882a593Smuzhiyun int r;
2783*4882a593Smuzhiyun
2784*4882a593Smuzhiyun /* Align ring size */
2785*4882a593Smuzhiyun rb_bufsz = order_base_2(ring_size / 8);
2786*4882a593Smuzhiyun ring_size = (1 << (rb_bufsz + 1)) * 4;
2787*4882a593Smuzhiyun ring->ring_size = ring_size;
2788*4882a593Smuzhiyun ring->align_mask = 16 - 1;
2789*4882a593Smuzhiyun
2790*4882a593Smuzhiyun if (radeon_ring_supports_scratch_reg(rdev, ring)) {
2791*4882a593Smuzhiyun r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
2792*4882a593Smuzhiyun if (r) {
2793*4882a593Smuzhiyun DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
2794*4882a593Smuzhiyun ring->rptr_save_reg = 0;
2795*4882a593Smuzhiyun }
2796*4882a593Smuzhiyun }
2797*4882a593Smuzhiyun }
2798*4882a593Smuzhiyun
2799*4882a593Smuzhiyun void r600_cp_fini(struct radeon_device *rdev)
2800*4882a593Smuzhiyun {
2801*4882a593Smuzhiyun struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2802*4882a593Smuzhiyun r600_cp_stop(rdev);
2803*4882a593Smuzhiyun radeon_ring_fini(rdev, ring);
2804*4882a593Smuzhiyun radeon_scratch_free(rdev, ring->rptr_save_reg);
2805*4882a593Smuzhiyun }
2806*4882a593Smuzhiyun
2807*4882a593Smuzhiyun /*
2808*4882a593Smuzhiyun * GPU scratch registers helpers function.
2809*4882a593Smuzhiyun */
2810*4882a593Smuzhiyun void r600_scratch_init(struct radeon_device *rdev)
2811*4882a593Smuzhiyun {
2812*4882a593Smuzhiyun int i;
2813*4882a593Smuzhiyun
2814*4882a593Smuzhiyun rdev->scratch.num_reg = 7;
2815*4882a593Smuzhiyun rdev->scratch.reg_base = SCRATCH_REG0;
2816*4882a593Smuzhiyun for (i = 0; i < rdev->scratch.num_reg; i++) {
2817*4882a593Smuzhiyun rdev->scratch.free[i] = true;
2818*4882a593Smuzhiyun rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
2819*4882a593Smuzhiyun }
2820*4882a593Smuzhiyun }
2821*4882a593Smuzhiyun
2822*4882a593Smuzhiyun int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
2823*4882a593Smuzhiyun {
2824*4882a593Smuzhiyun uint32_t scratch;
2825*4882a593Smuzhiyun uint32_t tmp = 0;
2826*4882a593Smuzhiyun unsigned i;
2827*4882a593Smuzhiyun int r;
2828*4882a593Smuzhiyun
2829*4882a593Smuzhiyun r = radeon_scratch_get(rdev, &scratch);
2830*4882a593Smuzhiyun if (r) {
2831*4882a593Smuzhiyun DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2832*4882a593Smuzhiyun return r;
2833*4882a593Smuzhiyun }
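/* seed the scratch register, then let the CP overwrite it with 0xDEADBEEF via SET_CONFIG_REG */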
2834*4882a593Smuzhiyun WREG32(scratch, 0xCAFEDEAD);
2835*4882a593Smuzhiyun r = radeon_ring_lock(rdev, ring, 3);
2836*4882a593Smuzhiyun if (r) {
2837*4882a593Smuzhiyun DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
2838*4882a593Smuzhiyun radeon_scratch_free(rdev, scratch);
2839*4882a593Smuzhiyun return r;
2840*4882a593Smuzhiyun }
2841*4882a593Smuzhiyun radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2842*4882a593Smuzhiyun radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2843*4882a593Smuzhiyun radeon_ring_write(ring, 0xDEADBEEF);
2844*4882a593Smuzhiyun radeon_ring_unlock_commit(rdev, ring, false);
2845*4882a593Smuzhiyun for (i = 0; i < rdev->usec_timeout; i++) {
2846*4882a593Smuzhiyun tmp = RREG32(scratch);
2847*4882a593Smuzhiyun if (tmp == 0xDEADBEEF)
2848*4882a593Smuzhiyun break;
2849*4882a593Smuzhiyun udelay(1);
2850*4882a593Smuzhiyun }
2851*4882a593Smuzhiyun if (i < rdev->usec_timeout) {
2852*4882a593Smuzhiyun DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
2853*4882a593Smuzhiyun } else {
2854*4882a593Smuzhiyun DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
2855*4882a593Smuzhiyun ring->idx, scratch, tmp);
2856*4882a593Smuzhiyun r = -EINVAL;
2857*4882a593Smuzhiyun }
2858*4882a593Smuzhiyun radeon_scratch_free(rdev, scratch);
2859*4882a593Smuzhiyun return r;
2860*4882a593Smuzhiyun }
2861*4882a593Smuzhiyun
2862*4882a593Smuzhiyun /*
2863*4882a593Smuzhiyun * CP fences/semaphores
2864*4882a593Smuzhiyun */
2865*4882a593Smuzhiyun
2866*4882a593Smuzhiyun void r600_fence_ring_emit(struct radeon_device *rdev,
2867*4882a593Smuzhiyun struct radeon_fence *fence)
2868*4882a593Smuzhiyun {
2869*4882a593Smuzhiyun struct radeon_ring *ring = &rdev->ring[fence->ring];
2870*4882a593Smuzhiyun u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
2871*4882a593Smuzhiyun PACKET3_SH_ACTION_ENA;
2872*4882a593Smuzhiyun
2873*4882a593Smuzhiyun if (rdev->family >= CHIP_RV770)
2874*4882a593Smuzhiyun cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
2875*4882a593Smuzhiyun
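/* two fence paths: an EVENT_WRITE_EOP with timestamp writeback when events
 * are usable, otherwise a scratch register write plus a manually raised
 * CP interrupt
 */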
2876*4882a593Smuzhiyun if (rdev->wb.use_event) {
2877*4882a593Smuzhiyun u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
2878*4882a593Smuzhiyun /* flush read cache over gart */
2879*4882a593Smuzhiyun radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2880*4882a593Smuzhiyun radeon_ring_write(ring, cp_coher_cntl);
2881*4882a593Smuzhiyun radeon_ring_write(ring, 0xFFFFFFFF);
2882*4882a593Smuzhiyun radeon_ring_write(ring, 0);
2883*4882a593Smuzhiyun radeon_ring_write(ring, 10); /* poll interval */
2884*4882a593Smuzhiyun /* EVENT_WRITE_EOP - flush caches, send int */
2885*4882a593Smuzhiyun radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2886*4882a593Smuzhiyun radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
2887*4882a593Smuzhiyun radeon_ring_write(ring, lower_32_bits(addr));
2888*4882a593Smuzhiyun radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
2889*4882a593Smuzhiyun radeon_ring_write(ring, fence->seq);
2890*4882a593Smuzhiyun radeon_ring_write(ring, 0);
2891*4882a593Smuzhiyun } else {
2892*4882a593Smuzhiyun /* flush read cache over gart */
2893*4882a593Smuzhiyun radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2894*4882a593Smuzhiyun radeon_ring_write(ring, cp_coher_cntl);
2895*4882a593Smuzhiyun radeon_ring_write(ring, 0xFFFFFFFF);
2896*4882a593Smuzhiyun radeon_ring_write(ring, 0);
2897*4882a593Smuzhiyun radeon_ring_write(ring, 10); /* poll interval */
2898*4882a593Smuzhiyun radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2899*4882a593Smuzhiyun radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
2900*4882a593Smuzhiyun /* wait for 3D idle clean */
2901*4882a593Smuzhiyun radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2902*4882a593Smuzhiyun radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2903*4882a593Smuzhiyun radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
2904*4882a593Smuzhiyun /* Emit fence sequence & fire IRQ */
2905*4882a593Smuzhiyun radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2906*4882a593Smuzhiyun radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2907*4882a593Smuzhiyun radeon_ring_write(ring, fence->seq);
2908*4882a593Smuzhiyun /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2909*4882a593Smuzhiyun radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
2910*4882a593Smuzhiyun radeon_ring_write(ring, RB_INT_STAT);
2911*4882a593Smuzhiyun }
2912*4882a593Smuzhiyun }
2913*4882a593Smuzhiyun
2914*4882a593Smuzhiyun /**
2915*4882a593Smuzhiyun * r600_semaphore_ring_emit - emit a semaphore on the CP ring
2916*4882a593Smuzhiyun *
2917*4882a593Smuzhiyun * @rdev: radeon_device pointer
2918*4882a593Smuzhiyun * @ring: radeon ring buffer object
2919*4882a593Smuzhiyun * @semaphore: radeon semaphore object
2920*4882a593Smuzhiyun * @emit_wait: Is this a semaphore wait?
2921*4882a593Smuzhiyun *
2922*4882a593Smuzhiyun * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
2923*4882a593Smuzhiyun * from running ahead of semaphore waits.
2924*4882a593Smuzhiyun */
2925*4882a593Smuzhiyun bool r600_semaphore_ring_emit(struct radeon_device *rdev,
2926*4882a593Smuzhiyun struct radeon_ring *ring,
2927*4882a593Smuzhiyun struct radeon_semaphore *semaphore,
2928*4882a593Smuzhiyun bool emit_wait)
2929*4882a593Smuzhiyun {
2930*4882a593Smuzhiyun uint64_t addr = semaphore->gpu_addr;
2931*4882a593Smuzhiyun unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
2932*4882a593Smuzhiyun
2933*4882a593Smuzhiyun if (rdev->family < CHIP_CAYMAN)
2934*4882a593Smuzhiyun sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
2935*4882a593Smuzhiyun
2936*4882a593Smuzhiyun radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
2937*4882a593Smuzhiyun radeon_ring_write(ring, lower_32_bits(addr));
2938*4882a593Smuzhiyun radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
2939*4882a593Smuzhiyun
2940*4882a593Smuzhiyun /* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */
2941*4882a593Smuzhiyun if (emit_wait && (rdev->family >= CHIP_CEDAR)) {
2942*4882a593Smuzhiyun /* Prevent the PFP from running ahead of the semaphore wait */
2943*4882a593Smuzhiyun radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
2944*4882a593Smuzhiyun radeon_ring_write(ring, 0x0);
2945*4882a593Smuzhiyun }
2946*4882a593Smuzhiyun
2947*4882a593Smuzhiyun return true;
2948*4882a593Smuzhiyun }
2949*4882a593Smuzhiyun
2950*4882a593Smuzhiyun /**
2951*4882a593Smuzhiyun * r600_copy_cpdma - copy pages using the CP DMA engine
2952*4882a593Smuzhiyun *
2953*4882a593Smuzhiyun * @rdev: radeon_device pointer
2954*4882a593Smuzhiyun * @src_offset: src GPU address
2955*4882a593Smuzhiyun * @dst_offset: dst GPU address
2956*4882a593Smuzhiyun * @num_gpu_pages: number of GPU pages to xfer
2957*4882a593Smuzhiyun * @resv: DMA reservation object to sync the copy with
2958*4882a593Smuzhiyun *
2959*4882a593Smuzhiyun * Copy GPU pages using the CP DMA engine (r6xx+).
2960*4882a593Smuzhiyun * Used by the radeon ttm implementation to move pages if
2961*4882a593Smuzhiyun * registered as the asic copy callback.
2962*4882a593Smuzhiyun */
2963*4882a593Smuzhiyun struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
2964*4882a593Smuzhiyun uint64_t src_offset, uint64_t dst_offset,
2965*4882a593Smuzhiyun unsigned num_gpu_pages,
2966*4882a593Smuzhiyun struct dma_resv *resv)
2967*4882a593Smuzhiyun {
2968*4882a593Smuzhiyun struct radeon_fence *fence;
2969*4882a593Smuzhiyun struct radeon_sync sync;
2970*4882a593Smuzhiyun int ring_index = rdev->asic->copy.blit_ring_index;
2971*4882a593Smuzhiyun struct radeon_ring *ring = &rdev->ring[ring_index];
2972*4882a593Smuzhiyun u32 size_in_bytes, cur_size_in_bytes, tmp;
2973*4882a593Smuzhiyun int i, num_loops;
2974*4882a593Smuzhiyun int r = 0;
2975*4882a593Smuzhiyun
2976*4882a593Smuzhiyun radeon_sync_create(&sync);
2977*4882a593Smuzhiyun
2978*4882a593Smuzhiyun size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
2979*4882a593Smuzhiyun num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
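/* each CP DMA packet moves at most 0x1fffff bytes; budget 6 dwords per packet plus 24 dwords of sync/wait overhead */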
2980*4882a593Smuzhiyun r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
2981*4882a593Smuzhiyun if (r) {
2982*4882a593Smuzhiyun DRM_ERROR("radeon: moving bo (%d).\n", r);
2983*4882a593Smuzhiyun radeon_sync_free(rdev, &sync, NULL);
2984*4882a593Smuzhiyun return ERR_PTR(r);
2985*4882a593Smuzhiyun }
2986*4882a593Smuzhiyun
2987*4882a593Smuzhiyun radeon_sync_resv(rdev, &sync, resv, false);
2988*4882a593Smuzhiyun radeon_sync_rings(rdev, &sync, ring->idx);
2989*4882a593Smuzhiyun
2990*4882a593Smuzhiyun radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2991*4882a593Smuzhiyun radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2992*4882a593Smuzhiyun radeon_ring_write(ring, WAIT_3D_IDLE_bit);
2993*4882a593Smuzhiyun for (i = 0; i < num_loops; i++) {
2994*4882a593Smuzhiyun cur_size_in_bytes = size_in_bytes;
2995*4882a593Smuzhiyun if (cur_size_in_bytes > 0x1fffff)
2996*4882a593Smuzhiyun cur_size_in_bytes = 0x1fffff;
2997*4882a593Smuzhiyun size_in_bytes -= cur_size_in_bytes;
2998*4882a593Smuzhiyun tmp = upper_32_bits(src_offset) & 0xff;
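/* tag the final chunk with CP_SYNC so the CP waits for the DMA to complete before continuing */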
2999*4882a593Smuzhiyun if (size_in_bytes == 0)
3000*4882a593Smuzhiyun tmp |= PACKET3_CP_DMA_CP_SYNC;
3001*4882a593Smuzhiyun radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
3002*4882a593Smuzhiyun radeon_ring_write(ring, lower_32_bits(src_offset));
3003*4882a593Smuzhiyun radeon_ring_write(ring, tmp);
3004*4882a593Smuzhiyun radeon_ring_write(ring, lower_32_bits(dst_offset));
3005*4882a593Smuzhiyun radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
3006*4882a593Smuzhiyun radeon_ring_write(ring, cur_size_in_bytes);
3007*4882a593Smuzhiyun src_offset += cur_size_in_bytes;
3008*4882a593Smuzhiyun dst_offset += cur_size_in_bytes;
3009*4882a593Smuzhiyun }
3010*4882a593Smuzhiyun radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3011*4882a593Smuzhiyun radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
3012*4882a593Smuzhiyun radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
3013*4882a593Smuzhiyun
3014*4882a593Smuzhiyun r = radeon_fence_emit(rdev, &fence, ring->idx);
3015*4882a593Smuzhiyun if (r) {
3016*4882a593Smuzhiyun radeon_ring_unlock_undo(rdev, ring);
3017*4882a593Smuzhiyun radeon_sync_free(rdev, &sync, NULL);
3018*4882a593Smuzhiyun return ERR_PTR(r);
3019*4882a593Smuzhiyun }
3020*4882a593Smuzhiyun
3021*4882a593Smuzhiyun radeon_ring_unlock_commit(rdev, ring, false);
3022*4882a593Smuzhiyun radeon_sync_free(rdev, &sync, fence);
3023*4882a593Smuzhiyun
3024*4882a593Smuzhiyun return fence;
3025*4882a593Smuzhiyun }
3026*4882a593Smuzhiyun
3027*4882a593Smuzhiyun int r600_set_surface_reg(struct radeon_device *rdev, int reg,
3028*4882a593Smuzhiyun uint32_t tiling_flags, uint32_t pitch,
3029*4882a593Smuzhiyun uint32_t offset, uint32_t obj_size)
3030*4882a593Smuzhiyun {
3031*4882a593Smuzhiyun /* FIXME: implement */
3032*4882a593Smuzhiyun return 0;
3033*4882a593Smuzhiyun }
3034*4882a593Smuzhiyun
3035*4882a593Smuzhiyun void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
3036*4882a593Smuzhiyun {
3037*4882a593Smuzhiyun /* FIXME: implement */
3038*4882a593Smuzhiyun }
3039*4882a593Smuzhiyun
3040*4882a593Smuzhiyun static void r600_uvd_init(struct radeon_device *rdev)
3041*4882a593Smuzhiyun {
3042*4882a593Smuzhiyun int r;
3043*4882a593Smuzhiyun
3044*4882a593Smuzhiyun if (!rdev->has_uvd)
3045*4882a593Smuzhiyun return;
3046*4882a593Smuzhiyun
3047*4882a593Smuzhiyun r = radeon_uvd_init(rdev);
3048*4882a593Smuzhiyun if (r) {
3049*4882a593Smuzhiyun dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
3050*4882a593Smuzhiyun /*
3051*4882a593Smuzhiyun * At this point rdev->uvd.vcpu_bo is NULL, which makes
3052*4882a593Smuzhiyun * uvd_v1_0_resume() fail early so that nothing happens
3053*4882a593Smuzhiyun * there. It is pointless to go through that code, so
3054*4882a593Smuzhiyun * disable UVD here instead.
3055*4882a593Smuzhiyun */
3056*4882a593Smuzhiyun rdev->has_uvd = false;
3057*4882a593Smuzhiyun return;
3058*4882a593Smuzhiyun }
3059*4882a593Smuzhiyun rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
3060*4882a593Smuzhiyun r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
3061*4882a593Smuzhiyun }
3062*4882a593Smuzhiyun
3063*4882a593Smuzhiyun static void r600_uvd_start(struct radeon_device *rdev)
3064*4882a593Smuzhiyun {
3065*4882a593Smuzhiyun int r;
3066*4882a593Smuzhiyun
3067*4882a593Smuzhiyun if (!rdev->has_uvd)
3068*4882a593Smuzhiyun return;
3069*4882a593Smuzhiyun
3070*4882a593Smuzhiyun r = uvd_v1_0_resume(rdev);
3071*4882a593Smuzhiyun if (r) {
3072*4882a593Smuzhiyun dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
3073*4882a593Smuzhiyun goto error;
3074*4882a593Smuzhiyun }
3075*4882a593Smuzhiyun r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
3076*4882a593Smuzhiyun if (r) {
3077*4882a593Smuzhiyun dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
3078*4882a593Smuzhiyun goto error;
3079*4882a593Smuzhiyun }
3080*4882a593Smuzhiyun return;
3081*4882a593Smuzhiyun
3082*4882a593Smuzhiyun error:
3083*4882a593Smuzhiyun rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
3084*4882a593Smuzhiyun }
3085*4882a593Smuzhiyun
3086*4882a593Smuzhiyun static void r600_uvd_resume(struct radeon_device *rdev)
3087*4882a593Smuzhiyun {
3088*4882a593Smuzhiyun struct radeon_ring *ring;
3089*4882a593Smuzhiyun int r;
3090*4882a593Smuzhiyun
3091*4882a593Smuzhiyun if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
3092*4882a593Smuzhiyun return;
3093*4882a593Smuzhiyun
3094*4882a593Smuzhiyun ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
3095*4882a593Smuzhiyun r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
3096*4882a593Smuzhiyun if (r) {
3097*4882a593Smuzhiyun dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
3098*4882a593Smuzhiyun return;
3099*4882a593Smuzhiyun }
3100*4882a593Smuzhiyun r = uvd_v1_0_init(rdev);
3101*4882a593Smuzhiyun if (r) {
3102*4882a593Smuzhiyun dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
3103*4882a593Smuzhiyun return;
3104*4882a593Smuzhiyun }
3105*4882a593Smuzhiyun }
3106*4882a593Smuzhiyun
3107*4882a593Smuzhiyun static int r600_startup(struct radeon_device *rdev)
3108*4882a593Smuzhiyun {
3109*4882a593Smuzhiyun struct radeon_ring *ring;
3110*4882a593Smuzhiyun int r;
3111*4882a593Smuzhiyun
3112*4882a593Smuzhiyun /* enable pcie gen2 link */
3113*4882a593Smuzhiyun r600_pcie_gen2_enable(rdev);
3114*4882a593Smuzhiyun
3115*4882a593Smuzhiyun /* scratch needs to be initialized before MC */
3116*4882a593Smuzhiyun r = r600_vram_scratch_init(rdev);
3117*4882a593Smuzhiyun if (r)
3118*4882a593Smuzhiyun return r;
3119*4882a593Smuzhiyun
3120*4882a593Smuzhiyun r600_mc_program(rdev);
3121*4882a593Smuzhiyun
3122*4882a593Smuzhiyun if (rdev->flags & RADEON_IS_AGP) {
3123*4882a593Smuzhiyun r600_agp_enable(rdev);
3124*4882a593Smuzhiyun } else {
3125*4882a593Smuzhiyun r = r600_pcie_gart_enable(rdev);
3126*4882a593Smuzhiyun if (r)
3127*4882a593Smuzhiyun return r;
3128*4882a593Smuzhiyun }
3129*4882a593Smuzhiyun r600_gpu_init(rdev);
3130*4882a593Smuzhiyun
3131*4882a593Smuzhiyun /* allocate wb buffer */
3132*4882a593Smuzhiyun r = radeon_wb_init(rdev);
3133*4882a593Smuzhiyun if (r)
3134*4882a593Smuzhiyun return r;
3135*4882a593Smuzhiyun
3136*4882a593Smuzhiyun r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
3137*4882a593Smuzhiyun if (r) {
3138*4882a593Smuzhiyun dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
3139*4882a593Smuzhiyun return r;
3140*4882a593Smuzhiyun }
3141*4882a593Smuzhiyun
3142*4882a593Smuzhiyun r600_uvd_start(rdev);
3143*4882a593Smuzhiyun
3144*4882a593Smuzhiyun /* Enable IRQ */
3145*4882a593Smuzhiyun if (!rdev->irq.installed) {
3146*4882a593Smuzhiyun r = radeon_irq_kms_init(rdev);
3147*4882a593Smuzhiyun if (r)
3148*4882a593Smuzhiyun return r;
3149*4882a593Smuzhiyun }
3150*4882a593Smuzhiyun
3151*4882a593Smuzhiyun r = r600_irq_init(rdev);
3152*4882a593Smuzhiyun if (r) {
3153*4882a593Smuzhiyun DRM_ERROR("radeon: IH init failed (%d).\n", r);
3154*4882a593Smuzhiyun radeon_irq_kms_fini(rdev);
3155*4882a593Smuzhiyun return r;
3156*4882a593Smuzhiyun }
3157*4882a593Smuzhiyun r600_irq_set(rdev);
3158*4882a593Smuzhiyun
3159*4882a593Smuzhiyun ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3160*4882a593Smuzhiyun r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
3161*4882a593Smuzhiyun RADEON_CP_PACKET2);
3162*4882a593Smuzhiyun if (r)
3163*4882a593Smuzhiyun return r;
3164*4882a593Smuzhiyun
3165*4882a593Smuzhiyun r = r600_cp_load_microcode(rdev);
3166*4882a593Smuzhiyun if (r)
3167*4882a593Smuzhiyun return r;
3168*4882a593Smuzhiyun r = r600_cp_resume(rdev);
3169*4882a593Smuzhiyun if (r)
3170*4882a593Smuzhiyun return r;
3171*4882a593Smuzhiyun
3172*4882a593Smuzhiyun r600_uvd_resume(rdev);
3173*4882a593Smuzhiyun
3174*4882a593Smuzhiyun r = radeon_ib_pool_init(rdev);
3175*4882a593Smuzhiyun if (r) {
3176*4882a593Smuzhiyun dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
3177*4882a593Smuzhiyun return r;
3178*4882a593Smuzhiyun }
3179*4882a593Smuzhiyun
3180*4882a593Smuzhiyun r = radeon_audio_init(rdev);
3181*4882a593Smuzhiyun if (r) {
3182*4882a593Smuzhiyun DRM_ERROR("radeon: audio init failed\n");
3183*4882a593Smuzhiyun return r;
3184*4882a593Smuzhiyun }
3185*4882a593Smuzhiyun
3186*4882a593Smuzhiyun return 0;
3187*4882a593Smuzhiyun }
3188*4882a593Smuzhiyun
3189*4882a593Smuzhiyun void r600_vga_set_state(struct radeon_device *rdev, bool state)
3190*4882a593Smuzhiyun {
3191*4882a593Smuzhiyun uint32_t temp;
3192*4882a593Smuzhiyun
3193*4882a593Smuzhiyun temp = RREG32(CONFIG_CNTL);
3194*4882a593Smuzhiyun if (!state) {
3195*4882a593Smuzhiyun temp &= ~(1<<0);
3196*4882a593Smuzhiyun temp |= (1<<1);
3197*4882a593Smuzhiyun } else {
3198*4882a593Smuzhiyun temp &= ~(1<<1);
3199*4882a593Smuzhiyun }
3200*4882a593Smuzhiyun WREG32(CONFIG_CNTL, temp);
3201*4882a593Smuzhiyun }
3202*4882a593Smuzhiyun
3203*4882a593Smuzhiyun int r600_resume(struct radeon_device *rdev)
3204*4882a593Smuzhiyun {
3205*4882a593Smuzhiyun int r;
3206*4882a593Smuzhiyun
3207*4882a593Smuzhiyun /* Do not reset the GPU before posting; on r600 hw, unlike r500 hw,
3208*4882a593Smuzhiyun * posting performs the tasks needed to bring the GPU back into good
3209*4882a593Smuzhiyun * shape.
3210*4882a593Smuzhiyun */
3211*4882a593Smuzhiyun /* post card */
3212*4882a593Smuzhiyun atom_asic_init(rdev->mode_info.atom_context);
3213*4882a593Smuzhiyun
3214*4882a593Smuzhiyun if (rdev->pm.pm_method == PM_METHOD_DPM)
3215*4882a593Smuzhiyun radeon_pm_resume(rdev);
3216*4882a593Smuzhiyun
3217*4882a593Smuzhiyun rdev->accel_working = true;
3218*4882a593Smuzhiyun r = r600_startup(rdev);
3219*4882a593Smuzhiyun if (r) {
3220*4882a593Smuzhiyun DRM_ERROR("r600 startup failed on resume\n");
3221*4882a593Smuzhiyun rdev->accel_working = false;
3222*4882a593Smuzhiyun return r;
3223*4882a593Smuzhiyun }
3224*4882a593Smuzhiyun
3225*4882a593Smuzhiyun return r;
3226*4882a593Smuzhiyun }
3227*4882a593Smuzhiyun
3228*4882a593Smuzhiyun int r600_suspend(struct radeon_device *rdev)
3229*4882a593Smuzhiyun {
3230*4882a593Smuzhiyun radeon_pm_suspend(rdev);
3231*4882a593Smuzhiyun radeon_audio_fini(rdev);
3232*4882a593Smuzhiyun r600_cp_stop(rdev);
3233*4882a593Smuzhiyun if (rdev->has_uvd) {
3234*4882a593Smuzhiyun uvd_v1_0_fini(rdev);
3235*4882a593Smuzhiyun radeon_uvd_suspend(rdev);
3236*4882a593Smuzhiyun }
3237*4882a593Smuzhiyun r600_irq_suspend(rdev);
3238*4882a593Smuzhiyun radeon_wb_disable(rdev);
3239*4882a593Smuzhiyun r600_pcie_gart_disable(rdev);
3240*4882a593Smuzhiyun
3241*4882a593Smuzhiyun return 0;
3242*4882a593Smuzhiyun }
3243*4882a593Smuzhiyun
3244*4882a593Smuzhiyun /* The plan is to move initialization into this function and use
3245*4882a593Smuzhiyun * helper functions so that radeon_device_init does little more
3246*4882a593Smuzhiyun * than call asic-specific functions. This should also allow the
3247*4882a593Smuzhiyun * removal of a number of callback functions, such as vram_info.
3249*4882a593Smuzhiyun */
3250*4882a593Smuzhiyun int r600_init(struct radeon_device *rdev)
3251*4882a593Smuzhiyun {
3252*4882a593Smuzhiyun int r;
3253*4882a593Smuzhiyun
3254*4882a593Smuzhiyun if (r600_debugfs_mc_info_init(rdev)) {
3255*4882a593Smuzhiyun DRM_ERROR("Failed to register debugfs file for mc!\n");
3256*4882a593Smuzhiyun }
3257*4882a593Smuzhiyun /* Read BIOS */
3258*4882a593Smuzhiyun if (!radeon_get_bios(rdev)) {
3259*4882a593Smuzhiyun if (ASIC_IS_AVIVO(rdev))
3260*4882a593Smuzhiyun return -EINVAL;
3261*4882a593Smuzhiyun }
3262*4882a593Smuzhiyun /* Must be an ATOMBIOS */
3263*4882a593Smuzhiyun if (!rdev->is_atom_bios) {
3264*4882a593Smuzhiyun dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
3265*4882a593Smuzhiyun return -EINVAL;
3266*4882a593Smuzhiyun }
3267*4882a593Smuzhiyun r = radeon_atombios_init(rdev);
3268*4882a593Smuzhiyun if (r)
3269*4882a593Smuzhiyun return r;
3270*4882a593Smuzhiyun /* Post card if necessary */
3271*4882a593Smuzhiyun if (!radeon_card_posted(rdev)) {
3272*4882a593Smuzhiyun if (!rdev->bios) {
3273*4882a593Smuzhiyun dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
3274*4882a593Smuzhiyun return -EINVAL;
3275*4882a593Smuzhiyun }
3276*4882a593Smuzhiyun DRM_INFO("GPU not posted. posting now...\n");
3277*4882a593Smuzhiyun atom_asic_init(rdev->mode_info.atom_context);
3278*4882a593Smuzhiyun }
3279*4882a593Smuzhiyun /* Initialize scratch registers */
3280*4882a593Smuzhiyun r600_scratch_init(rdev);
3281*4882a593Smuzhiyun /* Initialize surface registers */
3282*4882a593Smuzhiyun radeon_surface_init(rdev);
3283*4882a593Smuzhiyun /* Initialize clocks */
3284*4882a593Smuzhiyun radeon_get_clock_info(rdev->ddev);
3285*4882a593Smuzhiyun /* Fence driver */
3286*4882a593Smuzhiyun r = radeon_fence_driver_init(rdev);
3287*4882a593Smuzhiyun if (r)
3288*4882a593Smuzhiyun return r;
3289*4882a593Smuzhiyun if (rdev->flags & RADEON_IS_AGP) {
3290*4882a593Smuzhiyun r = radeon_agp_init(rdev);
3291*4882a593Smuzhiyun if (r)
3292*4882a593Smuzhiyun radeon_agp_disable(rdev);
3293*4882a593Smuzhiyun }
3294*4882a593Smuzhiyun r = r600_mc_init(rdev);
3295*4882a593Smuzhiyun if (r)
3296*4882a593Smuzhiyun return r;
3297*4882a593Smuzhiyun /* Memory manager */
3298*4882a593Smuzhiyun r = radeon_bo_init(rdev);
3299*4882a593Smuzhiyun if (r)
3300*4882a593Smuzhiyun return r;
3301*4882a593Smuzhiyun
3302*4882a593Smuzhiyun if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
3303*4882a593Smuzhiyun r = r600_init_microcode(rdev);
3304*4882a593Smuzhiyun if (r) {
3305*4882a593Smuzhiyun DRM_ERROR("Failed to load firmware!\n");
3306*4882a593Smuzhiyun return r;
3307*4882a593Smuzhiyun }
3308*4882a593Smuzhiyun }
3309*4882a593Smuzhiyun
3310*4882a593Smuzhiyun /* Initialize power management */
3311*4882a593Smuzhiyun radeon_pm_init(rdev);
3312*4882a593Smuzhiyun
3313*4882a593Smuzhiyun rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
3314*4882a593Smuzhiyun r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
3315*4882a593Smuzhiyun
3316*4882a593Smuzhiyun r600_uvd_init(rdev);
3317*4882a593Smuzhiyun
3318*4882a593Smuzhiyun rdev->ih.ring_obj = NULL;
3319*4882a593Smuzhiyun r600_ih_ring_init(rdev, 64 * 1024);
3320*4882a593Smuzhiyun
3321*4882a593Smuzhiyun r = r600_pcie_gart_init(rdev);
3322*4882a593Smuzhiyun if (r)
3323*4882a593Smuzhiyun return r;
3324*4882a593Smuzhiyun
3325*4882a593Smuzhiyun rdev->accel_working = true;
3326*4882a593Smuzhiyun r = r600_startup(rdev);
3327*4882a593Smuzhiyun if (r) {
3328*4882a593Smuzhiyun dev_err(rdev->dev, "disabling GPU acceleration\n");
3329*4882a593Smuzhiyun r600_cp_fini(rdev);
3330*4882a593Smuzhiyun r600_irq_fini(rdev);
3331*4882a593Smuzhiyun radeon_wb_fini(rdev);
3332*4882a593Smuzhiyun radeon_ib_pool_fini(rdev);
3333*4882a593Smuzhiyun radeon_irq_kms_fini(rdev);
3334*4882a593Smuzhiyun r600_pcie_gart_fini(rdev);
3335*4882a593Smuzhiyun rdev->accel_working = false;
3336*4882a593Smuzhiyun }
3337*4882a593Smuzhiyun
3338*4882a593Smuzhiyun return 0;
3339*4882a593Smuzhiyun }
3340*4882a593Smuzhiyun
void r600_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	radeon_audio_fini(rdev);
	r600_cp_fini(rdev);
	r600_irq_fini(rdev);
	if (rdev->has_uvd) {
		uvd_v1_0_fini(rdev);
		radeon_uvd_fini(rdev);
	}
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	r600_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

/*
 * CS stuff
 */
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 next_rptr;

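	/* Descriptive note: next_rptr is where the CP's read pointer will
	 * end up once it has consumed the packets queued below -- the 3 or
	 * 5 dwords written here plus the 4-dword INDIRECT_BUFFER packet.
	 * It is stashed in the rptr save register or in the writeback slot
	 * so ring progress can be inspected later (e.g. by lockup checks).
	 */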
	if (ring->rptr_save_reg) {
		next_rptr = ring->wptr + 3 + 4;
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((ring->rptr_save_reg -
					 PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
		radeon_ring_write(ring, next_rptr);
	} else if (rdev->wb.enabled) {
		next_rptr = ring->wptr + 5 + 4;
		radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
		radeon_ring_write(ring, next_rptr);
		radeon_ring_write(ring, 0);
	}

	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw);
}

int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		goto free_scratch;
	}
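	/* The test IB is a single SET_CONFIG_REG write: the scratch register
	 * was seeded with 0xCAFEDEAD above, the GPU overwrites it with
	 * 0xDEADBEEF, and the loop below polls until the new value lands.
	 */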
	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;
	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		goto free_ib;
	}
	r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
		RADEON_USEC_IB_TEST_TIMEOUT));
	if (r < 0) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		goto free_ib;
	} else if (r == 0) {
		DRM_ERROR("radeon: fence wait timed out.\n");
		r = -ETIMEDOUT;
		goto free_ib;
	}
	r = 0;
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
free_ib:
	radeon_ib_free(rdev, &ib);
free_scratch:
	radeon_scratch_free(rdev, scratch);
	return r;
}

/*
 * Interrupts
 *
 * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works much
 * like the CP ring buffer, but in reverse: rather than the CPU writing
 * to the ring and the GPU consuming, the GPU writes to the ring and the
 * host consumes.  As the host irq handler processes interrupts, it
 * increments the rptr.  When the rptr catches up with the wptr, all the
 * current interrupts have been processed.
 */
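
/* A minimal sketch of the consume loop described above (illustrative
 * only; locking and acking omitted, handle_vector() is a hypothetical
 * helper).  Each IH vector is 16 bytes, hence the stride:
 *
 *	u32 wptr = r600_get_ih_wptr(rdev);
 *	u32 rptr = rdev->ih.rptr;
 *
 *	while (rptr != wptr) {
 *		handle_vector(rdev, rptr);
 *		rptr = (rptr + 16) & rdev->ih.ptr_mask;
 *	}
 *	WREG32(IH_RB_RPTR, rptr);
 *	rdev->ih.rptr = rptr;
 */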

void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = order_base_2(ring_size / 4);
	ring_size = (1 << rb_bufsz) * 4;
	rdev->ih.ring_size = ring_size;
	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
	rdev->ih.rptr = 0;
}
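
/* Worked example of the alignment above: for the default 64 KiB ring,
 * ring_size / 4 = 16384 dwords, order_base_2(16384) = 14, and
 * (1 << 14) * 4 = 64 KiB again (already a power of two), giving
 * ptr_mask = 0xffff so byte offsets wrap naturally.
 */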

int r600_ih_ring_alloc(struct radeon_device *rdev)
{
	int r;

	/* Allocate ring buffer */
	if (rdev->ih.ring_obj == NULL) {
		r = radeon_bo_create(rdev, rdev->ih.ring_size,
				     PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0,
				     NULL, NULL, &rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->ih.ring_obj,
				  RADEON_GEM_DOMAIN_GTT,
				  &rdev->ih.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->ih.ring_obj,
				   (void **)&rdev->ih.ring);
		radeon_bo_unreserve(rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
			return r;
		}
	}
	return 0;
}

void r600_ih_ring_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->ih.ring_obj) {
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ih.ring_obj);
			radeon_bo_unpin(rdev->ih.ring_obj);
			radeon_bo_unreserve(rdev->ih.ring_obj);
		}
		radeon_bo_unref(&rdev->ih.ring_obj);
		rdev->ih.ring = NULL;
		rdev->ih.ring_obj = NULL;
	}
}

void r600_rlc_stop(struct radeon_device *rdev)
{
	if ((rdev->family >= CHIP_RV770) &&
	    (rdev->family <= CHIP_RV740)) {
		/* r7xx asics need to soft reset RLC before halting */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
		RREG32(SRBM_SOFT_RESET);
		mdelay(15);
		WREG32(SRBM_SOFT_RESET, 0);
		RREG32(SRBM_SOFT_RESET);
	}

	WREG32(RLC_CNTL, 0);
}

static void r600_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}

static int r600_rlc_resume(struct radeon_device *rdev)
{
	u32 i;
	const __be32 *fw_data;

	if (!rdev->rlc_fw)
		return -EINVAL;

	r600_rlc_stop(rdev);

	WREG32(RLC_HB_CNTL, 0);

	WREG32(RLC_HB_BASE, 0);
	WREG32(RLC_HB_RPTR, 0);
	WREG32(RLC_HB_WPTR, 0);
	WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
	WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	fw_data = (const __be32 *)rdev->rlc_fw->data;
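	/* Stream the microcode image through the RLC_UCODE_ADDR/DATA
	 * register pair, one big-endian dword per write; r7xx images are
	 * larger than r6xx ones, hence the two loop bounds.
	 */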
	if (rdev->family >= CHIP_RV770) {
		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else {
		for (i = 0; i < R600_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	}
	WREG32(RLC_UCODE_ADDR, 0);

	r600_rlc_start(rdev);

	return 0;
}

static void r600_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}

void r600_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
	rdev->ih.rptr = 0;
}

static void r600_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
	WREG32(DMA_CNTL, tmp);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(DxMODE_INT_MASK, 0);
	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		if (ASIC_IS_DCE32(rdev)) {
			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
		} else {
			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
			tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
		}
	} else {
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
		tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
	}
}

int r600_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	r600_disable_interrupts(rdev);

	/* init rlc */
	if (rdev->family >= CHIP_CEDAR)
		ret = evergreen_rlc_resume(rdev);
	else
		ret = r600_rlc_resume(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to dummy page address */
	WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	rb_bufsz = order_base_2(rdev->ih.ring_size / 4);

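	/* Note (based on the shift below): IH_RB_CNTL appears to take the
	 * ring size as log2(dwords) in its buffer-size field, with overflow
	 * reporting enabled so r600_get_ih_wptr() can detect and recover
	 * from a wrapped write pointer.
	 */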
	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));

	if (rdev->wb.enabled)
		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;

	/* set the writeback address whether it's enabled or not */
	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	if (rdev->family >= CHIP_CEDAR)
		evergreen_disable_interrupt_state(rdev);
	else
		r600_disable_interrupt_state(rdev);

	/* at this point everything should be setup correctly to enable master */
	pci_set_master(rdev->pdev);

	/* enable irqs */
	r600_enable_interrupts(rdev);

	return ret;
}

void r600_irq_suspend(struct radeon_device *rdev)
{
	r600_irq_disable(rdev);
	r600_rlc_stop(rdev);
}

void r600_irq_fini(struct radeon_device *rdev)
{
	r600_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}

int r600_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 mode_int = 0;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
	u32 hdmi0, hdmi1;
	u32 dma_cntl;
	u32 thermal_int = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);
		return 0;
	}

	if (ASIC_IS_DCE3(rdev)) {
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
			hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
		} else {
			hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		}
	} else {
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
	}

	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;

	if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
		thermal_int = RREG32(CG_THERMAL_INT) &
			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
	} else if (rdev->family >= CHIP_RV770) {
		thermal_int = RREG32(RV770_CG_THERMAL_INT) &
			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
	}
	if (rdev->irq.dpm_thermal) {
		DRM_DEBUG("dpm thermal\n");
		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
	}

	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
	}

	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
		DRM_DEBUG("r600_irq_set: sw int dma\n");
		dma_cntl |= TRAP_ENABLE;
	}

	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.afmt[0]) {
		DRM_DEBUG("r600_irq_set: hdmi 0\n");
		hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
	}
	if (rdev->irq.afmt[1]) {
		DRM_DEBUG("r600_irq_set: hdmi 1\n");
		hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
	}

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DMA_CNTL, dma_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
	WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
		} else {
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
		}
	} else {
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
		WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
		WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
	}
	if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
		WREG32(CG_THERMAL_INT, thermal_int);
	} else if (rdev->family >= CHIP_RV770) {
		WREG32(RV770_CG_THERMAL_INT, thermal_int);
	}

	/* posting read */
	RREG32(R_000E50_SRBM_STATUS);

	return 0;
}

static void r600_irq_ack(struct radeon_device *rdev)
{
	u32 tmp;

	if (ASIC_IS_DCE3(rdev)) {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
		if (ASIC_IS_DCE32(rdev)) {
			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
		} else {
			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
		}
	} else {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
		rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
		rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
	}
	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
	rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);

	if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (ASIC_IS_DCE32(rdev)) {
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
		}
	} else {
		if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
			tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
			if (ASIC_IS_DCE3(rdev)) {
				tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
				WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
			} else {
				tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
				WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
			}
		}
	}
}

void r600_irq_disable(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	r600_irq_ack(rdev);
	r600_disable_interrupt_state(rdev);
}

static u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		wptr &= ~RB_OVERFLOW;
		/* When a ring buffer overflow happens, start parsing
		 * interrupts from the last vector that has not been
		 * overwritten (wptr + 16).  Hopefully this lets us catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}
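
/* Example of the catch-up arithmetic above: with the default 64 KiB ring
 * (ptr_mask = 0xffff), an overflowed wptr of 0x0010 resumes processing at
 * (0x0010 + 16) & 0xffff = 0x0020; everything between the stale rptr and
 * that offset has been overwritten and is skipped.
 */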

/* r600 IV Ring
 * Each IV ring entry is 128 bits:
 *   [7:0]    - interrupt source id
 *   [31:8]   - reserved
 *   [59:32]  - interrupt source data
 *   [127:60] - reserved
 *
 * The basic interrupt vector entries
 * are decoded as follows:
 * src_id  src_data  description
 *      1         0  D1 Vblank
 *      1         1  D1 Vline
 *      5         0  D2 Vblank
 *      5         1  D2 Vline
 *     19         0  FP Hot plug detection A
 *     19         1  FP Hot plug detection B
 *     19         2  DAC A auto-detection
 *     19         3  DAC B auto-detection
 *     21         4  HDMI block A
 *     21         5  HDMI block B
 *    176         -  CP_INT RB
 *    177         -  CP_INT IB1
 *    178         -  CP_INT IB2
 *    181         -  EOP Interrupt
 *    233         -  GUI Idle
 *
 * Note, these are based on r600 and may need to be
 * adjusted or added to on newer asics
 */
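
/* Decoding sketch for the layout above: given a byte offset rptr into the
 * ring, the id and data fall out of the first two dwords of the entry --
 * exactly what the loop below does:
 *
 *	ring_index = rptr / 4;
 *	src_id   = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
 *	src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
 *
 * Dword 0 carries the source id, dword 1 the source data; dwords 2 and 3
 * are reserved on r6xx.
 */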

int r600_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data;
	u32 ring_index;
	bool queue_hotplug = false;
	bool queue_hdmi = false;
	bool queue_thermal = false;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	/* No MSIs, need a dummy read to flush PCI DMAs */
	if (!rdev->msi_enabled)
		RREG32(IH_RB_WPTR);

	wptr = r600_get_ih_wptr(rdev);

restart_ih:
	/* is somebody else already processing irqs? */
	if (atomic_xchg(&rdev->ih.lock, 1))
		return IRQ_NONE;

	rptr = rdev->ih.rptr;
	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* display interrupts */
	r600_irq_ack(rdev);

	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT))
					DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");

				if (rdev->irq.crtc_vblank_int[0]) {
					drm_handle_vblank(rdev->ddev, 0);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
				}
				if (atomic_read(&rdev->irq.pflip[0]))
					radeon_crtc_handle_vblank(rdev, 0);
				rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
				DRM_DEBUG("IH: D1 vblank\n");

				break;
			case 1: /* D1 vline */
				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT))
					DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
				DRM_DEBUG("IH: D1 vline\n");

				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT))
					DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");

				if (rdev->irq.crtc_vblank_int[1]) {
					drm_handle_vblank(rdev->ddev, 1);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
				}
				if (atomic_read(&rdev->irq.pflip[1]))
					radeon_crtc_handle_vblank(rdev, 1);
				rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
				DRM_DEBUG("IH: D2 vblank\n");

				break;
			case 1: /* D2 vline */
				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT))
					DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
				DRM_DEBUG("IH: D2 vline\n");

				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 9: /* D1 pflip */
			DRM_DEBUG("IH: D1 flip\n");
			if (radeon_use_pflipirq > 0)
				radeon_crtc_handle_flip(rdev, 0);
			break;
		case 11: /* D2 pflip */
			DRM_DEBUG("IH: D2 flip\n");
			if (radeon_use_pflipirq > 0)
				radeon_crtc_handle_flip(rdev, 1);
			break;
		case 19: /* HPD/DAC hotplug */
			switch (src_data) {
			case 0:
				if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT))
					DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD1\n");
				break;
			case 1:
				if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT))
					DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD2\n");
				break;
			case 4:
				if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT))
					DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD3\n");
				break;
			case 5:
				if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT))
					DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD4\n");
				break;
			case 10:
				if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT))
					DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD5\n");
				break;
			case 12:
				if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT))
					DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD6\n");

				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 21: /* hdmi */
			switch (src_data) {
			case 4:
				if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG))
					DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
				queue_hdmi = true;
				DRM_DEBUG("IH: HDMI0\n");

				break;
			case 5:
				if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG))
					DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
				queue_hdmi = true;
				DRM_DEBUG("IH: HDMI1\n");

				break;
			default:
				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 124: /* UVD */
			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 224: /* DMA trap event */
			DRM_DEBUG("IH: DMA trap\n");
			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
			break;
		case 230: /* thermal low to high */
			DRM_DEBUG("IH: thermal low to high\n");
			rdev->pm.dpm.thermal.high_to_low = false;
			queue_thermal = true;
			break;
		case 231: /* thermal high to low */
			DRM_DEBUG("IH: thermal high to low\n");
			rdev->pm.dpm.thermal.high_to_low = true;
			queue_thermal = true;
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
		WREG32(IH_RB_RPTR, rptr);
	}
	if (queue_hotplug)
		schedule_delayed_work(&rdev->hotplug_work, 0);
	if (queue_hdmi)
		schedule_work(&rdev->audio_work);
	if (queue_thermal && rdev->pm.dpm_enabled)
		schedule_work(&rdev->pm.dpm.thermal.work);
	rdev->ih.rptr = rptr;
	atomic_set(&rdev->ih.lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}

4344*4882a593Smuzhiyun /*
4345*4882a593Smuzhiyun * Debugfs info
4346*4882a593Smuzhiyun */
4347*4882a593Smuzhiyun #if defined(CONFIG_DEBUG_FS)
4348*4882a593Smuzhiyun
r600_debugfs_mc_info(struct seq_file * m,void * data)4349*4882a593Smuzhiyun static int r600_debugfs_mc_info(struct seq_file *m, void *data)
4350*4882a593Smuzhiyun {
4351*4882a593Smuzhiyun struct drm_info_node *node = (struct drm_info_node *) m->private;
4352*4882a593Smuzhiyun struct drm_device *dev = node->minor->dev;
4353*4882a593Smuzhiyun struct radeon_device *rdev = dev->dev_private;
4354*4882a593Smuzhiyun
4355*4882a593Smuzhiyun DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
4356*4882a593Smuzhiyun DREG32_SYS(m, rdev, VM_L2_STATUS);
4357*4882a593Smuzhiyun return 0;
4358*4882a593Smuzhiyun }
4359*4882a593Smuzhiyun
4360*4882a593Smuzhiyun static struct drm_info_list r600_mc_info_list[] = {
4361*4882a593Smuzhiyun {"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
4362*4882a593Smuzhiyun };
4363*4882a593Smuzhiyun #endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}
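
/*
 * Usage sketch (paths are illustrative, assuming debugfs is mounted at
 * /sys/kernel/debug and the card is DRM minor 0):
 *
 *	# cat /sys/kernel/debug/dri/0/r600_mc_info
 *
 * which dumps SRBM_STATUS and VM_L2_STATUS via the DREG32_SYS() helper.
 */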

/**
 * r600_mmio_hdp_flush - flush Host Data Path cache via MMIO
 * @rdev: radeon device structure
 *
 * Some R6XX/R7XX don't seem to take into account HDP flushes performed
 * through the ring buffer. This leads to corruption in rendering, see
 * http://bugzilla.kernel.org/show_bug.cgi?id=15186 . To avoid this, we
 * directly perform the HDP flush by writing the register through MMIO.
 */
void r600_mmio_hdp_flush(struct radeon_device *rdev)
{
	/* r7xx hw bug: write to HDP_DEBUG1 followed by an fb read rather
	 * than a write to HDP_REG_COHERENCY_FLUSH_CNTL. This workaround
	 * seems to cause problems on some AGP cards, so just use the old
	 * method for them.
	 */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void __iomem *)rdev->vram_scratch.ptr;
		u32 tmp;

		WREG32(HDP_DEBUG1, 0);
		tmp = readl(ptr);	/* fb read back flushes the HDP cache */
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
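
/*
 * A hedged sketch of the call path (paraphrasing the ring-commit logic in
 * radeon_ring.c; exact sequence lives there): after the CPU has written
 * command dwords into the ring, the HDP cache is flushed through this MMIO
 * hook so the GPU observes them before the write pointer is bumped:
 *
 *	radeon_ring_write(ring, ...);			// CPU fills the ring
 *	if (rdev->asic->mmio_hdp_flush)
 *		rdev->asic->mmio_hdp_flush(rdev);	// this function on r6xx/r7xx
 *	// ... write pointer update follows, GPU starts fetching ...
 */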

void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	u32 link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	radeon_gui_idle(rdev);

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		/* not actually supported */
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	default:
		DRM_ERROR("invalid pcie lane request: %d\n", lanes);
		return;
	}

	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
	link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
	link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
			    R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);

	WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
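
/*
 * Hedged usage sketch: the power-management code narrows or widens the
 * link to match the requested power state, roughly along the lines of
 *
 *	if (ps->pcie_lanes != r600_get_pcie_lanes(rdev))
 *		r600_set_pcie_lanes(rdev, ps->pcie_lanes);
 *
 * (ps is illustrative; the real caller sits in the radeon_pm code.)
 */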

int r600_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return 0;

	radeon_gui_idle(rdev);

	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X12:
		/* not actually supported */
		return 12;
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}

static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
	u16 link_cntl2;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* only RV6xx+ chips are supported */
	if (rdev->family <= CHIP_R600)
		return;

	if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
	    (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
		return;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if (speed_cntl & LC_CURRENT_DATA_RATE) {
		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
		return;
	}

	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");

	/* 55 nm r6xx asics */
	if ((rdev->family == CHIP_RV670) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RV635)) {
		/* advertise upconfig capability */
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
					     LC_RECONFIG_ARC_MISSING_ESCAPE);
			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		} else {
			link_width_cntl |= LC_UPCONFIGURE_DIS;
			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		}
	}

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		/* 55 nm r6xx asics */
		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			WREG32(MM_CFGREGS_CNTL, 0x8);
			link_cntl2 = RREG32(0x4088);
			WREG32(MM_CFGREGS_CNTL, 0);
			/* not supported yet */
			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
				return;
		}

		/* allow up to three hardware speed-change attempts */
		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

		tmp = RREG32(0x541c);
		WREG32(0x541c, tmp | 0x8);
		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
		link_cntl2 = RREG16(0x4088);
		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
		link_cntl2 |= 0x2;	/* target link speed: 5.0 GT/s (gen2) */
		WREG16(0x4088, link_cntl2);
		WREG32(MM_CFGREGS_CNTL, 0);

		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
			training_cntl &= ~LC_POINT_7_PLUS_EN;
			WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
		} else {
			speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
		}

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}
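
/*
 * Runtime control, per the DRM_INFO message above: gen2 link training can
 * be disabled from the kernel command line, e.g.
 *
 *	radeon.pcie_gen2=0
 *
 * or by passing pcie_gen2=0 when loading the radeon module.
 */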

/**
 * r600_get_gpu_clock_counter - return GPU clock counter snapshot
 *
 * @rdev: radeon_device pointer
 *
 * Fetches a GPU clock counter snapshot (R6xx-cayman).
 * Returns the 64 bit clock counter snapshot.
 */
uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
{
	uint64_t clock;

	mutex_lock(&rdev->gpu_clock_mutex);
	/* latch the counter so the LSB/MSB pair reads coherently */
	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&rdev->gpu_clock_mutex);
	return clock;
}
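
/*
 * A minimal usage sketch: two snapshots bracket a workload to measure
 * elapsed GPU clocks. Userspace reaches the same counter through the
 * RADEON_INFO_TIMESTAMP query of the kms info ioctl (see radeon_kms.c):
 *
 *	uint64_t start = r600_get_gpu_clock_counter(rdev);
 *	// ... workload ...
 *	uint64_t cycles = r600_get_gpu_clock_counter(rdev) - start;
 */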