1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Copyright 2008 Advanced Micro Devices, Inc.
3*4882a593Smuzhiyun * Copyright 2008 Red Hat Inc.
4*4882a593Smuzhiyun * Copyright 2009 Jerome Glisse.
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * Permission is hereby granted, free of charge, to any person obtaining a
7*4882a593Smuzhiyun * copy of this software and associated documentation files (the "Software"),
8*4882a593Smuzhiyun * to deal in the Software without restriction, including without limitation
9*4882a593Smuzhiyun * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10*4882a593Smuzhiyun * and/or sell copies of the Software, and to permit persons to whom the
11*4882a593Smuzhiyun * Software is furnished to do so, subject to the following conditions:
12*4882a593Smuzhiyun *
13*4882a593Smuzhiyun * The above copyright notice and this permission notice shall be included in
14*4882a593Smuzhiyun * all copies or substantial portions of the Software.
15*4882a593Smuzhiyun *
16*4882a593Smuzhiyun * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17*4882a593Smuzhiyun * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18*4882a593Smuzhiyun * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19*4882a593Smuzhiyun * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20*4882a593Smuzhiyun * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21*4882a593Smuzhiyun * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22*4882a593Smuzhiyun * OTHER DEALINGS IN THE SOFTWARE.
23*4882a593Smuzhiyun *
24*4882a593Smuzhiyun * Authors: Dave Airlie
25*4882a593Smuzhiyun * Alex Deucher
26*4882a593Smuzhiyun * Jerome Glisse
27*4882a593Smuzhiyun */
/* RS600 / Radeon X1250/X1270 integrated GPU
 *
 * This file gathers functions specific to RS600, which is the IGP of
 * the X1250/X1270 family supporting Intel CPUs (while RS690/RS740
 * is the X1250/X1270 supporting AMD CPUs). The display engine is
 * the Avivo one, the BIOS is an ATOM BIOS, and the 3D block is that of
 * the R4XX family. The GART is different from the RS400 one and is very
 * close to that of the R600 family (R600 likely being an evolution
 * of the RS600 GART block).
 */
38*4882a593Smuzhiyun
39*4882a593Smuzhiyun #include <linux/io-64-nonatomic-lo-hi.h>
40*4882a593Smuzhiyun #include <linux/pci.h>
41*4882a593Smuzhiyun
42*4882a593Smuzhiyun #include <drm/drm_device.h>
43*4882a593Smuzhiyun #include <drm/drm_vblank.h>
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun #include "atom.h"
46*4882a593Smuzhiyun #include "radeon.h"
47*4882a593Smuzhiyun #include "radeon_asic.h"
48*4882a593Smuzhiyun #include "radeon_audio.h"
49*4882a593Smuzhiyun #include "rs600_reg_safe.h"
50*4882a593Smuzhiyun #include "rs600d.h"
51*4882a593Smuzhiyun
52*4882a593Smuzhiyun static void rs600_gpu_init(struct radeon_device *rdev);
53*4882a593Smuzhiyun int rs600_mc_wait_for_idle(struct radeon_device *rdev);
54*4882a593Smuzhiyun
/* Register offset of each AVIVO CRTC relative to the D1CRTC register
 * block; index with the crtc id so D1 defines can address either CRTC. */
static const u32 crtc_offsets[2] =
{
	0,
	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};
60*4882a593Smuzhiyun
avivo_is_in_vblank(struct radeon_device * rdev,int crtc)61*4882a593Smuzhiyun static bool avivo_is_in_vblank(struct radeon_device *rdev, int crtc)
62*4882a593Smuzhiyun {
63*4882a593Smuzhiyun if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
64*4882a593Smuzhiyun return true;
65*4882a593Smuzhiyun else
66*4882a593Smuzhiyun return false;
67*4882a593Smuzhiyun }
68*4882a593Smuzhiyun
avivo_is_counter_moving(struct radeon_device * rdev,int crtc)69*4882a593Smuzhiyun static bool avivo_is_counter_moving(struct radeon_device *rdev, int crtc)
70*4882a593Smuzhiyun {
71*4882a593Smuzhiyun u32 pos1, pos2;
72*4882a593Smuzhiyun
73*4882a593Smuzhiyun pos1 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
74*4882a593Smuzhiyun pos2 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
75*4882a593Smuzhiyun
76*4882a593Smuzhiyun if (pos1 != pos2)
77*4882a593Smuzhiyun return true;
78*4882a593Smuzhiyun else
79*4882a593Smuzhiyun return false;
80*4882a593Smuzhiyun }
81*4882a593Smuzhiyun
/**
 * avivo_wait_for_vblank - vblank wait asic callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (r5xx-r7xx).
 */
void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
	unsigned i = 0;

	/* Ignore requests for CRTCs this ASIC does not have. */
	if (crtc >= rdev->num_crtc)
		return;

	/* Nothing to wait for if the CRTC is not enabled. */
	if (!(RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN))
		return;

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (avivo_is_in_vblank(rdev, crtc)) {
		/* Every 100 iterations, bail out if the position counter has
		 * stalled, to avoid spinning forever on a stopped CRTC. */
		if (i++ % 100 == 0) {
			if (!avivo_is_counter_moving(rdev, crtc))
				break;
		}
	}

	/* Now wait for the next vblank period to actually begin, with the
	 * same stalled-counter escape hatch as above. */
	while (!avivo_is_in_vblank(rdev, crtc)) {
		if (i++ % 100 == 0) {
			if (!avivo_is_counter_moving(rdev, crtc))
				break;
		}
	}
}
117*4882a593Smuzhiyun
/* Program a page flip on @crtc_id to the new scanout address @crtc_base.
 * With @async set, the flip control is armed with the H_RETRACE update
 * enable bit instead of 0.  The update lock is held while the addresses
 * are written, and released after update_pending goes high (bounded by
 * rdev->usec_timeout). */
void rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base, bool async)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
	int i;

	/* Lock the graphics update lock */
	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset,
	       async ? AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);
	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
}
148*4882a593Smuzhiyun
rs600_page_flip_pending(struct radeon_device * rdev,int crtc_id)149*4882a593Smuzhiyun bool rs600_page_flip_pending(struct radeon_device *rdev, int crtc_id)
150*4882a593Smuzhiyun {
151*4882a593Smuzhiyun struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
152*4882a593Smuzhiyun
153*4882a593Smuzhiyun /* Return current update_pending status: */
154*4882a593Smuzhiyun return !!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) &
155*4882a593Smuzhiyun AVIVO_D1GRPH_SURFACE_UPDATE_PENDING);
156*4882a593Smuzhiyun }
157*4882a593Smuzhiyun
/* Program the encoder's bit-depth control register (truncation or spatial
 * dithering) from the attached monitor's bpc and the connector's dither
 * preference.  LVDS is skipped (set up by atom), as is the case where no
 * bpc could be determined. */
void avivo_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		bpc = radeon_get_monitor_bpc(connector);
		dither = radeon_connector->dither;
	}

	/* LVDS FMT is set up by atom */
	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* No connector / unknown bpc: leave the register alone. */
	if (bpc == 0)
		return;

	/* Build the bit-depth control value: dither or truncate down to the
	 * monitor's depth; 10 bpc and above need no reduction. */
	switch (bpc) {
	case 6:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
		else
			tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN;
		break;
	case 8:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN |
				AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH);
		else
			tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN |
				AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH);
		break;
	case 10:
	default:
		/* not needed */
		break;
	}

	/* Write the value to the bit-depth register of this encoder's block. */
	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
		WREG32(AVIVO_TMDSA_BIT_DEPTH_CONTROL, tmp);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
		WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, tmp);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
		WREG32(AVIVO_DVOA_BIT_DEPTH_CONTROL, tmp);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DDI:
		WREG32(AVIVO_DDIA_BIT_DEPTH_CONTROL, tmp);
		break;
	default:
		break;
	}
}
221*4882a593Smuzhiyun
/* Apply the requested power state's miscellaneous settings: GPIO/VDDC
 * voltage, reduced-speed SCLK hi/lo lengths, dynamic voltage drop, HDP
 * clock gating, dynamic back-bias, and (on non-IGP PCIE parts) the PCIE
 * lane count. */
void rs600_pm_misc(struct radeon_device *rdev)
{
	int requested_index = rdev->pm.requested_power_state_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
	u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
	u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;

	/* GPIO-controlled voltage: drive the GPIO high or low depending on
	 * active_high and whether the state asks for a voltage drop. */
	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp |= voltage->gpio.mask;
			else
				tmp &= ~(voltage->gpio.mask);
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		} else {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp &= ~voltage->gpio.mask;
			else
				tmp |= voltage->gpio.mask;
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		}
	} else if (voltage->type == VOLTAGE_VDDC)
		radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC);

	/* Program the reduced-power SCLK hi/lo lengths: divider-by-2 or
	 * divider-by-4 when reduced-speed SCLK is enabled, 1 otherwise. */
	dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
		} else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
		}
	} else {
		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
	}
	WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);

	/* Dynamic voltage drop on SCLK, with optional drop synchronisation
	 * and delay when the state provides a voltage delay. */
	dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
		dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
		if (voltage->delay) {
			dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
			dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
		} else
			dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
	} else
		dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
	WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);

	/* HDP clock gating: forced on unless the state allows dynamic HDP. */
	hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
		hdp_dyn_cntl &= ~HDP_FORCEON;
	else
		hdp_dyn_cntl |= HDP_FORCEON;
	WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
#if 0
	/* mc_host_dyn seems to cause hangs from time to time */
	mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
		mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
	else
		mc_host_dyn_cntl |= MC_HOST_FORCEON;
	WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
#endif
	/* Dynamic back-bias enable follows the state's MISCINFO2 flag. */
	dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
	if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
		dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
	else
		dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
	WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);

	/* set pcie lanes */
	if ((rdev->flags & RADEON_IS_PCIE) &&
	    !(rdev->flags & RADEON_IS_IGP) &&
	    rdev->asic->pm.set_pcie_lanes &&
	    (ps->pcie_lanes !=
	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
		radeon_set_pcie_lanes(rdev,
				      ps->pcie_lanes);
		DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
	}
}
315*4882a593Smuzhiyun
rs600_pm_prepare(struct radeon_device * rdev)316*4882a593Smuzhiyun void rs600_pm_prepare(struct radeon_device *rdev)
317*4882a593Smuzhiyun {
318*4882a593Smuzhiyun struct drm_device *ddev = rdev->ddev;
319*4882a593Smuzhiyun struct drm_crtc *crtc;
320*4882a593Smuzhiyun struct radeon_crtc *radeon_crtc;
321*4882a593Smuzhiyun u32 tmp;
322*4882a593Smuzhiyun
323*4882a593Smuzhiyun /* disable any active CRTCs */
324*4882a593Smuzhiyun list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
325*4882a593Smuzhiyun radeon_crtc = to_radeon_crtc(crtc);
326*4882a593Smuzhiyun if (radeon_crtc->enabled) {
327*4882a593Smuzhiyun tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
328*4882a593Smuzhiyun tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
329*4882a593Smuzhiyun WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
330*4882a593Smuzhiyun }
331*4882a593Smuzhiyun }
332*4882a593Smuzhiyun }
333*4882a593Smuzhiyun
rs600_pm_finish(struct radeon_device * rdev)334*4882a593Smuzhiyun void rs600_pm_finish(struct radeon_device *rdev)
335*4882a593Smuzhiyun {
336*4882a593Smuzhiyun struct drm_device *ddev = rdev->ddev;
337*4882a593Smuzhiyun struct drm_crtc *crtc;
338*4882a593Smuzhiyun struct radeon_crtc *radeon_crtc;
339*4882a593Smuzhiyun u32 tmp;
340*4882a593Smuzhiyun
341*4882a593Smuzhiyun /* enable any active CRTCs */
342*4882a593Smuzhiyun list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
343*4882a593Smuzhiyun radeon_crtc = to_radeon_crtc(crtc);
344*4882a593Smuzhiyun if (radeon_crtc->enabled) {
345*4882a593Smuzhiyun tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
346*4882a593Smuzhiyun tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
347*4882a593Smuzhiyun WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
348*4882a593Smuzhiyun }
349*4882a593Smuzhiyun }
350*4882a593Smuzhiyun }
351*4882a593Smuzhiyun
352*4882a593Smuzhiyun /* hpd for digital panel detect/disconnect */
rs600_hpd_sense(struct radeon_device * rdev,enum radeon_hpd_id hpd)353*4882a593Smuzhiyun bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
354*4882a593Smuzhiyun {
355*4882a593Smuzhiyun u32 tmp;
356*4882a593Smuzhiyun bool connected = false;
357*4882a593Smuzhiyun
358*4882a593Smuzhiyun switch (hpd) {
359*4882a593Smuzhiyun case RADEON_HPD_1:
360*4882a593Smuzhiyun tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
361*4882a593Smuzhiyun if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
362*4882a593Smuzhiyun connected = true;
363*4882a593Smuzhiyun break;
364*4882a593Smuzhiyun case RADEON_HPD_2:
365*4882a593Smuzhiyun tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
366*4882a593Smuzhiyun if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
367*4882a593Smuzhiyun connected = true;
368*4882a593Smuzhiyun break;
369*4882a593Smuzhiyun default:
370*4882a593Smuzhiyun break;
371*4882a593Smuzhiyun }
372*4882a593Smuzhiyun return connected;
373*4882a593Smuzhiyun }
374*4882a593Smuzhiyun
/* Set the HPD interrupt polarity opposite to the currently sensed state
 * (clear the polarity bit when connected, set it otherwise) —
 * NOTE(review): presumably so the next interrupt fires on a state change;
 * confirm against the interrupt handler. */
void rs600_hpd_set_polarity(struct radeon_device *rdev,
			    enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = rs600_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
		if (connected)
			tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
		else
			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
		WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
		if (connected)
			tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
		else
			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
		WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}
402*4882a593Smuzhiyun
/* Enable the HPD hardware for every connector that has an HPD pin, set
 * each pin's interrupt polarity, and enable the corresponding HPD
 * interrupts via radeon_irq_kms_enable_hpd(). */
void rs600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enable = 0;	/* bitmask of HPD ids to enable interrupts for */

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
			       S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
			break;
		case RADEON_HPD_2:
			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
			       S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
			break;
		default:
			break;
		}
		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
			enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);
}
429*4882a593Smuzhiyun
/* Disable the HPD hardware for every connector with an HPD pin and turn
 * off the corresponding HPD interrupts (mirror of rs600_hpd_init). */
void rs600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;	/* bitmask of HPD ids to disable interrupts for */

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
			       S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
			break;
		case RADEON_HPD_2:
			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
			       S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
			break;
		default:
			break;
		}
		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
			disable |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disable);
}
455*4882a593Smuzhiyun
/* Soft-reset the GPU: stop the MC clients and the CP, then pulse the
 * RBBM soft-reset bits for GA+VAP, CP and MC in turn, restoring PCI
 * state afterwards.  Returns 0 if GA/VAP end up idle, -1 otherwise.
 * @hard is accepted for the asic callback signature but not used here.
 * Skips the reset entirely when the GUI engine is already idle. */
int rs600_asic_reset(struct radeon_device *rdev, bool hard)
{
	struct rv515_mc_save save;
	u32 status, tmp;
	int ret = 0;

	status = RREG32(R_000E40_RBBM_STATUS);
	if (!G_000E40_GUI_ACTIVE(status)) {
		return 0;
	}
	/* Stops all mc clients */
	rv515_mc_stop(rdev, &save);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* stop CP */
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	tmp = RREG32(RADEON_CP_RB_CNTL);
	/* Zero the ring pointers via RPTR write-enable, then restore CNTL. */
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	pci_save_state(rdev->pdev);
	/* disable bus mastering */
	pci_clear_master(rdev->pdev);
	mdelay(1);
	/* reset GA+VAP */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
					S_0000F0_SOFT_RESET_GA(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);	/* read back before the delay */
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* reset CP */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* reset MC */
	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
	RREG32(R_0000F0_RBBM_SOFT_RESET);
	mdelay(500);
	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
	mdelay(1);
	status = RREG32(R_000E40_RBBM_STATUS);
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* restore PCI & busmastering */
	pci_restore_state(rdev->pdev);
	/* Check if GPU is idle */
	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
		dev_err(rdev->dev, "failed to reset GPU\n");
		ret = -1;
	} else
		dev_info(rdev->dev, "GPU reset succeed\n");
	rv515_mc_resume(rdev, &save);
	return ret;
}
517*4882a593Smuzhiyun
518*4882a593Smuzhiyun /*
519*4882a593Smuzhiyun * GART.
520*4882a593Smuzhiyun */
/* Invalidate the GART L1 TLBs and L2 cache by pulsing the invalidate
 * bits in MC_PT0_CNTL: clear, set, clear.  The trailing read presumably
 * flushes the posted write — NOTE(review): confirm. */
void rs600_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* Make sure the invalidate bits start out deasserted. */
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

	/* Assert both invalidate bits to trigger the flush. */
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);

	/* Deassert them again to complete the pulse. */
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
}
538*4882a593Smuzhiyun
rs600_gart_init(struct radeon_device * rdev)539*4882a593Smuzhiyun static int rs600_gart_init(struct radeon_device *rdev)
540*4882a593Smuzhiyun {
541*4882a593Smuzhiyun int r;
542*4882a593Smuzhiyun
543*4882a593Smuzhiyun if (rdev->gart.robj) {
544*4882a593Smuzhiyun WARN(1, "RS600 GART already initialized\n");
545*4882a593Smuzhiyun return 0;
546*4882a593Smuzhiyun }
547*4882a593Smuzhiyun /* Initialize common gart structure */
548*4882a593Smuzhiyun r = radeon_gart_init(rdev);
549*4882a593Smuzhiyun if (r) {
550*4882a593Smuzhiyun return r;
551*4882a593Smuzhiyun }
552*4882a593Smuzhiyun rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
553*4882a593Smuzhiyun return radeon_gart_table_vram_alloc(rdev);
554*4882a593Smuzhiyun }
555*4882a593Smuzhiyun
/* Enable the RS600 GART: pin the page table in VRAM, program the MC
 * page-table registers (per-client control, flat context-0 table over the
 * GTT range, system aperture mapped to VRAM), then turn on page tables
 * and flush the TLBs.  Returns 0 on success or a negative error code. */
static int rs600_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* Enable bus master */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);
	/* FIXME: setup default page */
	WREG32_MC(R_000100_MC_PT0_CNTL,
		  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
		   S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));

	/* Program translation override for each of the 19 MC clients. */
	for (i = 0; i < 19; i++) {
		WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
			  S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
			  S_00016C_SYSTEM_ACCESS_MODE_MASK(
				  V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
			  S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
				  V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
			  S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
			  S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
			  S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
	}
	/* enable first context */
	WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
		  S_000102_ENABLE_PAGE_TABLE(1) |
		  S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));

	/* disable all other contexts */
	for (i = 1; i < 8; i++)
		WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);

	/* setup the page table */
	WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
		  rdev->gart.table_addr);
	WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
	WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
	WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);

	/* System context maps to VRAM space */
	WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
	WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);

	/* enable page tables */
	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
	WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
	tmp = RREG32_MC(R_000009_MC_CNTL1);
	WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
	rs600_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
619*4882a593Smuzhiyun
rs600_gart_disable(struct radeon_device * rdev)620*4882a593Smuzhiyun static void rs600_gart_disable(struct radeon_device *rdev)
621*4882a593Smuzhiyun {
622*4882a593Smuzhiyun u32 tmp;
623*4882a593Smuzhiyun
624*4882a593Smuzhiyun /* FIXME: disable out of gart access */
625*4882a593Smuzhiyun WREG32_MC(R_000100_MC_PT0_CNTL, 0);
626*4882a593Smuzhiyun tmp = RREG32_MC(R_000009_MC_CNTL1);
627*4882a593Smuzhiyun WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
628*4882a593Smuzhiyun radeon_gart_table_vram_unpin(rdev);
629*4882a593Smuzhiyun }
630*4882a593Smuzhiyun
/*
 * rs600_gart_fini - final GART teardown on driver unload.
 *
 * Unhooks the common GART layer first, then disables the hardware page
 * tables, and finally frees the VRAM-backed page-table object.
 */
static void rs600_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rs600_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}
637*4882a593Smuzhiyun
/*
 * rs600_gart_get_page_entry - build a GART PTE from a DMA address and flags.
 *
 * The address is masked to a 4K page boundary and the entry is always
 * tagged as system memory; the remaining PTE bits are derived from the
 * RADEON_GART_PAGE_* flags.
 */
uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
	static const struct {
		uint32_t gart_flag;
		uint64_t pte_bit;
	} flag_map[] = {
		{ RADEON_GART_PAGE_VALID, R600_PTE_VALID },
		{ RADEON_GART_PAGE_READ,  R600_PTE_READABLE },
		{ RADEON_GART_PAGE_WRITE, R600_PTE_WRITEABLE },
		{ RADEON_GART_PAGE_SNOOP, R600_PTE_SNOOPED },
	};
	uint64_t entry = (addr & 0xFFFFFFFFFFFFF000ULL) | R600_PTE_SYSTEM;
	unsigned k;

	for (k = 0; k < ARRAY_SIZE(flag_map); k++) {
		if (flags & flag_map[k].gart_flag)
			entry |= flag_map[k].pte_bit;
	}
	return entry;
}
652*4882a593Smuzhiyun
/*
 * rs600_gart_set_page - store one pre-built 64-bit PTE into the GART table.
 *
 * The table lives in (ioremapped) VRAM; each entry is 8 bytes wide.
 */
void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
			 uint64_t entry)
{
	void __iomem *gtt_table = (void *)rdev->gart.ptr;

	writeq(entry, gtt_table + i * 8);
}
659*4882a593Smuzhiyun
/**
 * rs600_irq_set - program interrupt enables from the driver's irq state
 * @rdev: radeon device
 *
 * Builds the GEN_INT_CNTL, display-mode, hot-plug and (on DCE2) HDMI
 * interrupt enable masks from rdev->irq and writes them to hardware.
 *
 * Returns 0 on success, -EINVAL if no irq handler is installed.
 */
int rs600_irq_set(struct radeon_device *rdev)
{
	uint32_t tmp = 0;
	uint32_t mode_int = 0;
	/* Read-modify-write: preserve everything but the enable bit in
	 * each hot-plug detect control register. */
	u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
		~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
	u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
	u32 hdmi0;
	/* HDMI audio interrupt only exists on DCE2 parts */
	if (ASIC_IS_DCE2(rdev))
		hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
			~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
	else
		hdmi0 = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		WREG32(R_000040_GEN_INT_CNTL, 0);
		return -EINVAL;
	}
	/* CP/fence software interrupt */
	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		tmp |= S_000040_SW_INT_EN(1);
	}
	/* vblank is needed for both explicit waiters and pending pageflips */
	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
	}
	if (rdev->irq.hpd[0]) {
		hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
	}
	if (rdev->irq.hpd[1]) {
		hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
	}
	if (rdev->irq.afmt[0]) {
		hdmi0 |= S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
	}
	WREG32(R_000040_GEN_INT_CNTL, tmp);
	WREG32(R_006540_DxMODE_INT_MASK, mode_int);
	WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
	WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
	if (ASIC_IS_DCE2(rdev))
		WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);

	/* posting read */
	RREG32(R_000040_GEN_INT_CNTL);

	return 0;
}
712*4882a593Smuzhiyun
/**
 * rs600_irq_ack - read and acknowledge pending interrupt sources
 * @rdev: radeon device
 *
 * Latches the display and HDMI interrupt status into
 * rdev->irq.stat_regs.r500, writes the ACK bits back to hardware for
 * every pending vblank/hot-plug/HDMI source, and clears GEN_INT_STATUS.
 *
 * Returns the pending interrupts masked to the bits this handler
 * consumes directly (currently only the SW interrupt).
 */
static inline u32 rs600_irq_ack(struct radeon_device *rdev)
{
	uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
	uint32_t irq_mask = S_000044_SW_INT(1);
	u32 tmp;

	if (G_000044_DISPLAY_INT_STAT(irqs)) {
		/* save the full display status for rs600_irq_process() */
		rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			WREG32(R_006534_D1MODE_VBLANK_STATUS,
				S_006534_D1MODE_VBLANK_ACK(1));
		}
		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			WREG32(R_006D34_D2MODE_VBLANK_STATUS,
				S_006D34_D2MODE_VBLANK_ACK(1));
		}
		/* hot-plug acks are read-modify-write on the control regs */
		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
			WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
			WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	} else {
		rdev->irq.stat_regs.r500.disp_int = 0;
	}

	if (ASIC_IS_DCE2(rdev)) {
		rdev->irq.stat_regs.r500.hdmi0_status = RREG32(R_007404_HDMI0_STATUS) &
			S_007404_HDMI0_AZ_FORMAT_WTRIG(1);
		if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
			tmp = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL);
			tmp |= S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(1);
			WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, tmp);
		}
	} else
		rdev->irq.stat_regs.r500.hdmi0_status = 0;

	if (irqs) {
		/* write-1-to-clear the general interrupt status */
		WREG32(R_000044_GEN_INT_STATUS, irqs);
	}
	return irqs & irq_mask;
}
759*4882a593Smuzhiyun
/**
 * rs600_irq_disable - mask all interrupt sources
 * @rdev: radeon device
 *
 * Clears the HDMI trigger enable, the general interrupt enables and the
 * display-mode interrupt masks, then drains any already-latched
 * interrupts.
 */
void rs600_irq_disable(struct radeon_device *rdev)
{
	/* keep the rest of the HDMI packet-control register intact */
	u32 hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
		~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
	WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
	WREG32(R_000040_GEN_INT_CNTL, 0);
	WREG32(R_006540_DxMODE_INT_MASK, 0);
	/* Wait and acknowledge irq */
	mdelay(1);
	rs600_irq_ack(rdev);
}
771*4882a593Smuzhiyun
/**
 * rs600_irq_process - top-level interrupt handler
 * @rdev: radeon device
 *
 * Acks and dispatches pending interrupts in a loop until no source is
 * pending: fence processing for SW interrupts, vblank/pageflip handling
 * per CRTC, and deferred work for hot-plug and HDMI events. Rearms MSI
 * at the end when MSIs are in use.
 *
 * Returns IRQ_NONE if nothing was pending, IRQ_HANDLED otherwise.
 */
int rs600_irq_process(struct radeon_device *rdev)
{
	u32 status, msi_rearm;
	bool queue_hotplug = false;
	bool queue_hdmi = false;

	status = rs600_irq_ack(rdev);
	if (!status &&
	    !rdev->irq.stat_regs.r500.disp_int &&
	    !rdev->irq.stat_regs.r500.hdmi0_status) {
		return IRQ_NONE;
	}
	/* loop until an ack pass reports no further pending sources */
	while (status ||
	       rdev->irq.stat_regs.r500.disp_int ||
	       rdev->irq.stat_regs.r500.hdmi0_status) {
		/* SW interrupt */
		if (G_000044_SW_INT(status)) {
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
		}
		/* Vertical blank interrupts */
		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			if (rdev->irq.crtc_vblank_int[0]) {
				drm_handle_vblank(rdev->ddev, 0);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (atomic_read(&rdev->irq.pflip[0]))
				radeon_crtc_handle_vblank(rdev, 0);
		}
		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			if (rdev->irq.crtc_vblank_int[1]) {
				drm_handle_vblank(rdev->ddev, 1);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (atomic_read(&rdev->irq.pflip[1]))
				radeon_crtc_handle_vblank(rdev, 1);
		}
		/* hot-plug and HDMI work is deferred outside the loop */
		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			queue_hotplug = true;
			DRM_DEBUG("HPD1\n");
		}
		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
			queue_hotplug = true;
			DRM_DEBUG("HPD2\n");
		}
		if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
			queue_hdmi = true;
			DRM_DEBUG("HDMI0\n");
		}
		status = rs600_irq_ack(rdev);
	}
	if (queue_hotplug)
		schedule_delayed_work(&rdev->hotplug_work, 0);
	if (queue_hdmi)
		schedule_work(&rdev->audio_work);
	if (rdev->msi_enabled) {
		switch (rdev->family) {
		case CHIP_RS600:
		case CHIP_RS690:
		case CHIP_RS740:
			/* rearm by toggling the MSI rearm bit in BUS_CNTL */
			msi_rearm = RREG32(RADEON_BUS_CNTL) & ~RS600_MSI_REARM;
			WREG32(RADEON_BUS_CNTL, msi_rearm);
			WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
			break;
		default:
			WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
			break;
		}
	}
	return IRQ_HANDLED;
}
844*4882a593Smuzhiyun
rs600_get_vblank_counter(struct radeon_device * rdev,int crtc)845*4882a593Smuzhiyun u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
846*4882a593Smuzhiyun {
847*4882a593Smuzhiyun if (crtc == 0)
848*4882a593Smuzhiyun return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT);
849*4882a593Smuzhiyun else
850*4882a593Smuzhiyun return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT);
851*4882a593Smuzhiyun }
852*4882a593Smuzhiyun
rs600_mc_wait_for_idle(struct radeon_device * rdev)853*4882a593Smuzhiyun int rs600_mc_wait_for_idle(struct radeon_device *rdev)
854*4882a593Smuzhiyun {
855*4882a593Smuzhiyun unsigned i;
856*4882a593Smuzhiyun
857*4882a593Smuzhiyun for (i = 0; i < rdev->usec_timeout; i++) {
858*4882a593Smuzhiyun if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS)))
859*4882a593Smuzhiyun return 0;
860*4882a593Smuzhiyun udelay(1);
861*4882a593Smuzhiyun }
862*4882a593Smuzhiyun return -1;
863*4882a593Smuzhiyun }
864*4882a593Smuzhiyun
/*
 * rs600_gpu_init - basic GPU configuration at startup.
 *
 * Sets up the render pipes via the shared r420 helper, then waits for
 * the memory controller to go idle before the MC is reprogrammed.
 */
static void rs600_gpu_init(struct radeon_device *rdev)
{
	r420_pipes_init(rdev);
	/* Wait for mc idle */
	if (rs600_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
}
872*4882a593Smuzhiyun
/*
 * rs600_mc_init - gather VRAM information and lay out VRAM/GTT in the
 * GPU address space.
 */
static void rs600_mc_init(struct radeon_device *rdev)
{
	u64 base;

	/* PCI BAR 0 is the VRAM aperture */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	rdev->mc.vram_is_ddr = true;
	rdev->mc.vram_width = 128;
	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
	/* everything reachable through the aperture is CPU-visible */
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	/* keep VRAM where the MC has already placed it */
	base = RREG32_MC(R_000004_MC_FB_LOCATION);
	base = G_000004_MC_FB_START(base) << 16;
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = 0;
	radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}
892*4882a593Smuzhiyun
/**
 * rs600_bandwidth_update - update display priority/watermark registers
 * @rdev: radeon device
 *
 * Adjusts the line buffer split for the active modes and, when display
 * priority is forced to 2, sets both display controllers' MC request
 * priority to always-on.
 */
void rs600_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
	/* FIXME: implement full support */

	if (!rdev->mode_info.mode_config_initialized)
		return;

	radeon_update_display_priority(rdev);

	if (rdev->mode_info.crtcs[0]->base.enabled)
		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
	if (rdev->mode_info.crtcs[1]->base.enabled)
		mode1 = &rdev->mode_info.crtcs[1]->base.mode;

	rs690_line_buffer_adjust(rdev, mode0, mode1);

	if (rdev->disp_priority == 2) {
		d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
		d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT);
		d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
		d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
		/* mirror the A-count values into the B-count registers */
		WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
	}
}
923*4882a593Smuzhiyun
/**
 * rs600_mc_rreg - read an indirect memory-controller register
 * @rdev: radeon device
 * @reg: MC register offset
 *
 * MC registers are reached through an index/data pair; the spinlock
 * serializes against concurrent users of the same pair (see
 * rs600_mc_wreg()).
 */
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
		S_000070_MC_IND_CITF_ARB0(1));
	r = RREG32(R_000074_MC_IND_DATA);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
	return r;
}
936*4882a593Smuzhiyun
/**
 * rs600_mc_wreg - write an indirect memory-controller register
 * @rdev: radeon device
 * @reg: MC register offset
 * @v: value to write
 *
 * Same index/data scheme as rs600_mc_rreg(), with the write-enable bit
 * set in the index register.
 */
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
		S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
	WREG32(R_000074_MC_IND_DATA, v);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
947*4882a593Smuzhiyun
/*
 * rs600_debugfs - register the RBBM debugfs file; failure is non-fatal
 * and only logged.
 */
static void rs600_debugfs(struct radeon_device *rdev)
{
	if (r100_debugfs_rbbm_init(rdev) != 0)
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
}
953*4882a593Smuzhiyun
/*
 * rs600_set_safe_registers - install the RS600 safe-register bitmap
 * (stored in the shared r300 config area) and its size.
 */
void rs600_set_safe_registers(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
}
959*4882a593Smuzhiyun
/*
 * rs600_mc_program - program the memory-controller apertures.
 *
 * Stops all MC clients first, waits for idle, clears the (unused) AGP
 * apertures, then programs the framebuffer location in both the MC and
 * the host data path before resuming the clients.
 */
static void rs600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;

	/* Stops all mc clients */
	rv515_mc_stop(rdev, &save);

	/* Wait for mc idle */
	if (rs600_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");

	/* FIXME: What does AGP means for such chipset ? */
	WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF);
	WREG32_MC(R_000006_AGP_BASE, 0);
	WREG32_MC(R_000007_AGP_BASE_2, 0);
	/* Program MC */
	WREG32_MC(R_000004_MC_FB_LOCATION,
		S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
		S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
	/* keep the HDP view of VRAM in sync with the MC */
	WREG32(R_000134_HDP_FB_LOCATION,
		S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));

	rv515_mc_resume(rdev, &save);
}
984*4882a593Smuzhiyun
/**
 * rs600_startup - bring the GPU to an operational state
 * @rdev: radeon device
 *
 * Common path shared by init and resume: programs the MC, starts
 * clocks, enables GART, writeback, fences, IRQs, the CP ring, the IB
 * pool and audio, in that order.
 *
 * Returns 0 on success or a negative error code; on failure, earlier
 * stages are left enabled for the caller to tear down.
 */
static int rs600_startup(struct radeon_device *rdev)
{
	int r;

	rs600_mc_program(rdev);
	/* Resume clock */
	rv515_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	rs600_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r = rs600_gart_enable(rdev);
	if (r)
		return r;

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	rs600_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_audio_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing audio\n");
		return r;
	}

	return 0;
}
1041*4882a593Smuzhiyun
/**
 * rs600_resume - resume the GPU after suspend
 * @rdev: radeon device
 *
 * Resets and re-posts the ASIC via ATOM, then runs the common startup
 * path. Returns 0 on success or a negative error code.
 */
int rs600_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure GART is not working */
	rs600_gart_disable(rdev);
	/* Resume clock before doing reset */
	rv515_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Resume clock after posting */
	rv515_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = rs600_startup(rdev);
	if (r) {
		rdev->accel_working = false;
	}
	return r;
}
1070*4882a593Smuzhiyun
/**
 * rs600_suspend - quiesce the GPU for suspend
 * @rdev: radeon device
 *
 * Stops power management, audio, the CP, writeback, interrupts and the
 * GART. Always returns 0.
 */
int rs600_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	radeon_audio_fini(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	rs600_irq_disable(rdev);
	rs600_gart_disable(rdev);
	return 0;
}
1081*4882a593Smuzhiyun
/**
 * rs600_fini - tear down the driver for this device
 * @rdev: radeon device
 *
 * Releases all subsystems set up by rs600_init()/rs600_startup() and
 * frees the cached BIOS image last.
 */
void rs600_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	radeon_audio_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	rs600_gart_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}
1098*4882a593Smuzhiyun
/**
 * rs600_init - one-time driver initialization for an RS600 device
 * @rdev: radeon device
 *
 * Validates/posts the (ATOM-only) BIOS, initializes clocks, the memory
 * controller, fences, the buffer manager and GART, then runs the common
 * startup path. If acceleration fails to start, accel engines are torn
 * down but 0 is still returned so modesetting can proceed.
 *
 * Returns 0 on success or a negative error code on fatal failures.
 */
int rs600_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	rv515_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* restore some register to sane defaults */
	r100_restore_sanity(rdev);
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		r = radeon_atombios_init(rdev);
		if (r)
			return r;
	} else {
		dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n");
		return -EINVAL;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;

	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize memory controller */
	rs600_mc_init(rdev);
	rs600_debugfs(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	r = rs600_gart_init(rdev);
	if (r)
		return r;
	rs600_set_safe_registers(rdev);

	/* Initialize power management */
	radeon_pm_init(rdev);

	rdev->accel_working = true;
	r = rs600_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		rs600_gart_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}
1170