1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Copyright 2013 Advanced Micro Devices, Inc.
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * Permission is hereby granted, free of charge, to any person obtaining a
5*4882a593Smuzhiyun * copy of this software and associated documentation files (the "Software"),
6*4882a593Smuzhiyun * to deal in the Software without restriction, including without limitation
7*4882a593Smuzhiyun * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8*4882a593Smuzhiyun * and/or sell copies of the Software, and to permit persons to whom the
9*4882a593Smuzhiyun * Software is furnished to do so, subject to the following conditions:
10*4882a593Smuzhiyun *
11*4882a593Smuzhiyun * The above copyright notice and this permission notice shall be included in
12*4882a593Smuzhiyun * all copies or substantial portions of the Software.
13*4882a593Smuzhiyun *
14*4882a593Smuzhiyun * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15*4882a593Smuzhiyun * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16*4882a593Smuzhiyun * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17*4882a593Smuzhiyun * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18*4882a593Smuzhiyun * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19*4882a593Smuzhiyun * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20*4882a593Smuzhiyun * OTHER DEALINGS IN THE SOFTWARE.
21*4882a593Smuzhiyun *
22*4882a593Smuzhiyun */
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun #include <linux/firmware.h>
25*4882a593Smuzhiyun #include <linux/pci.h>
26*4882a593Smuzhiyun #include <linux/seq_file.h>
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun #include "atom.h"
29*4882a593Smuzhiyun #include "ci_dpm.h"
30*4882a593Smuzhiyun #include "cikd.h"
31*4882a593Smuzhiyun #include "r600_dpm.h"
32*4882a593Smuzhiyun #include "radeon.h"
33*4882a593Smuzhiyun #include "radeon_asic.h"
34*4882a593Smuzhiyun #include "radeon_ucode.h"
35*4882a593Smuzhiyun
36*4882a593Smuzhiyun #define MC_CG_ARB_FREQ_F0 0x0a
37*4882a593Smuzhiyun #define MC_CG_ARB_FREQ_F1 0x0b
38*4882a593Smuzhiyun #define MC_CG_ARB_FREQ_F2 0x0c
39*4882a593Smuzhiyun #define MC_CG_ARB_FREQ_F3 0x0d
40*4882a593Smuzhiyun
41*4882a593Smuzhiyun #define SMC_RAM_END 0x40000
42*4882a593Smuzhiyun
43*4882a593Smuzhiyun #define VOLTAGE_SCALE 4
44*4882a593Smuzhiyun #define VOLTAGE_VID_OFFSET_SCALE1 625
45*4882a593Smuzhiyun #define VOLTAGE_VID_OFFSET_SCALE2 100
46*4882a593Smuzhiyun
/* PowerTune defaults for Hawaii XT boards (0x67B0/0x67B8).
 * Scalar fields (SVI load line, TDC limits, waterfall ctl, DTE base, BAPM
 * temp gradient) followed by the BAPMTI R and RC tables -- see
 * struct ci_pt_defaults for the exact field order (TODO confirm layout).
 */
static const struct ci_pt_defaults defaults_hawaii_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};
53*4882a593Smuzhiyun
/* PowerTune defaults for Hawaii PRO boards (0x67B1/0x67BA); differs from
 * the XT table only in the BAPM temperature gradient constant.
 */
static const struct ci_pt_defaults defaults_hawaii_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};
60*4882a593Smuzhiyun
/* PowerTune defaults for Bonaire XT; also used as the catch-all fallback
 * for any device id not explicitly matched in
 * ci_initialize_powertune_defaults().
 */
static const struct ci_pt_defaults defaults_bonaire_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};
67*4882a593Smuzhiyun
/* PowerTune defaults for Saturn XT parts (0x6640/0x6641/0x6646/0x6647). */
static const struct ci_pt_defaults defaults_saturn_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};
74*4882a593Smuzhiyun
/* DIDT (di/dt current-ramping limiter) register programming sequence,
 * consumed by ci_program_pt_config_registers().  Each entry is
 * { offset, mask, shift, value, type }: the register at 'offset' (in the
 * address space selected by 'type' -- here always the DIDT indirect space)
 * has the 'mask' field replaced with ('value' << 'shift').  The list is
 * terminated by an entry whose offset is 0xFFFFFFFF.  The four register
 * groups (0x0x, 0x2x, 0x4x, 0x6x) program the SQ, DB, TD and TCP blocks
 * respectively -- presumably; confirm against the CIK register headers.
 */
static const struct ci_pt_config_reg didt_config_ci[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }	/* end-of-list sentinel */
};
151*4882a593Smuzhiyun
152*4882a593Smuzhiyun extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
153*4882a593Smuzhiyun extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
154*4882a593Smuzhiyun u32 arb_freq_src, u32 arb_freq_dest);
155*4882a593Smuzhiyun extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
156*4882a593Smuzhiyun extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
157*4882a593Smuzhiyun extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
158*4882a593Smuzhiyun u32 max_voltage_steps,
159*4882a593Smuzhiyun struct atom_voltage_table *voltage_table);
160*4882a593Smuzhiyun extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
161*4882a593Smuzhiyun extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
162*4882a593Smuzhiyun extern int ci_mc_load_microcode(struct radeon_device *rdev);
163*4882a593Smuzhiyun extern void cik_update_cg(struct radeon_device *rdev,
164*4882a593Smuzhiyun u32 block, bool enable);
165*4882a593Smuzhiyun
166*4882a593Smuzhiyun static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
167*4882a593Smuzhiyun struct atom_voltage_table_entry *voltage_table,
168*4882a593Smuzhiyun u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
169*4882a593Smuzhiyun static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
170*4882a593Smuzhiyun static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
171*4882a593Smuzhiyun u32 target_tdp);
172*4882a593Smuzhiyun static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
173*4882a593Smuzhiyun
174*4882a593Smuzhiyun static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
175*4882a593Smuzhiyun static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
176*4882a593Smuzhiyun PPSMC_Msg msg, u32 parameter);
177*4882a593Smuzhiyun
178*4882a593Smuzhiyun static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev);
179*4882a593Smuzhiyun static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev);
180*4882a593Smuzhiyun
ci_get_pi(struct radeon_device * rdev)181*4882a593Smuzhiyun static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
182*4882a593Smuzhiyun {
183*4882a593Smuzhiyun struct ci_power_info *pi = rdev->pm.dpm.priv;
184*4882a593Smuzhiyun
185*4882a593Smuzhiyun return pi;
186*4882a593Smuzhiyun }
187*4882a593Smuzhiyun
ci_get_ps(struct radeon_ps * rps)188*4882a593Smuzhiyun static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
189*4882a593Smuzhiyun {
190*4882a593Smuzhiyun struct ci_ps *ps = rps->ps_priv;
191*4882a593Smuzhiyun
192*4882a593Smuzhiyun return ps;
193*4882a593Smuzhiyun }
194*4882a593Smuzhiyun
ci_initialize_powertune_defaults(struct radeon_device * rdev)195*4882a593Smuzhiyun static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
196*4882a593Smuzhiyun {
197*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
198*4882a593Smuzhiyun
199*4882a593Smuzhiyun switch (rdev->pdev->device) {
200*4882a593Smuzhiyun case 0x6649:
201*4882a593Smuzhiyun case 0x6650:
202*4882a593Smuzhiyun case 0x6651:
203*4882a593Smuzhiyun case 0x6658:
204*4882a593Smuzhiyun case 0x665C:
205*4882a593Smuzhiyun case 0x665D:
206*4882a593Smuzhiyun default:
207*4882a593Smuzhiyun pi->powertune_defaults = &defaults_bonaire_xt;
208*4882a593Smuzhiyun break;
209*4882a593Smuzhiyun case 0x6640:
210*4882a593Smuzhiyun case 0x6641:
211*4882a593Smuzhiyun case 0x6646:
212*4882a593Smuzhiyun case 0x6647:
213*4882a593Smuzhiyun pi->powertune_defaults = &defaults_saturn_xt;
214*4882a593Smuzhiyun break;
215*4882a593Smuzhiyun case 0x67B8:
216*4882a593Smuzhiyun case 0x67B0:
217*4882a593Smuzhiyun pi->powertune_defaults = &defaults_hawaii_xt;
218*4882a593Smuzhiyun break;
219*4882a593Smuzhiyun case 0x67BA:
220*4882a593Smuzhiyun case 0x67B1:
221*4882a593Smuzhiyun pi->powertune_defaults = &defaults_hawaii_pro;
222*4882a593Smuzhiyun break;
223*4882a593Smuzhiyun case 0x67A0:
224*4882a593Smuzhiyun case 0x67A1:
225*4882a593Smuzhiyun case 0x67A2:
226*4882a593Smuzhiyun case 0x67A8:
227*4882a593Smuzhiyun case 0x67A9:
228*4882a593Smuzhiyun case 0x67AA:
229*4882a593Smuzhiyun case 0x67B9:
230*4882a593Smuzhiyun case 0x67BE:
231*4882a593Smuzhiyun pi->powertune_defaults = &defaults_bonaire_xt;
232*4882a593Smuzhiyun break;
233*4882a593Smuzhiyun }
234*4882a593Smuzhiyun
235*4882a593Smuzhiyun pi->dte_tj_offset = 0;
236*4882a593Smuzhiyun
237*4882a593Smuzhiyun pi->caps_power_containment = true;
238*4882a593Smuzhiyun pi->caps_cac = false;
239*4882a593Smuzhiyun pi->caps_sq_ramping = false;
240*4882a593Smuzhiyun pi->caps_db_ramping = false;
241*4882a593Smuzhiyun pi->caps_td_ramping = false;
242*4882a593Smuzhiyun pi->caps_tcp_ramping = false;
243*4882a593Smuzhiyun
244*4882a593Smuzhiyun if (pi->caps_power_containment) {
245*4882a593Smuzhiyun pi->caps_cac = true;
246*4882a593Smuzhiyun if (rdev->family == CHIP_HAWAII)
247*4882a593Smuzhiyun pi->enable_bapm_feature = false;
248*4882a593Smuzhiyun else
249*4882a593Smuzhiyun pi->enable_bapm_feature = true;
250*4882a593Smuzhiyun pi->enable_tdc_limit_feature = true;
251*4882a593Smuzhiyun pi->enable_pkg_pwr_tracking_feature = true;
252*4882a593Smuzhiyun }
253*4882a593Smuzhiyun }
254*4882a593Smuzhiyun
ci_convert_to_vid(u16 vddc)255*4882a593Smuzhiyun static u8 ci_convert_to_vid(u16 vddc)
256*4882a593Smuzhiyun {
257*4882a593Smuzhiyun return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
258*4882a593Smuzhiyun }
259*4882a593Smuzhiyun
ci_populate_bapm_vddc_vid_sidd(struct radeon_device * rdev)260*4882a593Smuzhiyun static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
261*4882a593Smuzhiyun {
262*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
263*4882a593Smuzhiyun u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
264*4882a593Smuzhiyun u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
265*4882a593Smuzhiyun u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
266*4882a593Smuzhiyun u32 i;
267*4882a593Smuzhiyun
268*4882a593Smuzhiyun if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
269*4882a593Smuzhiyun return -EINVAL;
270*4882a593Smuzhiyun if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
271*4882a593Smuzhiyun return -EINVAL;
272*4882a593Smuzhiyun if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
273*4882a593Smuzhiyun rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
274*4882a593Smuzhiyun return -EINVAL;
275*4882a593Smuzhiyun
276*4882a593Smuzhiyun for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
277*4882a593Smuzhiyun if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
278*4882a593Smuzhiyun lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
279*4882a593Smuzhiyun hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
280*4882a593Smuzhiyun hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
281*4882a593Smuzhiyun } else {
282*4882a593Smuzhiyun lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
283*4882a593Smuzhiyun hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
284*4882a593Smuzhiyun }
285*4882a593Smuzhiyun }
286*4882a593Smuzhiyun return 0;
287*4882a593Smuzhiyun }
288*4882a593Smuzhiyun
ci_populate_vddc_vid(struct radeon_device * rdev)289*4882a593Smuzhiyun static int ci_populate_vddc_vid(struct radeon_device *rdev)
290*4882a593Smuzhiyun {
291*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
292*4882a593Smuzhiyun u8 *vid = pi->smc_powertune_table.VddCVid;
293*4882a593Smuzhiyun u32 i;
294*4882a593Smuzhiyun
295*4882a593Smuzhiyun if (pi->vddc_voltage_table.count > 8)
296*4882a593Smuzhiyun return -EINVAL;
297*4882a593Smuzhiyun
298*4882a593Smuzhiyun for (i = 0; i < pi->vddc_voltage_table.count; i++)
299*4882a593Smuzhiyun vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
300*4882a593Smuzhiyun
301*4882a593Smuzhiyun return 0;
302*4882a593Smuzhiyun }
303*4882a593Smuzhiyun
ci_populate_svi_load_line(struct radeon_device * rdev)304*4882a593Smuzhiyun static int ci_populate_svi_load_line(struct radeon_device *rdev)
305*4882a593Smuzhiyun {
306*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
307*4882a593Smuzhiyun const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
308*4882a593Smuzhiyun
309*4882a593Smuzhiyun pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
310*4882a593Smuzhiyun pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
311*4882a593Smuzhiyun pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
312*4882a593Smuzhiyun pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
313*4882a593Smuzhiyun
314*4882a593Smuzhiyun return 0;
315*4882a593Smuzhiyun }
316*4882a593Smuzhiyun
ci_populate_tdc_limit(struct radeon_device * rdev)317*4882a593Smuzhiyun static int ci_populate_tdc_limit(struct radeon_device *rdev)
318*4882a593Smuzhiyun {
319*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
320*4882a593Smuzhiyun const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
321*4882a593Smuzhiyun u16 tdc_limit;
322*4882a593Smuzhiyun
323*4882a593Smuzhiyun tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
324*4882a593Smuzhiyun pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
325*4882a593Smuzhiyun pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
326*4882a593Smuzhiyun pt_defaults->tdc_vddc_throttle_release_limit_perc;
327*4882a593Smuzhiyun pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
328*4882a593Smuzhiyun
329*4882a593Smuzhiyun return 0;
330*4882a593Smuzhiyun }
331*4882a593Smuzhiyun
/*
 * Program the TdcWaterfallCtl PM fuse.
 *
 * Reads TdcWaterfallCtl from the SMC firmware's PmFuse table, then on
 * success replaces it with the per-ASIC default.
 *
 * NOTE(review): the value read from SMC SRAM is immediately overwritten
 * by pt_defaults->tdc_waterfall_ctl, so the read effectively only probes
 * that the SRAM location is accessible.  This matches the long-standing
 * behavior of this driver -- confirm intent before "fixing" it.
 *
 * Returns 0 on success, -EINVAL if the SMC SRAM read fails.
 */
static int ci_populate_dw8(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
				     pi->sram_end);
	if (ret)
		return -EINVAL;
	else
		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}
351*4882a593Smuzhiyun
ci_populate_fuzzy_fan(struct radeon_device * rdev)352*4882a593Smuzhiyun static int ci_populate_fuzzy_fan(struct radeon_device *rdev)
353*4882a593Smuzhiyun {
354*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
355*4882a593Smuzhiyun
356*4882a593Smuzhiyun if ((rdev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
357*4882a593Smuzhiyun (rdev->pm.dpm.fan.fan_output_sensitivity == 0))
358*4882a593Smuzhiyun rdev->pm.dpm.fan.fan_output_sensitivity =
359*4882a593Smuzhiyun rdev->pm.dpm.fan.default_fan_output_sensitivity;
360*4882a593Smuzhiyun
361*4882a593Smuzhiyun pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
362*4882a593Smuzhiyun cpu_to_be16(rdev->pm.dpm.fan.fan_output_sensitivity);
363*4882a593Smuzhiyun
364*4882a593Smuzhiyun return 0;
365*4882a593Smuzhiyun }
366*4882a593Smuzhiyun
ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device * rdev)367*4882a593Smuzhiyun static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
368*4882a593Smuzhiyun {
369*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
370*4882a593Smuzhiyun u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
371*4882a593Smuzhiyun u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
372*4882a593Smuzhiyun int i, min, max;
373*4882a593Smuzhiyun
374*4882a593Smuzhiyun min = max = hi_vid[0];
375*4882a593Smuzhiyun for (i = 0; i < 8; i++) {
376*4882a593Smuzhiyun if (0 != hi_vid[i]) {
377*4882a593Smuzhiyun if (min > hi_vid[i])
378*4882a593Smuzhiyun min = hi_vid[i];
379*4882a593Smuzhiyun if (max < hi_vid[i])
380*4882a593Smuzhiyun max = hi_vid[i];
381*4882a593Smuzhiyun }
382*4882a593Smuzhiyun
383*4882a593Smuzhiyun if (0 != lo_vid[i]) {
384*4882a593Smuzhiyun if (min > lo_vid[i])
385*4882a593Smuzhiyun min = lo_vid[i];
386*4882a593Smuzhiyun if (max < lo_vid[i])
387*4882a593Smuzhiyun max = lo_vid[i];
388*4882a593Smuzhiyun }
389*4882a593Smuzhiyun }
390*4882a593Smuzhiyun
391*4882a593Smuzhiyun if ((min == 0) || (max == 0))
392*4882a593Smuzhiyun return -EINVAL;
393*4882a593Smuzhiyun pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
394*4882a593Smuzhiyun pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;
395*4882a593Smuzhiyun
396*4882a593Smuzhiyun return 0;
397*4882a593Smuzhiyun }
398*4882a593Smuzhiyun
ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device * rdev)399*4882a593Smuzhiyun static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
400*4882a593Smuzhiyun {
401*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
402*4882a593Smuzhiyun u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
403*4882a593Smuzhiyun u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
404*4882a593Smuzhiyun struct radeon_cac_tdp_table *cac_tdp_table =
405*4882a593Smuzhiyun rdev->pm.dpm.dyn_state.cac_tdp_table;
406*4882a593Smuzhiyun
407*4882a593Smuzhiyun hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
408*4882a593Smuzhiyun lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
409*4882a593Smuzhiyun
410*4882a593Smuzhiyun pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
411*4882a593Smuzhiyun pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
412*4882a593Smuzhiyun
413*4882a593Smuzhiyun return 0;
414*4882a593Smuzhiyun }
415*4882a593Smuzhiyun
/*
 * Fill the BAPM (bi-directional application power management) parameters
 * in the SMC DPM table: TDP targets, thermal-throttle temperatures, PPM
 * package limits and the BAPMTI R/RC coefficient tables from the per-ASIC
 * defaults.  Multi-byte SMC fields are stored big-endian.
 * Always returns 0.
 */
static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	/* TDP values are carried in the SMC's 8.8 fixed-point format (x256). */
	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	/* temperature_high is in millidegrees; SMC wants whole degrees. */
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	/* PPM limits only apply when the platform supplies a PPM table. */
	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	/* Copy the flat default R/RC arrays into the SMC's 3-D
	 * [iterations][sources][sinks] layout, byte-swapping each entry.
	 */
	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}
463*4882a593Smuzhiyun
ci_populate_pm_base(struct radeon_device * rdev)464*4882a593Smuzhiyun static int ci_populate_pm_base(struct radeon_device *rdev)
465*4882a593Smuzhiyun {
466*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
467*4882a593Smuzhiyun u32 pm_fuse_table_offset;
468*4882a593Smuzhiyun int ret;
469*4882a593Smuzhiyun
470*4882a593Smuzhiyun if (pi->caps_power_containment) {
471*4882a593Smuzhiyun ret = ci_read_smc_sram_dword(rdev,
472*4882a593Smuzhiyun SMU7_FIRMWARE_HEADER_LOCATION +
473*4882a593Smuzhiyun offsetof(SMU7_Firmware_Header, PmFuseTable),
474*4882a593Smuzhiyun &pm_fuse_table_offset, pi->sram_end);
475*4882a593Smuzhiyun if (ret)
476*4882a593Smuzhiyun return ret;
477*4882a593Smuzhiyun ret = ci_populate_bapm_vddc_vid_sidd(rdev);
478*4882a593Smuzhiyun if (ret)
479*4882a593Smuzhiyun return ret;
480*4882a593Smuzhiyun ret = ci_populate_vddc_vid(rdev);
481*4882a593Smuzhiyun if (ret)
482*4882a593Smuzhiyun return ret;
483*4882a593Smuzhiyun ret = ci_populate_svi_load_line(rdev);
484*4882a593Smuzhiyun if (ret)
485*4882a593Smuzhiyun return ret;
486*4882a593Smuzhiyun ret = ci_populate_tdc_limit(rdev);
487*4882a593Smuzhiyun if (ret)
488*4882a593Smuzhiyun return ret;
489*4882a593Smuzhiyun ret = ci_populate_dw8(rdev);
490*4882a593Smuzhiyun if (ret)
491*4882a593Smuzhiyun return ret;
492*4882a593Smuzhiyun ret = ci_populate_fuzzy_fan(rdev);
493*4882a593Smuzhiyun if (ret)
494*4882a593Smuzhiyun return ret;
495*4882a593Smuzhiyun ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
496*4882a593Smuzhiyun if (ret)
497*4882a593Smuzhiyun return ret;
498*4882a593Smuzhiyun ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
499*4882a593Smuzhiyun if (ret)
500*4882a593Smuzhiyun return ret;
501*4882a593Smuzhiyun ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
502*4882a593Smuzhiyun (u8 *)&pi->smc_powertune_table,
503*4882a593Smuzhiyun sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
504*4882a593Smuzhiyun if (ret)
505*4882a593Smuzhiyun return ret;
506*4882a593Smuzhiyun }
507*4882a593Smuzhiyun
508*4882a593Smuzhiyun return 0;
509*4882a593Smuzhiyun }
510*4882a593Smuzhiyun
ci_do_enable_didt(struct radeon_device * rdev,const bool enable)511*4882a593Smuzhiyun static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
512*4882a593Smuzhiyun {
513*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
514*4882a593Smuzhiyun u32 data;
515*4882a593Smuzhiyun
516*4882a593Smuzhiyun if (pi->caps_sq_ramping) {
517*4882a593Smuzhiyun data = RREG32_DIDT(DIDT_SQ_CTRL0);
518*4882a593Smuzhiyun if (enable)
519*4882a593Smuzhiyun data |= DIDT_CTRL_EN;
520*4882a593Smuzhiyun else
521*4882a593Smuzhiyun data &= ~DIDT_CTRL_EN;
522*4882a593Smuzhiyun WREG32_DIDT(DIDT_SQ_CTRL0, data);
523*4882a593Smuzhiyun }
524*4882a593Smuzhiyun
525*4882a593Smuzhiyun if (pi->caps_db_ramping) {
526*4882a593Smuzhiyun data = RREG32_DIDT(DIDT_DB_CTRL0);
527*4882a593Smuzhiyun if (enable)
528*4882a593Smuzhiyun data |= DIDT_CTRL_EN;
529*4882a593Smuzhiyun else
530*4882a593Smuzhiyun data &= ~DIDT_CTRL_EN;
531*4882a593Smuzhiyun WREG32_DIDT(DIDT_DB_CTRL0, data);
532*4882a593Smuzhiyun }
533*4882a593Smuzhiyun
534*4882a593Smuzhiyun if (pi->caps_td_ramping) {
535*4882a593Smuzhiyun data = RREG32_DIDT(DIDT_TD_CTRL0);
536*4882a593Smuzhiyun if (enable)
537*4882a593Smuzhiyun data |= DIDT_CTRL_EN;
538*4882a593Smuzhiyun else
539*4882a593Smuzhiyun data &= ~DIDT_CTRL_EN;
540*4882a593Smuzhiyun WREG32_DIDT(DIDT_TD_CTRL0, data);
541*4882a593Smuzhiyun }
542*4882a593Smuzhiyun
543*4882a593Smuzhiyun if (pi->caps_tcp_ramping) {
544*4882a593Smuzhiyun data = RREG32_DIDT(DIDT_TCP_CTRL0);
545*4882a593Smuzhiyun if (enable)
546*4882a593Smuzhiyun data |= DIDT_CTRL_EN;
547*4882a593Smuzhiyun else
548*4882a593Smuzhiyun data &= ~DIDT_CTRL_EN;
549*4882a593Smuzhiyun WREG32_DIDT(DIDT_TCP_CTRL0, data);
550*4882a593Smuzhiyun }
551*4882a593Smuzhiyun }
552*4882a593Smuzhiyun
ci_program_pt_config_registers(struct radeon_device * rdev,const struct ci_pt_config_reg * cac_config_regs)553*4882a593Smuzhiyun static int ci_program_pt_config_registers(struct radeon_device *rdev,
554*4882a593Smuzhiyun const struct ci_pt_config_reg *cac_config_regs)
555*4882a593Smuzhiyun {
556*4882a593Smuzhiyun const struct ci_pt_config_reg *config_regs = cac_config_regs;
557*4882a593Smuzhiyun u32 data;
558*4882a593Smuzhiyun u32 cache = 0;
559*4882a593Smuzhiyun
560*4882a593Smuzhiyun if (config_regs == NULL)
561*4882a593Smuzhiyun return -EINVAL;
562*4882a593Smuzhiyun
563*4882a593Smuzhiyun while (config_regs->offset != 0xFFFFFFFF) {
564*4882a593Smuzhiyun if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
565*4882a593Smuzhiyun cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
566*4882a593Smuzhiyun } else {
567*4882a593Smuzhiyun switch (config_regs->type) {
568*4882a593Smuzhiyun case CISLANDS_CONFIGREG_SMC_IND:
569*4882a593Smuzhiyun data = RREG32_SMC(config_regs->offset);
570*4882a593Smuzhiyun break;
571*4882a593Smuzhiyun case CISLANDS_CONFIGREG_DIDT_IND:
572*4882a593Smuzhiyun data = RREG32_DIDT(config_regs->offset);
573*4882a593Smuzhiyun break;
574*4882a593Smuzhiyun default:
575*4882a593Smuzhiyun data = RREG32(config_regs->offset << 2);
576*4882a593Smuzhiyun break;
577*4882a593Smuzhiyun }
578*4882a593Smuzhiyun
579*4882a593Smuzhiyun data &= ~config_regs->mask;
580*4882a593Smuzhiyun data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
581*4882a593Smuzhiyun data |= cache;
582*4882a593Smuzhiyun
583*4882a593Smuzhiyun switch (config_regs->type) {
584*4882a593Smuzhiyun case CISLANDS_CONFIGREG_SMC_IND:
585*4882a593Smuzhiyun WREG32_SMC(config_regs->offset, data);
586*4882a593Smuzhiyun break;
587*4882a593Smuzhiyun case CISLANDS_CONFIGREG_DIDT_IND:
588*4882a593Smuzhiyun WREG32_DIDT(config_regs->offset, data);
589*4882a593Smuzhiyun break;
590*4882a593Smuzhiyun default:
591*4882a593Smuzhiyun WREG32(config_regs->offset << 2, data);
592*4882a593Smuzhiyun break;
593*4882a593Smuzhiyun }
594*4882a593Smuzhiyun cache = 0;
595*4882a593Smuzhiyun }
596*4882a593Smuzhiyun config_regs++;
597*4882a593Smuzhiyun }
598*4882a593Smuzhiyun return 0;
599*4882a593Smuzhiyun }
600*4882a593Smuzhiyun
/*
 * Enable or disable the DIDT (di/dt current-transient limiting) block.
 * Only acts when at least one ramping feature is enabled; all register
 * programming is done inside RLC safe mode. On enable, the DIDT config
 * table is programmed first and a failure there aborts without touching
 * the enable bits.
 */
static int ci_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret = 0;

	if (!(pi->caps_sq_ramping || pi->caps_db_ramping ||
	      pi->caps_td_ramping || pi->caps_tcp_ramping))
		return 0;

	cik_enter_rlc_safe_mode(rdev);

	if (enable)
		ret = ci_program_pt_config_registers(rdev, didt_config_ci);

	if (!ret)
		ci_do_enable_didt(rdev, enable);

	cik_exit_rlc_safe_mode(rdev);

	return ret;
}
625*4882a593Smuzhiyun
/*
 * Enable or disable the SMC power-containment features (BAPM, TDC limit,
 * package power limit), tracking which ones actually came up in
 * pi->power_containment_features. On enable, a failing feature sets
 * -EINVAL but the remaining features are still attempted; on disable,
 * only previously enabled features are torn down (best effort).
 */
static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (!enable) {
		/* tear down only the features we actually turned on */
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
		return 0;
	}

	pi->power_containment_features = 0;
	if (!pi->caps_power_containment)
		return 0;

	if (pi->enable_bapm_feature) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
		if (smc_result == PPSMC_Result_OK)
			pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
		else
			ret = -EINVAL;
	}

	if (pi->enable_tdc_limit_feature) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
		if (smc_result == PPSMC_Result_OK)
			pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
		else
			ret = -EINVAL;
	}

	if (pi->enable_pkg_pwr_tracking_feature) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
		if (smc_result == PPSMC_Result_OK) {
			struct radeon_cac_tdp_table *cac_tdp_table =
				rdev->pm.dpm.dyn_state.cac_tdp_table;
			/* limit is stored in watts; SMC wants units of 1/256 W */
			u32 default_pwr_limit =
				(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

			pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

			ci_set_power_limit(rdev, default_pwr_limit);
		} else {
			ret = -EINVAL;
		}
	}

	return ret;
}
683*4882a593Smuzhiyun
/*
 * Turn SMC-side CAC (capacitance * activity leakage estimation) on or off.
 * No-op unless the board advertises CAC support; pi->cac_enabled mirrors
 * the resulting state. Disable is best-effort and cannot fail.
 */
static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret = 0;

	if (!pi->caps_cac)
		return 0;

	if (enable) {
		pi->cac_enabled =
			(ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac) == PPSMC_Result_OK);
		if (!pi->cac_enabled)
			ret = -EINVAL;
	} else if (pi->cac_enabled) {
		ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
		pi->cac_enabled = false;
	}

	return ret;
}
707*4882a593Smuzhiyun
/*
 * Ask the SMC to enable/disable thermally-throttled sclk DPM.
 * Skipped (reports success) when the feature is not configured.
 */
static int ci_enable_thermal_based_sclk_dpm(struct radeon_device *rdev,
					    bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result = PPSMC_Result_OK;

	if (pi->thermal_sclk_dpm_enabled)
		smc_result = ci_send_msg_to_smc(rdev,
						enable ? PPSMC_MSG_ENABLE_THERMAL_DPM :
							 PPSMC_MSG_DISABLE_THERMAL_DPM);

	return (smc_result == PPSMC_Result_OK) ? 0 : -EINVAL;
}
726*4882a593Smuzhiyun
ci_power_control_set_level(struct radeon_device * rdev)727*4882a593Smuzhiyun static int ci_power_control_set_level(struct radeon_device *rdev)
728*4882a593Smuzhiyun {
729*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
730*4882a593Smuzhiyun struct radeon_cac_tdp_table *cac_tdp_table =
731*4882a593Smuzhiyun rdev->pm.dpm.dyn_state.cac_tdp_table;
732*4882a593Smuzhiyun s32 adjust_percent;
733*4882a593Smuzhiyun s32 target_tdp;
734*4882a593Smuzhiyun int ret = 0;
735*4882a593Smuzhiyun bool adjust_polarity = false; /* ??? */
736*4882a593Smuzhiyun
737*4882a593Smuzhiyun if (pi->caps_power_containment) {
738*4882a593Smuzhiyun adjust_percent = adjust_polarity ?
739*4882a593Smuzhiyun rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
740*4882a593Smuzhiyun target_tdp = ((100 + adjust_percent) *
741*4882a593Smuzhiyun (s32)cac_tdp_table->configurable_tdp) / 100;
742*4882a593Smuzhiyun
743*4882a593Smuzhiyun ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
744*4882a593Smuzhiyun }
745*4882a593Smuzhiyun
746*4882a593Smuzhiyun return ret;
747*4882a593Smuzhiyun }
748*4882a593Smuzhiyun
/*
 * Power-gate or ungate the UVD block, updating UVD DPM to match.
 * Does nothing if the requested gate state is already current.
 */
void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->uvd_power_gated != gate) {
		pi->uvd_power_gated = gate;
		ci_update_uvd_dpm(rdev, gate);
	}
}
760*4882a593Smuzhiyun
ci_dpm_vblank_too_short(struct radeon_device * rdev)761*4882a593Smuzhiyun bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
762*4882a593Smuzhiyun {
763*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
764*4882a593Smuzhiyun u32 vblank_time = r600_dpm_get_vblank_time(rdev);
765*4882a593Smuzhiyun u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
766*4882a593Smuzhiyun
767*4882a593Smuzhiyun /* disable mclk switching if the refresh is >120Hz, even if the
768*4882a593Smuzhiyun * blanking period would allow it
769*4882a593Smuzhiyun */
770*4882a593Smuzhiyun if (r600_dpm_get_vrefresh(rdev) > 120)
771*4882a593Smuzhiyun return true;
772*4882a593Smuzhiyun
773*4882a593Smuzhiyun if (vblank_time < switch_limit)
774*4882a593Smuzhiyun return true;
775*4882a593Smuzhiyun else
776*4882a593Smuzhiyun return false;
777*4882a593Smuzhiyun
778*4882a593Smuzhiyun }
779*4882a593Smuzhiyun
/*
 * Clamp and patch the requested power state before it is programmed:
 *  - force evclk/ecclk from the selected VCE state while VCE is active
 *  - decide whether mclk switching must be disabled (more than one CRTC
 *    active, or the vblank period is too short)
 *  - on DC power, cap every performance level to the DC clock limits
 *  - keep the two performance levels monotonically ordered
 */
static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	int i;

	/* while VCE runs, pin the VCE clocks to the current VCE state */
	if (rps->vce_active) {
		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
		rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	/* mclk switching glitches the display unless it fits in the vblank */
	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(rdev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	/* on DC power, cap each level's clocks to the DC limits */
	if (rdev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		/* pin mclk at the highest level so it never needs to switch */
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	/* VCE needs at least the clocks of its own state */
	if (rps->vce_active) {
		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
		if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk)
			mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	/* keep level 1 at least as fast as level 0 */
	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}
854*4882a593Smuzhiyun
ci_thermal_set_temperature_range(struct radeon_device * rdev,int min_temp,int max_temp)855*4882a593Smuzhiyun static int ci_thermal_set_temperature_range(struct radeon_device *rdev,
856*4882a593Smuzhiyun int min_temp, int max_temp)
857*4882a593Smuzhiyun {
858*4882a593Smuzhiyun int low_temp = 0 * 1000;
859*4882a593Smuzhiyun int high_temp = 255 * 1000;
860*4882a593Smuzhiyun u32 tmp;
861*4882a593Smuzhiyun
862*4882a593Smuzhiyun if (low_temp < min_temp)
863*4882a593Smuzhiyun low_temp = min_temp;
864*4882a593Smuzhiyun if (high_temp > max_temp)
865*4882a593Smuzhiyun high_temp = max_temp;
866*4882a593Smuzhiyun if (high_temp < low_temp) {
867*4882a593Smuzhiyun DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
868*4882a593Smuzhiyun return -EINVAL;
869*4882a593Smuzhiyun }
870*4882a593Smuzhiyun
871*4882a593Smuzhiyun tmp = RREG32_SMC(CG_THERMAL_INT);
872*4882a593Smuzhiyun tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
873*4882a593Smuzhiyun tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
874*4882a593Smuzhiyun CI_DIG_THERM_INTL(low_temp / 1000);
875*4882a593Smuzhiyun WREG32_SMC(CG_THERMAL_INT, tmp);
876*4882a593Smuzhiyun
877*4882a593Smuzhiyun #if 0
878*4882a593Smuzhiyun /* XXX: need to figure out how to handle this properly */
879*4882a593Smuzhiyun tmp = RREG32_SMC(CG_THERMAL_CTRL);
880*4882a593Smuzhiyun tmp &= DIG_THERM_DPM_MASK;
881*4882a593Smuzhiyun tmp |= DIG_THERM_DPM(high_temp / 1000);
882*4882a593Smuzhiyun WREG32_SMC(CG_THERMAL_CTRL, tmp);
883*4882a593Smuzhiyun #endif
884*4882a593Smuzhiyun
885*4882a593Smuzhiyun rdev->pm.dpm.thermal.min_temp = low_temp;
886*4882a593Smuzhiyun rdev->pm.dpm.thermal.max_temp = high_temp;
887*4882a593Smuzhiyun
888*4882a593Smuzhiyun return 0;
889*4882a593Smuzhiyun }
890*4882a593Smuzhiyun
/*
 * Mask/unmask the thermal interrupt sources and tell the SMC to start or
 * stop thermal control. rdev->irq.dpm_thermal tracks whether the driver
 * (rather than the SMC) should field thermal interrupts.
 * Returns -EINVAL when the SMC rejects the request.
 */
static int ci_thermal_enable_alert(struct radeon_device *rdev,
				   bool enable)
{
	u32 thermal_int = RREG32_SMC(CG_THERMAL_INT);
	PPSMC_Result result;

	if (enable)
		thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
	else
		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
	WREG32_SMC(CG_THERMAL_INT, thermal_int);
	rdev->irq.dpm_thermal = !enable;

	result = ci_send_msg_to_smc(rdev, enable ? PPSMC_MSG_Thermal_Cntl_Enable :
						   PPSMC_MSG_Thermal_Cntl_Disable);
	if (result != PPSMC_Result_OK) {
		if (enable)
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
		else
			DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
		return -EINVAL;
	}

	return 0;
}
919*4882a593Smuzhiyun
/*
 * Put the fan controller into a static (driver-chosen) PWM mode.
 * On the first call, the power-up mode and TMIN are saved in pi so
 * ci_fan_ctrl_set_default_mode() can restore them later; TMIN is then
 * forced to 0 so the static duty cycle takes effect immediately.
 */
static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 reg;

	if (pi->fan_ctrl_is_in_default_mode) {
		/* capture hardware defaults before overriding them */
		pi->fan_ctrl_default_mode =
			(RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
		pi->t_min =
			(RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
		pi->fan_ctrl_is_in_default_mode = false;
	}

	reg = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
	WREG32_SMC(CG_FDO_CTRL2, reg | TMIN(0));

	reg = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
	WREG32_SMC(CG_FDO_CTRL2, reg | FDO_PWM_MODE(mode));
}
941*4882a593Smuzhiyun
/*
 * Build the SMU7 fan table from the fan profile in rdev->pm.dpm.fan and
 * upload it to SMC SRAM so the SMC can drive the fan autonomously.
 *
 * Deliberately falls back to manual fan control (clears ucode_fan_control)
 * instead of failing when no table address is known, duty100 reads back 0,
 * or the SMC upload fails — it returns 0 in all of those cases.
 */
static int ci_thermal_setup_fan_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	u32 duty100;
	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	u16 fdo_min, slope1, slope2;
	u32 reference_clock, tmp;
	int ret;
	u64 tmp64;

	/* no SMC-side table location known: stay on manual fan control */
	if (!pi->fan_table_start) {
		rdev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;

	/* a zero 100%-duty value would make the scaling below meaningless */
	if (duty100 == 0) {
		rdev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	/* scale pwm_min (units of 0.01%) into hardware duty-cycle units */
	tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100;
	do_div(tmp64, 10000);
	fdo_min = (u16)tmp64;

	/* piecewise-linear fan curve: segments (t_min..t_med) and (t_med..t_high) */
	t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min;
	t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med;

	pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min;
	pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med;

	/* +50 before /100 rounds to nearest */
	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	/* temperatures go to the SMC in whole degrees, rounded */
	fan_table.TempMin = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100);
	fan_table.TempMed = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100);
	fan_table.TempMax = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(rdev->pm.dpm.fan.t_hyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = radeon_get_xclk(rdev);

	/* SMC sampling period derived from the crystal clock */
	fan_table.RefreshPeriod = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay *
					       reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((u16)duty100);

	/* temperature source currently selected in hardware */
	tmp = (RREG32_SMC(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
	fan_table.TempSrc = (uint8_t)tmp;

	ret = ci_copy_bytes_to_smc(rdev,
				   pi->fan_table_start,
				   (u8 *)(&fan_table),
				   sizeof(fan_table),
				   pi->sram_end);

	if (ret) {
		DRM_ERROR("Failed to load fan table to the SMC.");
		/* upload failed: fall back to manual fan control */
		rdev->pm.dpm.fan.ucode_fan_control = false;
	}

	return 0;
}
1018*4882a593Smuzhiyun
ci_fan_ctrl_start_smc_fan_control(struct radeon_device * rdev)1019*4882a593Smuzhiyun static int ci_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
1020*4882a593Smuzhiyun {
1021*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
1022*4882a593Smuzhiyun PPSMC_Result ret;
1023*4882a593Smuzhiyun
1024*4882a593Smuzhiyun if (pi->caps_od_fuzzy_fan_control_support) {
1025*4882a593Smuzhiyun ret = ci_send_msg_to_smc_with_parameter(rdev,
1026*4882a593Smuzhiyun PPSMC_StartFanControl,
1027*4882a593Smuzhiyun FAN_CONTROL_FUZZY);
1028*4882a593Smuzhiyun if (ret != PPSMC_Result_OK)
1029*4882a593Smuzhiyun return -EINVAL;
1030*4882a593Smuzhiyun ret = ci_send_msg_to_smc_with_parameter(rdev,
1031*4882a593Smuzhiyun PPSMC_MSG_SetFanPwmMax,
1032*4882a593Smuzhiyun rdev->pm.dpm.fan.default_max_fan_pwm);
1033*4882a593Smuzhiyun if (ret != PPSMC_Result_OK)
1034*4882a593Smuzhiyun return -EINVAL;
1035*4882a593Smuzhiyun } else {
1036*4882a593Smuzhiyun ret = ci_send_msg_to_smc_with_parameter(rdev,
1037*4882a593Smuzhiyun PPSMC_StartFanControl,
1038*4882a593Smuzhiyun FAN_CONTROL_TABLE);
1039*4882a593Smuzhiyun if (ret != PPSMC_Result_OK)
1040*4882a593Smuzhiyun return -EINVAL;
1041*4882a593Smuzhiyun }
1042*4882a593Smuzhiyun
1043*4882a593Smuzhiyun pi->fan_is_controlled_by_smc = true;
1044*4882a593Smuzhiyun return 0;
1045*4882a593Smuzhiyun }
1046*4882a593Smuzhiyun
ci_fan_ctrl_stop_smc_fan_control(struct radeon_device * rdev)1047*4882a593Smuzhiyun static int ci_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
1048*4882a593Smuzhiyun {
1049*4882a593Smuzhiyun PPSMC_Result ret;
1050*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
1051*4882a593Smuzhiyun
1052*4882a593Smuzhiyun ret = ci_send_msg_to_smc(rdev, PPSMC_StopFanControl);
1053*4882a593Smuzhiyun if (ret == PPSMC_Result_OK) {
1054*4882a593Smuzhiyun pi->fan_is_controlled_by_smc = false;
1055*4882a593Smuzhiyun return 0;
1056*4882a593Smuzhiyun } else
1057*4882a593Smuzhiyun return -EINVAL;
1058*4882a593Smuzhiyun }
1059*4882a593Smuzhiyun
/*
 * Read the current fan duty cycle and report it as a percentage in *speed
 * (clamped to 100). -ENOENT without a fan, -EINVAL if the 100%-duty
 * reference reads back as zero.
 */
int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
				      u32 *speed)
{
	u32 duty, duty100;
	u64 pct;

	if (rdev->pm.no_fan)
		return -ENOENT;

	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
	duty = (RREG32_SMC(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	/* percent = duty * 100 / duty100, in 64 bits to avoid overflow */
	pct = (u64)duty * 100;
	do_div(pct, duty100);

	*speed = (pct > 100) ? 100 : (u32)pct;

	return 0;
}
1084*4882a593Smuzhiyun
/*
 * Set a static fan duty cycle from a percentage (0-100).
 * Rejected with -ENOENT when there is no fan, and with -EINVAL when the
 * SMC currently owns the fan, the percentage is out of range, or the
 * 100%-duty reference reads back as zero.
 */
int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
				      u32 speed)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 duty100, ctrl;
	u64 duty64;

	if (rdev->pm.no_fan)
		return -ENOENT;

	if (pi->fan_is_controlled_by_smc)
		return -EINVAL;

	if (speed > 100)
		return -EINVAL;

	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
	if (duty100 == 0)
		return -EINVAL;

	/* duty = speed * duty100 / 100, computed in 64 bits */
	duty64 = (u64)speed * duty100;
	do_div(duty64, 100);

	ctrl = RREG32_SMC(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
	ctrl |= FDO_STATIC_DUTY((u32)duty64);
	WREG32_SMC(CG_FDO_CTRL0, ctrl);

	return 0;
}
1117*4882a593Smuzhiyun
/*
 * Switch the fan between automatic (mode == 0) and a static mode.
 * Mode 0 re-enables SMC/ucode fan management when available, otherwise
 * restores the hardware default; any other mode stops SMC control and
 * programs that static mode.
 */
void ci_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode)
{
	if (!mode) {
		/* restart auto-manage */
		if (rdev->pm.dpm.fan.ucode_fan_control)
			ci_thermal_start_smc_fan_control(rdev);
		else
			ci_fan_ctrl_set_default_mode(rdev);
		return;
	}

	/* stop auto-manage */
	if (rdev->pm.dpm.fan.ucode_fan_control)
		ci_fan_ctrl_stop_smc_fan_control(rdev);
	ci_fan_ctrl_set_static_mode(rdev, mode);
}
1133*4882a593Smuzhiyun
ci_fan_ctrl_get_mode(struct radeon_device * rdev)1134*4882a593Smuzhiyun u32 ci_fan_ctrl_get_mode(struct radeon_device *rdev)
1135*4882a593Smuzhiyun {
1136*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
1137*4882a593Smuzhiyun u32 tmp;
1138*4882a593Smuzhiyun
1139*4882a593Smuzhiyun if (pi->fan_is_controlled_by_smc)
1140*4882a593Smuzhiyun return 0;
1141*4882a593Smuzhiyun
1142*4882a593Smuzhiyun tmp = RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
1143*4882a593Smuzhiyun return (tmp >> FDO_PWM_MODE_SHIFT);
1144*4882a593Smuzhiyun }
1145*4882a593Smuzhiyun
1146*4882a593Smuzhiyun #if 0
/*
 * Read the measured fan speed in RPM from the tach hardware.
 * Currently compiled out (#if 0 above).
 * Returns -ENOENT when there is no fan, no pulses-per-revolution info,
 * or the tach period reads back as zero.
 */
static int ci_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
					 u32 *speed)
{
	u32 tach_period;
	u32 xclk = radeon_get_xclk(rdev);

	if (rdev->pm.no_fan)
		return -ENOENT;

	if (rdev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	tach_period = (RREG32_SMC(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
	/* avoid dividing by a zero tach period */
	if (tach_period == 0)
		return -ENOENT;

	*speed = 60 * xclk * 10000 / tach_period;

	return 0;
}
1167*4882a593Smuzhiyun
/*
 * Force the fan to a fixed RPM by programming the target tach period and
 * switching the controller to static-RPM mode.
 * Currently compiled out (#if 0 above).
 */
static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
					 u32 speed)
{
	u32 tach_period, tmp;
	u32 xclk = radeon_get_xclk(rdev);

	if (rdev->pm.no_fan)
		return -ENOENT;

	if (rdev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	/* reject speeds outside the board's supported fan range */
	if ((speed < rdev->pm.fan_min_rpm) ||
	    (speed > rdev->pm.fan_max_rpm))
		return -EINVAL;

	/* take the fan away from the SMC before driving it manually */
	if (rdev->pm.dpm.fan.ucode_fan_control)
		ci_fan_ctrl_stop_smc_fan_control(rdev);

	tach_period = 60 * xclk * 10000 / (8 * speed);
	tmp = RREG32_SMC(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
	tmp |= TARGET_PERIOD(tach_period);
	WREG32_SMC(CG_TACH_CTRL, tmp);

	ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);

	return 0;
}
1196*4882a593Smuzhiyun #endif
1197*4882a593Smuzhiyun
ci_fan_ctrl_set_default_mode(struct radeon_device * rdev)1198*4882a593Smuzhiyun static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev)
1199*4882a593Smuzhiyun {
1200*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
1201*4882a593Smuzhiyun u32 tmp;
1202*4882a593Smuzhiyun
1203*4882a593Smuzhiyun if (!pi->fan_ctrl_is_in_default_mode) {
1204*4882a593Smuzhiyun tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
1205*4882a593Smuzhiyun tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode);
1206*4882a593Smuzhiyun WREG32_SMC(CG_FDO_CTRL2, tmp);
1207*4882a593Smuzhiyun
1208*4882a593Smuzhiyun tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
1209*4882a593Smuzhiyun tmp |= TMIN(pi->t_min);
1210*4882a593Smuzhiyun WREG32_SMC(CG_FDO_CTRL2, tmp);
1211*4882a593Smuzhiyun pi->fan_ctrl_is_in_default_mode = true;
1212*4882a593Smuzhiyun }
1213*4882a593Smuzhiyun }
1214*4882a593Smuzhiyun
ci_thermal_start_smc_fan_control(struct radeon_device * rdev)1215*4882a593Smuzhiyun static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev)
1216*4882a593Smuzhiyun {
1217*4882a593Smuzhiyun if (rdev->pm.dpm.fan.ucode_fan_control) {
1218*4882a593Smuzhiyun ci_fan_ctrl_start_smc_fan_control(rdev);
1219*4882a593Smuzhiyun ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
1220*4882a593Smuzhiyun }
1221*4882a593Smuzhiyun }
1222*4882a593Smuzhiyun
ci_thermal_initialize(struct radeon_device * rdev)1223*4882a593Smuzhiyun static void ci_thermal_initialize(struct radeon_device *rdev)
1224*4882a593Smuzhiyun {
1225*4882a593Smuzhiyun u32 tmp;
1226*4882a593Smuzhiyun
1227*4882a593Smuzhiyun if (rdev->pm.fan_pulses_per_revolution) {
1228*4882a593Smuzhiyun tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
1229*4882a593Smuzhiyun tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution -1);
1230*4882a593Smuzhiyun WREG32_SMC(CG_TACH_CTRL, tmp);
1231*4882a593Smuzhiyun }
1232*4882a593Smuzhiyun
1233*4882a593Smuzhiyun tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
1234*4882a593Smuzhiyun tmp |= TACH_PWM_RESP_RATE(0x28);
1235*4882a593Smuzhiyun WREG32_SMC(CG_FDO_CTRL2, tmp);
1236*4882a593Smuzhiyun }
1237*4882a593Smuzhiyun
/*
 * Bring up the thermal controller: program the tach/fan registers, set
 * the supported temperature range, enable the thermal alert interrupt,
 * and (if the SMC fan ucode is in use) install the fan table and hand
 * fan control to the SMC.
 * Returns 0 on success or a negative error code from a failing step.
 */
static int ci_thermal_start_thermal_controller(struct radeon_device *rdev)
{
	int ret;

	ci_thermal_initialize(rdev);
	ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	ret = ci_thermal_enable_alert(rdev, true);
	if (ret)
		return ret;
	if (rdev->pm.dpm.fan.ucode_fan_control) {
		ret = ci_thermal_setup_fan_table(rdev);
		if (ret)
			return ret;
		ci_thermal_start_smc_fan_control(rdev);
	}

	return 0;
}
1258*4882a593Smuzhiyun
ci_thermal_stop_thermal_controller(struct radeon_device * rdev)1259*4882a593Smuzhiyun static void ci_thermal_stop_thermal_controller(struct radeon_device *rdev)
1260*4882a593Smuzhiyun {
1261*4882a593Smuzhiyun if (!rdev->pm.no_fan)
1262*4882a593Smuzhiyun ci_fan_ctrl_set_default_mode(rdev);
1263*4882a593Smuzhiyun }
1264*4882a593Smuzhiyun
#if 0
/*
 * Read a dword from the SMC "soft register" area at @reg_offset.
 * Currently unused (compiled out), kept as the counterpart of
 * ci_write_smc_soft_register().
 */
static int ci_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_read_smc_sram_dword(rdev,
				      pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
#endif
1276*4882a593Smuzhiyun
/*
 * Write a dword into the SMC "soft register" area at @reg_offset
 * (relative to the soft-register base read from the firmware header).
 * Returns 0 on success or a negative error code.
 */
static int ci_write_smc_soft_register(struct radeon_device *rdev,
				      u16 reg_offset, u32 value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_write_smc_sram_dword(rdev,
				       pi->soft_regs_start + reg_offset,
				       value, pi->sram_end);
}
1286*4882a593Smuzhiyun
ci_init_fps_limits(struct radeon_device * rdev)1287*4882a593Smuzhiyun static void ci_init_fps_limits(struct radeon_device *rdev)
1288*4882a593Smuzhiyun {
1289*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
1290*4882a593Smuzhiyun SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
1291*4882a593Smuzhiyun
1292*4882a593Smuzhiyun if (pi->caps_fps) {
1293*4882a593Smuzhiyun u16 tmp;
1294*4882a593Smuzhiyun
1295*4882a593Smuzhiyun tmp = 45;
1296*4882a593Smuzhiyun table->FpsHighT = cpu_to_be16(tmp);
1297*4882a593Smuzhiyun
1298*4882a593Smuzhiyun tmp = 30;
1299*4882a593Smuzhiyun table->FpsLowT = cpu_to_be16(tmp);
1300*4882a593Smuzhiyun }
1301*4882a593Smuzhiyun }
1302*4882a593Smuzhiyun
ci_update_sclk_t(struct radeon_device * rdev)1303*4882a593Smuzhiyun static int ci_update_sclk_t(struct radeon_device *rdev)
1304*4882a593Smuzhiyun {
1305*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
1306*4882a593Smuzhiyun int ret = 0;
1307*4882a593Smuzhiyun u32 low_sclk_interrupt_t = 0;
1308*4882a593Smuzhiyun
1309*4882a593Smuzhiyun if (pi->caps_sclk_throttle_low_notification) {
1310*4882a593Smuzhiyun low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
1311*4882a593Smuzhiyun
1312*4882a593Smuzhiyun ret = ci_copy_bytes_to_smc(rdev,
1313*4882a593Smuzhiyun pi->dpm_table_start +
1314*4882a593Smuzhiyun offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
1315*4882a593Smuzhiyun (u8 *)&low_sclk_interrupt_t,
1316*4882a593Smuzhiyun sizeof(u32), pi->sram_end);
1317*4882a593Smuzhiyun
1318*4882a593Smuzhiyun }
1319*4882a593Smuzhiyun
1320*4882a593Smuzhiyun return ret;
1321*4882a593Smuzhiyun }
1322*4882a593Smuzhiyun
/*
 * Build the VDDC/VDDCI leakage translation tables from the vbios.
 * Two sources are tried: EVV (if the platform supports it) for VDDC
 * only, otherwise the leakage-id based atom tables for both VDDC and
 * VDDCI.  Entries mapping a virtual voltage id to itself (or to 0)
 * are skipped as invalid.
 */
static void ci_get_leakage_voltages(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 leakage_id, virtual_voltage_id;
	u16 vddc, vddci;
	int i;

	pi->vddc_leakage.count = 0;
	pi->vddci_leakage.count = 0;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			/* skip ids the vbios has no EVV entry for */
			if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
				continue;
			if (vddc != 0 && vddc != virtual_voltage_id) {
				pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
				pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
				pi->vddc_leakage.count++;
			}
		}
	} else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
										 virtual_voltage_id,
										 leakage_id) == 0) {
				if (vddc != 0 && vddc != virtual_voltage_id) {
					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
					pi->vddc_leakage.count++;
				}
				if (vddci != 0 && vddci != virtual_voltage_id) {
					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
					pi->vddci_leakage.count++;
				}
			}
		}
	}
}
1364*4882a593Smuzhiyun
/*
 * Map the active auto-throttle source mask onto the thermal-protection
 * hardware: decide whether protection is wanted at all and, for the
 * (currently compiled-out) event-source routing, which source feeds it.
 * Note: dpm_event_src is consumed only by the #if 0 section below.
 */
static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	bool want_thermal_protection;
	enum radeon_dpm_event_src dpm_event_src;
	u32 tmp;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
		break;
	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
#if 0
		/* XXX: need to figure out how to handle this properly */
		tmp = RREG32_SMC(CG_THERMAL_CTRL);
		/* clear the field before inserting the new source; the old
		 * code did "tmp &= DPM_EVENT_SRC_MASK", which kept the stale
		 * field bits instead of clearing them */
		tmp &= ~DPM_EVENT_SRC_MASK;
		tmp |= DPM_EVENT_SRC(dpm_event_src);
		WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

		/* THERMAL_PROTECTION_DIS is a disable bit: clear to enable */
		tmp = RREG32_SMC(GENERAL_PWRMGT);
		if (pi->thermal_protection)
			tmp &= ~THERMAL_PROTECTION_DIS;
		else
			tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	} else {
		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}
1413*4882a593Smuzhiyun
/*
 * Add or remove @source from the set of active auto-throttle sources
 * and reprogram the DPM event routing when the set actually changes.
 */
static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
					   enum radeon_dpm_auto_throttle_src source,
					   bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 mask = 1 << source;
	bool active = (pi->active_auto_throttle_sources & mask) != 0;

	/* nothing to do if the bit is already in the requested state */
	if (enable == active)
		return;

	if (enable)
		pi->active_auto_throttle_sources |= mask;
	else
		pi->active_auto_throttle_sources &= ~mask;

	ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
}
1432*4882a593Smuzhiyun
/*
 * Ask the SMC to raise an interrupt on the VR-hot GPIO, when the
 * platform advertises regulator-hot support.
 */
static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
{
	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
		ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
}
1438*4882a593Smuzhiyun
/*
 * Unfreeze the SCLK/MCLK DPM levels that were frozen for a table
 * update (see ci_freeze_sclk_mclk_dpm()), then clear the pending
 * update flags.  Returns 0 on success, -EINVAL if the SMC rejects a
 * message.
 */
static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->need_update_smu7_dpm_table = 0;
	return 0;
}
1464*4882a593Smuzhiyun
/*
 * Enable or disable SCLK and MCLK DPM in the SMC.  When enabling MCLK
 * DPM, memory CAC is also switched on and the local CAC controllers
 * are programmed (the LCAC_* values are opaque tuning constants;
 * their field meanings are not documented here — NOTE(review)).
 * Returns 0 on success, -EINVAL if the SMC rejects a message.
 */
static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (enable) {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;

			WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);

			/* two-step LCAC programming with a settle delay */
			WREG32_SMC(LCAC_MC0_CNTL, 0x05);
			WREG32_SMC(LCAC_MC1_CNTL, 0x05);
			WREG32_SMC(LCAC_CPL_CNTL, 0x100005);

			udelay(10);

			WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
			WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
			WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
		}
	} else {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}
1510*4882a593Smuzhiyun
/*
 * Start dynamic power management: set the global/dynamic PM enable
 * bits, give the SMC a voltage-change timeout, then enable voltage
 * control, SCLK/MCLK DPM and (if not disabled) PCIe DPM.
 * Returns 0 on success or a negative error code.
 */
static int ci_start_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	/* NOTE(review): return value ignored; a failed write leaves the
	 * SMC's previous VoltageChangeTimeout in place */
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);

	WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	ret = ci_enable_sclk_mclk_dpm(rdev, true);
	if (ret)
		return ret;

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}
1546*4882a593Smuzhiyun
/*
 * Freeze the SCLK/MCLK DPM levels ahead of a DPM table update, so the
 * SMC does not switch levels while the table is being rewritten.
 * Counterpart of ci_unfreeze_sclk_mclk_dpm().  Returns 0 on success,
 * -EINVAL if the SMC rejects a message.
 */
static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}
1571*4882a593Smuzhiyun
/*
 * Stop dynamic power management: clear the global/dynamic PM enable
 * bits, then disable PCIe DPM, SCLK/MCLK DPM and voltage control in
 * the SMC (reverse order of ci_start_dpm()).
 * Returns 0 on success or a negative error code.
 */
static int ci_stop_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp &= ~GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	ret = ci_enable_sclk_mclk_dpm(rdev, false);
	if (ret)
		return ret;

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	return 0;
}
1603*4882a593Smuzhiyun
/*
 * Gate or ungate engine clock power management.  SCLK_PWRMGT_OFF is a
 * disable bit: clear it to enable sclk control.
 */
static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
	u32 reg = RREG32_SMC(SCLK_PWRMGT_CNTL) | SCLK_PWRMGT_OFF;

	if (enable)
		reg &= ~SCLK_PWRMGT_OFF;

	WREG32_SMC(SCLK_PWRMGT_CNTL, reg);
}
1614*4882a593Smuzhiyun
#if 0
/*
 * Tell the SMC whether we are on AC or battery power: pick the
 * corresponding power limit from the CAC/TDP table (in 1/256 W units,
 * hence the * 256) and, when automatic DC transitions are supported,
 * send the matching clamp message.  Currently unused (compiled out).
 */
static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
					bool ac_power)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	u32 power_limit;

	if (ac_power)
		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
	else
		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);

	ci_set_power_limit(rdev, power_limit);

	if (pi->caps_automatic_dc_transition) {
		if (ac_power)
			ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
		else
			ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
	}

	return 0;
}
#endif
1641*4882a593Smuzhiyun
/*
 * Post @msg to the SMC message mailbox and busy-poll for a response.
 * Returns the SMC response code, or PPSMC_Result_Failed when the SMC
 * is not running.  NOTE(review): on poll timeout this returns whatever
 * is in SMC_RESP_0 — possibly 0, which is not a valid result code.
 */
static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
{
	u32 tmp;
	int i;

	if (!ci_is_smc_running(rdev))
		return PPSMC_Result_Failed;

	WREG32(SMC_MESSAGE_0, msg);

	/* the SMC writes a non-zero response code when done */
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(SMC_RESP_0);
		if (tmp != 0)
			break;
		udelay(1);
	}
	tmp = RREG32(SMC_RESP_0);

	return (PPSMC_Result)tmp;
}
1662*4882a593Smuzhiyun
/*
 * Send an SMC message that takes a single u32 argument: the argument
 * goes in SMC_MSG_ARG_0 before the message is posted.
 */
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter)
{
	WREG32(SMC_MSG_ARG_0, parameter);
	return ci_send_msg_to_smc(rdev, msg);
}
1669*4882a593Smuzhiyun
ci_send_msg_to_smc_return_parameter(struct radeon_device * rdev,PPSMC_Msg msg,u32 * parameter)1670*4882a593Smuzhiyun static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
1671*4882a593Smuzhiyun PPSMC_Msg msg, u32 *parameter)
1672*4882a593Smuzhiyun {
1673*4882a593Smuzhiyun PPSMC_Result smc_result;
1674*4882a593Smuzhiyun
1675*4882a593Smuzhiyun smc_result = ci_send_msg_to_smc(rdev, msg);
1676*4882a593Smuzhiyun
1677*4882a593Smuzhiyun if ((smc_result == PPSMC_Result_OK) && parameter)
1678*4882a593Smuzhiyun *parameter = RREG32(SMC_MSG_ARG_0);
1679*4882a593Smuzhiyun
1680*4882a593Smuzhiyun return smc_result;
1681*4882a593Smuzhiyun }
1682*4882a593Smuzhiyun
/*
 * Restrict the SMC to the single SCLK DPM level @n.  No-op (success)
 * when SCLK DPM is key-disabled.  Returns 0 or -EINVAL.
 */
static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result res;

	if (pi->sclk_dpm_key_disabled)
		return 0;

	res = ci_send_msg_to_smc_with_parameter(rdev,
						PPSMC_MSG_SCLKDPM_SetEnabledMask,
						1 << n);
	return (res == PPSMC_Result_OK) ? 0 : -EINVAL;
}
1696*4882a593Smuzhiyun
/*
 * Restrict the SMC to the single MCLK DPM level @n.  No-op (success)
 * when MCLK DPM is key-disabled.  Returns 0 or -EINVAL.
 */
static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result res;

	if (pi->mclk_dpm_key_disabled)
		return 0;

	res = ci_send_msg_to_smc_with_parameter(rdev,
						PPSMC_MSG_MCLKDPM_SetEnabledMask,
						1 << n);
	return (res == PPSMC_Result_OK) ? 0 : -EINVAL;
}
1710*4882a593Smuzhiyun
/*
 * Force the PCIe DPM level to @n (level index, not a mask — unlike the
 * sclk/mclk variants).  No-op (success) when PCIe DPM is key-disabled.
 * Returns 0 or -EINVAL.
 */
static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result res;

	if (pi->pcie_dpm_key_disabled)
		return 0;

	res = ci_send_msg_to_smc_with_parameter(rdev,
						PPSMC_MSG_PCIeDPM_ForceLevel, n);
	return (res == PPSMC_Result_OK) ? 0 : -EINVAL;
}
1724*4882a593Smuzhiyun
/*
 * Program the package power limit @n in the SMC, when the package
 * power-limit containment feature is active.  Returns 0 or -EINVAL.
 */
static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result res;

	if (!(pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit))
		return 0;

	res = ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
	return (res == PPSMC_Result_OK) ? 0 : -EINVAL;
}
1738*4882a593Smuzhiyun
/*
 * Set the overdrive target TDP in the SMC.  Returns 0 on success,
 * -EINVAL if the SMC rejects the message.
 */
static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
				       u32 target_tdp)
{
	if (ci_send_msg_to_smc_with_parameter(rdev,
					      PPSMC_MSG_OverDriveSetTargetTdp,
					      target_tdp) != PPSMC_Result_OK)
		return -EINVAL;

	return 0;
}
1748*4882a593Smuzhiyun
#if 0
/* Return to the boot state by disabling sclk/mclk DPM (compiled out). */
static int ci_set_boot_state(struct radeon_device *rdev)
{
	return ci_enable_sclk_mclk_dpm(rdev, false);
}
#endif
1755*4882a593Smuzhiyun
ci_get_average_sclk_freq(struct radeon_device * rdev)1756*4882a593Smuzhiyun static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
1757*4882a593Smuzhiyun {
1758*4882a593Smuzhiyun u32 sclk_freq;
1759*4882a593Smuzhiyun PPSMC_Result smc_result =
1760*4882a593Smuzhiyun ci_send_msg_to_smc_return_parameter(rdev,
1761*4882a593Smuzhiyun PPSMC_MSG_API_GetSclkFrequency,
1762*4882a593Smuzhiyun &sclk_freq);
1763*4882a593Smuzhiyun if (smc_result != PPSMC_Result_OK)
1764*4882a593Smuzhiyun sclk_freq = 0;
1765*4882a593Smuzhiyun
1766*4882a593Smuzhiyun return sclk_freq;
1767*4882a593Smuzhiyun }
1768*4882a593Smuzhiyun
ci_get_average_mclk_freq(struct radeon_device * rdev)1769*4882a593Smuzhiyun static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
1770*4882a593Smuzhiyun {
1771*4882a593Smuzhiyun u32 mclk_freq;
1772*4882a593Smuzhiyun PPSMC_Result smc_result =
1773*4882a593Smuzhiyun ci_send_msg_to_smc_return_parameter(rdev,
1774*4882a593Smuzhiyun PPSMC_MSG_API_GetMclkFrequency,
1775*4882a593Smuzhiyun &mclk_freq);
1776*4882a593Smuzhiyun if (smc_result != PPSMC_Result_OK)
1777*4882a593Smuzhiyun mclk_freq = 0;
1778*4882a593Smuzhiyun
1779*4882a593Smuzhiyun return mclk_freq;
1780*4882a593Smuzhiyun }
1781*4882a593Smuzhiyun
/*
 * Boot the SMC firmware (jump vector, clock, release from reset) and
 * wait for it to signal that its interrupts are enabled.
 * NOTE(review): the wait loop times out silently; the caller gets no
 * indication if the firmware never comes up.
 */
static void ci_dpm_start_smc(struct radeon_device *rdev)
{
	int i;

	ci_program_jump_on_start(rdev);
	ci_start_smc_clock(rdev);
	ci_start_smc(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
			break;
	}
}
1794*4882a593Smuzhiyun
/* Halt the SMC: put it in reset, then stop its clock. */
static void ci_dpm_stop_smc(struct radeon_device *rdev)
{
	ci_reset_smc(rdev);
	ci_stop_smc_clock(rdev);
}
1800*4882a593Smuzhiyun
ci_process_firmware_header(struct radeon_device * rdev)1801*4882a593Smuzhiyun static int ci_process_firmware_header(struct radeon_device *rdev)
1802*4882a593Smuzhiyun {
1803*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
1804*4882a593Smuzhiyun u32 tmp;
1805*4882a593Smuzhiyun int ret;
1806*4882a593Smuzhiyun
1807*4882a593Smuzhiyun ret = ci_read_smc_sram_dword(rdev,
1808*4882a593Smuzhiyun SMU7_FIRMWARE_HEADER_LOCATION +
1809*4882a593Smuzhiyun offsetof(SMU7_Firmware_Header, DpmTable),
1810*4882a593Smuzhiyun &tmp, pi->sram_end);
1811*4882a593Smuzhiyun if (ret)
1812*4882a593Smuzhiyun return ret;
1813*4882a593Smuzhiyun
1814*4882a593Smuzhiyun pi->dpm_table_start = tmp;
1815*4882a593Smuzhiyun
1816*4882a593Smuzhiyun ret = ci_read_smc_sram_dword(rdev,
1817*4882a593Smuzhiyun SMU7_FIRMWARE_HEADER_LOCATION +
1818*4882a593Smuzhiyun offsetof(SMU7_Firmware_Header, SoftRegisters),
1819*4882a593Smuzhiyun &tmp, pi->sram_end);
1820*4882a593Smuzhiyun if (ret)
1821*4882a593Smuzhiyun return ret;
1822*4882a593Smuzhiyun
1823*4882a593Smuzhiyun pi->soft_regs_start = tmp;
1824*4882a593Smuzhiyun
1825*4882a593Smuzhiyun ret = ci_read_smc_sram_dword(rdev,
1826*4882a593Smuzhiyun SMU7_FIRMWARE_HEADER_LOCATION +
1827*4882a593Smuzhiyun offsetof(SMU7_Firmware_Header, mcRegisterTable),
1828*4882a593Smuzhiyun &tmp, pi->sram_end);
1829*4882a593Smuzhiyun if (ret)
1830*4882a593Smuzhiyun return ret;
1831*4882a593Smuzhiyun
1832*4882a593Smuzhiyun pi->mc_reg_table_start = tmp;
1833*4882a593Smuzhiyun
1834*4882a593Smuzhiyun ret = ci_read_smc_sram_dword(rdev,
1835*4882a593Smuzhiyun SMU7_FIRMWARE_HEADER_LOCATION +
1836*4882a593Smuzhiyun offsetof(SMU7_Firmware_Header, FanTable),
1837*4882a593Smuzhiyun &tmp, pi->sram_end);
1838*4882a593Smuzhiyun if (ret)
1839*4882a593Smuzhiyun return ret;
1840*4882a593Smuzhiyun
1841*4882a593Smuzhiyun pi->fan_table_start = tmp;
1842*4882a593Smuzhiyun
1843*4882a593Smuzhiyun ret = ci_read_smc_sram_dword(rdev,
1844*4882a593Smuzhiyun SMU7_FIRMWARE_HEADER_LOCATION +
1845*4882a593Smuzhiyun offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1846*4882a593Smuzhiyun &tmp, pi->sram_end);
1847*4882a593Smuzhiyun if (ret)
1848*4882a593Smuzhiyun return ret;
1849*4882a593Smuzhiyun
1850*4882a593Smuzhiyun pi->arb_table_start = tmp;
1851*4882a593Smuzhiyun
1852*4882a593Smuzhiyun return 0;
1853*4882a593Smuzhiyun }
1854*4882a593Smuzhiyun
/*
 * Snapshot the SPLL/MPLL clock-control registers into the power info
 * so later clock programming can start from the boot-up values.
 * Pure reads; no hardware state is modified.
 */
static void ci_read_clock_registers(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->clock_registers.cg_spll_func_cntl =
		RREG32_SMC(CG_SPLL_FUNC_CNTL);
	pi->clock_registers.cg_spll_func_cntl_2 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
	pi->clock_registers.cg_spll_func_cntl_3 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
	pi->clock_registers.cg_spll_func_cntl_4 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
	pi->clock_registers.cg_spll_spread_spectrum =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
	pi->clock_registers.cg_spll_spread_spectrum_2 =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
	pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
	pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
	pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
	pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
	pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
	pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
}
1881*4882a593Smuzhiyun
ci_init_sclk_t(struct radeon_device * rdev)1882*4882a593Smuzhiyun static void ci_init_sclk_t(struct radeon_device *rdev)
1883*4882a593Smuzhiyun {
1884*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
1885*4882a593Smuzhiyun
1886*4882a593Smuzhiyun pi->low_sclk_interrupt_t = 0;
1887*4882a593Smuzhiyun }
1888*4882a593Smuzhiyun
/*
 * Toggle the SMC thermal protection feature.  The hardware bit is a
 * "disable" flag, so it is cleared to enable protection and set to
 * disable it.
 */
static void ci_enable_thermal_protection(struct radeon_device *rdev,
					 bool enable)
{
	u32 gen_pwrmgt = RREG32_SMC(GENERAL_PWRMGT);

	if (enable)
		gen_pwrmgt &= ~THERMAL_PROTECTION_DIS;
	else
		gen_pwrmgt |= THERMAL_PROTECTION_DIS;

	WREG32_SMC(GENERAL_PWRMGT, gen_pwrmgt);
}
1900*4882a593Smuzhiyun
ci_enable_acpi_power_management(struct radeon_device * rdev)1901*4882a593Smuzhiyun static void ci_enable_acpi_power_management(struct radeon_device *rdev)
1902*4882a593Smuzhiyun {
1903*4882a593Smuzhiyun u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1904*4882a593Smuzhiyun
1905*4882a593Smuzhiyun tmp |= STATIC_PM_EN;
1906*4882a593Smuzhiyun
1907*4882a593Smuzhiyun WREG32_SMC(GENERAL_PWRMGT, tmp);
1908*4882a593Smuzhiyun }
1909*4882a593Smuzhiyun
#if 0
/*
 * ULP (ultra-low-power) entry/exit helpers.  Compiled out (dead code);
 * kept for reference only.
 */
static int ci_enter_ulp_state(struct radeon_device *rdev)
{

	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);

	udelay(25000);

	return 0;
}

static int ci_exit_ulp_state(struct radeon_device *rdev)
{
	int i;

	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);

	udelay(7000);

	/* wait for the SMC to acknowledge the resume message */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(SMC_RESP_0) == 1)
			break;
		udelay(1000);
	}

	return 0;
}
#endif
1938*4882a593Smuzhiyun
/*
 * Tell the SMC whether a display is currently attached.
 * Returns 0 on success, -EINVAL if the SMC rejects the message.
 */
static int ci_notify_smc_display_change(struct radeon_device *rdev,
					bool has_display)
{
	PPSMC_Msg msg;

	msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
	if (ci_send_msg_to_smc(rdev, msg) != PPSMC_Result_OK)
		return -EINVAL;

	return 0;
}
1946*4882a593Smuzhiyun
/*
 * Switch the SMC master deep-sleep feature on or off.  Deep sleep is
 * only turned on when the board supports sclk deep sleep; a disable
 * request on a board without that capability is a no-op.
 * Returns 0 on success, -EINVAL if the SMC rejects the message.
 */
static int ci_enable_ds_master_switch(struct radeon_device *rdev,
				      bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Msg msg;

	if (enable)
		msg = pi->caps_sclk_ds ? PPSMC_MSG_MASTER_DeepSleep_ON :
			PPSMC_MSG_MASTER_DeepSleep_OFF;
	else if (pi->caps_sclk_ds)
		msg = PPSMC_MSG_MASTER_DeepSleep_OFF;
	else
		return 0; /* nothing to turn off */

	return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
1969*4882a593Smuzhiyun
/*
 * Program the display gap control and notify the SMC of the current
 * display configuration so it can schedule memory retraining around
 * the vblank period.
 */
static void ci_program_display_gap(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
	u32 pre_vbi_time_in_us;
	u32 frame_time_in_us;
	u32 ref_clock = rdev->clock.spll.reference_freq;
	u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);

	/* gate on vblank/watermark only when at least one crtc is active */
	tmp &= ~DISP_GAP_MASK;
	if (rdev->pm.dpm.new_active_crtc_count > 0)
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
	else
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);

	/* fall back to sane defaults if timing info is unavailable */
	if (refresh_rate == 0)
		refresh_rate = 60;
	if (vblank_time == 0xffffffff)
		vblank_time = 500;
	frame_time_in_us = 1000000 / refresh_rate;
	/* NOTE(review): if vblank_time + 200 ever exceeds frame_time_in_us,
	 * this u32 subtraction wraps around — presumably the fallbacks above
	 * keep it in range; confirm against possible vblank_time values. */
	pre_vbi_time_in_us =
		frame_time_in_us - 200 - vblank_time;
	tmp = pre_vbi_time_in_us * (ref_clock / 100);

	WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));


	ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));

}
2003*4882a593Smuzhiyun
/*
 * Enable or disable dynamic spread spectrum.  Enabling only takes
 * effect when the board reports sclk spread-spectrum support;
 * disabling always clears both the SPLL SS enable and the dynamic
 * SS bit.
 */
static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 reg;

	if (enable) {
		if (!pi->caps_sclk_ss_support)
			return;
		reg = RREG32_SMC(GENERAL_PWRMGT) | DYN_SPREAD_SPECTRUM_EN;
		WREG32_SMC(GENERAL_PWRMGT, reg);
	} else {
		reg = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM) & ~SSEN;
		WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, reg);

		reg = RREG32_SMC(GENERAL_PWRMGT) & ~DYN_SPREAD_SPECTRUM_EN;
		WREG32_SMC(GENERAL_PWRMGT, reg);
	}
}
2025*4882a593Smuzhiyun
ci_program_sstp(struct radeon_device * rdev)2026*4882a593Smuzhiyun static void ci_program_sstp(struct radeon_device *rdev)
2027*4882a593Smuzhiyun {
2028*4882a593Smuzhiyun WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
2029*4882a593Smuzhiyun }
2030*4882a593Smuzhiyun
ci_enable_display_gap(struct radeon_device * rdev)2031*4882a593Smuzhiyun static void ci_enable_display_gap(struct radeon_device *rdev)
2032*4882a593Smuzhiyun {
2033*4882a593Smuzhiyun u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
2034*4882a593Smuzhiyun
2035*4882a593Smuzhiyun tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
2036*4882a593Smuzhiyun tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
2037*4882a593Smuzhiyun DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
2038*4882a593Smuzhiyun
2039*4882a593Smuzhiyun WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
2040*4882a593Smuzhiyun }
2041*4882a593Smuzhiyun
/*
 * Program the voltage controller: release the sclk/busy activity
 * counters from reset and load the CI default thresholds into the
 * CG_FTV_* registers.
 */
static void ci_program_vc(struct radeon_device *rdev)
{
	u32 tmp;

	/* let the sclk/busy activity counters run */
	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
	WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
	WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
	WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
	WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
	WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
	WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
	WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
}
2059*4882a593Smuzhiyun
/*
 * Undo ci_program_vc(): hold the sclk/busy activity counters in reset
 * and clear all CG_FTV_* thresholds.
 */
static void ci_clear_vc(struct radeon_device *rdev)
{
	u32 tmp;

	/* put the sclk/busy activity counters back into reset */
	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, 0);
	WREG32_SMC(CG_FTV_1, 0);
	WREG32_SMC(CG_FTV_2, 0);
	WREG32_SMC(CG_FTV_3, 0);
	WREG32_SMC(CG_FTV_4, 0);
	WREG32_SMC(CG_FTV_5, 0);
	WREG32_SMC(CG_FTV_6, 0);
	WREG32_SMC(CG_FTV_7, 0);
}
2077*4882a593Smuzhiyun
ci_upload_firmware(struct radeon_device * rdev)2078*4882a593Smuzhiyun static int ci_upload_firmware(struct radeon_device *rdev)
2079*4882a593Smuzhiyun {
2080*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
2081*4882a593Smuzhiyun int i, ret;
2082*4882a593Smuzhiyun
2083*4882a593Smuzhiyun for (i = 0; i < rdev->usec_timeout; i++) {
2084*4882a593Smuzhiyun if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
2085*4882a593Smuzhiyun break;
2086*4882a593Smuzhiyun }
2087*4882a593Smuzhiyun WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);
2088*4882a593Smuzhiyun
2089*4882a593Smuzhiyun ci_stop_smc_clock(rdev);
2090*4882a593Smuzhiyun ci_reset_smc(rdev);
2091*4882a593Smuzhiyun
2092*4882a593Smuzhiyun ret = ci_load_smc_ucode(rdev, pi->sram_end);
2093*4882a593Smuzhiyun
2094*4882a593Smuzhiyun return ret;
2095*4882a593Smuzhiyun
2096*4882a593Smuzhiyun }
2097*4882a593Smuzhiyun
ci_get_svi2_voltage_table(struct radeon_device * rdev,struct radeon_clock_voltage_dependency_table * voltage_dependency_table,struct atom_voltage_table * voltage_table)2098*4882a593Smuzhiyun static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
2099*4882a593Smuzhiyun struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
2100*4882a593Smuzhiyun struct atom_voltage_table *voltage_table)
2101*4882a593Smuzhiyun {
2102*4882a593Smuzhiyun u32 i;
2103*4882a593Smuzhiyun
2104*4882a593Smuzhiyun if (voltage_dependency_table == NULL)
2105*4882a593Smuzhiyun return -EINVAL;
2106*4882a593Smuzhiyun
2107*4882a593Smuzhiyun voltage_table->mask_low = 0;
2108*4882a593Smuzhiyun voltage_table->phase_delay = 0;
2109*4882a593Smuzhiyun
2110*4882a593Smuzhiyun voltage_table->count = voltage_dependency_table->count;
2111*4882a593Smuzhiyun for (i = 0; i < voltage_table->count; i++) {
2112*4882a593Smuzhiyun voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
2113*4882a593Smuzhiyun voltage_table->entries[i].smio_low = 0;
2114*4882a593Smuzhiyun }
2115*4882a593Smuzhiyun
2116*4882a593Smuzhiyun return 0;
2117*4882a593Smuzhiyun }
2118*4882a593Smuzhiyun
ci_construct_voltage_tables(struct radeon_device * rdev)2119*4882a593Smuzhiyun static int ci_construct_voltage_tables(struct radeon_device *rdev)
2120*4882a593Smuzhiyun {
2121*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
2122*4882a593Smuzhiyun int ret;
2123*4882a593Smuzhiyun
2124*4882a593Smuzhiyun if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2125*4882a593Smuzhiyun ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
2126*4882a593Smuzhiyun VOLTAGE_OBJ_GPIO_LUT,
2127*4882a593Smuzhiyun &pi->vddc_voltage_table);
2128*4882a593Smuzhiyun if (ret)
2129*4882a593Smuzhiyun return ret;
2130*4882a593Smuzhiyun } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2131*4882a593Smuzhiyun ret = ci_get_svi2_voltage_table(rdev,
2132*4882a593Smuzhiyun &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2133*4882a593Smuzhiyun &pi->vddc_voltage_table);
2134*4882a593Smuzhiyun if (ret)
2135*4882a593Smuzhiyun return ret;
2136*4882a593Smuzhiyun }
2137*4882a593Smuzhiyun
2138*4882a593Smuzhiyun if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
2139*4882a593Smuzhiyun si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
2140*4882a593Smuzhiyun &pi->vddc_voltage_table);
2141*4882a593Smuzhiyun
2142*4882a593Smuzhiyun if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2143*4882a593Smuzhiyun ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
2144*4882a593Smuzhiyun VOLTAGE_OBJ_GPIO_LUT,
2145*4882a593Smuzhiyun &pi->vddci_voltage_table);
2146*4882a593Smuzhiyun if (ret)
2147*4882a593Smuzhiyun return ret;
2148*4882a593Smuzhiyun } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2149*4882a593Smuzhiyun ret = ci_get_svi2_voltage_table(rdev,
2150*4882a593Smuzhiyun &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2151*4882a593Smuzhiyun &pi->vddci_voltage_table);
2152*4882a593Smuzhiyun if (ret)
2153*4882a593Smuzhiyun return ret;
2154*4882a593Smuzhiyun }
2155*4882a593Smuzhiyun
2156*4882a593Smuzhiyun if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
2157*4882a593Smuzhiyun si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
2158*4882a593Smuzhiyun &pi->vddci_voltage_table);
2159*4882a593Smuzhiyun
2160*4882a593Smuzhiyun if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2161*4882a593Smuzhiyun ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
2162*4882a593Smuzhiyun VOLTAGE_OBJ_GPIO_LUT,
2163*4882a593Smuzhiyun &pi->mvdd_voltage_table);
2164*4882a593Smuzhiyun if (ret)
2165*4882a593Smuzhiyun return ret;
2166*4882a593Smuzhiyun } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2167*4882a593Smuzhiyun ret = ci_get_svi2_voltage_table(rdev,
2168*4882a593Smuzhiyun &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2169*4882a593Smuzhiyun &pi->mvdd_voltage_table);
2170*4882a593Smuzhiyun if (ret)
2171*4882a593Smuzhiyun return ret;
2172*4882a593Smuzhiyun }
2173*4882a593Smuzhiyun
2174*4882a593Smuzhiyun if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
2175*4882a593Smuzhiyun si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
2176*4882a593Smuzhiyun &pi->mvdd_voltage_table);
2177*4882a593Smuzhiyun
2178*4882a593Smuzhiyun return 0;
2179*4882a593Smuzhiyun }
2180*4882a593Smuzhiyun
/*
 * Fill one SMC voltage level from an atom voltage table entry.  The
 * standard hi/lo SIDD voltages come from the CAC leakage tables; when
 * that lookup fails, the scaled nominal voltage is used for both.
 * All fields are stored big-endian as the SMC expects.
 */
static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
					  struct atom_voltage_table_entry *voltage_table,
					  SMU7_Discrete_VoltageLevel *smc_voltage_table)
{
	u16 scaled = voltage_table->value * VOLTAGE_SCALE;

	if (ci_get_std_voltage_value_sidd(rdev, voltage_table,
					  &smc_voltage_table->StdVoltageHiSidd,
					  &smc_voltage_table->StdVoltageLoSidd)) {
		/* lookup failed: fall back to the nominal voltage */
		smc_voltage_table->StdVoltageHiSidd = scaled;
		smc_voltage_table->StdVoltageLoSidd = scaled;
	}

	smc_voltage_table->Voltage = cpu_to_be16(scaled);
	smc_voltage_table->StdVoltageHiSidd =
		cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
	smc_voltage_table->StdVoltageLoSidd =
		cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
}
2202*4882a593Smuzhiyun
/*
 * Populate the VDDC levels of the SMC DPM table from the driver's
 * vddc voltage table.  The SMIO mask is only meaningful for
 * GPIO-controlled rails.  Always returns 0.
 */
static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
				      SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct atom_voltage_table *vt = &pi->vddc_voltage_table;
	unsigned int i;

	table->VddcLevelCount = vt->count;
	for (i = 0; i < table->VddcLevelCount; i++) {
		ci_populate_smc_voltage_table(rdev, &vt->entries[i],
					      &table->VddcLevel[i]);

		if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
			table->VddcLevel[i].Smio |= vt->entries[i].smio_low;
		else
			table->VddcLevel[i].Smio = 0;
	}
	/* count is sent to the SMC big-endian */
	table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);

	return 0;
}
2225*4882a593Smuzhiyun
/*
 * Populate the VDDCI levels of the SMC DPM table from the driver's
 * vddci voltage table.  The SMIO mask is only meaningful for
 * GPIO-controlled rails.  Always returns 0.
 */
static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
				       SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct atom_voltage_table *vt = &pi->vddci_voltage_table;
	unsigned int i;

	table->VddciLevelCount = vt->count;
	for (i = 0; i < table->VddciLevelCount; i++) {
		ci_populate_smc_voltage_table(rdev, &vt->entries[i],
					      &table->VddciLevel[i]);

		if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
			table->VddciLevel[i].Smio |= vt->entries[i].smio_low;
		else
			table->VddciLevel[i].Smio = 0;
	}
	/* count is sent to the SMC big-endian */
	table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);

	return 0;
}
2248*4882a593Smuzhiyun
/*
 * Populate the MVDD levels of the SMC DPM table from the driver's
 * mvdd voltage table.  The SMIO mask is only meaningful for
 * GPIO-controlled rails.  Always returns 0.
 */
static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
				      SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct atom_voltage_table *vt = &pi->mvdd_voltage_table;
	unsigned int i;

	table->MvddLevelCount = vt->count;
	for (i = 0; i < table->MvddLevelCount; i++) {
		ci_populate_smc_voltage_table(rdev, &vt->entries[i],
					      &table->MvddLevel[i]);

		if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
			table->MvddLevel[i].Smio |= vt->entries[i].smio_low;
		else
			table->MvddLevel[i].Smio = 0;
	}
	/* count is sent to the SMC big-endian */
	table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);

	return 0;
}
2271*4882a593Smuzhiyun
/*
 * Populate all three voltage level tables (VDDC, VDDCI, MVDD) in the
 * SMC DPM table.  Returns 0 on success or the first error encountered.
 */
static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
					  SMU7_Discrete_DpmTable *table)
{
	int ret;

	ret = ci_populate_smc_vddc_table(rdev, table);
	if (!ret)
		ret = ci_populate_smc_vddci_table(rdev, table);
	if (!ret)
		ret = ci_populate_smc_mvdd_table(rdev, table);

	return ret;
}
2291*4882a593Smuzhiyun
/*
 * Look up the MVDD voltage level for the given memory clock in the
 * mvdd-vs-mclk dependency table.
 *
 * NOTE(review): this function returns -EINVAL even when the lookup
 * succeeds (voltage->Voltage is populated but the final return is
 * still -EINVAL), so callers that treat non-zero as failure will never
 * consume the value — confirm whether the last return was meant to be 0.
 */
static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
				  SMU7_Discrete_VoltageLevel *voltage)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 i = 0;

	if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		/* first entry whose clock is high enough for mclk */
		for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
			if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
				voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
				break;
			}
		}

		/* mclk exceeds every entry in the dependency table */
		if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
			return -EINVAL;
	}

	return -EINVAL;
}
2312*4882a593Smuzhiyun
/*
 * Derive the standard hi/lo SIDD voltages for a vddc value from the
 * CAC leakage table.  The vddc-vs-sclk table is searched first for an
 * exact voltage match, then (if none) for the first entry at or above
 * the requested voltage; the matching index selects the leakage entry
 * (clamped to the leakage table size).  If nothing matches, the
 * defaults (nominal voltage * VOLTAGE_SCALE) set at the top remain and
 * 0 is still returned.  Returns -EINVAL only when the vddc-vs-sclk
 * table itself is missing.
 */
static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
{
	u16 v_index, idx;
	bool voltage_found = false;
	/* defaults in case no table entry matches */
	*std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
	*std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;

	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
		return -EINVAL;

	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
		/* pass 1: exact voltage match */
		for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
			if (voltage_table->value ==
			    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
				voltage_found = true;
				/* clamp index into the leakage table */
				if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
					idx = v_index;
				else
					idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
				*std_voltage_lo_sidd =
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
				*std_voltage_hi_sidd =
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
				break;
			}
		}

		/* pass 2: first entry at or above the requested voltage */
		if (!voltage_found) {
			for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
				if (voltage_table->value <=
				    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
					voltage_found = true;
					/* clamp index into the leakage table */
					if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
						idx = v_index;
					else
						idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
					*std_voltage_lo_sidd =
						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
					*std_voltage_hi_sidd =
						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
					break;
				}
			}
		}
	}

	return 0;
}
2363*4882a593Smuzhiyun
/*
 * Select a phase-shedding level for the given engine clock: the index
 * of the first limits entry whose sclk exceeds the requested clock,
 * defaulting to 1 when no entry does.
 */
static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
						  const struct radeon_phase_shedding_limits_table *limits,
						  u32 sclk,
						  u32 *phase_shedding)
{
	unsigned int i;

	*phase_shedding = 1;

	for (i = 0; i < limits->count; i++) {
		if (sclk >= limits->entries[i].sclk)
			continue;
		*phase_shedding = i;
		return;
	}
}
2380*4882a593Smuzhiyun
/*
 * Select a phase-shedding level for the given memory clock: the index
 * of the first limits entry whose mclk exceeds the requested clock,
 * defaulting to 1 when no entry does.
 */
static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
						  const struct radeon_phase_shedding_limits_table *limits,
						  u32 mclk,
						  u32 *phase_shedding)
{
	unsigned int i;

	*phase_shedding = 1;

	for (i = 0; i < limits->count; i++) {
		if (mclk >= limits->entries[i].mclk)
			continue;
		*phase_shedding = i;
		return;
	}
}
2397*4882a593Smuzhiyun
ci_init_arb_table_index(struct radeon_device * rdev)2398*4882a593Smuzhiyun static int ci_init_arb_table_index(struct radeon_device *rdev)
2399*4882a593Smuzhiyun {
2400*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
2401*4882a593Smuzhiyun u32 tmp;
2402*4882a593Smuzhiyun int ret;
2403*4882a593Smuzhiyun
2404*4882a593Smuzhiyun ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
2405*4882a593Smuzhiyun &tmp, pi->sram_end);
2406*4882a593Smuzhiyun if (ret)
2407*4882a593Smuzhiyun return ret;
2408*4882a593Smuzhiyun
2409*4882a593Smuzhiyun tmp &= 0x00FFFFFF;
2410*4882a593Smuzhiyun tmp |= MC_CG_ARB_FREQ_F1 << 24;
2411*4882a593Smuzhiyun
2412*4882a593Smuzhiyun return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
2413*4882a593Smuzhiyun tmp, pi->sram_end);
2414*4882a593Smuzhiyun }
2415*4882a593Smuzhiyun
/*
 * Find the voltage required for a given clock: the voltage of the
 * first dependency entry whose clock is at least the requested one,
 * or the last (highest) entry when the clock exceeds the table.
 * Returns 0 on success, -EINVAL for an empty table.
 */
static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
					 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
					 u32 clock, u32 *voltage)
{
	u32 i;
	u32 count = allowed_clock_voltage_table->count;

	if (!count)
		return -EINVAL;

	for (i = 0; i < count; i++) {
		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
			*voltage = allowed_clock_voltage_table->entries[i].v;
			return 0;
		}
	}

	/* clock above the table: use the highest available voltage */
	*voltage = allowed_clock_voltage_table->entries[count - 1].v;

	return 0;
}
2436*4882a593Smuzhiyun
ci_get_sleep_divider_id_from_clock(struct radeon_device * rdev,u32 sclk,u32 min_sclk_in_sr)2437*4882a593Smuzhiyun static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
2438*4882a593Smuzhiyun u32 sclk, u32 min_sclk_in_sr)
2439*4882a593Smuzhiyun {
2440*4882a593Smuzhiyun u32 i;
2441*4882a593Smuzhiyun u32 tmp;
2442*4882a593Smuzhiyun u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
2443*4882a593Smuzhiyun min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
2444*4882a593Smuzhiyun
2445*4882a593Smuzhiyun if (sclk < min)
2446*4882a593Smuzhiyun return 0;
2447*4882a593Smuzhiyun
2448*4882a593Smuzhiyun for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
2449*4882a593Smuzhiyun tmp = sclk / (1 << i);
2450*4882a593Smuzhiyun if (tmp >= min || i == 0)
2451*4882a593Smuzhiyun break;
2452*4882a593Smuzhiyun }
2453*4882a593Smuzhiyun
2454*4882a593Smuzhiyun return (u8)i;
2455*4882a593Smuzhiyun }
2456*4882a593Smuzhiyun
ci_initial_switch_from_arb_f0_to_f1(struct radeon_device * rdev)2457*4882a593Smuzhiyun static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
2458*4882a593Smuzhiyun {
2459*4882a593Smuzhiyun return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2460*4882a593Smuzhiyun }
2461*4882a593Smuzhiyun
ci_reset_to_default(struct radeon_device * rdev)2462*4882a593Smuzhiyun static int ci_reset_to_default(struct radeon_device *rdev)
2463*4882a593Smuzhiyun {
2464*4882a593Smuzhiyun return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2465*4882a593Smuzhiyun 0 : -EINVAL;
2466*4882a593Smuzhiyun }
2467*4882a593Smuzhiyun
ci_force_switch_to_arb_f0(struct radeon_device * rdev)2468*4882a593Smuzhiyun static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
2469*4882a593Smuzhiyun {
2470*4882a593Smuzhiyun u32 tmp;
2471*4882a593Smuzhiyun
2472*4882a593Smuzhiyun tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
2473*4882a593Smuzhiyun
2474*4882a593Smuzhiyun if (tmp == MC_CG_ARB_FREQ_F0)
2475*4882a593Smuzhiyun return 0;
2476*4882a593Smuzhiyun
2477*4882a593Smuzhiyun return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
2478*4882a593Smuzhiyun }
2479*4882a593Smuzhiyun
/*
 * Apply a board-specific MC arb timing tweak.  On devices 0x67B0 and
 * 0x67B1 whose MC_SEQ_MISC0 nibble at bits [11:8] reads 0x3
 * (presumably a particular memory vendor/config — confirm against MC
 * docs), byte 2 of dram_timing2 is recomputed from the engine clock
 * for two specific memory clock bands.  Otherwise dram_timimg2 is
 * left untouched.
 */
static void ci_register_patching_mc_arb(struct radeon_device *rdev,
					const u32 engine_clock,
					const u32 memory_clock,
					u32 *dram_timimg2)
{
	bool patch;
	u32 tmp, tmp2;

	tmp = RREG32(MC_SEQ_MISC0);
	/* redundant `? true : false` removed; comparison is already bool */
	patch = (tmp & 0x0000f00) == 0x300;

	if (patch &&
	    ((rdev->pdev->device == 0x67B0) ||
	     (rdev->pdev->device == 0x67B1))) {
		if ((memory_clock > 100000) && (memory_clock <= 125000)) {
			tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
			*dram_timimg2 &= ~0x00ff0000;
			*dram_timimg2 |= tmp2 << 16;
		} else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
			tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
			*dram_timimg2 &= ~0x00ff0000;
			*dram_timimg2 |= tmp2 << 16;
		}
	}
}
2505*4882a593Smuzhiyun
2506*4882a593Smuzhiyun
/*
 * Capture the MC arbiter DRAM timing registers for a given sclk/mclk
 * pair and store them into one SMU arb table entry (multi-byte fields
 * byte-swapped for the SMC).  Always returns 0.
 */
static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
						u32 sclk,
						u32 mclk,
						SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
{
	u32 timing, timing2, burst;

	/* have the VBIOS program the arb registers for this clock pair */
	radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);

	timing = RREG32(MC_ARB_DRAM_TIMING);
	timing2 = RREG32(MC_ARB_DRAM_TIMING2);
	burst = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;

	/* board-specific tweak of the timing2 image (Hawaii) */
	ci_register_patching_mc_arb(rdev, sclk, mclk, &timing2);

	arb_regs->McArbDramTiming = cpu_to_be32(timing);
	arb_regs->McArbDramTiming2 = cpu_to_be32(timing2);
	arb_regs->McArbBurstTime = (u8)burst;

	return 0;
}
2530*4882a593Smuzhiyun
ci_do_program_memory_timing_parameters(struct radeon_device * rdev)2531*4882a593Smuzhiyun static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
2532*4882a593Smuzhiyun {
2533*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
2534*4882a593Smuzhiyun SMU7_Discrete_MCArbDramTimingTable arb_regs;
2535*4882a593Smuzhiyun u32 i, j;
2536*4882a593Smuzhiyun int ret = 0;
2537*4882a593Smuzhiyun
2538*4882a593Smuzhiyun memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2539*4882a593Smuzhiyun
2540*4882a593Smuzhiyun for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2541*4882a593Smuzhiyun for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2542*4882a593Smuzhiyun ret = ci_populate_memory_timing_parameters(rdev,
2543*4882a593Smuzhiyun pi->dpm_table.sclk_table.dpm_levels[i].value,
2544*4882a593Smuzhiyun pi->dpm_table.mclk_table.dpm_levels[j].value,
2545*4882a593Smuzhiyun &arb_regs.entries[i][j]);
2546*4882a593Smuzhiyun if (ret)
2547*4882a593Smuzhiyun break;
2548*4882a593Smuzhiyun }
2549*4882a593Smuzhiyun }
2550*4882a593Smuzhiyun
2551*4882a593Smuzhiyun if (ret == 0)
2552*4882a593Smuzhiyun ret = ci_copy_bytes_to_smc(rdev,
2553*4882a593Smuzhiyun pi->arb_table_start,
2554*4882a593Smuzhiyun (u8 *)&arb_regs,
2555*4882a593Smuzhiyun sizeof(SMU7_Discrete_MCArbDramTimingTable),
2556*4882a593Smuzhiyun pi->sram_end);
2557*4882a593Smuzhiyun
2558*4882a593Smuzhiyun return ret;
2559*4882a593Smuzhiyun }
2560*4882a593Smuzhiyun
ci_program_memory_timing_parameters(struct radeon_device * rdev)2561*4882a593Smuzhiyun static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
2562*4882a593Smuzhiyun {
2563*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
2564*4882a593Smuzhiyun
2565*4882a593Smuzhiyun if (pi->need_update_smu7_dpm_table == 0)
2566*4882a593Smuzhiyun return 0;
2567*4882a593Smuzhiyun
2568*4882a593Smuzhiyun return ci_do_program_memory_timing_parameters(rdev);
2569*4882a593Smuzhiyun }
2570*4882a593Smuzhiyun
ci_populate_smc_initial_state(struct radeon_device * rdev,struct radeon_ps * radeon_boot_state)2571*4882a593Smuzhiyun static void ci_populate_smc_initial_state(struct radeon_device *rdev,
2572*4882a593Smuzhiyun struct radeon_ps *radeon_boot_state)
2573*4882a593Smuzhiyun {
2574*4882a593Smuzhiyun struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
2575*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
2576*4882a593Smuzhiyun u32 level = 0;
2577*4882a593Smuzhiyun
2578*4882a593Smuzhiyun for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2579*4882a593Smuzhiyun if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2580*4882a593Smuzhiyun boot_state->performance_levels[0].sclk) {
2581*4882a593Smuzhiyun pi->smc_state_table.GraphicsBootLevel = level;
2582*4882a593Smuzhiyun break;
2583*4882a593Smuzhiyun }
2584*4882a593Smuzhiyun }
2585*4882a593Smuzhiyun
2586*4882a593Smuzhiyun for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2587*4882a593Smuzhiyun if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2588*4882a593Smuzhiyun boot_state->performance_levels[0].mclk) {
2589*4882a593Smuzhiyun pi->smc_state_table.MemoryBootLevel = level;
2590*4882a593Smuzhiyun break;
2591*4882a593Smuzhiyun }
2592*4882a593Smuzhiyun }
2593*4882a593Smuzhiyun }
2594*4882a593Smuzhiyun
ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table * dpm_table)2595*4882a593Smuzhiyun static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2596*4882a593Smuzhiyun {
2597*4882a593Smuzhiyun u32 i;
2598*4882a593Smuzhiyun u32 mask_value = 0;
2599*4882a593Smuzhiyun
2600*4882a593Smuzhiyun for (i = dpm_table->count; i > 0; i--) {
2601*4882a593Smuzhiyun mask_value = mask_value << 1;
2602*4882a593Smuzhiyun if (dpm_table->dpm_levels[i-1].enabled)
2603*4882a593Smuzhiyun mask_value |= 0x1;
2604*4882a593Smuzhiyun else
2605*4882a593Smuzhiyun mask_value &= 0xFFFFFFFE;
2606*4882a593Smuzhiyun }
2607*4882a593Smuzhiyun
2608*4882a593Smuzhiyun return mask_value;
2609*4882a593Smuzhiyun }
2610*4882a593Smuzhiyun
/*
 * Fill the SMC link (PCIe) level table from the pcie speed dpm table
 * and record the level count and enable mask in the power info.
 */
static void ci_populate_smc_link_level(struct radeon_device *rdev,
				       SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_dpm_table *dpm_table = &pi->dpm_table;
	u32 i;

	for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
		table->LinkLevel[i].EnabledForActivity = 1;
		table->LinkLevel[i].PcieGenSpeed =
			(u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
		/* param1 holds the raw lane count; SMC wants it encoded */
		table->LinkLevel[i].PcieLaneCount =
			r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
		/* fixed down/up hysteresis thresholds, big-endian for the SMC */
		table->LinkLevel[i].DownT = cpu_to_be32(5);
		table->LinkLevel[i].UpT = cpu_to_be32(30);
	}

	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
	pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
}
2632*4882a593Smuzhiyun
/*
 * Fill the SMC UVD level table from the UVD clock/voltage dependency
 * table, fetching vclk/dclk post dividers from the VBIOS and
 * byte-swapping multi-byte fields for the SMC.  Returns -EINVAL when
 * the dependency table is empty, otherwise the last lookup status.
 */
static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
				     SMU7_Discrete_DpmTable *table)
{
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;
	u32 i;

	table->UvdLevelCount =
		rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;

	for (i = 0; i < table->UvdLevelCount; i++) {
		table->UvdLevel[i].MinVddcPhases = 1;
		table->UvdLevel[i].VclkFrequency =
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk;
		table->UvdLevel[i].DclkFrequency =
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk;
		table->UvdLevel[i].MinVddc =
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v * VOLTAGE_SCALE;

		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->UvdLevel[i].VclkFrequency, false, &dividers);
		if (ret)
			return ret;
		table->UvdLevel[i].VclkDivider = (u8)dividers.post_divider;

		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->UvdLevel[i].DclkFrequency, false, &dividers);
		if (ret)
			return ret;
		table->UvdLevel[i].DclkDivider = (u8)dividers.post_divider;

		/* SMC expects big-endian multi-byte fields */
		table->UvdLevel[i].VclkFrequency = cpu_to_be32(table->UvdLevel[i].VclkFrequency);
		table->UvdLevel[i].DclkFrequency = cpu_to_be32(table->UvdLevel[i].DclkFrequency);
		table->UvdLevel[i].MinVddc = cpu_to_be16(table->UvdLevel[i].MinVddc);
	}

	return ret;
}
2675*4882a593Smuzhiyun
/*
 * Fill the SMC VCE level table from the VCE clock/voltage dependency
 * table.  Returns -EINVAL when the dependency table is empty,
 * otherwise the status of the last divider lookup.
 */
static int ci_populate_smc_vce_level(struct radeon_device *rdev,
				     SMU7_Discrete_DpmTable *table)
{
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;
	u32 i;

	table->VceLevelCount =
		rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;

	for (i = 0; i < table->VceLevelCount; i++) {
		table->VceLevel[i].MinPhases = 1;
		table->VceLevel[i].Frequency =
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk;
		table->VceLevel[i].MinVoltage =
			(u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v * VOLTAGE_SCALE;

		/* ask the VBIOS for the post divider at this frequency */
		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->VceLevel[i].Frequency, false, &dividers);
		if (ret)
			return ret;
		table->VceLevel[i].Divider = (u8)dividers.post_divider;

		/* SMC expects big-endian multi-byte fields */
		table->VceLevel[i].Frequency = cpu_to_be32(table->VceLevel[i].Frequency);
		table->VceLevel[i].MinVoltage = cpu_to_be16(table->VceLevel[i].MinVoltage);
	}

	return ret;
}
2708*4882a593Smuzhiyun
/*
 * Fill the SMC ACP level table from the ACP clock/voltage dependency
 * table.  Returns -EINVAL when the dependency table is empty,
 * otherwise the status of the last divider lookup.
 */
static int ci_populate_smc_acp_level(struct radeon_device *rdev,
				     SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->AcpLevelCount = (u8)
		(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);

	for (count = 0; count < table->AcpLevelCount; count++) {
		table->AcpLevel[count].Frequency =
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
		/*
		 * NOTE(review): unlike the UVD/VCE/SAMU siblings this does
		 * NOT multiply .v by VOLTAGE_SCALE -- confirm whether that
		 * is intentional for ACP before "fixing" it.
		 */
		table->AcpLevel[count].MinVoltage =
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
		table->AcpLevel[count].MinPhases = 1;

		/* ask the VBIOS for the post divider at this frequency */
		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->AcpLevel[count].Frequency, false, &dividers);
		if (ret)
			return ret;

		table->AcpLevel[count].Divider = (u8)dividers.post_divider;

		/* SMC expects big-endian multi-byte fields */
		table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
		table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
	}

	return ret;
}
2740*4882a593Smuzhiyun
/*
 * Fill the SMC SAMU level table from the SAMU clock/voltage
 * dependency table.  Returns -EINVAL when the dependency table is
 * empty, otherwise the status of the last divider lookup.
 */
static int ci_populate_smc_samu_level(struct radeon_device *rdev,
				      SMU7_Discrete_DpmTable *table)
{
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;
	u32 i;

	table->SamuLevelCount =
		rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;

	for (i = 0; i < table->SamuLevelCount; i++) {
		table->SamuLevel[i].MinPhases = 1;
		table->SamuLevel[i].Frequency =
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk;
		table->SamuLevel[i].MinVoltage =
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v * VOLTAGE_SCALE;

		/* ask the VBIOS for the post divider at this frequency */
		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->SamuLevel[i].Frequency, false, &dividers);
		if (ret)
			return ret;
		table->SamuLevel[i].Divider = (u8)dividers.post_divider;

		/* SMC expects big-endian multi-byte fields */
		table->SamuLevel[i].Frequency = cpu_to_be32(table->SamuLevel[i].Frequency);
		table->SamuLevel[i].MinVoltage = cpu_to_be16(table->SamuLevel[i].MinVoltage);
	}

	return ret;
}
2772*4882a593Smuzhiyun
/*
 * Compute the MPLL register images for a target memory clock.
 *
 * Starts from the boot-time clock register values cached in
 * ci_power_info, asks the VBIOS for the MPLL dividers, optionally
 * applies memory spread-spectrum, and writes the resulting register
 * images (CPU byte order; callers byte-swap later) into @mclk.
 *
 * Returns 0 on success or the error from the divider lookup.
 */
static int ci_calculate_mclk_params(struct radeon_device *rdev,
				    u32 memory_clock,
				    SMU7_Discrete_MemoryLevel *mclk,
				    bool strobe_mode,
				    bool dll_state_on)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	/* start from the cached boot-time register values */
	u32 dll_cntl = pi->clock_registers.dll_cntl;
	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
	u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
	u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
	u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
	u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
	u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
	u32 mpll_ss1 = pi->clock_registers.mpll_ss1;
	u32 mpll_ss2 = pi->clock_registers.mpll_ss2;
	struct atom_mpll_param mpll_param;
	int ret;

	ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
	if (ret)
		return ret;

	/* patch the divider fields returned by the VBIOS into the images */
	mpll_func_cntl &= ~BWCTRL_MASK;
	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);

	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);

	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);

	/* GDDR5 also needs the DQ-side yclk select/post divider */
	if (pi->mem_gddr5) {
		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
			YCLK_POST_DIV(mpll_param.post_div);
	}

	if (pi->caps_mclk_ss_support) {
		struct radeon_atom_ss ss;
		u32 freq_nom;
		u32 tmp;
		u32 reference_clock = rdev->clock.mpll.reference_freq;

		/* nominal VCO frequency: mclk x 4 (QDR) or x 2, times post div */
		if (mpll_param.qdr == 1)
			freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
		else
			freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);

		/* tmp = (freq_nom / refclk)^2, used in the CLKV formula below */
		tmp = (freq_nom / reference_clock);
		tmp = tmp * tmp;
		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
			/* spread-spectrum step and delta from the VBIOS ss info */
			u32 clks = reference_clock * 5 / ss.rate;
			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);

			mpll_ss1 &= ~CLKV_MASK;
			mpll_ss1 |= CLKV(clkv);

			mpll_ss2 &= ~CLKS_MASK;
			mpll_ss2 |= CLKS(clks);
		}
	}

	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);

	/* MRDCK*_PDNB deasserted = DLL powered down */
	if (dll_state_on)
		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
	else
		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

	/* hand the finished register images back to the caller */
	mclk->MclkFrequency = memory_clock;
	mclk->MpllFuncCntl = mpll_func_cntl;
	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
	mclk->DllCntl = dll_cntl;
	mclk->MpllSs1 = mpll_ss1;
	mclk->MpllSs2 = mpll_ss2;

	return 0;
}
2859*4882a593Smuzhiyun
/*
 * Fill one SMC memory dpm level for @memory_clock: look up the
 * voltage floors from the dependency tables, decide stutter/strobe/
 * EDC/DLL behavior, compute the MPLL register images, then convert
 * the multi-byte fields to the big-endian layout the SMC expects.
 *
 * Returns 0 on success or a negative error from a dependency/divider
 * lookup.
 */
static int ci_populate_single_memory_level(struct radeon_device *rdev,
					   u32 memory_clock,
					   SMU7_Discrete_MemoryLevel *memory_level)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;
	bool dll_state_on;

	/* minimum vddc/vddci/mvdd for this memory clock, when tables exist */
	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(rdev,
						    &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						    memory_clock, &memory_level->MinVddc);
		if (ret)
			return ret;
	}

	if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(rdev,
						    &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						    memory_clock, &memory_level->MinVddci);
		if (ret)
			return ret;
	}

	if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(rdev,
						    &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
						    memory_clock, &memory_level->MinMvdd);
		if (ret)
			return ret;
	}

	memory_level->MinVddcPhases = 1;

	/* phase shedding may raise MinVddcPhases; errors are ignored here */
	if (pi->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_mclk(rdev,
						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
						      memory_clock,
						      &memory_level->MinVddcPhases);

	memory_level->EnabledForThrottle = 1;
	memory_level->UpH = 0;
	memory_level->DownH = 100;
	memory_level->VoltageDownH = 0;
	memory_level->ActivityLevel = (u16)pi->mclk_activity_target;

	memory_level->StutterEnable = false;
	memory_level->StrobeEnable = false;
	memory_level->EdcReadEnable = false;
	memory_level->EdcWriteEnable = false;
	memory_level->RttEnable = false;

	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	/*
	 * Memory self-refresh stutter: only below the threshold, with UVD
	 * idle, display stutter enabled, and at most two active crtcs.
	 */
	if (pi->mclk_stutter_mode_threshold &&
	    (memory_clock <= pi->mclk_stutter_mode_threshold) &&
	    (pi->uvd_enabled == false) &&
	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
	    (rdev->pm.dpm.new_active_crtc_count <= 2))
		memory_level->StutterEnable = true;

	if (pi->mclk_strobe_mode_threshold &&
	    (memory_clock <= pi->mclk_strobe_mode_threshold))
		memory_level->StrobeEnable = 1;

	if (pi->mem_gddr5) {
		memory_level->StrobeRatio =
			si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
		if (pi->mclk_edc_enable_threshold &&
		    (memory_clock > pi->mclk_edc_enable_threshold))
			memory_level->EdcReadEnable = true;

		if (pi->mclk_edc_wr_enable_threshold &&
		    (memory_clock > pi->mclk_edc_wr_enable_threshold))
			memory_level->EdcWriteEnable = true;

		/*
		 * DLL state comes from MC_SEQ_MISC5/6 straps, selected by
		 * comparing the strobe ratio against the MC_SEQ_MISC7 field.
		 */
		if (memory_level->StrobeEnable) {
			if (si_get_mclk_frequency_ratio(memory_clock, true) >=
			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
			else
				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
		} else {
			dll_state_on = pi->dll_default_on;
		}
	} else {
		/* DDR3 path: fixed ratio formula, DLL strap from MC_SEQ_MISC5 */
		memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
		dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
	}

	ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
	if (ret)
		return ret;

	/* convert to the big-endian layout the SMC firmware expects */
	memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
	memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
	memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
	memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);

	memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
	memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
	memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
	memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
	memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
	memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
	memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
	memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
	memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
	memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
	memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);

	return 0;
}
2973*4882a593Smuzhiyun
ci_populate_smc_acpi_level(struct radeon_device * rdev,SMU7_Discrete_DpmTable * table)2974*4882a593Smuzhiyun static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
2975*4882a593Smuzhiyun SMU7_Discrete_DpmTable *table)
2976*4882a593Smuzhiyun {
2977*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
2978*4882a593Smuzhiyun struct atom_clock_dividers dividers;
2979*4882a593Smuzhiyun SMU7_Discrete_VoltageLevel voltage_level;
2980*4882a593Smuzhiyun u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
2981*4882a593Smuzhiyun u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
2982*4882a593Smuzhiyun u32 dll_cntl = pi->clock_registers.dll_cntl;
2983*4882a593Smuzhiyun u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2984*4882a593Smuzhiyun int ret;
2985*4882a593Smuzhiyun
2986*4882a593Smuzhiyun table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
2987*4882a593Smuzhiyun
2988*4882a593Smuzhiyun if (pi->acpi_vddc)
2989*4882a593Smuzhiyun table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
2990*4882a593Smuzhiyun else
2991*4882a593Smuzhiyun table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
2992*4882a593Smuzhiyun
2993*4882a593Smuzhiyun table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
2994*4882a593Smuzhiyun
2995*4882a593Smuzhiyun table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;
2996*4882a593Smuzhiyun
2997*4882a593Smuzhiyun ret = radeon_atom_get_clock_dividers(rdev,
2998*4882a593Smuzhiyun COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2999*4882a593Smuzhiyun table->ACPILevel.SclkFrequency, false, ÷rs);
3000*4882a593Smuzhiyun if (ret)
3001*4882a593Smuzhiyun return ret;
3002*4882a593Smuzhiyun
3003*4882a593Smuzhiyun table->ACPILevel.SclkDid = (u8)dividers.post_divider;
3004*4882a593Smuzhiyun table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3005*4882a593Smuzhiyun table->ACPILevel.DeepSleepDivId = 0;
3006*4882a593Smuzhiyun
3007*4882a593Smuzhiyun spll_func_cntl &= ~SPLL_PWRON;
3008*4882a593Smuzhiyun spll_func_cntl |= SPLL_RESET;
3009*4882a593Smuzhiyun
3010*4882a593Smuzhiyun spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
3011*4882a593Smuzhiyun spll_func_cntl_2 |= SCLK_MUX_SEL(4);
3012*4882a593Smuzhiyun
3013*4882a593Smuzhiyun table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
3014*4882a593Smuzhiyun table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
3015*4882a593Smuzhiyun table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
3016*4882a593Smuzhiyun table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
3017*4882a593Smuzhiyun table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
3018*4882a593Smuzhiyun table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3019*4882a593Smuzhiyun table->ACPILevel.CcPwrDynRm = 0;
3020*4882a593Smuzhiyun table->ACPILevel.CcPwrDynRm1 = 0;
3021*4882a593Smuzhiyun
3022*4882a593Smuzhiyun table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
3023*4882a593Smuzhiyun table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
3024*4882a593Smuzhiyun table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
3025*4882a593Smuzhiyun table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
3026*4882a593Smuzhiyun table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
3027*4882a593Smuzhiyun table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
3028*4882a593Smuzhiyun table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
3029*4882a593Smuzhiyun table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
3030*4882a593Smuzhiyun table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
3031*4882a593Smuzhiyun table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
3032*4882a593Smuzhiyun table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
3033*4882a593Smuzhiyun
3034*4882a593Smuzhiyun table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
3035*4882a593Smuzhiyun table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
3036*4882a593Smuzhiyun
3037*4882a593Smuzhiyun if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
3038*4882a593Smuzhiyun if (pi->acpi_vddci)
3039*4882a593Smuzhiyun table->MemoryACPILevel.MinVddci =
3040*4882a593Smuzhiyun cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
3041*4882a593Smuzhiyun else
3042*4882a593Smuzhiyun table->MemoryACPILevel.MinVddci =
3043*4882a593Smuzhiyun cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
3044*4882a593Smuzhiyun }
3045*4882a593Smuzhiyun
3046*4882a593Smuzhiyun if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
3047*4882a593Smuzhiyun table->MemoryACPILevel.MinMvdd = 0;
3048*4882a593Smuzhiyun else
3049*4882a593Smuzhiyun table->MemoryACPILevel.MinMvdd =
3050*4882a593Smuzhiyun cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
3051*4882a593Smuzhiyun
3052*4882a593Smuzhiyun mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
3053*4882a593Smuzhiyun mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
3054*4882a593Smuzhiyun
3055*4882a593Smuzhiyun dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
3056*4882a593Smuzhiyun
3057*4882a593Smuzhiyun table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
3058*4882a593Smuzhiyun table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
3059*4882a593Smuzhiyun table->MemoryACPILevel.MpllAdFuncCntl =
3060*4882a593Smuzhiyun cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
3061*4882a593Smuzhiyun table->MemoryACPILevel.MpllDqFuncCntl =
3062*4882a593Smuzhiyun cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
3063*4882a593Smuzhiyun table->MemoryACPILevel.MpllFuncCntl =
3064*4882a593Smuzhiyun cpu_to_be32(pi->clock_registers.mpll_func_cntl);
3065*4882a593Smuzhiyun table->MemoryACPILevel.MpllFuncCntl_1 =
3066*4882a593Smuzhiyun cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
3067*4882a593Smuzhiyun table->MemoryACPILevel.MpllFuncCntl_2 =
3068*4882a593Smuzhiyun cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
3069*4882a593Smuzhiyun table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
3070*4882a593Smuzhiyun table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
3071*4882a593Smuzhiyun
3072*4882a593Smuzhiyun table->MemoryACPILevel.EnabledForThrottle = 0;
3073*4882a593Smuzhiyun table->MemoryACPILevel.EnabledForActivity = 0;
3074*4882a593Smuzhiyun table->MemoryACPILevel.UpH = 0;
3075*4882a593Smuzhiyun table->MemoryACPILevel.DownH = 100;
3076*4882a593Smuzhiyun table->MemoryACPILevel.VoltageDownH = 0;
3077*4882a593Smuzhiyun table->MemoryACPILevel.ActivityLevel =
3078*4882a593Smuzhiyun cpu_to_be16((u16)pi->mclk_activity_target);
3079*4882a593Smuzhiyun
3080*4882a593Smuzhiyun table->MemoryACPILevel.StutterEnable = false;
3081*4882a593Smuzhiyun table->MemoryACPILevel.StrobeEnable = false;
3082*4882a593Smuzhiyun table->MemoryACPILevel.EdcReadEnable = false;
3083*4882a593Smuzhiyun table->MemoryACPILevel.EdcWriteEnable = false;
3084*4882a593Smuzhiyun table->MemoryACPILevel.RttEnable = false;
3085*4882a593Smuzhiyun
3086*4882a593Smuzhiyun return 0;
3087*4882a593Smuzhiyun }
3088*4882a593Smuzhiyun
3089*4882a593Smuzhiyun
/*
 * Enable or disable ultra-low-voltage (ULV) mode via an SMC message.
 * No-op (success) when the board does not support ULV.
 */
static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ulv_parm *ulv = &pi->ulv;
	PPSMC_Msg msg;

	if (!ulv->supported)
		return 0;

	msg = enable ? PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV;
	if (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK)
		return 0;

	return -EINVAL;
}
3106*4882a593Smuzhiyun
/*
 * Fill the SMU7 ULV state: the VDDC offset (in voltage units for GPIO
 * control, as a VID code for SVID2) relative to the lowest sclk voltage.
 * Marks ULV unsupported when no response time is configured.
 */
static int ci_populate_ulv_level(struct radeon_device *rdev,
				 SMU7_Discrete_Ulv *state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;
	u16 min_vddc;

	state->CcPwrDynRm = 0;
	state->CcPwrDynRm1 = 0;

	/* Zero means the platform supplied no ULV voltage; disable ULV. */
	if (ulv_voltage == 0) {
		pi->ulv.supported = false;
		return 0;
	}

	min_vddc = rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v;

	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		/* GPIO voltage control: offset expressed directly in mV units. */
		if (ulv_voltage > min_vddc)
			state->VddcOffset = 0;
		else
			state->VddcOffset = min_vddc - ulv_voltage;
	} else {
		/* SVID2 voltage control: offset encoded as a VID step count. */
		if (ulv_voltage > min_vddc)
			state->VddcOffsetVid = 0;
		else
			state->VddcOffsetVid = (u8)
				((min_vddc - ulv_voltage) *
				 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
	}
	state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;

	/* The SMC consumes these fields in big-endian layout. */
	state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
	state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
	state->VddcOffset = cpu_to_be16(state->VddcOffset);

	return 0;
}
3143*4882a593Smuzhiyun
/*
 * Compute the SPLL register images for a target engine clock and store
 * them in @sclk.  Pure computation on cached register values — nothing
 * is written to hardware here; byte-swapping for the SMC is done later
 * by the caller.
 *
 * Returns 0 on success or the error from the atom divider lookup.
 */
static int ci_calculate_sclk_params(struct radeon_device *rdev,
				    u32 engine_clock,
				    SMU7_Discrete_GraphicsLevel *sclk)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct atom_clock_dividers dividers;
	u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
	u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
	u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
	u32 reference_clock = rdev->clock.spll.reference_freq;
	u32 reference_divider;
	u32 fbdiv;
	int ret;

	/* Ask the atom tables for dividers matching the requested sclk. */
	ret = radeon_atom_get_clock_dividers(rdev,
					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
					     engine_clock, false, &dividers);
	if (ret)
		return ret;

	reference_divider = 1 + dividers.ref_div;
	fbdiv = dividers.fb_div & 0x3FFFFFF;

	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
	spll_func_cntl_3 |= SPLL_DITHEN;

	if (pi->caps_sclk_ss_support) {
		struct radeon_atom_ss ss;
		u32 vco_freq = engine_clock * dividers.post_div;

		/* Enable spread spectrum only when the VBIOS provides SS
		 * parameters for this VCO frequency. */
		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum &= ~CLK_S_MASK;
			cg_spll_spread_spectrum |= CLK_S(clk_s);
			cg_spll_spread_spectrum |= SSEN;

			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
		}
	}

	sclk->SclkFrequency = engine_clock;
	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
	sclk->SclkDid = (u8)dividers.post_divider;

	return 0;
}
3199*4882a593Smuzhiyun
/*
 * Populate one SMU7 graphics (sclk) DPM level: SPLL parameters, minimum
 * VDDC, activity target and deep-sleep divider.  All multi-byte fields
 * are converted to the big-endian layout the SMC expects before return,
 * so the struct must not be re-read as host-endian afterwards.
 */
static int ci_populate_single_graphic_level(struct radeon_device *rdev,
					    u32 engine_clock,
					    u16 sclk_activity_level_t,
					    SMU7_Discrete_GraphicsLevel *graphic_level)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
	if (ret)
		return ret;

	/* Minimum VDDC required to run at this engine clock. */
	ret = ci_get_dependency_volt_by_clk(rdev,
					    &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
					    engine_clock, &graphic_level->MinVddc);
	if (ret)
		return ret;

	graphic_level->SclkFrequency = engine_clock;

	graphic_level->Flags = 0;
	graphic_level->MinVddcPhases = 1;

	/* Phase shedding may lower the required phase count for low clocks. */
	if (pi->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_sclk(rdev,
						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
						      engine_clock,
						      &graphic_level->MinVddcPhases);

	graphic_level->ActivityLevel = sclk_activity_level_t;

	graphic_level->CcPwrDynRm = 0;
	graphic_level->CcPwrDynRm1 = 0;
	graphic_level->EnabledForThrottle = 1;
	graphic_level->UpH = 0;
	graphic_level->DownH = 0;
	graphic_level->VoltageDownH = 0;
	graphic_level->PowerThrottle = 0;

	if (pi->caps_sclk_ds)
		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
										   engine_clock,
										   CISLAND_MINIMUM_ENGINE_CLOCK);

	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	/* Byte-swap every field wider than one byte for the SMC. */
	graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
	graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
	graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
	graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
	graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
	graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
	graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
	graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
	graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
	graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
	graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);

	return 0;
}
3260*4882a593Smuzhiyun
ci_populate_all_graphic_levels(struct radeon_device * rdev)3261*4882a593Smuzhiyun static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
3262*4882a593Smuzhiyun {
3263*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
3264*4882a593Smuzhiyun struct ci_dpm_table *dpm_table = &pi->dpm_table;
3265*4882a593Smuzhiyun u32 level_array_address = pi->dpm_table_start +
3266*4882a593Smuzhiyun offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
3267*4882a593Smuzhiyun u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
3268*4882a593Smuzhiyun SMU7_MAX_LEVELS_GRAPHICS;
3269*4882a593Smuzhiyun SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
3270*4882a593Smuzhiyun u32 i, ret;
3271*4882a593Smuzhiyun
3272*4882a593Smuzhiyun memset(levels, 0, level_array_size);
3273*4882a593Smuzhiyun
3274*4882a593Smuzhiyun for (i = 0; i < dpm_table->sclk_table.count; i++) {
3275*4882a593Smuzhiyun ret = ci_populate_single_graphic_level(rdev,
3276*4882a593Smuzhiyun dpm_table->sclk_table.dpm_levels[i].value,
3277*4882a593Smuzhiyun (u16)pi->activity_target[i],
3278*4882a593Smuzhiyun &pi->smc_state_table.GraphicsLevel[i]);
3279*4882a593Smuzhiyun if (ret)
3280*4882a593Smuzhiyun return ret;
3281*4882a593Smuzhiyun if (i > 1)
3282*4882a593Smuzhiyun pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
3283*4882a593Smuzhiyun if (i == (dpm_table->sclk_table.count - 1))
3284*4882a593Smuzhiyun pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
3285*4882a593Smuzhiyun PPSMC_DISPLAY_WATERMARK_HIGH;
3286*4882a593Smuzhiyun }
3287*4882a593Smuzhiyun pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
3288*4882a593Smuzhiyun
3289*4882a593Smuzhiyun pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
3290*4882a593Smuzhiyun pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3291*4882a593Smuzhiyun ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
3292*4882a593Smuzhiyun
3293*4882a593Smuzhiyun ret = ci_copy_bytes_to_smc(rdev, level_array_address,
3294*4882a593Smuzhiyun (u8 *)levels, level_array_size,
3295*4882a593Smuzhiyun pi->sram_end);
3296*4882a593Smuzhiyun if (ret)
3297*4882a593Smuzhiyun return ret;
3298*4882a593Smuzhiyun
3299*4882a593Smuzhiyun return 0;
3300*4882a593Smuzhiyun }
3301*4882a593Smuzhiyun
/*
 * Thin wrapper around ci_populate_ulv_level(), kept so the SMC-table
 * population code has a uniform "populate state" entry point.
 */
static int ci_populate_ulv_state(struct radeon_device *rdev,
				 SMU7_Discrete_Ulv *ulv_level)
{
	return ci_populate_ulv_level(rdev, ulv_level);
}
3307*4882a593Smuzhiyun
ci_populate_all_memory_levels(struct radeon_device * rdev)3308*4882a593Smuzhiyun static int ci_populate_all_memory_levels(struct radeon_device *rdev)
3309*4882a593Smuzhiyun {
3310*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
3311*4882a593Smuzhiyun struct ci_dpm_table *dpm_table = &pi->dpm_table;
3312*4882a593Smuzhiyun u32 level_array_address = pi->dpm_table_start +
3313*4882a593Smuzhiyun offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
3314*4882a593Smuzhiyun u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
3315*4882a593Smuzhiyun SMU7_MAX_LEVELS_MEMORY;
3316*4882a593Smuzhiyun SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
3317*4882a593Smuzhiyun u32 i, ret;
3318*4882a593Smuzhiyun
3319*4882a593Smuzhiyun memset(levels, 0, level_array_size);
3320*4882a593Smuzhiyun
3321*4882a593Smuzhiyun for (i = 0; i < dpm_table->mclk_table.count; i++) {
3322*4882a593Smuzhiyun if (dpm_table->mclk_table.dpm_levels[i].value == 0)
3323*4882a593Smuzhiyun return -EINVAL;
3324*4882a593Smuzhiyun ret = ci_populate_single_memory_level(rdev,
3325*4882a593Smuzhiyun dpm_table->mclk_table.dpm_levels[i].value,
3326*4882a593Smuzhiyun &pi->smc_state_table.MemoryLevel[i]);
3327*4882a593Smuzhiyun if (ret)
3328*4882a593Smuzhiyun return ret;
3329*4882a593Smuzhiyun }
3330*4882a593Smuzhiyun
3331*4882a593Smuzhiyun pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
3332*4882a593Smuzhiyun
3333*4882a593Smuzhiyun if ((dpm_table->mclk_table.count >= 2) &&
3334*4882a593Smuzhiyun ((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) {
3335*4882a593Smuzhiyun pi->smc_state_table.MemoryLevel[1].MinVddc =
3336*4882a593Smuzhiyun pi->smc_state_table.MemoryLevel[0].MinVddc;
3337*4882a593Smuzhiyun pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
3338*4882a593Smuzhiyun pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
3339*4882a593Smuzhiyun }
3340*4882a593Smuzhiyun
3341*4882a593Smuzhiyun pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
3342*4882a593Smuzhiyun
3343*4882a593Smuzhiyun pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
3344*4882a593Smuzhiyun pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3345*4882a593Smuzhiyun ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
3346*4882a593Smuzhiyun
3347*4882a593Smuzhiyun pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
3348*4882a593Smuzhiyun PPSMC_DISPLAY_WATERMARK_HIGH;
3349*4882a593Smuzhiyun
3350*4882a593Smuzhiyun ret = ci_copy_bytes_to_smc(rdev, level_array_address,
3351*4882a593Smuzhiyun (u8 *)levels, level_array_size,
3352*4882a593Smuzhiyun pi->sram_end);
3353*4882a593Smuzhiyun if (ret)
3354*4882a593Smuzhiyun return ret;
3355*4882a593Smuzhiyun
3356*4882a593Smuzhiyun return 0;
3357*4882a593Smuzhiyun }
3358*4882a593Smuzhiyun
/*
 * Reset one driver-side DPM table: record the nominal level count and
 * mark every level slot disabled.
 */
static void ci_reset_single_dpm_table(struct radeon_device *rdev,
				      struct ci_single_dpm_table *dpm_table,
				      u32 count)
{
	u32 lvl;

	dpm_table->count = count;
	for (lvl = 0; lvl < MAX_REGULAR_DPM_NUMBER; lvl++)
		dpm_table->dpm_levels[lvl].enabled = false;
}
3369*4882a593Smuzhiyun
/*
 * Fill one PCIe speed table entry: the link gen goes in @value, the
 * lane count in @param1, and the slot is marked enabled.
 */
static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table,
				      u32 index, u32 pcie_gen, u32 pcie_lanes)
{
	dpm_table->dpm_levels[index].enabled = true;
	dpm_table->dpm_levels[index].value = pcie_gen;
	dpm_table->dpm_levels[index].param1 = pcie_lanes;
}
3377*4882a593Smuzhiyun
ci_setup_default_pcie_tables(struct radeon_device * rdev)3378*4882a593Smuzhiyun static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
3379*4882a593Smuzhiyun {
3380*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
3381*4882a593Smuzhiyun
3382*4882a593Smuzhiyun if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
3383*4882a593Smuzhiyun return -EINVAL;
3384*4882a593Smuzhiyun
3385*4882a593Smuzhiyun if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
3386*4882a593Smuzhiyun pi->pcie_gen_powersaving = pi->pcie_gen_performance;
3387*4882a593Smuzhiyun pi->pcie_lane_powersaving = pi->pcie_lane_performance;
3388*4882a593Smuzhiyun } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
3389*4882a593Smuzhiyun pi->pcie_gen_performance = pi->pcie_gen_powersaving;
3390*4882a593Smuzhiyun pi->pcie_lane_performance = pi->pcie_lane_powersaving;
3391*4882a593Smuzhiyun }
3392*4882a593Smuzhiyun
3393*4882a593Smuzhiyun ci_reset_single_dpm_table(rdev,
3394*4882a593Smuzhiyun &pi->dpm_table.pcie_speed_table,
3395*4882a593Smuzhiyun SMU7_MAX_LEVELS_LINK);
3396*4882a593Smuzhiyun
3397*4882a593Smuzhiyun if (rdev->family == CHIP_BONAIRE)
3398*4882a593Smuzhiyun ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3399*4882a593Smuzhiyun pi->pcie_gen_powersaving.min,
3400*4882a593Smuzhiyun pi->pcie_lane_powersaving.max);
3401*4882a593Smuzhiyun else
3402*4882a593Smuzhiyun ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3403*4882a593Smuzhiyun pi->pcie_gen_powersaving.min,
3404*4882a593Smuzhiyun pi->pcie_lane_powersaving.min);
3405*4882a593Smuzhiyun ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
3406*4882a593Smuzhiyun pi->pcie_gen_performance.min,
3407*4882a593Smuzhiyun pi->pcie_lane_performance.min);
3408*4882a593Smuzhiyun ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
3409*4882a593Smuzhiyun pi->pcie_gen_powersaving.min,
3410*4882a593Smuzhiyun pi->pcie_lane_powersaving.max);
3411*4882a593Smuzhiyun ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
3412*4882a593Smuzhiyun pi->pcie_gen_performance.min,
3413*4882a593Smuzhiyun pi->pcie_lane_performance.max);
3414*4882a593Smuzhiyun ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
3415*4882a593Smuzhiyun pi->pcie_gen_powersaving.max,
3416*4882a593Smuzhiyun pi->pcie_lane_powersaving.max);
3417*4882a593Smuzhiyun ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
3418*4882a593Smuzhiyun pi->pcie_gen_performance.max,
3419*4882a593Smuzhiyun pi->pcie_lane_performance.max);
3420*4882a593Smuzhiyun
3421*4882a593Smuzhiyun pi->dpm_table.pcie_speed_table.count = 6;
3422*4882a593Smuzhiyun
3423*4882a593Smuzhiyun return 0;
3424*4882a593Smuzhiyun }
3425*4882a593Smuzhiyun
ci_setup_default_dpm_tables(struct radeon_device * rdev)3426*4882a593Smuzhiyun static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
3427*4882a593Smuzhiyun {
3428*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
3429*4882a593Smuzhiyun struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
3430*4882a593Smuzhiyun &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3431*4882a593Smuzhiyun struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
3432*4882a593Smuzhiyun &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
3433*4882a593Smuzhiyun struct radeon_cac_leakage_table *std_voltage_table =
3434*4882a593Smuzhiyun &rdev->pm.dpm.dyn_state.cac_leakage_table;
3435*4882a593Smuzhiyun u32 i;
3436*4882a593Smuzhiyun
3437*4882a593Smuzhiyun if (allowed_sclk_vddc_table == NULL)
3438*4882a593Smuzhiyun return -EINVAL;
3439*4882a593Smuzhiyun if (allowed_sclk_vddc_table->count < 1)
3440*4882a593Smuzhiyun return -EINVAL;
3441*4882a593Smuzhiyun if (allowed_mclk_table == NULL)
3442*4882a593Smuzhiyun return -EINVAL;
3443*4882a593Smuzhiyun if (allowed_mclk_table->count < 1)
3444*4882a593Smuzhiyun return -EINVAL;
3445*4882a593Smuzhiyun
3446*4882a593Smuzhiyun memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
3447*4882a593Smuzhiyun
3448*4882a593Smuzhiyun ci_reset_single_dpm_table(rdev,
3449*4882a593Smuzhiyun &pi->dpm_table.sclk_table,
3450*4882a593Smuzhiyun SMU7_MAX_LEVELS_GRAPHICS);
3451*4882a593Smuzhiyun ci_reset_single_dpm_table(rdev,
3452*4882a593Smuzhiyun &pi->dpm_table.mclk_table,
3453*4882a593Smuzhiyun SMU7_MAX_LEVELS_MEMORY);
3454*4882a593Smuzhiyun ci_reset_single_dpm_table(rdev,
3455*4882a593Smuzhiyun &pi->dpm_table.vddc_table,
3456*4882a593Smuzhiyun SMU7_MAX_LEVELS_VDDC);
3457*4882a593Smuzhiyun ci_reset_single_dpm_table(rdev,
3458*4882a593Smuzhiyun &pi->dpm_table.vddci_table,
3459*4882a593Smuzhiyun SMU7_MAX_LEVELS_VDDCI);
3460*4882a593Smuzhiyun ci_reset_single_dpm_table(rdev,
3461*4882a593Smuzhiyun &pi->dpm_table.mvdd_table,
3462*4882a593Smuzhiyun SMU7_MAX_LEVELS_MVDD);
3463*4882a593Smuzhiyun
3464*4882a593Smuzhiyun pi->dpm_table.sclk_table.count = 0;
3465*4882a593Smuzhiyun for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3466*4882a593Smuzhiyun if ((i == 0) ||
3467*4882a593Smuzhiyun (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
3468*4882a593Smuzhiyun allowed_sclk_vddc_table->entries[i].clk)) {
3469*4882a593Smuzhiyun pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
3470*4882a593Smuzhiyun allowed_sclk_vddc_table->entries[i].clk;
3471*4882a593Smuzhiyun pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
3472*4882a593Smuzhiyun (i == 0) ? true : false;
3473*4882a593Smuzhiyun pi->dpm_table.sclk_table.count++;
3474*4882a593Smuzhiyun }
3475*4882a593Smuzhiyun }
3476*4882a593Smuzhiyun
3477*4882a593Smuzhiyun pi->dpm_table.mclk_table.count = 0;
3478*4882a593Smuzhiyun for (i = 0; i < allowed_mclk_table->count; i++) {
3479*4882a593Smuzhiyun if ((i == 0) ||
3480*4882a593Smuzhiyun (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3481*4882a593Smuzhiyun allowed_mclk_table->entries[i].clk)) {
3482*4882a593Smuzhiyun pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3483*4882a593Smuzhiyun allowed_mclk_table->entries[i].clk;
3484*4882a593Smuzhiyun pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
3485*4882a593Smuzhiyun (i == 0) ? true : false;
3486*4882a593Smuzhiyun pi->dpm_table.mclk_table.count++;
3487*4882a593Smuzhiyun }
3488*4882a593Smuzhiyun }
3489*4882a593Smuzhiyun
3490*4882a593Smuzhiyun for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3491*4882a593Smuzhiyun pi->dpm_table.vddc_table.dpm_levels[i].value =
3492*4882a593Smuzhiyun allowed_sclk_vddc_table->entries[i].v;
3493*4882a593Smuzhiyun pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3494*4882a593Smuzhiyun std_voltage_table->entries[i].leakage;
3495*4882a593Smuzhiyun pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3496*4882a593Smuzhiyun }
3497*4882a593Smuzhiyun pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3498*4882a593Smuzhiyun
3499*4882a593Smuzhiyun allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3500*4882a593Smuzhiyun if (allowed_mclk_table) {
3501*4882a593Smuzhiyun for (i = 0; i < allowed_mclk_table->count; i++) {
3502*4882a593Smuzhiyun pi->dpm_table.vddci_table.dpm_levels[i].value =
3503*4882a593Smuzhiyun allowed_mclk_table->entries[i].v;
3504*4882a593Smuzhiyun pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3505*4882a593Smuzhiyun }
3506*4882a593Smuzhiyun pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3507*4882a593Smuzhiyun }
3508*4882a593Smuzhiyun
3509*4882a593Smuzhiyun allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3510*4882a593Smuzhiyun if (allowed_mclk_table) {
3511*4882a593Smuzhiyun for (i = 0; i < allowed_mclk_table->count; i++) {
3512*4882a593Smuzhiyun pi->dpm_table.mvdd_table.dpm_levels[i].value =
3513*4882a593Smuzhiyun allowed_mclk_table->entries[i].v;
3514*4882a593Smuzhiyun pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3515*4882a593Smuzhiyun }
3516*4882a593Smuzhiyun pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3517*4882a593Smuzhiyun }
3518*4882a593Smuzhiyun
3519*4882a593Smuzhiyun ci_setup_default_pcie_tables(rdev);
3520*4882a593Smuzhiyun
3521*4882a593Smuzhiyun return 0;
3522*4882a593Smuzhiyun }
3523*4882a593Smuzhiyun
/*
 * Locate the DPM level whose clock equals @value and store its index in
 * @boot_level.  The full table is scanned, so with duplicate entries the
 * last match wins (same behavior as the original loop).
 *
 * Returns 0 when a match was found, -EINVAL otherwise.
 */
static int ci_find_boot_level(struct ci_single_dpm_table *table,
			      u32 value, u32 *boot_level)
{
	int ret = -EINVAL;
	u32 idx;

	for (idx = 0; idx < table->count; idx++) {
		if (table->dpm_levels[idx].value != value)
			continue;
		*boot_level = idx;
		ret = 0;
	}

	return ret;
}
3539*4882a593Smuzhiyun
ci_init_smc_table(struct radeon_device * rdev)3540*4882a593Smuzhiyun static int ci_init_smc_table(struct radeon_device *rdev)
3541*4882a593Smuzhiyun {
3542*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
3543*4882a593Smuzhiyun struct ci_ulv_parm *ulv = &pi->ulv;
3544*4882a593Smuzhiyun struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
3545*4882a593Smuzhiyun SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3546*4882a593Smuzhiyun int ret;
3547*4882a593Smuzhiyun
3548*4882a593Smuzhiyun ret = ci_setup_default_dpm_tables(rdev);
3549*4882a593Smuzhiyun if (ret)
3550*4882a593Smuzhiyun return ret;
3551*4882a593Smuzhiyun
3552*4882a593Smuzhiyun if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3553*4882a593Smuzhiyun ci_populate_smc_voltage_tables(rdev, table);
3554*4882a593Smuzhiyun
3555*4882a593Smuzhiyun ci_init_fps_limits(rdev);
3556*4882a593Smuzhiyun
3557*4882a593Smuzhiyun if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3558*4882a593Smuzhiyun table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3559*4882a593Smuzhiyun
3560*4882a593Smuzhiyun if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3561*4882a593Smuzhiyun table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3562*4882a593Smuzhiyun
3563*4882a593Smuzhiyun if (pi->mem_gddr5)
3564*4882a593Smuzhiyun table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3565*4882a593Smuzhiyun
3566*4882a593Smuzhiyun if (ulv->supported) {
3567*4882a593Smuzhiyun ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
3568*4882a593Smuzhiyun if (ret)
3569*4882a593Smuzhiyun return ret;
3570*4882a593Smuzhiyun WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3571*4882a593Smuzhiyun }
3572*4882a593Smuzhiyun
3573*4882a593Smuzhiyun ret = ci_populate_all_graphic_levels(rdev);
3574*4882a593Smuzhiyun if (ret)
3575*4882a593Smuzhiyun return ret;
3576*4882a593Smuzhiyun
3577*4882a593Smuzhiyun ret = ci_populate_all_memory_levels(rdev);
3578*4882a593Smuzhiyun if (ret)
3579*4882a593Smuzhiyun return ret;
3580*4882a593Smuzhiyun
3581*4882a593Smuzhiyun ci_populate_smc_link_level(rdev, table);
3582*4882a593Smuzhiyun
3583*4882a593Smuzhiyun ret = ci_populate_smc_acpi_level(rdev, table);
3584*4882a593Smuzhiyun if (ret)
3585*4882a593Smuzhiyun return ret;
3586*4882a593Smuzhiyun
3587*4882a593Smuzhiyun ret = ci_populate_smc_vce_level(rdev, table);
3588*4882a593Smuzhiyun if (ret)
3589*4882a593Smuzhiyun return ret;
3590*4882a593Smuzhiyun
3591*4882a593Smuzhiyun ret = ci_populate_smc_acp_level(rdev, table);
3592*4882a593Smuzhiyun if (ret)
3593*4882a593Smuzhiyun return ret;
3594*4882a593Smuzhiyun
3595*4882a593Smuzhiyun ret = ci_populate_smc_samu_level(rdev, table);
3596*4882a593Smuzhiyun if (ret)
3597*4882a593Smuzhiyun return ret;
3598*4882a593Smuzhiyun
3599*4882a593Smuzhiyun ret = ci_do_program_memory_timing_parameters(rdev);
3600*4882a593Smuzhiyun if (ret)
3601*4882a593Smuzhiyun return ret;
3602*4882a593Smuzhiyun
3603*4882a593Smuzhiyun ret = ci_populate_smc_uvd_level(rdev, table);
3604*4882a593Smuzhiyun if (ret)
3605*4882a593Smuzhiyun return ret;
3606*4882a593Smuzhiyun
3607*4882a593Smuzhiyun table->UvdBootLevel = 0;
3608*4882a593Smuzhiyun table->VceBootLevel = 0;
3609*4882a593Smuzhiyun table->AcpBootLevel = 0;
3610*4882a593Smuzhiyun table->SamuBootLevel = 0;
3611*4882a593Smuzhiyun table->GraphicsBootLevel = 0;
3612*4882a593Smuzhiyun table->MemoryBootLevel = 0;
3613*4882a593Smuzhiyun
3614*4882a593Smuzhiyun ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3615*4882a593Smuzhiyun pi->vbios_boot_state.sclk_bootup_value,
3616*4882a593Smuzhiyun (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3617*4882a593Smuzhiyun
3618*4882a593Smuzhiyun ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3619*4882a593Smuzhiyun pi->vbios_boot_state.mclk_bootup_value,
3620*4882a593Smuzhiyun (u32 *)&pi->smc_state_table.MemoryBootLevel);
3621*4882a593Smuzhiyun
3622*4882a593Smuzhiyun table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3623*4882a593Smuzhiyun table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3624*4882a593Smuzhiyun table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3625*4882a593Smuzhiyun
3626*4882a593Smuzhiyun ci_populate_smc_initial_state(rdev, radeon_boot_state);
3627*4882a593Smuzhiyun
3628*4882a593Smuzhiyun ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
3629*4882a593Smuzhiyun if (ret)
3630*4882a593Smuzhiyun return ret;
3631*4882a593Smuzhiyun
3632*4882a593Smuzhiyun table->UVDInterval = 1;
3633*4882a593Smuzhiyun table->VCEInterval = 1;
3634*4882a593Smuzhiyun table->ACPInterval = 1;
3635*4882a593Smuzhiyun table->SAMUInterval = 1;
3636*4882a593Smuzhiyun table->GraphicsVoltageChangeEnable = 1;
3637*4882a593Smuzhiyun table->GraphicsThermThrottleEnable = 1;
3638*4882a593Smuzhiyun table->GraphicsInterval = 1;
3639*4882a593Smuzhiyun table->VoltageInterval = 1;
3640*4882a593Smuzhiyun table->ThermalInterval = 1;
3641*4882a593Smuzhiyun table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3642*4882a593Smuzhiyun CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3643*4882a593Smuzhiyun table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3644*4882a593Smuzhiyun CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3645*4882a593Smuzhiyun table->MemoryVoltageChangeEnable = 1;
3646*4882a593Smuzhiyun table->MemoryInterval = 1;
3647*4882a593Smuzhiyun table->VoltageResponseTime = 0;
3648*4882a593Smuzhiyun table->VddcVddciDelta = 4000;
3649*4882a593Smuzhiyun table->PhaseResponseTime = 0;
3650*4882a593Smuzhiyun table->MemoryThermThrottleEnable = 1;
3651*4882a593Smuzhiyun table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
3652*4882a593Smuzhiyun table->PCIeGenInterval = 1;
3653*4882a593Smuzhiyun if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3654*4882a593Smuzhiyun table->SVI2Enable = 1;
3655*4882a593Smuzhiyun else
3656*4882a593Smuzhiyun table->SVI2Enable = 0;
3657*4882a593Smuzhiyun
3658*4882a593Smuzhiyun table->ThermGpio = 17;
3659*4882a593Smuzhiyun table->SclkStepSize = 0x4000;
3660*4882a593Smuzhiyun
3661*4882a593Smuzhiyun table->SystemFlags = cpu_to_be32(table->SystemFlags);
3662*4882a593Smuzhiyun table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3663*4882a593Smuzhiyun table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3664*4882a593Smuzhiyun table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3665*4882a593Smuzhiyun table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3666*4882a593Smuzhiyun table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3667*4882a593Smuzhiyun table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3668*4882a593Smuzhiyun table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3669*4882a593Smuzhiyun table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3670*4882a593Smuzhiyun table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3671*4882a593Smuzhiyun table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3672*4882a593Smuzhiyun table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3673*4882a593Smuzhiyun table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3674*4882a593Smuzhiyun table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3675*4882a593Smuzhiyun
3676*4882a593Smuzhiyun ret = ci_copy_bytes_to_smc(rdev,
3677*4882a593Smuzhiyun pi->dpm_table_start +
3678*4882a593Smuzhiyun offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3679*4882a593Smuzhiyun (u8 *)&table->SystemFlags,
3680*4882a593Smuzhiyun sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3681*4882a593Smuzhiyun pi->sram_end);
3682*4882a593Smuzhiyun if (ret)
3683*4882a593Smuzhiyun return ret;
3684*4882a593Smuzhiyun
3685*4882a593Smuzhiyun return 0;
3686*4882a593Smuzhiyun }
3687*4882a593Smuzhiyun
ci_trim_single_dpm_states(struct radeon_device * rdev,struct ci_single_dpm_table * dpm_table,u32 low_limit,u32 high_limit)3688*4882a593Smuzhiyun static void ci_trim_single_dpm_states(struct radeon_device *rdev,
3689*4882a593Smuzhiyun struct ci_single_dpm_table *dpm_table,
3690*4882a593Smuzhiyun u32 low_limit, u32 high_limit)
3691*4882a593Smuzhiyun {
3692*4882a593Smuzhiyun u32 i;
3693*4882a593Smuzhiyun
3694*4882a593Smuzhiyun for (i = 0; i < dpm_table->count; i++) {
3695*4882a593Smuzhiyun if ((dpm_table->dpm_levels[i].value < low_limit) ||
3696*4882a593Smuzhiyun (dpm_table->dpm_levels[i].value > high_limit))
3697*4882a593Smuzhiyun dpm_table->dpm_levels[i].enabled = false;
3698*4882a593Smuzhiyun else
3699*4882a593Smuzhiyun dpm_table->dpm_levels[i].enabled = true;
3700*4882a593Smuzhiyun }
3701*4882a593Smuzhiyun }
3702*4882a593Smuzhiyun
ci_trim_pcie_dpm_states(struct radeon_device * rdev,u32 speed_low,u32 lanes_low,u32 speed_high,u32 lanes_high)3703*4882a593Smuzhiyun static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
3704*4882a593Smuzhiyun u32 speed_low, u32 lanes_low,
3705*4882a593Smuzhiyun u32 speed_high, u32 lanes_high)
3706*4882a593Smuzhiyun {
3707*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
3708*4882a593Smuzhiyun struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3709*4882a593Smuzhiyun u32 i, j;
3710*4882a593Smuzhiyun
3711*4882a593Smuzhiyun for (i = 0; i < pcie_table->count; i++) {
3712*4882a593Smuzhiyun if ((pcie_table->dpm_levels[i].value < speed_low) ||
3713*4882a593Smuzhiyun (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3714*4882a593Smuzhiyun (pcie_table->dpm_levels[i].value > speed_high) ||
3715*4882a593Smuzhiyun (pcie_table->dpm_levels[i].param1 > lanes_high))
3716*4882a593Smuzhiyun pcie_table->dpm_levels[i].enabled = false;
3717*4882a593Smuzhiyun else
3718*4882a593Smuzhiyun pcie_table->dpm_levels[i].enabled = true;
3719*4882a593Smuzhiyun }
3720*4882a593Smuzhiyun
3721*4882a593Smuzhiyun for (i = 0; i < pcie_table->count; i++) {
3722*4882a593Smuzhiyun if (pcie_table->dpm_levels[i].enabled) {
3723*4882a593Smuzhiyun for (j = i + 1; j < pcie_table->count; j++) {
3724*4882a593Smuzhiyun if (pcie_table->dpm_levels[j].enabled) {
3725*4882a593Smuzhiyun if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3726*4882a593Smuzhiyun (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3727*4882a593Smuzhiyun pcie_table->dpm_levels[j].enabled = false;
3728*4882a593Smuzhiyun }
3729*4882a593Smuzhiyun }
3730*4882a593Smuzhiyun }
3731*4882a593Smuzhiyun }
3732*4882a593Smuzhiyun }
3733*4882a593Smuzhiyun
ci_trim_dpm_states(struct radeon_device * rdev,struct radeon_ps * radeon_state)3734*4882a593Smuzhiyun static int ci_trim_dpm_states(struct radeon_device *rdev,
3735*4882a593Smuzhiyun struct radeon_ps *radeon_state)
3736*4882a593Smuzhiyun {
3737*4882a593Smuzhiyun struct ci_ps *state = ci_get_ps(radeon_state);
3738*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
3739*4882a593Smuzhiyun u32 high_limit_count;
3740*4882a593Smuzhiyun
3741*4882a593Smuzhiyun if (state->performance_level_count < 1)
3742*4882a593Smuzhiyun return -EINVAL;
3743*4882a593Smuzhiyun
3744*4882a593Smuzhiyun if (state->performance_level_count == 1)
3745*4882a593Smuzhiyun high_limit_count = 0;
3746*4882a593Smuzhiyun else
3747*4882a593Smuzhiyun high_limit_count = 1;
3748*4882a593Smuzhiyun
3749*4882a593Smuzhiyun ci_trim_single_dpm_states(rdev,
3750*4882a593Smuzhiyun &pi->dpm_table.sclk_table,
3751*4882a593Smuzhiyun state->performance_levels[0].sclk,
3752*4882a593Smuzhiyun state->performance_levels[high_limit_count].sclk);
3753*4882a593Smuzhiyun
3754*4882a593Smuzhiyun ci_trim_single_dpm_states(rdev,
3755*4882a593Smuzhiyun &pi->dpm_table.mclk_table,
3756*4882a593Smuzhiyun state->performance_levels[0].mclk,
3757*4882a593Smuzhiyun state->performance_levels[high_limit_count].mclk);
3758*4882a593Smuzhiyun
3759*4882a593Smuzhiyun ci_trim_pcie_dpm_states(rdev,
3760*4882a593Smuzhiyun state->performance_levels[0].pcie_gen,
3761*4882a593Smuzhiyun state->performance_levels[0].pcie_lane,
3762*4882a593Smuzhiyun state->performance_levels[high_limit_count].pcie_gen,
3763*4882a593Smuzhiyun state->performance_levels[high_limit_count].pcie_lane);
3764*4882a593Smuzhiyun
3765*4882a593Smuzhiyun return 0;
3766*4882a593Smuzhiyun }
3767*4882a593Smuzhiyun
ci_apply_disp_minimum_voltage_request(struct radeon_device * rdev)3768*4882a593Smuzhiyun static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
3769*4882a593Smuzhiyun {
3770*4882a593Smuzhiyun struct radeon_clock_voltage_dependency_table *disp_voltage_table =
3771*4882a593Smuzhiyun &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3772*4882a593Smuzhiyun struct radeon_clock_voltage_dependency_table *vddc_table =
3773*4882a593Smuzhiyun &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3774*4882a593Smuzhiyun u32 requested_voltage = 0;
3775*4882a593Smuzhiyun u32 i;
3776*4882a593Smuzhiyun
3777*4882a593Smuzhiyun if (disp_voltage_table == NULL)
3778*4882a593Smuzhiyun return -EINVAL;
3779*4882a593Smuzhiyun if (!disp_voltage_table->count)
3780*4882a593Smuzhiyun return -EINVAL;
3781*4882a593Smuzhiyun
3782*4882a593Smuzhiyun for (i = 0; i < disp_voltage_table->count; i++) {
3783*4882a593Smuzhiyun if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3784*4882a593Smuzhiyun requested_voltage = disp_voltage_table->entries[i].v;
3785*4882a593Smuzhiyun }
3786*4882a593Smuzhiyun
3787*4882a593Smuzhiyun for (i = 0; i < vddc_table->count; i++) {
3788*4882a593Smuzhiyun if (requested_voltage <= vddc_table->entries[i].v) {
3789*4882a593Smuzhiyun requested_voltage = vddc_table->entries[i].v;
3790*4882a593Smuzhiyun return (ci_send_msg_to_smc_with_parameter(rdev,
3791*4882a593Smuzhiyun PPSMC_MSG_VddC_Request,
3792*4882a593Smuzhiyun requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3793*4882a593Smuzhiyun 0 : -EINVAL;
3794*4882a593Smuzhiyun }
3795*4882a593Smuzhiyun }
3796*4882a593Smuzhiyun
3797*4882a593Smuzhiyun return -EINVAL;
3798*4882a593Smuzhiyun }
3799*4882a593Smuzhiyun
ci_upload_dpm_level_enable_mask(struct radeon_device * rdev)3800*4882a593Smuzhiyun static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
3801*4882a593Smuzhiyun {
3802*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
3803*4882a593Smuzhiyun PPSMC_Result result;
3804*4882a593Smuzhiyun
3805*4882a593Smuzhiyun ci_apply_disp_minimum_voltage_request(rdev);
3806*4882a593Smuzhiyun
3807*4882a593Smuzhiyun if (!pi->sclk_dpm_key_disabled) {
3808*4882a593Smuzhiyun if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3809*4882a593Smuzhiyun result = ci_send_msg_to_smc_with_parameter(rdev,
3810*4882a593Smuzhiyun PPSMC_MSG_SCLKDPM_SetEnabledMask,
3811*4882a593Smuzhiyun pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3812*4882a593Smuzhiyun if (result != PPSMC_Result_OK)
3813*4882a593Smuzhiyun return -EINVAL;
3814*4882a593Smuzhiyun }
3815*4882a593Smuzhiyun }
3816*4882a593Smuzhiyun
3817*4882a593Smuzhiyun if (!pi->mclk_dpm_key_disabled) {
3818*4882a593Smuzhiyun if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3819*4882a593Smuzhiyun result = ci_send_msg_to_smc_with_parameter(rdev,
3820*4882a593Smuzhiyun PPSMC_MSG_MCLKDPM_SetEnabledMask,
3821*4882a593Smuzhiyun pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3822*4882a593Smuzhiyun if (result != PPSMC_Result_OK)
3823*4882a593Smuzhiyun return -EINVAL;
3824*4882a593Smuzhiyun }
3825*4882a593Smuzhiyun }
3826*4882a593Smuzhiyun #if 0
3827*4882a593Smuzhiyun if (!pi->pcie_dpm_key_disabled) {
3828*4882a593Smuzhiyun if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3829*4882a593Smuzhiyun result = ci_send_msg_to_smc_with_parameter(rdev,
3830*4882a593Smuzhiyun PPSMC_MSG_PCIeDPM_SetEnabledMask,
3831*4882a593Smuzhiyun pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3832*4882a593Smuzhiyun if (result != PPSMC_Result_OK)
3833*4882a593Smuzhiyun return -EINVAL;
3834*4882a593Smuzhiyun }
3835*4882a593Smuzhiyun }
3836*4882a593Smuzhiyun #endif
3837*4882a593Smuzhiyun return 0;
3838*4882a593Smuzhiyun }
3839*4882a593Smuzhiyun
ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device * rdev,struct radeon_ps * radeon_state)3840*4882a593Smuzhiyun static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
3841*4882a593Smuzhiyun struct radeon_ps *radeon_state)
3842*4882a593Smuzhiyun {
3843*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
3844*4882a593Smuzhiyun struct ci_ps *state = ci_get_ps(radeon_state);
3845*4882a593Smuzhiyun struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3846*4882a593Smuzhiyun u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3847*4882a593Smuzhiyun struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3848*4882a593Smuzhiyun u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3849*4882a593Smuzhiyun u32 i;
3850*4882a593Smuzhiyun
3851*4882a593Smuzhiyun pi->need_update_smu7_dpm_table = 0;
3852*4882a593Smuzhiyun
3853*4882a593Smuzhiyun for (i = 0; i < sclk_table->count; i++) {
3854*4882a593Smuzhiyun if (sclk == sclk_table->dpm_levels[i].value)
3855*4882a593Smuzhiyun break;
3856*4882a593Smuzhiyun }
3857*4882a593Smuzhiyun
3858*4882a593Smuzhiyun if (i >= sclk_table->count) {
3859*4882a593Smuzhiyun pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3860*4882a593Smuzhiyun } else {
3861*4882a593Smuzhiyun /* XXX The current code always reprogrammed the sclk levels,
3862*4882a593Smuzhiyun * but we don't currently handle disp sclk requirements
3863*4882a593Smuzhiyun * so just skip it.
3864*4882a593Smuzhiyun */
3865*4882a593Smuzhiyun if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
3866*4882a593Smuzhiyun pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3867*4882a593Smuzhiyun }
3868*4882a593Smuzhiyun
3869*4882a593Smuzhiyun for (i = 0; i < mclk_table->count; i++) {
3870*4882a593Smuzhiyun if (mclk == mclk_table->dpm_levels[i].value)
3871*4882a593Smuzhiyun break;
3872*4882a593Smuzhiyun }
3873*4882a593Smuzhiyun
3874*4882a593Smuzhiyun if (i >= mclk_table->count)
3875*4882a593Smuzhiyun pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3876*4882a593Smuzhiyun
3877*4882a593Smuzhiyun if (rdev->pm.dpm.current_active_crtc_count !=
3878*4882a593Smuzhiyun rdev->pm.dpm.new_active_crtc_count)
3879*4882a593Smuzhiyun pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3880*4882a593Smuzhiyun }
3881*4882a593Smuzhiyun
ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device * rdev,struct radeon_ps * radeon_state)3882*4882a593Smuzhiyun static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
3883*4882a593Smuzhiyun struct radeon_ps *radeon_state)
3884*4882a593Smuzhiyun {
3885*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
3886*4882a593Smuzhiyun struct ci_ps *state = ci_get_ps(radeon_state);
3887*4882a593Smuzhiyun u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3888*4882a593Smuzhiyun u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3889*4882a593Smuzhiyun struct ci_dpm_table *dpm_table = &pi->dpm_table;
3890*4882a593Smuzhiyun int ret;
3891*4882a593Smuzhiyun
3892*4882a593Smuzhiyun if (!pi->need_update_smu7_dpm_table)
3893*4882a593Smuzhiyun return 0;
3894*4882a593Smuzhiyun
3895*4882a593Smuzhiyun if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
3896*4882a593Smuzhiyun dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
3897*4882a593Smuzhiyun
3898*4882a593Smuzhiyun if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
3899*4882a593Smuzhiyun dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
3900*4882a593Smuzhiyun
3901*4882a593Smuzhiyun if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
3902*4882a593Smuzhiyun ret = ci_populate_all_graphic_levels(rdev);
3903*4882a593Smuzhiyun if (ret)
3904*4882a593Smuzhiyun return ret;
3905*4882a593Smuzhiyun }
3906*4882a593Smuzhiyun
3907*4882a593Smuzhiyun if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
3908*4882a593Smuzhiyun ret = ci_populate_all_memory_levels(rdev);
3909*4882a593Smuzhiyun if (ret)
3910*4882a593Smuzhiyun return ret;
3911*4882a593Smuzhiyun }
3912*4882a593Smuzhiyun
3913*4882a593Smuzhiyun return 0;
3914*4882a593Smuzhiyun }
3915*4882a593Smuzhiyun
ci_enable_uvd_dpm(struct radeon_device * rdev,bool enable)3916*4882a593Smuzhiyun static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
3917*4882a593Smuzhiyun {
3918*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
3919*4882a593Smuzhiyun const struct radeon_clock_and_voltage_limits *max_limits;
3920*4882a593Smuzhiyun int i;
3921*4882a593Smuzhiyun
3922*4882a593Smuzhiyun if (rdev->pm.dpm.ac_power)
3923*4882a593Smuzhiyun max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3924*4882a593Smuzhiyun else
3925*4882a593Smuzhiyun max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3926*4882a593Smuzhiyun
3927*4882a593Smuzhiyun if (enable) {
3928*4882a593Smuzhiyun pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
3929*4882a593Smuzhiyun
3930*4882a593Smuzhiyun for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3931*4882a593Smuzhiyun if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3932*4882a593Smuzhiyun pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
3933*4882a593Smuzhiyun
3934*4882a593Smuzhiyun if (!pi->caps_uvd_dpm)
3935*4882a593Smuzhiyun break;
3936*4882a593Smuzhiyun }
3937*4882a593Smuzhiyun }
3938*4882a593Smuzhiyun
3939*4882a593Smuzhiyun ci_send_msg_to_smc_with_parameter(rdev,
3940*4882a593Smuzhiyun PPSMC_MSG_UVDDPM_SetEnabledMask,
3941*4882a593Smuzhiyun pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
3942*4882a593Smuzhiyun
3943*4882a593Smuzhiyun if (pi->last_mclk_dpm_enable_mask & 0x1) {
3944*4882a593Smuzhiyun pi->uvd_enabled = true;
3945*4882a593Smuzhiyun pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3946*4882a593Smuzhiyun ci_send_msg_to_smc_with_parameter(rdev,
3947*4882a593Smuzhiyun PPSMC_MSG_MCLKDPM_SetEnabledMask,
3948*4882a593Smuzhiyun pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3949*4882a593Smuzhiyun }
3950*4882a593Smuzhiyun } else {
3951*4882a593Smuzhiyun if (pi->last_mclk_dpm_enable_mask & 0x1) {
3952*4882a593Smuzhiyun pi->uvd_enabled = false;
3953*4882a593Smuzhiyun pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
3954*4882a593Smuzhiyun ci_send_msg_to_smc_with_parameter(rdev,
3955*4882a593Smuzhiyun PPSMC_MSG_MCLKDPM_SetEnabledMask,
3956*4882a593Smuzhiyun pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3957*4882a593Smuzhiyun }
3958*4882a593Smuzhiyun }
3959*4882a593Smuzhiyun
3960*4882a593Smuzhiyun return (ci_send_msg_to_smc(rdev, enable ?
3961*4882a593Smuzhiyun PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
3962*4882a593Smuzhiyun 0 : -EINVAL;
3963*4882a593Smuzhiyun }
3964*4882a593Smuzhiyun
ci_enable_vce_dpm(struct radeon_device * rdev,bool enable)3965*4882a593Smuzhiyun static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
3966*4882a593Smuzhiyun {
3967*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
3968*4882a593Smuzhiyun const struct radeon_clock_and_voltage_limits *max_limits;
3969*4882a593Smuzhiyun int i;
3970*4882a593Smuzhiyun
3971*4882a593Smuzhiyun if (rdev->pm.dpm.ac_power)
3972*4882a593Smuzhiyun max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3973*4882a593Smuzhiyun else
3974*4882a593Smuzhiyun max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3975*4882a593Smuzhiyun
3976*4882a593Smuzhiyun if (enable) {
3977*4882a593Smuzhiyun pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
3978*4882a593Smuzhiyun for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3979*4882a593Smuzhiyun if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3980*4882a593Smuzhiyun pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
3981*4882a593Smuzhiyun
3982*4882a593Smuzhiyun if (!pi->caps_vce_dpm)
3983*4882a593Smuzhiyun break;
3984*4882a593Smuzhiyun }
3985*4882a593Smuzhiyun }
3986*4882a593Smuzhiyun
3987*4882a593Smuzhiyun ci_send_msg_to_smc_with_parameter(rdev,
3988*4882a593Smuzhiyun PPSMC_MSG_VCEDPM_SetEnabledMask,
3989*4882a593Smuzhiyun pi->dpm_level_enable_mask.vce_dpm_enable_mask);
3990*4882a593Smuzhiyun }
3991*4882a593Smuzhiyun
3992*4882a593Smuzhiyun return (ci_send_msg_to_smc(rdev, enable ?
3993*4882a593Smuzhiyun PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
3994*4882a593Smuzhiyun 0 : -EINVAL;
3995*4882a593Smuzhiyun }
3996*4882a593Smuzhiyun
3997*4882a593Smuzhiyun #if 0
3998*4882a593Smuzhiyun static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
3999*4882a593Smuzhiyun {
4000*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
4001*4882a593Smuzhiyun const struct radeon_clock_and_voltage_limits *max_limits;
4002*4882a593Smuzhiyun int i;
4003*4882a593Smuzhiyun
4004*4882a593Smuzhiyun if (rdev->pm.dpm.ac_power)
4005*4882a593Smuzhiyun max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4006*4882a593Smuzhiyun else
4007*4882a593Smuzhiyun max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4008*4882a593Smuzhiyun
4009*4882a593Smuzhiyun if (enable) {
4010*4882a593Smuzhiyun pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
4011*4882a593Smuzhiyun for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4012*4882a593Smuzhiyun if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4013*4882a593Smuzhiyun pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
4014*4882a593Smuzhiyun
4015*4882a593Smuzhiyun if (!pi->caps_samu_dpm)
4016*4882a593Smuzhiyun break;
4017*4882a593Smuzhiyun }
4018*4882a593Smuzhiyun }
4019*4882a593Smuzhiyun
4020*4882a593Smuzhiyun ci_send_msg_to_smc_with_parameter(rdev,
4021*4882a593Smuzhiyun PPSMC_MSG_SAMUDPM_SetEnabledMask,
4022*4882a593Smuzhiyun pi->dpm_level_enable_mask.samu_dpm_enable_mask);
4023*4882a593Smuzhiyun }
4024*4882a593Smuzhiyun return (ci_send_msg_to_smc(rdev, enable ?
4025*4882a593Smuzhiyun PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
4026*4882a593Smuzhiyun 0 : -EINVAL;
4027*4882a593Smuzhiyun }
4028*4882a593Smuzhiyun
4029*4882a593Smuzhiyun static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
4030*4882a593Smuzhiyun {
4031*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
4032*4882a593Smuzhiyun const struct radeon_clock_and_voltage_limits *max_limits;
4033*4882a593Smuzhiyun int i;
4034*4882a593Smuzhiyun
4035*4882a593Smuzhiyun if (rdev->pm.dpm.ac_power)
4036*4882a593Smuzhiyun max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4037*4882a593Smuzhiyun else
4038*4882a593Smuzhiyun max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4039*4882a593Smuzhiyun
4040*4882a593Smuzhiyun if (enable) {
4041*4882a593Smuzhiyun pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
4042*4882a593Smuzhiyun for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4043*4882a593Smuzhiyun if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4044*4882a593Smuzhiyun pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
4045*4882a593Smuzhiyun
4046*4882a593Smuzhiyun if (!pi->caps_acp_dpm)
4047*4882a593Smuzhiyun break;
4048*4882a593Smuzhiyun }
4049*4882a593Smuzhiyun }
4050*4882a593Smuzhiyun
4051*4882a593Smuzhiyun ci_send_msg_to_smc_with_parameter(rdev,
4052*4882a593Smuzhiyun PPSMC_MSG_ACPDPM_SetEnabledMask,
4053*4882a593Smuzhiyun pi->dpm_level_enable_mask.acp_dpm_enable_mask);
4054*4882a593Smuzhiyun }
4055*4882a593Smuzhiyun
4056*4882a593Smuzhiyun return (ci_send_msg_to_smc(rdev, enable ?
4057*4882a593Smuzhiyun PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
4058*4882a593Smuzhiyun 0 : -EINVAL;
4059*4882a593Smuzhiyun }
4060*4882a593Smuzhiyun #endif
4061*4882a593Smuzhiyun
ci_update_uvd_dpm(struct radeon_device * rdev,bool gate)4062*4882a593Smuzhiyun static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
4063*4882a593Smuzhiyun {
4064*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
4065*4882a593Smuzhiyun u32 tmp;
4066*4882a593Smuzhiyun
4067*4882a593Smuzhiyun if (!gate) {
4068*4882a593Smuzhiyun if (pi->caps_uvd_dpm ||
4069*4882a593Smuzhiyun (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
4070*4882a593Smuzhiyun pi->smc_state_table.UvdBootLevel = 0;
4071*4882a593Smuzhiyun else
4072*4882a593Smuzhiyun pi->smc_state_table.UvdBootLevel =
4073*4882a593Smuzhiyun rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
4074*4882a593Smuzhiyun
4075*4882a593Smuzhiyun tmp = RREG32_SMC(DPM_TABLE_475);
4076*4882a593Smuzhiyun tmp &= ~UvdBootLevel_MASK;
4077*4882a593Smuzhiyun tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
4078*4882a593Smuzhiyun WREG32_SMC(DPM_TABLE_475, tmp);
4079*4882a593Smuzhiyun }
4080*4882a593Smuzhiyun
4081*4882a593Smuzhiyun return ci_enable_uvd_dpm(rdev, !gate);
4082*4882a593Smuzhiyun }
4083*4882a593Smuzhiyun
ci_get_vce_boot_level(struct radeon_device * rdev)4084*4882a593Smuzhiyun static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
4085*4882a593Smuzhiyun {
4086*4882a593Smuzhiyun u8 i;
4087*4882a593Smuzhiyun u32 min_evclk = 30000; /* ??? */
4088*4882a593Smuzhiyun struct radeon_vce_clock_voltage_dependency_table *table =
4089*4882a593Smuzhiyun &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
4090*4882a593Smuzhiyun
4091*4882a593Smuzhiyun for (i = 0; i < table->count; i++) {
4092*4882a593Smuzhiyun if (table->entries[i].evclk >= min_evclk)
4093*4882a593Smuzhiyun return i;
4094*4882a593Smuzhiyun }
4095*4882a593Smuzhiyun
4096*4882a593Smuzhiyun return table->count - 1;
4097*4882a593Smuzhiyun }
4098*4882a593Smuzhiyun
ci_update_vce_dpm(struct radeon_device * rdev,struct radeon_ps * radeon_new_state,struct radeon_ps * radeon_current_state)4099*4882a593Smuzhiyun static int ci_update_vce_dpm(struct radeon_device *rdev,
4100*4882a593Smuzhiyun struct radeon_ps *radeon_new_state,
4101*4882a593Smuzhiyun struct radeon_ps *radeon_current_state)
4102*4882a593Smuzhiyun {
4103*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
4104*4882a593Smuzhiyun int ret = 0;
4105*4882a593Smuzhiyun u32 tmp;
4106*4882a593Smuzhiyun
4107*4882a593Smuzhiyun if (radeon_current_state->evclk != radeon_new_state->evclk) {
4108*4882a593Smuzhiyun if (radeon_new_state->evclk) {
4109*4882a593Smuzhiyun /* turn the clocks on when encoding */
4110*4882a593Smuzhiyun cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
4111*4882a593Smuzhiyun
4112*4882a593Smuzhiyun pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
4113*4882a593Smuzhiyun tmp = RREG32_SMC(DPM_TABLE_475);
4114*4882a593Smuzhiyun tmp &= ~VceBootLevel_MASK;
4115*4882a593Smuzhiyun tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
4116*4882a593Smuzhiyun WREG32_SMC(DPM_TABLE_475, tmp);
4117*4882a593Smuzhiyun
4118*4882a593Smuzhiyun ret = ci_enable_vce_dpm(rdev, true);
4119*4882a593Smuzhiyun } else {
4120*4882a593Smuzhiyun /* turn the clocks off when not encoding */
4121*4882a593Smuzhiyun cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
4122*4882a593Smuzhiyun
4123*4882a593Smuzhiyun ret = ci_enable_vce_dpm(rdev, false);
4124*4882a593Smuzhiyun }
4125*4882a593Smuzhiyun }
4126*4882a593Smuzhiyun return ret;
4127*4882a593Smuzhiyun }
4128*4882a593Smuzhiyun
4129*4882a593Smuzhiyun #if 0
4130*4882a593Smuzhiyun static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
4131*4882a593Smuzhiyun {
4132*4882a593Smuzhiyun return ci_enable_samu_dpm(rdev, gate);
4133*4882a593Smuzhiyun }
4134*4882a593Smuzhiyun
4135*4882a593Smuzhiyun static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
4136*4882a593Smuzhiyun {
4137*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
4138*4882a593Smuzhiyun u32 tmp;
4139*4882a593Smuzhiyun
4140*4882a593Smuzhiyun if (!gate) {
4141*4882a593Smuzhiyun pi->smc_state_table.AcpBootLevel = 0;
4142*4882a593Smuzhiyun
4143*4882a593Smuzhiyun tmp = RREG32_SMC(DPM_TABLE_475);
4144*4882a593Smuzhiyun tmp &= ~AcpBootLevel_MASK;
4145*4882a593Smuzhiyun tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
4146*4882a593Smuzhiyun WREG32_SMC(DPM_TABLE_475, tmp);
4147*4882a593Smuzhiyun }
4148*4882a593Smuzhiyun
4149*4882a593Smuzhiyun return ci_enable_acp_dpm(rdev, !gate);
4150*4882a593Smuzhiyun }
4151*4882a593Smuzhiyun #endif
4152*4882a593Smuzhiyun
ci_generate_dpm_level_enable_mask(struct radeon_device * rdev,struct radeon_ps * radeon_state)4153*4882a593Smuzhiyun static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
4154*4882a593Smuzhiyun struct radeon_ps *radeon_state)
4155*4882a593Smuzhiyun {
4156*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
4157*4882a593Smuzhiyun int ret;
4158*4882a593Smuzhiyun
4159*4882a593Smuzhiyun ret = ci_trim_dpm_states(rdev, radeon_state);
4160*4882a593Smuzhiyun if (ret)
4161*4882a593Smuzhiyun return ret;
4162*4882a593Smuzhiyun
4163*4882a593Smuzhiyun pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
4164*4882a593Smuzhiyun ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
4165*4882a593Smuzhiyun pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
4166*4882a593Smuzhiyun ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
4167*4882a593Smuzhiyun pi->last_mclk_dpm_enable_mask =
4168*4882a593Smuzhiyun pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4169*4882a593Smuzhiyun if (pi->uvd_enabled) {
4170*4882a593Smuzhiyun if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
4171*4882a593Smuzhiyun pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4172*4882a593Smuzhiyun }
4173*4882a593Smuzhiyun pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
4174*4882a593Smuzhiyun ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
4175*4882a593Smuzhiyun
4176*4882a593Smuzhiyun return 0;
4177*4882a593Smuzhiyun }
4178*4882a593Smuzhiyun
ci_get_lowest_enabled_level(struct radeon_device * rdev,u32 level_mask)4179*4882a593Smuzhiyun static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
4180*4882a593Smuzhiyun u32 level_mask)
4181*4882a593Smuzhiyun {
4182*4882a593Smuzhiyun u32 level = 0;
4183*4882a593Smuzhiyun
4184*4882a593Smuzhiyun while ((level_mask & (1 << level)) == 0)
4185*4882a593Smuzhiyun level++;
4186*4882a593Smuzhiyun
4187*4882a593Smuzhiyun return level;
4188*4882a593Smuzhiyun }
4189*4882a593Smuzhiyun
4190*4882a593Smuzhiyun
ci_dpm_force_performance_level(struct radeon_device * rdev,enum radeon_dpm_forced_level level)4191*4882a593Smuzhiyun int ci_dpm_force_performance_level(struct radeon_device *rdev,
4192*4882a593Smuzhiyun enum radeon_dpm_forced_level level)
4193*4882a593Smuzhiyun {
4194*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
4195*4882a593Smuzhiyun u32 tmp, levels, i;
4196*4882a593Smuzhiyun int ret;
4197*4882a593Smuzhiyun
4198*4882a593Smuzhiyun if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
4199*4882a593Smuzhiyun if ((!pi->pcie_dpm_key_disabled) &&
4200*4882a593Smuzhiyun pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4201*4882a593Smuzhiyun levels = 0;
4202*4882a593Smuzhiyun tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
4203*4882a593Smuzhiyun while (tmp >>= 1)
4204*4882a593Smuzhiyun levels++;
4205*4882a593Smuzhiyun if (levels) {
4206*4882a593Smuzhiyun ret = ci_dpm_force_state_pcie(rdev, level);
4207*4882a593Smuzhiyun if (ret)
4208*4882a593Smuzhiyun return ret;
4209*4882a593Smuzhiyun for (i = 0; i < rdev->usec_timeout; i++) {
4210*4882a593Smuzhiyun tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
4211*4882a593Smuzhiyun CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
4212*4882a593Smuzhiyun if (tmp == levels)
4213*4882a593Smuzhiyun break;
4214*4882a593Smuzhiyun udelay(1);
4215*4882a593Smuzhiyun }
4216*4882a593Smuzhiyun }
4217*4882a593Smuzhiyun }
4218*4882a593Smuzhiyun if ((!pi->sclk_dpm_key_disabled) &&
4219*4882a593Smuzhiyun pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4220*4882a593Smuzhiyun levels = 0;
4221*4882a593Smuzhiyun tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
4222*4882a593Smuzhiyun while (tmp >>= 1)
4223*4882a593Smuzhiyun levels++;
4224*4882a593Smuzhiyun if (levels) {
4225*4882a593Smuzhiyun ret = ci_dpm_force_state_sclk(rdev, levels);
4226*4882a593Smuzhiyun if (ret)
4227*4882a593Smuzhiyun return ret;
4228*4882a593Smuzhiyun for (i = 0; i < rdev->usec_timeout; i++) {
4229*4882a593Smuzhiyun tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4230*4882a593Smuzhiyun CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
4231*4882a593Smuzhiyun if (tmp == levels)
4232*4882a593Smuzhiyun break;
4233*4882a593Smuzhiyun udelay(1);
4234*4882a593Smuzhiyun }
4235*4882a593Smuzhiyun }
4236*4882a593Smuzhiyun }
4237*4882a593Smuzhiyun if ((!pi->mclk_dpm_key_disabled) &&
4238*4882a593Smuzhiyun pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4239*4882a593Smuzhiyun levels = 0;
4240*4882a593Smuzhiyun tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4241*4882a593Smuzhiyun while (tmp >>= 1)
4242*4882a593Smuzhiyun levels++;
4243*4882a593Smuzhiyun if (levels) {
4244*4882a593Smuzhiyun ret = ci_dpm_force_state_mclk(rdev, levels);
4245*4882a593Smuzhiyun if (ret)
4246*4882a593Smuzhiyun return ret;
4247*4882a593Smuzhiyun for (i = 0; i < rdev->usec_timeout; i++) {
4248*4882a593Smuzhiyun tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4249*4882a593Smuzhiyun CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
4250*4882a593Smuzhiyun if (tmp == levels)
4251*4882a593Smuzhiyun break;
4252*4882a593Smuzhiyun udelay(1);
4253*4882a593Smuzhiyun }
4254*4882a593Smuzhiyun }
4255*4882a593Smuzhiyun }
4256*4882a593Smuzhiyun } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
4257*4882a593Smuzhiyun if ((!pi->sclk_dpm_key_disabled) &&
4258*4882a593Smuzhiyun pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4259*4882a593Smuzhiyun levels = ci_get_lowest_enabled_level(rdev,
4260*4882a593Smuzhiyun pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
4261*4882a593Smuzhiyun ret = ci_dpm_force_state_sclk(rdev, levels);
4262*4882a593Smuzhiyun if (ret)
4263*4882a593Smuzhiyun return ret;
4264*4882a593Smuzhiyun for (i = 0; i < rdev->usec_timeout; i++) {
4265*4882a593Smuzhiyun tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4266*4882a593Smuzhiyun CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
4267*4882a593Smuzhiyun if (tmp == levels)
4268*4882a593Smuzhiyun break;
4269*4882a593Smuzhiyun udelay(1);
4270*4882a593Smuzhiyun }
4271*4882a593Smuzhiyun }
4272*4882a593Smuzhiyun if ((!pi->mclk_dpm_key_disabled) &&
4273*4882a593Smuzhiyun pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4274*4882a593Smuzhiyun levels = ci_get_lowest_enabled_level(rdev,
4275*4882a593Smuzhiyun pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4276*4882a593Smuzhiyun ret = ci_dpm_force_state_mclk(rdev, levels);
4277*4882a593Smuzhiyun if (ret)
4278*4882a593Smuzhiyun return ret;
4279*4882a593Smuzhiyun for (i = 0; i < rdev->usec_timeout; i++) {
4280*4882a593Smuzhiyun tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4281*4882a593Smuzhiyun CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
4282*4882a593Smuzhiyun if (tmp == levels)
4283*4882a593Smuzhiyun break;
4284*4882a593Smuzhiyun udelay(1);
4285*4882a593Smuzhiyun }
4286*4882a593Smuzhiyun }
4287*4882a593Smuzhiyun if ((!pi->pcie_dpm_key_disabled) &&
4288*4882a593Smuzhiyun pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4289*4882a593Smuzhiyun levels = ci_get_lowest_enabled_level(rdev,
4290*4882a593Smuzhiyun pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
4291*4882a593Smuzhiyun ret = ci_dpm_force_state_pcie(rdev, levels);
4292*4882a593Smuzhiyun if (ret)
4293*4882a593Smuzhiyun return ret;
4294*4882a593Smuzhiyun for (i = 0; i < rdev->usec_timeout; i++) {
4295*4882a593Smuzhiyun tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
4296*4882a593Smuzhiyun CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
4297*4882a593Smuzhiyun if (tmp == levels)
4298*4882a593Smuzhiyun break;
4299*4882a593Smuzhiyun udelay(1);
4300*4882a593Smuzhiyun }
4301*4882a593Smuzhiyun }
4302*4882a593Smuzhiyun } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
4303*4882a593Smuzhiyun if (!pi->pcie_dpm_key_disabled) {
4304*4882a593Smuzhiyun PPSMC_Result smc_result;
4305*4882a593Smuzhiyun
4306*4882a593Smuzhiyun smc_result = ci_send_msg_to_smc(rdev,
4307*4882a593Smuzhiyun PPSMC_MSG_PCIeDPM_UnForceLevel);
4308*4882a593Smuzhiyun if (smc_result != PPSMC_Result_OK)
4309*4882a593Smuzhiyun return -EINVAL;
4310*4882a593Smuzhiyun }
4311*4882a593Smuzhiyun ret = ci_upload_dpm_level_enable_mask(rdev);
4312*4882a593Smuzhiyun if (ret)
4313*4882a593Smuzhiyun return ret;
4314*4882a593Smuzhiyun }
4315*4882a593Smuzhiyun
4316*4882a593Smuzhiyun rdev->pm.dpm.forced_level = level;
4317*4882a593Smuzhiyun
4318*4882a593Smuzhiyun return 0;
4319*4882a593Smuzhiyun }
4320*4882a593Smuzhiyun
/**
 * ci_set_mc_special_registers - append derived MC command register columns
 * @rdev: radeon device
 * @table: driver MC register table being extended in place
 *
 * The VBIOS-provided table carries MC_SEQ_MISC1 / MC_SEQ_RESERVE_M values;
 * the EMRS/MRS/MRS1 command words (and, for non-GDDR5, MC_PMG_AUTO_CMD)
 * must be derived from them.  For each matching source column i, new
 * columns are appended at index j and table->last is advanced.
 *
 * Returns 0 on success, -EINVAL if the fixed-size SMC register array
 * (SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE columns) would overflow.
 */
static int ci_set_mc_special_registers(struct radeon_device *rdev,
				       struct ci_mc_reg_table *table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 i, j, k;
	u32 temp_reg;

	/* j starts one past the VBIOS-copied columns and appends from there. */
	for (i = 0, j = table->last; i < table->last; i++) {
		/* Guard the writes to mc_reg_address[j]/mc_data[j] below. */
		if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
			return -EINVAL;
		/* s1 is a dword offset; << 2 recovers the byte offset that
		 * the MC_SEQ_* register #defines use. */
		switch(table->mc_reg_address[i].s1 << 2) {
		case MC_SEQ_MISC1:
			/* Derived column 1: EMRS command word.  Keep the
			 * current EMRS high half, low half comes from the
			 * high half of the MISC1 entry value. */
			temp_reg = RREG32(MC_PMG_CMD_EMRS);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
			}
			j++;
			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;

			/* Derived column 2: MRS command word from the low
			 * half of the MISC1 entry value. */
			temp_reg = RREG32(MC_PMG_CMD_MRS);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
				/* Non-GDDR5 memory additionally sets bit 8
				 * in the MRS word. */
				if (!pi->mem_gddr5)
					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
			}
			j++;
			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;

			/* Derived column 3 (non-GDDR5 only): auto-command
			 * register; s0 == s1 here (no LP shadow used). */
			if (!pi->mem_gddr5) {
				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
				for (k = 0; k < table->num_entries; k++) {
					table->mc_reg_table_entry[k].mc_data[j] =
						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
				}
				j++;
				/* NOTE(review): this trailing check uses ">"
				 * while the mid-case ones use ">=".  It is
				 * unreachable (j can be at most ARRAY_SIZE
				 * here) and no write follows before the
				 * top-of-loop ">=" guard, so behavior is
				 * unaffected; kept as-is to match upstream. */
				if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
					return -EINVAL;
			}
			break;
		case MC_SEQ_RESERVE_M:
			/* Derived column: MRS1 command word from the low
			 * half of the RESERVE_M entry value. */
			temp_reg = RREG32(MC_PMG_CMD_MRS1);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
			}
			j++;
			/* NOTE(review): ">" vs ">=" — see remark above;
			 * unreachable, preserved for upstream parity. */
			if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			break;
		default:
			break;
		}

	}

	table->last = j;

	return 0;
}
4391*4882a593Smuzhiyun
ci_check_s0_mc_reg_index(u16 in_reg,u16 * out_reg)4392*4882a593Smuzhiyun static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
4393*4882a593Smuzhiyun {
4394*4882a593Smuzhiyun bool result = true;
4395*4882a593Smuzhiyun
4396*4882a593Smuzhiyun switch(in_reg) {
4397*4882a593Smuzhiyun case MC_SEQ_RAS_TIMING >> 2:
4398*4882a593Smuzhiyun *out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
4399*4882a593Smuzhiyun break;
4400*4882a593Smuzhiyun case MC_SEQ_DLL_STBY >> 2:
4401*4882a593Smuzhiyun *out_reg = MC_SEQ_DLL_STBY_LP >> 2;
4402*4882a593Smuzhiyun break;
4403*4882a593Smuzhiyun case MC_SEQ_G5PDX_CMD0 >> 2:
4404*4882a593Smuzhiyun *out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
4405*4882a593Smuzhiyun break;
4406*4882a593Smuzhiyun case MC_SEQ_G5PDX_CMD1 >> 2:
4407*4882a593Smuzhiyun *out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
4408*4882a593Smuzhiyun break;
4409*4882a593Smuzhiyun case MC_SEQ_G5PDX_CTRL >> 2:
4410*4882a593Smuzhiyun *out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
4411*4882a593Smuzhiyun break;
4412*4882a593Smuzhiyun case MC_SEQ_CAS_TIMING >> 2:
4413*4882a593Smuzhiyun *out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
4414*4882a593Smuzhiyun break;
4415*4882a593Smuzhiyun case MC_SEQ_MISC_TIMING >> 2:
4416*4882a593Smuzhiyun *out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
4417*4882a593Smuzhiyun break;
4418*4882a593Smuzhiyun case MC_SEQ_MISC_TIMING2 >> 2:
4419*4882a593Smuzhiyun *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
4420*4882a593Smuzhiyun break;
4421*4882a593Smuzhiyun case MC_SEQ_PMG_DVS_CMD >> 2:
4422*4882a593Smuzhiyun *out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
4423*4882a593Smuzhiyun break;
4424*4882a593Smuzhiyun case MC_SEQ_PMG_DVS_CTL >> 2:
4425*4882a593Smuzhiyun *out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
4426*4882a593Smuzhiyun break;
4427*4882a593Smuzhiyun case MC_SEQ_RD_CTL_D0 >> 2:
4428*4882a593Smuzhiyun *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
4429*4882a593Smuzhiyun break;
4430*4882a593Smuzhiyun case MC_SEQ_RD_CTL_D1 >> 2:
4431*4882a593Smuzhiyun *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
4432*4882a593Smuzhiyun break;
4433*4882a593Smuzhiyun case MC_SEQ_WR_CTL_D0 >> 2:
4434*4882a593Smuzhiyun *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
4435*4882a593Smuzhiyun break;
4436*4882a593Smuzhiyun case MC_SEQ_WR_CTL_D1 >> 2:
4437*4882a593Smuzhiyun *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
4438*4882a593Smuzhiyun break;
4439*4882a593Smuzhiyun case MC_PMG_CMD_EMRS >> 2:
4440*4882a593Smuzhiyun *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
4441*4882a593Smuzhiyun break;
4442*4882a593Smuzhiyun case MC_PMG_CMD_MRS >> 2:
4443*4882a593Smuzhiyun *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
4444*4882a593Smuzhiyun break;
4445*4882a593Smuzhiyun case MC_PMG_CMD_MRS1 >> 2:
4446*4882a593Smuzhiyun *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
4447*4882a593Smuzhiyun break;
4448*4882a593Smuzhiyun case MC_SEQ_PMG_TIMING >> 2:
4449*4882a593Smuzhiyun *out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
4450*4882a593Smuzhiyun break;
4451*4882a593Smuzhiyun case MC_PMG_CMD_MRS2 >> 2:
4452*4882a593Smuzhiyun *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
4453*4882a593Smuzhiyun break;
4454*4882a593Smuzhiyun case MC_SEQ_WR_CTL_2 >> 2:
4455*4882a593Smuzhiyun *out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
4456*4882a593Smuzhiyun break;
4457*4882a593Smuzhiyun default:
4458*4882a593Smuzhiyun result = false;
4459*4882a593Smuzhiyun break;
4460*4882a593Smuzhiyun }
4461*4882a593Smuzhiyun
4462*4882a593Smuzhiyun return result;
4463*4882a593Smuzhiyun }
4464*4882a593Smuzhiyun
ci_set_valid_flag(struct ci_mc_reg_table * table)4465*4882a593Smuzhiyun static void ci_set_valid_flag(struct ci_mc_reg_table *table)
4466*4882a593Smuzhiyun {
4467*4882a593Smuzhiyun u8 i, j;
4468*4882a593Smuzhiyun
4469*4882a593Smuzhiyun for (i = 0; i < table->last; i++) {
4470*4882a593Smuzhiyun for (j = 1; j < table->num_entries; j++) {
4471*4882a593Smuzhiyun if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4472*4882a593Smuzhiyun table->mc_reg_table_entry[j].mc_data[i]) {
4473*4882a593Smuzhiyun table->valid_flag |= 1 << i;
4474*4882a593Smuzhiyun break;
4475*4882a593Smuzhiyun }
4476*4882a593Smuzhiyun }
4477*4882a593Smuzhiyun }
4478*4882a593Smuzhiyun }
4479*4882a593Smuzhiyun
ci_set_s0_mc_reg_index(struct ci_mc_reg_table * table)4480*4882a593Smuzhiyun static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4481*4882a593Smuzhiyun {
4482*4882a593Smuzhiyun u32 i;
4483*4882a593Smuzhiyun u16 address;
4484*4882a593Smuzhiyun
4485*4882a593Smuzhiyun for (i = 0; i < table->last; i++) {
4486*4882a593Smuzhiyun table->mc_reg_address[i].s0 =
4487*4882a593Smuzhiyun ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4488*4882a593Smuzhiyun address : table->mc_reg_address[i].s1;
4489*4882a593Smuzhiyun }
4490*4882a593Smuzhiyun }
4491*4882a593Smuzhiyun
ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table * table,struct ci_mc_reg_table * ci_table)4492*4882a593Smuzhiyun static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4493*4882a593Smuzhiyun struct ci_mc_reg_table *ci_table)
4494*4882a593Smuzhiyun {
4495*4882a593Smuzhiyun u8 i, j;
4496*4882a593Smuzhiyun
4497*4882a593Smuzhiyun if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4498*4882a593Smuzhiyun return -EINVAL;
4499*4882a593Smuzhiyun if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4500*4882a593Smuzhiyun return -EINVAL;
4501*4882a593Smuzhiyun
4502*4882a593Smuzhiyun for (i = 0; i < table->last; i++)
4503*4882a593Smuzhiyun ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4504*4882a593Smuzhiyun
4505*4882a593Smuzhiyun ci_table->last = table->last;
4506*4882a593Smuzhiyun
4507*4882a593Smuzhiyun for (i = 0; i < table->num_entries; i++) {
4508*4882a593Smuzhiyun ci_table->mc_reg_table_entry[i].mclk_max =
4509*4882a593Smuzhiyun table->mc_reg_table_entry[i].mclk_max;
4510*4882a593Smuzhiyun for (j = 0; j < table->last; j++)
4511*4882a593Smuzhiyun ci_table->mc_reg_table_entry[i].mc_data[j] =
4512*4882a593Smuzhiyun table->mc_reg_table_entry[i].mc_data[j];
4513*4882a593Smuzhiyun }
4514*4882a593Smuzhiyun ci_table->num_entries = table->num_entries;
4515*4882a593Smuzhiyun
4516*4882a593Smuzhiyun return 0;
4517*4882a593Smuzhiyun }
4518*4882a593Smuzhiyun
/**
 * ci_register_patching_mc_seq - apply board-specific MC sequence fixups
 * @rdev: radeon device
 * @table: driver MC register table to patch in place
 *
 * For specific boards (device IDs 0x67B0/0x67B1) whose MC_SEQ_MISC0
 * revision field reads 0x3, patch timing/command values for the
 * 1250 MHz / 1375 MHz (stored as 125000/137500) memory clock entries,
 * then poke MC_SEQ_IO_DEBUG index 3.  No-op for all other boards.
 *
 * Returns 0 on success, -EINVAL if the table would overflow the SMC array.
 */
static int ci_register_patching_mc_seq(struct radeon_device *rdev,
				       struct ci_mc_reg_table *table)
{
	u8 i, k;
	u32 tmp;
	bool patch;

	/* Bits [11:8] of MC_SEQ_MISC0 == 3 selects the affected memory. */
	tmp = RREG32(MC_SEQ_MISC0);
	patch = ((tmp & 0x0000f00) == 0x300) ? true : false;

	if (patch &&
	    ((rdev->pdev->device == 0x67B0) ||
	     (rdev->pdev->device == 0x67B1))) {
		for (i = 0; i < table->last; i++) {
			/* Loop-invariant bounds check; kept inside the loop
			 * to match upstream. */
			if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			/* NOTE(review): s1 holds a dword offset (byte >> 2),
			 * so ">> 2" yields byte >> 4 while the case labels
			 * are byte offsets — this looks like it should be
			 * "<< 2" (as in ci_set_mc_special_registers).  It
			 * matches upstream, so it is preserved; confirm
			 * against the hardware programming guide before
			 * changing. */
			switch(table->mc_reg_address[i].s1 >> 2) {
			case MC_SEQ_MISC1:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
							0x00000007;
				}
				break;
			case MC_SEQ_WR_CTL_D0:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
							0x0000D0DD;
				}
				break;
			case MC_SEQ_WR_CTL_D1:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
							0x0000D0DD;
				}
				break;
			case MC_SEQ_WR_CTL_2:
				/* These entries are cleared entirely. */
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] = 0;
				}
				break;
			case MC_SEQ_CAS_TIMING:
				/* CAS timing differs between the two clocks. */
				for (k = 0; k < table->num_entries; k++) {
					if (table->mc_reg_table_entry[k].mclk_max == 125000)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
							0x000C0140;
					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
							0x000C0150;
				}
				break;
			case MC_SEQ_MISC_TIMING:
				for (k = 0; k < table->num_entries; k++) {
					if (table->mc_reg_table_entry[k].mclk_max == 125000)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
							0x00000030;
					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
							0x00000035;
				}
				break;
			default:
				break;
			}
		}

		/* Read-modify-write MC_SEQ_IO_DEBUG slot 3: set bit 16,
		 * clear bits 17-18.  The index is re-selected before the
		 * write in case the read disturbed it. */
		WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
		tmp = RREG32(MC_SEQ_IO_DEBUG_DATA);
		tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
		WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
		WREG32(MC_SEQ_IO_DEBUG_DATA, tmp);
	}

	return 0;
}
4608*4882a593Smuzhiyun
/**
 * ci_initialize_mc_reg_table - build the driver MC register table
 * @rdev: radeon device
 *
 * Mirrors the live MC sequencer registers into their LP shadows, reads
 * the VBIOS MC register table for the installed memory module, copies it
 * into the driver-side table, derives the s0 (LP) addresses and special
 * command registers, applies board-specific patches, and computes the
 * valid-column mask used when uploading to the SMC.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or a negative
 * error code from any of the table-building steps.
 */
static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct atom_mc_reg_table *table;
	struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
	u8 module_index = rv770_get_memory_module_index(rdev);
	int ret;

	/* Temporary scratch table for the ATOM parser; freed below. */
	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* Seed every LP shadow register with the current live value so
	 * unprogrammed shadows match the hardware state. */
	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
	WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
	WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
	WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
	WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
	WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
	WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));

	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
	if (ret)
		goto init_mc_done;

	ret = ci_copy_vbios_mc_reg_table(table, ci_table);
	if (ret)
		goto init_mc_done;

	ci_set_s0_mc_reg_index(ci_table);

	ret = ci_register_patching_mc_seq(rdev, ci_table);
	if (ret)
		goto init_mc_done;

	ret = ci_set_mc_special_registers(rdev, ci_table);
	if (ret)
		goto init_mc_done;

	ci_set_valid_flag(ci_table);

init_mc_done:
	kfree(table);

	return ret;
}
4667*4882a593Smuzhiyun
/**
 * ci_populate_mc_reg_addresses - fill the SMC MC register address list
 * @rdev: radeon device
 * @mc_reg_table: SMC-side table being populated
 *
 * Compacts the driver table into the SMC one: only columns flagged in
 * valid_flag are emitted, addresses converted to big-endian for the SMC.
 * Returns 0 on success, -EINVAL on overflow of the fixed SMC array.
 */
static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
					SMU7_Discrete_MCRegisters *mc_reg_table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 src, dst = 0;

	for (src = 0; src < pi->mc_reg_table.last; src++) {
		if (!(pi->mc_reg_table.valid_flag & (1 << src)))
			continue;
		if (dst >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
			return -EINVAL;
		mc_reg_table->address[dst].s0 =
			cpu_to_be16(pi->mc_reg_table.mc_reg_address[src].s0);
		mc_reg_table->address[dst].s1 =
			cpu_to_be16(pi->mc_reg_table.mc_reg_address[src].s1);
		dst++;
	}

	mc_reg_table->last = (u8)dst;

	return 0;
}
4688*4882a593Smuzhiyun
/**
 * ci_convert_mc_registers - pack one entry's register values for the SMC
 * @entry: driver-side AC timing entry
 * @data: SMC register set to fill
 * @num_entries: number of register columns in the driver table
 * @valid_flag: bitmask of columns that must be emitted
 *
 * Copies only the valid columns, compacted and converted to big-endian.
 */
static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
				    SMU7_Discrete_MCRegisterSet *data,
				    u32 num_entries, u32 valid_flag)
{
	u32 src, dst = 0;

	for (src = 0; src < num_entries; src++) {
		if (valid_flag & (1 << src))
			data->value[dst++] = cpu_to_be32(entry->mc_data[src]);
	}
}
4702*4882a593Smuzhiyun
/**
 * ci_convert_mc_reg_table_entry_to_smc - pick and pack the entry for a clock
 * @rdev: radeon device
 * @memory_clock: target memory clock
 * @mc_reg_table_data: SMC register set to fill
 *
 * Selects the first AC timing entry whose mclk_max covers @memory_clock
 * (clamping to the last entry when none does) and packs its registers.
 */
static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
						 const u32 memory_clock,
						 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 count = pi->mc_reg_table.num_entries;
	u32 sel;

	for (sel = 0; sel < count; sel++) {
		if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[sel].mclk_max)
			break;
	}

	/* No entry covers this clock: fall back to the highest one. */
	if (sel == count && count > 0)
		sel = count - 1;

	ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[sel],
				mc_reg_table_data, pi->mc_reg_table.last,
				pi->mc_reg_table.valid_flag);
}
4722*4882a593Smuzhiyun
/**
 * ci_convert_mc_reg_table_to_smc - pack register sets for all MCLK levels
 * @rdev: radeon device
 * @mc_reg_table: SMC-side table whose data[] is filled per DPM level
 */
static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
					   SMU7_Discrete_MCRegisters *mc_reg_table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 level;

	/* One packed register set per memory clock DPM level. */
	for (level = 0; level < pi->dpm_table.mclk_table.count; level++)
		ci_convert_mc_reg_table_entry_to_smc(rdev,
				pi->dpm_table.mclk_table.dpm_levels[level].value,
				&mc_reg_table->data[level]);
}
4734*4882a593Smuzhiyun
ci_populate_initial_mc_reg_table(struct radeon_device * rdev)4735*4882a593Smuzhiyun static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
4736*4882a593Smuzhiyun {
4737*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
4738*4882a593Smuzhiyun int ret;
4739*4882a593Smuzhiyun
4740*4882a593Smuzhiyun memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4741*4882a593Smuzhiyun
4742*4882a593Smuzhiyun ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
4743*4882a593Smuzhiyun if (ret)
4744*4882a593Smuzhiyun return ret;
4745*4882a593Smuzhiyun ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4746*4882a593Smuzhiyun
4747*4882a593Smuzhiyun return ci_copy_bytes_to_smc(rdev,
4748*4882a593Smuzhiyun pi->mc_reg_table_start,
4749*4882a593Smuzhiyun (u8 *)&pi->smc_mc_reg_table,
4750*4882a593Smuzhiyun sizeof(SMU7_Discrete_MCRegisters),
4751*4882a593Smuzhiyun pi->sram_end);
4752*4882a593Smuzhiyun }
4753*4882a593Smuzhiyun
ci_update_and_upload_mc_reg_table(struct radeon_device * rdev)4754*4882a593Smuzhiyun static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
4755*4882a593Smuzhiyun {
4756*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
4757*4882a593Smuzhiyun
4758*4882a593Smuzhiyun if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4759*4882a593Smuzhiyun return 0;
4760*4882a593Smuzhiyun
4761*4882a593Smuzhiyun memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4762*4882a593Smuzhiyun
4763*4882a593Smuzhiyun ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4764*4882a593Smuzhiyun
4765*4882a593Smuzhiyun return ci_copy_bytes_to_smc(rdev,
4766*4882a593Smuzhiyun pi->mc_reg_table_start +
4767*4882a593Smuzhiyun offsetof(SMU7_Discrete_MCRegisters, data[0]),
4768*4882a593Smuzhiyun (u8 *)&pi->smc_mc_reg_table.data[0],
4769*4882a593Smuzhiyun sizeof(SMU7_Discrete_MCRegisterSet) *
4770*4882a593Smuzhiyun pi->dpm_table.mclk_table.count,
4771*4882a593Smuzhiyun pi->sram_end);
4772*4882a593Smuzhiyun }
4773*4882a593Smuzhiyun
ci_enable_voltage_control(struct radeon_device * rdev)4774*4882a593Smuzhiyun static void ci_enable_voltage_control(struct radeon_device *rdev)
4775*4882a593Smuzhiyun {
4776*4882a593Smuzhiyun u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
4777*4882a593Smuzhiyun
4778*4882a593Smuzhiyun tmp |= VOLT_PWRMGT_EN;
4779*4882a593Smuzhiyun WREG32_SMC(GENERAL_PWRMGT, tmp);
4780*4882a593Smuzhiyun }
4781*4882a593Smuzhiyun
ci_get_maximum_link_speed(struct radeon_device * rdev,struct radeon_ps * radeon_state)4782*4882a593Smuzhiyun static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
4783*4882a593Smuzhiyun struct radeon_ps *radeon_state)
4784*4882a593Smuzhiyun {
4785*4882a593Smuzhiyun struct ci_ps *state = ci_get_ps(radeon_state);
4786*4882a593Smuzhiyun int i;
4787*4882a593Smuzhiyun u16 pcie_speed, max_speed = 0;
4788*4882a593Smuzhiyun
4789*4882a593Smuzhiyun for (i = 0; i < state->performance_level_count; i++) {
4790*4882a593Smuzhiyun pcie_speed = state->performance_levels[i].pcie_gen;
4791*4882a593Smuzhiyun if (max_speed < pcie_speed)
4792*4882a593Smuzhiyun max_speed = pcie_speed;
4793*4882a593Smuzhiyun }
4794*4882a593Smuzhiyun
4795*4882a593Smuzhiyun return max_speed;
4796*4882a593Smuzhiyun }
4797*4882a593Smuzhiyun
ci_get_current_pcie_speed(struct radeon_device * rdev)4798*4882a593Smuzhiyun static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
4799*4882a593Smuzhiyun {
4800*4882a593Smuzhiyun u32 speed_cntl = 0;
4801*4882a593Smuzhiyun
4802*4882a593Smuzhiyun speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
4803*4882a593Smuzhiyun speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
4804*4882a593Smuzhiyun
4805*4882a593Smuzhiyun return (u16)speed_cntl;
4806*4882a593Smuzhiyun }
4807*4882a593Smuzhiyun
ci_get_current_pcie_lane_number(struct radeon_device * rdev)4808*4882a593Smuzhiyun static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
4809*4882a593Smuzhiyun {
4810*4882a593Smuzhiyun u32 link_width = 0;
4811*4882a593Smuzhiyun
4812*4882a593Smuzhiyun link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
4813*4882a593Smuzhiyun link_width >>= LC_LINK_WIDTH_RD_SHIFT;
4814*4882a593Smuzhiyun
4815*4882a593Smuzhiyun switch (link_width) {
4816*4882a593Smuzhiyun case RADEON_PCIE_LC_LINK_WIDTH_X1:
4817*4882a593Smuzhiyun return 1;
4818*4882a593Smuzhiyun case RADEON_PCIE_LC_LINK_WIDTH_X2:
4819*4882a593Smuzhiyun return 2;
4820*4882a593Smuzhiyun case RADEON_PCIE_LC_LINK_WIDTH_X4:
4821*4882a593Smuzhiyun return 4;
4822*4882a593Smuzhiyun case RADEON_PCIE_LC_LINK_WIDTH_X8:
4823*4882a593Smuzhiyun return 8;
4824*4882a593Smuzhiyun case RADEON_PCIE_LC_LINK_WIDTH_X12:
4825*4882a593Smuzhiyun /* not actually supported */
4826*4882a593Smuzhiyun return 12;
4827*4882a593Smuzhiyun case RADEON_PCIE_LC_LINK_WIDTH_X0:
4828*4882a593Smuzhiyun case RADEON_PCIE_LC_LINK_WIDTH_X16:
4829*4882a593Smuzhiyun default:
4830*4882a593Smuzhiyun return 16;
4831*4882a593Smuzhiyun }
4832*4882a593Smuzhiyun }
4833*4882a593Smuzhiyun
/**
 * ci_request_link_speed_change_before_state_change - negotiate PCIe gen up
 * @rdev: radeon device
 * @radeon_new_state: state being switched to
 * @radeon_current_state: state being switched from
 *
 * If the new state needs a faster link than the current one, ask the
 * platform (via ACPI PSPP, when available) to raise the link speed before
 * the state change.  On failure, force_pcie_gen is set so the state change
 * caps itself at an achievable speed.  If the new state needs a *slower*
 * link, only pspp_notify_required is set; the downgrade notification
 * happens after the state change.
 */
static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
							     struct radeon_ps *radeon_new_state,
							     struct radeon_ps *radeon_current_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	enum radeon_pcie_gen target_link_speed =
		ci_get_maximum_link_speed(rdev, radeon_new_state);
	enum radeon_pcie_gen current_link_speed;

	/* A pending forced gen from a previous request overrides the
	 * current state's own maximum. */
	if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
		current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
	else
		current_link_speed = pi->force_pcie_gen;

	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
	pi->pspp_notify_required = false;
	if (target_link_speed > current_link_speed) {
		/* Try the highest requested gen first and fall through to
		 * progressively lower ones on failure. */
		switch (target_link_speed) {
#ifdef CONFIG_ACPI
		case RADEON_PCIE_GEN3:
			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
				break;
			/* Gen3 denied: cap at gen2; skip the gen2 request
			 * if we are already there. */
			pi->force_pcie_gen = RADEON_PCIE_GEN2;
			if (current_link_speed == RADEON_PCIE_GEN2)
				break;
			fallthrough;
		case RADEON_PCIE_GEN2:
			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
				break;
#endif
			/* fall through */
		default:
			/* No ACPI help (or all requests denied): stay at
			 * whatever the link is running now. */
			pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			pi->pspp_notify_required = true;
	}
}
4874*4882a593Smuzhiyun
ci_notify_link_speed_change_after_state_change(struct radeon_device * rdev,struct radeon_ps * radeon_new_state,struct radeon_ps * radeon_current_state)4875*4882a593Smuzhiyun static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
4876*4882a593Smuzhiyun struct radeon_ps *radeon_new_state,
4877*4882a593Smuzhiyun struct radeon_ps *radeon_current_state)
4878*4882a593Smuzhiyun {
4879*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
4880*4882a593Smuzhiyun enum radeon_pcie_gen target_link_speed =
4881*4882a593Smuzhiyun ci_get_maximum_link_speed(rdev, radeon_new_state);
4882*4882a593Smuzhiyun u8 request;
4883*4882a593Smuzhiyun
4884*4882a593Smuzhiyun if (pi->pspp_notify_required) {
4885*4882a593Smuzhiyun if (target_link_speed == RADEON_PCIE_GEN3)
4886*4882a593Smuzhiyun request = PCIE_PERF_REQ_PECI_GEN3;
4887*4882a593Smuzhiyun else if (target_link_speed == RADEON_PCIE_GEN2)
4888*4882a593Smuzhiyun request = PCIE_PERF_REQ_PECI_GEN2;
4889*4882a593Smuzhiyun else
4890*4882a593Smuzhiyun request = PCIE_PERF_REQ_PECI_GEN1;
4891*4882a593Smuzhiyun
4892*4882a593Smuzhiyun if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
4893*4882a593Smuzhiyun (ci_get_current_pcie_speed(rdev) > 0))
4894*4882a593Smuzhiyun return;
4895*4882a593Smuzhiyun
4896*4882a593Smuzhiyun #ifdef CONFIG_ACPI
4897*4882a593Smuzhiyun radeon_acpi_pcie_performance_request(rdev, request, false);
4898*4882a593Smuzhiyun #endif
4899*4882a593Smuzhiyun }
4900*4882a593Smuzhiyun }
4901*4882a593Smuzhiyun
ci_set_private_data_variables_based_on_pptable(struct radeon_device * rdev)4902*4882a593Smuzhiyun static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
4903*4882a593Smuzhiyun {
4904*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
4905*4882a593Smuzhiyun struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
4906*4882a593Smuzhiyun &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
4907*4882a593Smuzhiyun struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
4908*4882a593Smuzhiyun &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
4909*4882a593Smuzhiyun struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
4910*4882a593Smuzhiyun &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
4911*4882a593Smuzhiyun
4912*4882a593Smuzhiyun if (allowed_sclk_vddc_table == NULL)
4913*4882a593Smuzhiyun return -EINVAL;
4914*4882a593Smuzhiyun if (allowed_sclk_vddc_table->count < 1)
4915*4882a593Smuzhiyun return -EINVAL;
4916*4882a593Smuzhiyun if (allowed_mclk_vddc_table == NULL)
4917*4882a593Smuzhiyun return -EINVAL;
4918*4882a593Smuzhiyun if (allowed_mclk_vddc_table->count < 1)
4919*4882a593Smuzhiyun return -EINVAL;
4920*4882a593Smuzhiyun if (allowed_mclk_vddci_table == NULL)
4921*4882a593Smuzhiyun return -EINVAL;
4922*4882a593Smuzhiyun if (allowed_mclk_vddci_table->count < 1)
4923*4882a593Smuzhiyun return -EINVAL;
4924*4882a593Smuzhiyun
4925*4882a593Smuzhiyun pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
4926*4882a593Smuzhiyun pi->max_vddc_in_pp_table =
4927*4882a593Smuzhiyun allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4928*4882a593Smuzhiyun
4929*4882a593Smuzhiyun pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
4930*4882a593Smuzhiyun pi->max_vddci_in_pp_table =
4931*4882a593Smuzhiyun allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4932*4882a593Smuzhiyun
4933*4882a593Smuzhiyun rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
4934*4882a593Smuzhiyun allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4935*4882a593Smuzhiyun rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
4936*4882a593Smuzhiyun allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4937*4882a593Smuzhiyun rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
4938*4882a593Smuzhiyun allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4939*4882a593Smuzhiyun rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
4940*4882a593Smuzhiyun allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4941*4882a593Smuzhiyun
4942*4882a593Smuzhiyun return 0;
4943*4882a593Smuzhiyun }
4944*4882a593Smuzhiyun
/* Replace a virtual vddc leakage id with the real measured voltage. */
static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
	u32 i;

	for (i = 0; i < leakage_table->count; i++) {
		if (leakage_table->leakage_id[i] != *vddc)
			continue;
		*vddc = leakage_table->actual_voltage[i];
		return;
	}
}
4958*4882a593Smuzhiyun
/* Replace a virtual vddci leakage id with the real measured voltage. */
static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
	u32 i;

	for (i = 0; i < leakage_table->count; i++) {
		if (leakage_table->leakage_id[i] != *vddci)
			continue;
		*vddci = leakage_table->actual_voltage[i];
		return;
	}
}
4972*4882a593Smuzhiyun
ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device * rdev,struct radeon_clock_voltage_dependency_table * table)4973*4882a593Smuzhiyun static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4974*4882a593Smuzhiyun struct radeon_clock_voltage_dependency_table *table)
4975*4882a593Smuzhiyun {
4976*4882a593Smuzhiyun u32 i;
4977*4882a593Smuzhiyun
4978*4882a593Smuzhiyun if (table) {
4979*4882a593Smuzhiyun for (i = 0; i < table->count; i++)
4980*4882a593Smuzhiyun ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4981*4882a593Smuzhiyun }
4982*4882a593Smuzhiyun }
4983*4882a593Smuzhiyun
ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device * rdev,struct radeon_clock_voltage_dependency_table * table)4984*4882a593Smuzhiyun static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
4985*4882a593Smuzhiyun struct radeon_clock_voltage_dependency_table *table)
4986*4882a593Smuzhiyun {
4987*4882a593Smuzhiyun u32 i;
4988*4882a593Smuzhiyun
4989*4882a593Smuzhiyun if (table) {
4990*4882a593Smuzhiyun for (i = 0; i < table->count; i++)
4991*4882a593Smuzhiyun ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
4992*4882a593Smuzhiyun }
4993*4882a593Smuzhiyun }
4994*4882a593Smuzhiyun
ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device * rdev,struct radeon_vce_clock_voltage_dependency_table * table)4995*4882a593Smuzhiyun static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4996*4882a593Smuzhiyun struct radeon_vce_clock_voltage_dependency_table *table)
4997*4882a593Smuzhiyun {
4998*4882a593Smuzhiyun u32 i;
4999*4882a593Smuzhiyun
5000*4882a593Smuzhiyun if (table) {
5001*4882a593Smuzhiyun for (i = 0; i < table->count; i++)
5002*4882a593Smuzhiyun ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
5003*4882a593Smuzhiyun }
5004*4882a593Smuzhiyun }
5005*4882a593Smuzhiyun
ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device * rdev,struct radeon_uvd_clock_voltage_dependency_table * table)5006*4882a593Smuzhiyun static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
5007*4882a593Smuzhiyun struct radeon_uvd_clock_voltage_dependency_table *table)
5008*4882a593Smuzhiyun {
5009*4882a593Smuzhiyun u32 i;
5010*4882a593Smuzhiyun
5011*4882a593Smuzhiyun if (table) {
5012*4882a593Smuzhiyun for (i = 0; i < table->count; i++)
5013*4882a593Smuzhiyun ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
5014*4882a593Smuzhiyun }
5015*4882a593Smuzhiyun }
5016*4882a593Smuzhiyun
ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device * rdev,struct radeon_phase_shedding_limits_table * table)5017*4882a593Smuzhiyun static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
5018*4882a593Smuzhiyun struct radeon_phase_shedding_limits_table *table)
5019*4882a593Smuzhiyun {
5020*4882a593Smuzhiyun u32 i;
5021*4882a593Smuzhiyun
5022*4882a593Smuzhiyun if (table) {
5023*4882a593Smuzhiyun for (i = 0; i < table->count; i++)
5024*4882a593Smuzhiyun ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
5025*4882a593Smuzhiyun }
5026*4882a593Smuzhiyun }
5027*4882a593Smuzhiyun
ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device * rdev,struct radeon_clock_and_voltage_limits * table)5028*4882a593Smuzhiyun static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
5029*4882a593Smuzhiyun struct radeon_clock_and_voltage_limits *table)
5030*4882a593Smuzhiyun {
5031*4882a593Smuzhiyun if (table) {
5032*4882a593Smuzhiyun ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
5033*4882a593Smuzhiyun ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
5034*4882a593Smuzhiyun }
5035*4882a593Smuzhiyun }
5036*4882a593Smuzhiyun
ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device * rdev,struct radeon_cac_leakage_table * table)5037*4882a593Smuzhiyun static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
5038*4882a593Smuzhiyun struct radeon_cac_leakage_table *table)
5039*4882a593Smuzhiyun {
5040*4882a593Smuzhiyun u32 i;
5041*4882a593Smuzhiyun
5042*4882a593Smuzhiyun if (table) {
5043*4882a593Smuzhiyun for (i = 0; i < table->count; i++)
5044*4882a593Smuzhiyun ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
5045*4882a593Smuzhiyun }
5046*4882a593Smuzhiyun }
5047*4882a593Smuzhiyun
ci_patch_dependency_tables_with_leakage(struct radeon_device * rdev)5048*4882a593Smuzhiyun static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
5049*4882a593Smuzhiyun {
5050*4882a593Smuzhiyun
5051*4882a593Smuzhiyun ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5052*4882a593Smuzhiyun &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
5053*4882a593Smuzhiyun ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5054*4882a593Smuzhiyun &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
5055*4882a593Smuzhiyun ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5056*4882a593Smuzhiyun &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
5057*4882a593Smuzhiyun ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
5058*4882a593Smuzhiyun &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
5059*4882a593Smuzhiyun ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5060*4882a593Smuzhiyun &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
5061*4882a593Smuzhiyun ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5062*4882a593Smuzhiyun &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
5063*4882a593Smuzhiyun ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5064*4882a593Smuzhiyun &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
5065*4882a593Smuzhiyun ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5066*4882a593Smuzhiyun &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
5067*4882a593Smuzhiyun ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
5068*4882a593Smuzhiyun &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
5069*4882a593Smuzhiyun ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
5070*4882a593Smuzhiyun &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
5071*4882a593Smuzhiyun ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
5072*4882a593Smuzhiyun &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
5073*4882a593Smuzhiyun ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
5074*4882a593Smuzhiyun &rdev->pm.dpm.dyn_state.cac_leakage_table);
5075*4882a593Smuzhiyun
5076*4882a593Smuzhiyun }
5077*4882a593Smuzhiyun
ci_get_memory_type(struct radeon_device * rdev)5078*4882a593Smuzhiyun static void ci_get_memory_type(struct radeon_device *rdev)
5079*4882a593Smuzhiyun {
5080*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
5081*4882a593Smuzhiyun u32 tmp;
5082*4882a593Smuzhiyun
5083*4882a593Smuzhiyun tmp = RREG32(MC_SEQ_MISC0);
5084*4882a593Smuzhiyun
5085*4882a593Smuzhiyun if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
5086*4882a593Smuzhiyun MC_SEQ_MISC0_GDDR5_VALUE)
5087*4882a593Smuzhiyun pi->mem_gddr5 = true;
5088*4882a593Smuzhiyun else
5089*4882a593Smuzhiyun pi->mem_gddr5 = false;
5090*4882a593Smuzhiyun
5091*4882a593Smuzhiyun }
5092*4882a593Smuzhiyun
ci_update_current_ps(struct radeon_device * rdev,struct radeon_ps * rps)5093*4882a593Smuzhiyun static void ci_update_current_ps(struct radeon_device *rdev,
5094*4882a593Smuzhiyun struct radeon_ps *rps)
5095*4882a593Smuzhiyun {
5096*4882a593Smuzhiyun struct ci_ps *new_ps = ci_get_ps(rps);
5097*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
5098*4882a593Smuzhiyun
5099*4882a593Smuzhiyun pi->current_rps = *rps;
5100*4882a593Smuzhiyun pi->current_ps = *new_ps;
5101*4882a593Smuzhiyun pi->current_rps.ps_priv = &pi->current_ps;
5102*4882a593Smuzhiyun }
5103*4882a593Smuzhiyun
ci_update_requested_ps(struct radeon_device * rdev,struct radeon_ps * rps)5104*4882a593Smuzhiyun static void ci_update_requested_ps(struct radeon_device *rdev,
5105*4882a593Smuzhiyun struct radeon_ps *rps)
5106*4882a593Smuzhiyun {
5107*4882a593Smuzhiyun struct ci_ps *new_ps = ci_get_ps(rps);
5108*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
5109*4882a593Smuzhiyun
5110*4882a593Smuzhiyun pi->requested_rps = *rps;
5111*4882a593Smuzhiyun pi->requested_ps = *new_ps;
5112*4882a593Smuzhiyun pi->requested_rps.ps_priv = &pi->requested_ps;
5113*4882a593Smuzhiyun }
5114*4882a593Smuzhiyun
ci_dpm_pre_set_power_state(struct radeon_device * rdev)5115*4882a593Smuzhiyun int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
5116*4882a593Smuzhiyun {
5117*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
5118*4882a593Smuzhiyun struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
5119*4882a593Smuzhiyun struct radeon_ps *new_ps = &requested_ps;
5120*4882a593Smuzhiyun
5121*4882a593Smuzhiyun ci_update_requested_ps(rdev, new_ps);
5122*4882a593Smuzhiyun
5123*4882a593Smuzhiyun ci_apply_state_adjust_rules(rdev, &pi->requested_rps);
5124*4882a593Smuzhiyun
5125*4882a593Smuzhiyun return 0;
5126*4882a593Smuzhiyun }
5127*4882a593Smuzhiyun
ci_dpm_post_set_power_state(struct radeon_device * rdev)5128*4882a593Smuzhiyun void ci_dpm_post_set_power_state(struct radeon_device *rdev)
5129*4882a593Smuzhiyun {
5130*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
5131*4882a593Smuzhiyun struct radeon_ps *new_ps = &pi->requested_rps;
5132*4882a593Smuzhiyun
5133*4882a593Smuzhiyun ci_update_current_ps(rdev, new_ps);
5134*4882a593Smuzhiyun }
5135*4882a593Smuzhiyun
5136*4882a593Smuzhiyun
/*
 * One-time asic-level dpm setup: load the MC microcode, snapshot the
 * clock registers, detect the vram type and enable ACPI power
 * management and the sclk threshold.
 */
void ci_dpm_setup_asic(struct radeon_device *rdev)
{
	if (ci_mc_load_microcode(rdev))
		DRM_ERROR("Failed to load MC firmware!\n");

	ci_read_clock_registers(rdev);
	ci_get_memory_type(rdev);
	ci_enable_acpi_power_management(rdev);
	ci_init_sclk_t(rdev);
}
5149*4882a593Smuzhiyun
/**
 * ci_dpm_enable - bring up SMC-based dynamic power management.
 * @rdev: radeon_device pointer
 *
 * Runs the full dpm bring-up sequence: voltage/clock setup, SMC
 * firmware upload and start, dpm level enable, then DIDT/CAC/power
 * containment and the thermal controller.  The order of the steps
 * below matters; do not reorder without hardware documentation.
 *
 * Returns 0 on success, negative error code on failure.
 */
int ci_dpm_enable(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
	int ret;

	/* the SMC must not already be running */
	if (ci_is_smc_running(rdev))
		return -EINVAL;
	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		ci_enable_voltage_control(rdev);
		ret = ci_construct_voltage_tables(rdev);
		if (ret) {
			DRM_ERROR("ci_construct_voltage_tables failed\n");
			return ret;
		}
	}
	if (pi->caps_dynamic_ac_timing) {
		/* fall back to static ac timing if the mc reg table fails */
		ret = ci_initialize_mc_reg_table(rdev);
		if (ret)
			pi->caps_dynamic_ac_timing = false;
	}
	if (pi->dynamic_ss)
		ci_enable_spread_spectrum(rdev, true);
	if (pi->thermal_protection)
		ci_enable_thermal_protection(rdev, true);
	ci_program_sstp(rdev);
	ci_enable_display_gap(rdev);
	ci_program_vc(rdev);
	/* upload and start the SMC firmware */
	ret = ci_upload_firmware(rdev);
	if (ret) {
		DRM_ERROR("ci_upload_firmware failed\n");
		return ret;
	}
	ret = ci_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("ci_process_firmware_header failed\n");
		return ret;
	}
	ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
	if (ret) {
		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
		return ret;
	}
	ret = ci_init_smc_table(rdev);
	if (ret) {
		DRM_ERROR("ci_init_smc_table failed\n");
		return ret;
	}
	ret = ci_init_arb_table_index(rdev);
	if (ret) {
		DRM_ERROR("ci_init_arb_table_index failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_populate_initial_mc_reg_table(rdev);
		if (ret) {
			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_populate_pm_base(rdev);
	if (ret) {
		DRM_ERROR("ci_populate_pm_base failed\n");
		return ret;
	}
	ci_dpm_start_smc(rdev);
	ci_enable_vr_hot_gpio_interrupt(rdev);
	ret = ci_notify_smc_display_change(rdev, false);
	if (ret) {
		DRM_ERROR("ci_notify_smc_display_change failed\n");
		return ret;
	}
	/* enable the dpm features now that the SMC is up */
	ci_enable_sclk_control(rdev, true);
	ret = ci_enable_ulv(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ulv failed\n");
		return ret;
	}
	ret = ci_enable_ds_master_switch(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ds_master_switch failed\n");
		return ret;
	}
	ret = ci_start_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_start_dpm failed\n");
		return ret;
	}
	ret = ci_enable_didt(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_didt failed\n");
		return ret;
	}
	ret = ci_enable_smc_cac(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_smc_cac failed\n");
		return ret;
	}
	ret = ci_enable_power_containment(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_power_containment failed\n");
		return ret;
	}

	ret = ci_power_control_set_level(rdev);
	if (ret) {
		DRM_ERROR("ci_power_control_set_level failed\n");
		return ret;
	}

	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

	ret = ci_enable_thermal_based_sclk_dpm(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
		return ret;
	}

	ci_thermal_start_thermal_controller(rdev);

	/* start from the vbios boot state */
	ci_update_current_ps(rdev, boot_ps);

	return 0;
}
5274*4882a593Smuzhiyun
ci_set_temperature_range(struct radeon_device * rdev)5275*4882a593Smuzhiyun static int ci_set_temperature_range(struct radeon_device *rdev)
5276*4882a593Smuzhiyun {
5277*4882a593Smuzhiyun int ret;
5278*4882a593Smuzhiyun
5279*4882a593Smuzhiyun ret = ci_thermal_enable_alert(rdev, false);
5280*4882a593Smuzhiyun if (ret)
5281*4882a593Smuzhiyun return ret;
5282*4882a593Smuzhiyun ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
5283*4882a593Smuzhiyun if (ret)
5284*4882a593Smuzhiyun return ret;
5285*4882a593Smuzhiyun ret = ci_thermal_enable_alert(rdev, true);
5286*4882a593Smuzhiyun if (ret)
5287*4882a593Smuzhiyun return ret;
5288*4882a593Smuzhiyun
5289*4882a593Smuzhiyun return ret;
5290*4882a593Smuzhiyun }
5291*4882a593Smuzhiyun
ci_dpm_late_enable(struct radeon_device * rdev)5292*4882a593Smuzhiyun int ci_dpm_late_enable(struct radeon_device *rdev)
5293*4882a593Smuzhiyun {
5294*4882a593Smuzhiyun int ret;
5295*4882a593Smuzhiyun
5296*4882a593Smuzhiyun ret = ci_set_temperature_range(rdev);
5297*4882a593Smuzhiyun if (ret)
5298*4882a593Smuzhiyun return ret;
5299*4882a593Smuzhiyun
5300*4882a593Smuzhiyun ci_dpm_powergate_uvd(rdev, true);
5301*4882a593Smuzhiyun
5302*4882a593Smuzhiyun return 0;
5303*4882a593Smuzhiyun }
5304*4882a593Smuzhiyun
/**
 * ci_dpm_disable - tear down dynamic power management.
 * @rdev: radeon_device pointer
 *
 * Reverses ci_dpm_enable(): stops the thermal controller, disables the
 * dpm features in roughly reverse bring-up order, stops the SMC and
 * restores the vbios boot state as the current state.
 */
void ci_dpm_disable(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;

	ci_dpm_powergate_uvd(rdev, false);

	/* nothing else to tear down if the SMC never came up */
	if (!ci_is_smc_running(rdev))
		return;

	ci_thermal_stop_thermal_controller(rdev);

	if (pi->thermal_protection)
		ci_enable_thermal_protection(rdev, false);
	ci_enable_power_containment(rdev, false);
	ci_enable_smc_cac(rdev, false);
	ci_enable_didt(rdev, false);
	ci_enable_spread_spectrum(rdev, false);
	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
	ci_stop_dpm(rdev);
	ci_enable_ds_master_switch(rdev, false);
	ci_enable_ulv(rdev, false);
	ci_clear_vc(rdev);
	ci_reset_to_default(rdev);
	ci_dpm_stop_smc(rdev);
	ci_force_switch_to_arb_f0(rdev);
	ci_enable_thermal_based_sclk_dpm(rdev, false);

	ci_update_current_ps(rdev, boot_ps);
}
5335*4882a593Smuzhiyun
/**
 * ci_dpm_set_power_state - program the requested power state.
 * @rdev: radeon_device pointer
 *
 * Switches from the current state to the state prepared by
 * ci_dpm_pre_set_power_state().  Sclk/mclk dpm is frozen while the
 * new levels are uploaded to the SMC, then unfrozen and the level
 * enable mask applied.  PCIE link speed changes are requested before
 * the switch and notified after it.
 *
 * Returns 0 on success, negative error code on failure.
 */
int ci_dpm_set_power_state(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;
	struct radeon_ps *old_ps = &pi->current_rps;
	int ret;

	ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
	if (pi->pcie_performance_request)
		/* ask for a faster pcie link before switching, if needed */
		ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
	ret = ci_freeze_sclk_mclk_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
	if (ret) {
		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
		return ret;
	}
	ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
	if (ret) {
		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
		return ret;
	}

	ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
	if (ret) {
		DRM_ERROR("ci_update_vce_dpm failed\n");
		return ret;
	}

	ret = ci_update_sclk_t(rdev);
	if (ret) {
		DRM_ERROR("ci_update_sclk_t failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_update_and_upload_mc_reg_table(rdev);
		if (ret) {
			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_program_memory_timing_parameters(rdev);
	if (ret) {
		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
		return ret;
	}
	ret = ci_unfreeze_sclk_mclk_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_upload_dpm_level_enable_mask(rdev);
	if (ret) {
		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
		return ret;
	}
	if (pi->pcie_performance_request)
		/* deferred notification for a slower link, if flagged */
		ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);

	return 0;
}
5400*4882a593Smuzhiyun
#if 0
/* Currently unused: would force the vbios boot state on asic reset. */
void ci_dpm_reset_asic(struct radeon_device *rdev)
{
	ci_set_boot_state(rdev);
}
#endif
5407*4882a593Smuzhiyun
/* Re-program the display gap whenever the display configuration changes. */
void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
{
	ci_program_display_gap(rdev);
}
5412*4882a593Smuzhiyun
/* overlay of the supported atom powerplay info table revisions */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

/* overlay of the per-asic pplib clock info formats */
union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
};

/* overlay of the pplib power state table revisions */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
5435*4882a593Smuzhiyun
/*
 * Fill in the non-clock fields of @rps from the atom pplib non-clock
 * info, and remember the boot and UVD states on the device.
 */
static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
					  struct radeon_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	/* UVD clocks are only present in newer table revisions */
	rps->vclk = 0;
	rps->dclk = 0;
	if (table_rev > ATOM_PPLIB_NONCLOCKINFO_VER1) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	}

	/* remember the special states */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
		rdev->pm.dpm.boot_ps = rps;
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		rdev->pm.dpm.uvd_ps = rps;
}
5458*4882a593Smuzhiyun
ci_parse_pplib_clock_info(struct radeon_device * rdev,struct radeon_ps * rps,int index,union pplib_clock_info * clock_info)5459*4882a593Smuzhiyun static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
5460*4882a593Smuzhiyun struct radeon_ps *rps, int index,
5461*4882a593Smuzhiyun union pplib_clock_info *clock_info)
5462*4882a593Smuzhiyun {
5463*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
5464*4882a593Smuzhiyun struct ci_ps *ps = ci_get_ps(rps);
5465*4882a593Smuzhiyun struct ci_pl *pl = &ps->performance_levels[index];
5466*4882a593Smuzhiyun
5467*4882a593Smuzhiyun ps->performance_level_count = index + 1;
5468*4882a593Smuzhiyun
5469*4882a593Smuzhiyun pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5470*4882a593Smuzhiyun pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
5471*4882a593Smuzhiyun pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5472*4882a593Smuzhiyun pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5473*4882a593Smuzhiyun
5474*4882a593Smuzhiyun pl->pcie_gen = r600_get_pcie_gen_support(rdev,
5475*4882a593Smuzhiyun pi->sys_pcie_mask,
5476*4882a593Smuzhiyun pi->vbios_boot_state.pcie_gen_bootup_value,
5477*4882a593Smuzhiyun clock_info->ci.ucPCIEGen);
5478*4882a593Smuzhiyun pl->pcie_lane = r600_get_pcie_lane_support(rdev,
5479*4882a593Smuzhiyun pi->vbios_boot_state.pcie_lane_bootup_value,
5480*4882a593Smuzhiyun le16_to_cpu(clock_info->ci.usPCIELane));
5481*4882a593Smuzhiyun
5482*4882a593Smuzhiyun if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
5483*4882a593Smuzhiyun pi->acpi_pcie_gen = pl->pcie_gen;
5484*4882a593Smuzhiyun }
5485*4882a593Smuzhiyun
5486*4882a593Smuzhiyun if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
5487*4882a593Smuzhiyun pi->ulv.supported = true;
5488*4882a593Smuzhiyun pi->ulv.pl = *pl;
5489*4882a593Smuzhiyun pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
5490*4882a593Smuzhiyun }
5491*4882a593Smuzhiyun
5492*4882a593Smuzhiyun /* patch up boot state */
5493*4882a593Smuzhiyun if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
5494*4882a593Smuzhiyun pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
5495*4882a593Smuzhiyun pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
5496*4882a593Smuzhiyun pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
5497*4882a593Smuzhiyun pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
5498*4882a593Smuzhiyun }
5499*4882a593Smuzhiyun
5500*4882a593Smuzhiyun switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
5501*4882a593Smuzhiyun case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
5502*4882a593Smuzhiyun pi->use_pcie_powersaving_levels = true;
5503*4882a593Smuzhiyun if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
5504*4882a593Smuzhiyun pi->pcie_gen_powersaving.max = pl->pcie_gen;
5505*4882a593Smuzhiyun if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
5506*4882a593Smuzhiyun pi->pcie_gen_powersaving.min = pl->pcie_gen;
5507*4882a593Smuzhiyun if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
5508*4882a593Smuzhiyun pi->pcie_lane_powersaving.max = pl->pcie_lane;
5509*4882a593Smuzhiyun if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
5510*4882a593Smuzhiyun pi->pcie_lane_powersaving.min = pl->pcie_lane;
5511*4882a593Smuzhiyun break;
5512*4882a593Smuzhiyun case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
5513*4882a593Smuzhiyun pi->use_pcie_performance_levels = true;
5514*4882a593Smuzhiyun if (pi->pcie_gen_performance.max < pl->pcie_gen)
5515*4882a593Smuzhiyun pi->pcie_gen_performance.max = pl->pcie_gen;
5516*4882a593Smuzhiyun if (pi->pcie_gen_performance.min > pl->pcie_gen)
5517*4882a593Smuzhiyun pi->pcie_gen_performance.min = pl->pcie_gen;
5518*4882a593Smuzhiyun if (pi->pcie_lane_performance.max < pl->pcie_lane)
5519*4882a593Smuzhiyun pi->pcie_lane_performance.max = pl->pcie_lane;
5520*4882a593Smuzhiyun if (pi->pcie_lane_performance.min > pl->pcie_lane)
5521*4882a593Smuzhiyun pi->pcie_lane_performance.min = pl->pcie_lane;
5522*4882a593Smuzhiyun break;
5523*4882a593Smuzhiyun default:
5524*4882a593Smuzhiyun break;
5525*4882a593Smuzhiyun }
5526*4882a593Smuzhiyun }
5527*4882a593Smuzhiyun
ci_parse_power_table(struct radeon_device * rdev)5528*4882a593Smuzhiyun static int ci_parse_power_table(struct radeon_device *rdev)
5529*4882a593Smuzhiyun {
5530*4882a593Smuzhiyun struct radeon_mode_info *mode_info = &rdev->mode_info;
5531*4882a593Smuzhiyun struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
5532*4882a593Smuzhiyun union pplib_power_state *power_state;
5533*4882a593Smuzhiyun int i, j, k, non_clock_array_index, clock_array_index;
5534*4882a593Smuzhiyun union pplib_clock_info *clock_info;
5535*4882a593Smuzhiyun struct _StateArray *state_array;
5536*4882a593Smuzhiyun struct _ClockInfoArray *clock_info_array;
5537*4882a593Smuzhiyun struct _NonClockInfoArray *non_clock_info_array;
5538*4882a593Smuzhiyun union power_info *power_info;
5539*4882a593Smuzhiyun int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
5540*4882a593Smuzhiyun u16 data_offset;
5541*4882a593Smuzhiyun u8 frev, crev;
5542*4882a593Smuzhiyun u8 *power_state_offset;
5543*4882a593Smuzhiyun struct ci_ps *ps;
5544*4882a593Smuzhiyun
5545*4882a593Smuzhiyun if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
5546*4882a593Smuzhiyun &frev, &crev, &data_offset))
5547*4882a593Smuzhiyun return -EINVAL;
5548*4882a593Smuzhiyun power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
5549*4882a593Smuzhiyun
5550*4882a593Smuzhiyun state_array = (struct _StateArray *)
5551*4882a593Smuzhiyun (mode_info->atom_context->bios + data_offset +
5552*4882a593Smuzhiyun le16_to_cpu(power_info->pplib.usStateArrayOffset));
5553*4882a593Smuzhiyun clock_info_array = (struct _ClockInfoArray *)
5554*4882a593Smuzhiyun (mode_info->atom_context->bios + data_offset +
5555*4882a593Smuzhiyun le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
5556*4882a593Smuzhiyun non_clock_info_array = (struct _NonClockInfoArray *)
5557*4882a593Smuzhiyun (mode_info->atom_context->bios + data_offset +
5558*4882a593Smuzhiyun le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
5559*4882a593Smuzhiyun
5560*4882a593Smuzhiyun rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
5561*4882a593Smuzhiyun sizeof(struct radeon_ps),
5562*4882a593Smuzhiyun GFP_KERNEL);
5563*4882a593Smuzhiyun if (!rdev->pm.dpm.ps)
5564*4882a593Smuzhiyun return -ENOMEM;
5565*4882a593Smuzhiyun power_state_offset = (u8 *)state_array->states;
5566*4882a593Smuzhiyun rdev->pm.dpm.num_ps = 0;
5567*4882a593Smuzhiyun for (i = 0; i < state_array->ucNumEntries; i++) {
5568*4882a593Smuzhiyun u8 *idx;
5569*4882a593Smuzhiyun power_state = (union pplib_power_state *)power_state_offset;
5570*4882a593Smuzhiyun non_clock_array_index = power_state->v2.nonClockInfoIndex;
5571*4882a593Smuzhiyun non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
5572*4882a593Smuzhiyun &non_clock_info_array->nonClockInfo[non_clock_array_index];
5573*4882a593Smuzhiyun if (!rdev->pm.power_state[i].clock_info)
5574*4882a593Smuzhiyun return -EINVAL;
5575*4882a593Smuzhiyun ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
5576*4882a593Smuzhiyun if (ps == NULL)
5577*4882a593Smuzhiyun return -ENOMEM;
5578*4882a593Smuzhiyun rdev->pm.dpm.ps[i].ps_priv = ps;
5579*4882a593Smuzhiyun ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
5580*4882a593Smuzhiyun non_clock_info,
5581*4882a593Smuzhiyun non_clock_info_array->ucEntrySize);
5582*4882a593Smuzhiyun k = 0;
5583*4882a593Smuzhiyun idx = (u8 *)&power_state->v2.clockInfoIndex[0];
5584*4882a593Smuzhiyun for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
5585*4882a593Smuzhiyun clock_array_index = idx[j];
5586*4882a593Smuzhiyun if (clock_array_index >= clock_info_array->ucNumEntries)
5587*4882a593Smuzhiyun continue;
5588*4882a593Smuzhiyun if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
5589*4882a593Smuzhiyun break;
5590*4882a593Smuzhiyun clock_info = (union pplib_clock_info *)
5591*4882a593Smuzhiyun ((u8 *)&clock_info_array->clockInfo[0] +
5592*4882a593Smuzhiyun (clock_array_index * clock_info_array->ucEntrySize));
5593*4882a593Smuzhiyun ci_parse_pplib_clock_info(rdev,
5594*4882a593Smuzhiyun &rdev->pm.dpm.ps[i], k,
5595*4882a593Smuzhiyun clock_info);
5596*4882a593Smuzhiyun k++;
5597*4882a593Smuzhiyun }
5598*4882a593Smuzhiyun power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
5599*4882a593Smuzhiyun rdev->pm.dpm.num_ps = i + 1;
5600*4882a593Smuzhiyun }
5601*4882a593Smuzhiyun
5602*4882a593Smuzhiyun /* fill in the vce power states */
5603*4882a593Smuzhiyun for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
5604*4882a593Smuzhiyun u32 sclk, mclk;
5605*4882a593Smuzhiyun clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
5606*4882a593Smuzhiyun clock_info = (union pplib_clock_info *)
5607*4882a593Smuzhiyun &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
5608*4882a593Smuzhiyun sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5609*4882a593Smuzhiyun sclk |= clock_info->ci.ucEngineClockHigh << 16;
5610*4882a593Smuzhiyun mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5611*4882a593Smuzhiyun mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5612*4882a593Smuzhiyun rdev->pm.dpm.vce_states[i].sclk = sclk;
5613*4882a593Smuzhiyun rdev->pm.dpm.vce_states[i].mclk = mclk;
5614*4882a593Smuzhiyun }
5615*4882a593Smuzhiyun
5616*4882a593Smuzhiyun return 0;
5617*4882a593Smuzhiyun }
5618*4882a593Smuzhiyun
ci_get_vbios_boot_values(struct radeon_device * rdev,struct ci_vbios_boot_state * boot_state)5619*4882a593Smuzhiyun static int ci_get_vbios_boot_values(struct radeon_device *rdev,
5620*4882a593Smuzhiyun struct ci_vbios_boot_state *boot_state)
5621*4882a593Smuzhiyun {
5622*4882a593Smuzhiyun struct radeon_mode_info *mode_info = &rdev->mode_info;
5623*4882a593Smuzhiyun int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5624*4882a593Smuzhiyun ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5625*4882a593Smuzhiyun u8 frev, crev;
5626*4882a593Smuzhiyun u16 data_offset;
5627*4882a593Smuzhiyun
5628*4882a593Smuzhiyun if (atom_parse_data_header(mode_info->atom_context, index, NULL,
5629*4882a593Smuzhiyun &frev, &crev, &data_offset)) {
5630*4882a593Smuzhiyun firmware_info =
5631*4882a593Smuzhiyun (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5632*4882a593Smuzhiyun data_offset);
5633*4882a593Smuzhiyun boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5634*4882a593Smuzhiyun boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5635*4882a593Smuzhiyun boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5636*4882a593Smuzhiyun boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
5637*4882a593Smuzhiyun boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
5638*4882a593Smuzhiyun boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5639*4882a593Smuzhiyun boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5640*4882a593Smuzhiyun
5641*4882a593Smuzhiyun return 0;
5642*4882a593Smuzhiyun }
5643*4882a593Smuzhiyun return -EINVAL;
5644*4882a593Smuzhiyun }
5645*4882a593Smuzhiyun
ci_dpm_fini(struct radeon_device * rdev)5646*4882a593Smuzhiyun void ci_dpm_fini(struct radeon_device *rdev)
5647*4882a593Smuzhiyun {
5648*4882a593Smuzhiyun int i;
5649*4882a593Smuzhiyun
5650*4882a593Smuzhiyun for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
5651*4882a593Smuzhiyun kfree(rdev->pm.dpm.ps[i].ps_priv);
5652*4882a593Smuzhiyun }
5653*4882a593Smuzhiyun kfree(rdev->pm.dpm.ps);
5654*4882a593Smuzhiyun kfree(rdev->pm.dpm.priv);
5655*4882a593Smuzhiyun kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5656*4882a593Smuzhiyun r600_free_extended_power_table(rdev);
5657*4882a593Smuzhiyun }
5658*4882a593Smuzhiyun
/*
 * ci_dpm_init - one-time DPM setup for CI parts
 * @rdev: radeon device pointer
 *
 * Allocates the ci_power_info, reads boot values and power tables from
 * the vbios, and initializes driver defaults and GPIO/voltage control
 * configuration.  Returns 0 on success or a negative error code; on any
 * failure after the priv allocation, ci_dpm_fini() is called to release
 * everything allocated so far.
 */
int ci_dpm_init(struct radeon_device *rdev)
{
	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
	SMU7_Discrete_DpmTable *dpm_table;
	struct radeon_gpio_rec gpio;
	u16 data_offset, size;
	u8 frev, crev;
	struct ci_power_info *pi;
	enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
	struct pci_dev *root = rdev->pdev->bus->self;
	int ret;

	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = pi;

	/* build the mask of PCIe speeds supported by the upstream bridge;
	 * root is only dereferenced when the device is not on the root bus */
	if (!pci_is_root_bus(rdev->pdev->bus))
		speed_cap = pcie_get_speed_cap(root);
	if (speed_cap == PCI_SPEED_UNKNOWN) {
		pi->sys_pcie_mask = 0;
	} else {
		if (speed_cap == PCIE_SPEED_8_0GT)
			pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
				RADEON_PCIE_SPEED_50 |
				RADEON_PCIE_SPEED_80;
		else if (speed_cap == PCIE_SPEED_5_0GT)
			pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
				RADEON_PCIE_SPEED_50;
		else
			pi->sys_pcie_mask = RADEON_PCIE_SPEED_25;
	}
	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;

	/* seed the tracked pcie gen/lane ranges inverted (max low, min high)
	 * so the first parsed power state establishes the real range */
	pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
	pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
	pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
	pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;

	pi->pcie_lane_performance.max = 0;
	pi->pcie_lane_performance.min = 16;
	pi->pcie_lane_powersaving.max = 0;
	pi->pcie_lane_powersaving.min = 16;

	ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = r600_get_platform_caps(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = r600_parse_extended_power_table(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = ci_parse_power_table(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	pi->dll_default_on = false;
	pi->sram_end = SMC_RAM_END;

	/* default activity targets for all 8 sclk dpm levels */
	pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;

	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;

	pi->sclk_dpm_key_disabled = 0;
	pi->mclk_dpm_key_disabled = 0;
	pi->pcie_dpm_key_disabled = 0;
	pi->thermal_sclk_dpm_enabled = 0;

	/* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
	if ((rdev->pdev->device == 0x6658) &&
	    (rdev->mc_fw->size == (BONAIRE_MC_UCODE_SIZE * 4))) {
		pi->mclk_dpm_key_disabled = 1;
	}

	pi->caps_sclk_ds = true;

	/* mclk behavior thresholds, all in 10 kHz units presumably —
	 * NOTE(review): confirm units against the smc interface */
	pi->mclk_strobe_mode_threshold = 40000;
	pi->mclk_stutter_mode_threshold = 40000;
	pi->mclk_edc_enable_threshold = 40000;
	pi->mclk_edc_wr_enable_threshold = 40000;

	ci_initialize_powertune_defaults(rdev);

	pi->caps_fps = false;

	pi->caps_sclk_throttle_low_notification = false;

	pi->caps_uvd_dpm = true;
	pi->caps_vce_dpm = true;

	ci_get_leakage_voltages(rdev);
	ci_patch_dependency_tables_with_leakage(rdev);
	ci_set_private_data_variables_based_on_pptable(rdev);

	/* build a fixed 4-entry vddc vs. dispclk table */
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
		kcalloc(4,
			sizeof(struct radeon_clock_voltage_dependency_entry),
			GFP_KERNEL);
	if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
		ci_dpm_fini(rdev);
		return -ENOMEM;
	}
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;

	rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
	rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
	rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;

	rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
	rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
	rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
	rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;

	/* thermal trip points (millidegrees C): Hawaii runs tighter limits */
	if (rdev->family == CHIP_HAWAII) {
		pi->thermal_temp_setting.temperature_low = 94500;
		pi->thermal_temp_setting.temperature_high = 95000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	} else {
		pi->thermal_temp_setting.temperature_low = 99500;
		pi->thermal_temp_setting.temperature_high = 100000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	}

	pi->uvd_enabled = false;

	dpm_table = &pi->smc_state_table;

	/* VR-hot GPIO: advertise the platform cap only if the pin exists */
	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_VRHOT_GPIO_PINID);
	if (gpio.valid) {
		dpm_table->VRHotGpio = gpio.shift;
		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
	} else {
		dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
	}

	/* AC/DC switch GPIO: same pattern as above */
	gpio = radeon_atombios_lookup_gpio(rdev, PP_AC_DC_SWITCH_GPIO_PINID);
	if (gpio.valid) {
		dpm_table->AcDcGpio = gpio.shift;
		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
	} else {
		dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
	}

	/* PCC (power-capping) GPIO: program CNB_PWRMGT_CNTL per pin number */
	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_PCC_GPIO_PINID);
	if (gpio.valid) {
		u32 tmp = RREG32_SMC(CNB_PWRMGT_CNTL);

		switch (gpio.shift) {
		case 0:
			tmp &= ~GNB_SLOW_MODE_MASK;
			tmp |= GNB_SLOW_MODE(1);
			break;
		case 1:
			tmp &= ~GNB_SLOW_MODE_MASK;
			tmp |= GNB_SLOW_MODE(2);
			break;
		case 2:
			tmp |= GNB_SLOW;
			break;
		case 3:
			tmp |= FORCE_NB_PS1;
			break;
		case 4:
			tmp |= DPM_ENABLED;
			break;
		default:
			DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift);
			break;
		}
		WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
	}

	/* pick voltage control method for vddc/vddci/mvdd: GPIO LUT preferred,
	 * then SVID2; clear the platform cap if neither is available */
	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
	else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
	}

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
	}

	pi->vddc_phase_shed_control = true;

#if defined(CONFIG_ACPI)
	pi->pcie_performance_request =
		radeon_acpi_is_pcie_performance_request_supported(rdev);
#else
	pi->pcie_performance_request = false;
#endif

	/* spread spectrum caps depend on the ASIC_InternalSS_Info table
	 * being present in the vbios */
	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
		pi->caps_sclk_ss_support = true;
		pi->caps_mclk_ss_support = true;
		pi->dynamic_ss = true;
	} else {
		pi->caps_sclk_ss_support = false;
		pi->caps_mclk_ss_support = false;
		pi->dynamic_ss = true;
	}

	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
		pi->thermal_protection = true;
	else
		pi->thermal_protection = false;

	pi->caps_dynamic_ac_timing = true;

	pi->uvd_power_gated = false;

	/* make sure dc limits are valid */
	if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
	    (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	pi->fan_ctrl_is_in_default_mode = true;

	return 0;
}
5925*4882a593Smuzhiyun
ci_dpm_debugfs_print_current_performance_level(struct radeon_device * rdev,struct seq_file * m)5926*4882a593Smuzhiyun void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
5927*4882a593Smuzhiyun struct seq_file *m)
5928*4882a593Smuzhiyun {
5929*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
5930*4882a593Smuzhiyun struct radeon_ps *rps = &pi->current_rps;
5931*4882a593Smuzhiyun u32 sclk = ci_get_average_sclk_freq(rdev);
5932*4882a593Smuzhiyun u32 mclk = ci_get_average_mclk_freq(rdev);
5933*4882a593Smuzhiyun
5934*4882a593Smuzhiyun seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis");
5935*4882a593Smuzhiyun seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
5936*4882a593Smuzhiyun seq_printf(m, "power level avg sclk: %u mclk: %u\n",
5937*4882a593Smuzhiyun sclk, mclk);
5938*4882a593Smuzhiyun }
5939*4882a593Smuzhiyun
ci_dpm_print_power_state(struct radeon_device * rdev,struct radeon_ps * rps)5940*4882a593Smuzhiyun void ci_dpm_print_power_state(struct radeon_device *rdev,
5941*4882a593Smuzhiyun struct radeon_ps *rps)
5942*4882a593Smuzhiyun {
5943*4882a593Smuzhiyun struct ci_ps *ps = ci_get_ps(rps);
5944*4882a593Smuzhiyun struct ci_pl *pl;
5945*4882a593Smuzhiyun int i;
5946*4882a593Smuzhiyun
5947*4882a593Smuzhiyun r600_dpm_print_class_info(rps->class, rps->class2);
5948*4882a593Smuzhiyun r600_dpm_print_cap_info(rps->caps);
5949*4882a593Smuzhiyun printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
5950*4882a593Smuzhiyun for (i = 0; i < ps->performance_level_count; i++) {
5951*4882a593Smuzhiyun pl = &ps->performance_levels[i];
5952*4882a593Smuzhiyun printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
5953*4882a593Smuzhiyun i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
5954*4882a593Smuzhiyun }
5955*4882a593Smuzhiyun r600_dpm_print_ps_status(rdev, rps);
5956*4882a593Smuzhiyun }
5957*4882a593Smuzhiyun
ci_dpm_get_current_sclk(struct radeon_device * rdev)5958*4882a593Smuzhiyun u32 ci_dpm_get_current_sclk(struct radeon_device *rdev)
5959*4882a593Smuzhiyun {
5960*4882a593Smuzhiyun u32 sclk = ci_get_average_sclk_freq(rdev);
5961*4882a593Smuzhiyun
5962*4882a593Smuzhiyun return sclk;
5963*4882a593Smuzhiyun }
5964*4882a593Smuzhiyun
ci_dpm_get_current_mclk(struct radeon_device * rdev)5965*4882a593Smuzhiyun u32 ci_dpm_get_current_mclk(struct radeon_device *rdev)
5966*4882a593Smuzhiyun {
5967*4882a593Smuzhiyun u32 mclk = ci_get_average_mclk_freq(rdev);
5968*4882a593Smuzhiyun
5969*4882a593Smuzhiyun return mclk;
5970*4882a593Smuzhiyun }
5971*4882a593Smuzhiyun
ci_dpm_get_sclk(struct radeon_device * rdev,bool low)5972*4882a593Smuzhiyun u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
5973*4882a593Smuzhiyun {
5974*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
5975*4882a593Smuzhiyun struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5976*4882a593Smuzhiyun
5977*4882a593Smuzhiyun if (low)
5978*4882a593Smuzhiyun return requested_state->performance_levels[0].sclk;
5979*4882a593Smuzhiyun else
5980*4882a593Smuzhiyun return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
5981*4882a593Smuzhiyun }
5982*4882a593Smuzhiyun
ci_dpm_get_mclk(struct radeon_device * rdev,bool low)5983*4882a593Smuzhiyun u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
5984*4882a593Smuzhiyun {
5985*4882a593Smuzhiyun struct ci_power_info *pi = ci_get_pi(rdev);
5986*4882a593Smuzhiyun struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5987*4882a593Smuzhiyun
5988*4882a593Smuzhiyun if (low)
5989*4882a593Smuzhiyun return requested_state->performance_levels[0].mclk;
5990*4882a593Smuzhiyun else
5991*4882a593Smuzhiyun return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
5992*4882a593Smuzhiyun }
5993