xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/amd/pm/amdgpu_dpm.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Copyright 2011 Advanced Micro Devices, Inc.
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Permission is hereby granted, free of charge, to any person obtaining a
5*4882a593Smuzhiyun  * copy of this software and associated documentation files (the "Software"),
6*4882a593Smuzhiyun  * to deal in the Software without restriction, including without limitation
7*4882a593Smuzhiyun  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8*4882a593Smuzhiyun  * and/or sell copies of the Software, and to permit persons to whom the
9*4882a593Smuzhiyun  * Software is furnished to do so, subject to the following conditions:
10*4882a593Smuzhiyun  *
11*4882a593Smuzhiyun  * The above copyright notice and this permission notice shall be included in
12*4882a593Smuzhiyun  * all copies or substantial portions of the Software.
13*4882a593Smuzhiyun  *
14*4882a593Smuzhiyun  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15*4882a593Smuzhiyun  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16*4882a593Smuzhiyun  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17*4882a593Smuzhiyun  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18*4882a593Smuzhiyun  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19*4882a593Smuzhiyun  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20*4882a593Smuzhiyun  * OTHER DEALINGS IN THE SOFTWARE.
21*4882a593Smuzhiyun  *
22*4882a593Smuzhiyun  * Authors: Alex Deucher
23*4882a593Smuzhiyun  */
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun #include "amdgpu.h"
26*4882a593Smuzhiyun #include "amdgpu_atombios.h"
27*4882a593Smuzhiyun #include "amdgpu_i2c.h"
28*4882a593Smuzhiyun #include "amdgpu_dpm.h"
29*4882a593Smuzhiyun #include "atom.h"
30*4882a593Smuzhiyun #include "amd_pcie.h"
31*4882a593Smuzhiyun #include "amdgpu_display.h"
32*4882a593Smuzhiyun #include "hwmgr.h"
33*4882a593Smuzhiyun #include <linux/power_supply.h>
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun #define WIDTH_4K 3840
36*4882a593Smuzhiyun 
/**
 * amdgpu_dpm_print_class_info - dump a power state's classification flags
 * @class:  ATOM_PPLIB_CLASSIFICATION bitfield (includes the UI class in
 *          ATOM_PPLIB_CLASSIFICATION_UI_MASK)
 * @class2: ATOM_PPLIB_CLASSIFICATION2 bitfield
 *
 * Prints the UI class name followed by every internal classification
 * flag that is set, one token per flag, on a single continued line.
 */
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	static const struct {
		u32 flag;
		const char *name;
	} class_flags[] = {
		{ ATOM_PPLIB_CLASSIFICATION_BOOT,               " boot" },
		{ ATOM_PPLIB_CLASSIFICATION_THERMAL,            " thermal" },
		{ ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE, " limited_pwr" },
		{ ATOM_PPLIB_CLASSIFICATION_REST,               " rest" },
		{ ATOM_PPLIB_CLASSIFICATION_FORCED,             " forced" },
		{ ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE,      " 3d_perf" },
		{ ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE,  " ovrdrv" },
		{ ATOM_PPLIB_CLASSIFICATION_UVDSTATE,           " uvd" },
		{ ATOM_PPLIB_CLASSIFICATION_3DLOW,              " 3d_low" },
		{ ATOM_PPLIB_CLASSIFICATION_ACPI,               " acpi" },
		{ ATOM_PPLIB_CLASSIFICATION_HD2STATE,           " uvd_hd2" },
		{ ATOM_PPLIB_CLASSIFICATION_HDSTATE,            " uvd_hd" },
		{ ATOM_PPLIB_CLASSIFICATION_SDSTATE,            " uvd_sd" },
	};
	static const struct {
		u32 flag;
		const char *name;
	} class2_flags[] = {
		{ ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2, " limited_pwr2" },
		{ ATOM_PPLIB_CLASSIFICATION2_ULV,                  " ulv" },
		{ ATOM_PPLIB_CLASSIFICATION2_MVC,                  " uvd_mvc" },
	};
	const char *ui_name = "none";
	int i;

	/* The UI class is an enumerated value, not a flag set. */
	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		ui_name = "battery";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		ui_name = "balanced";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		ui_name = "performance";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		break;
	}
	printk("\tui class: %s\n", ui_name);

	printk("\tinternal class:");
	if (!(class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) && !class2) {
		pr_cont(" none");
	} else {
		for (i = 0; i < ARRAY_SIZE(class_flags); i++)
			if (class & class_flags[i].flag)
				pr_cont("%s", class_flags[i].name);
		for (i = 0; i < ARRAY_SIZE(class2_flags); i++)
			if (class2 & class2_flags[i].flag)
				pr_cont("%s", class2_flags[i].name);
	}
	pr_cont("\n");
}
97*4882a593Smuzhiyun 
/**
 * amdgpu_dpm_print_cap_info - dump a power state's capability flags
 * @caps: ATOM_PPLIB capability bitfield
 *
 * Prints one token per set capability flag on a single continued line.
 */
void amdgpu_dpm_print_cap_info(u32 caps)
{
	static const struct {
		u32 flag;
		const char *name;
	} cap_flags[] = {
		{ ATOM_PPLIB_SINGLE_DISPLAY_ONLY,       " single_disp" },
		{ ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK,   " video" },
		{ ATOM_PPLIB_DISALLOW_ON_DC,            " no_dc" },
	};
	int i;

	printk("\tcaps:");
	for (i = 0; i < ARRAY_SIZE(cap_flags); i++)
		if (caps & cap_flags[i].flag)
			pr_cont("%s", cap_flags[i].name);
	pr_cont("\n");
}
109*4882a593Smuzhiyun 
amdgpu_dpm_print_ps_status(struct amdgpu_device * adev,struct amdgpu_ps * rps)110*4882a593Smuzhiyun void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
111*4882a593Smuzhiyun 				struct amdgpu_ps *rps)
112*4882a593Smuzhiyun {
113*4882a593Smuzhiyun 	printk("\tstatus:");
114*4882a593Smuzhiyun 	if (rps == adev->pm.dpm.current_ps)
115*4882a593Smuzhiyun 		pr_cont(" c");
116*4882a593Smuzhiyun 	if (rps == adev->pm.dpm.requested_ps)
117*4882a593Smuzhiyun 		pr_cont(" r");
118*4882a593Smuzhiyun 	if (rps == adev->pm.dpm.boot_ps)
119*4882a593Smuzhiyun 		pr_cont(" b");
120*4882a593Smuzhiyun 	pr_cont("\n");
121*4882a593Smuzhiyun }
122*4882a593Smuzhiyun 
amdgpu_dpm_get_active_displays(struct amdgpu_device * adev)123*4882a593Smuzhiyun void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
124*4882a593Smuzhiyun {
125*4882a593Smuzhiyun 	struct drm_device *ddev = adev_to_drm(adev);
126*4882a593Smuzhiyun 	struct drm_crtc *crtc;
127*4882a593Smuzhiyun 	struct amdgpu_crtc *amdgpu_crtc;
128*4882a593Smuzhiyun 
129*4882a593Smuzhiyun 	adev->pm.dpm.new_active_crtcs = 0;
130*4882a593Smuzhiyun 	adev->pm.dpm.new_active_crtc_count = 0;
131*4882a593Smuzhiyun 	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
132*4882a593Smuzhiyun 		list_for_each_entry(crtc,
133*4882a593Smuzhiyun 				    &ddev->mode_config.crtc_list, head) {
134*4882a593Smuzhiyun 			amdgpu_crtc = to_amdgpu_crtc(crtc);
135*4882a593Smuzhiyun 			if (amdgpu_crtc->enabled) {
136*4882a593Smuzhiyun 				adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
137*4882a593Smuzhiyun 				adev->pm.dpm.new_active_crtc_count++;
138*4882a593Smuzhiyun 			}
139*4882a593Smuzhiyun 		}
140*4882a593Smuzhiyun 	}
141*4882a593Smuzhiyun }
142*4882a593Smuzhiyun 
143*4882a593Smuzhiyun 
amdgpu_dpm_get_vblank_time(struct amdgpu_device * adev)144*4882a593Smuzhiyun u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
145*4882a593Smuzhiyun {
146*4882a593Smuzhiyun 	struct drm_device *dev = adev_to_drm(adev);
147*4882a593Smuzhiyun 	struct drm_crtc *crtc;
148*4882a593Smuzhiyun 	struct amdgpu_crtc *amdgpu_crtc;
149*4882a593Smuzhiyun 	u32 vblank_in_pixels;
150*4882a593Smuzhiyun 	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
151*4882a593Smuzhiyun 
152*4882a593Smuzhiyun 	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
153*4882a593Smuzhiyun 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
154*4882a593Smuzhiyun 			amdgpu_crtc = to_amdgpu_crtc(crtc);
155*4882a593Smuzhiyun 			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
156*4882a593Smuzhiyun 				vblank_in_pixels =
157*4882a593Smuzhiyun 					amdgpu_crtc->hw_mode.crtc_htotal *
158*4882a593Smuzhiyun 					(amdgpu_crtc->hw_mode.crtc_vblank_end -
159*4882a593Smuzhiyun 					amdgpu_crtc->hw_mode.crtc_vdisplay +
160*4882a593Smuzhiyun 					(amdgpu_crtc->v_border * 2));
161*4882a593Smuzhiyun 
162*4882a593Smuzhiyun 				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
163*4882a593Smuzhiyun 				break;
164*4882a593Smuzhiyun 			}
165*4882a593Smuzhiyun 		}
166*4882a593Smuzhiyun 	}
167*4882a593Smuzhiyun 
168*4882a593Smuzhiyun 	return vblank_time_us;
169*4882a593Smuzhiyun }
170*4882a593Smuzhiyun 
amdgpu_dpm_get_vrefresh(struct amdgpu_device * adev)171*4882a593Smuzhiyun u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
172*4882a593Smuzhiyun {
173*4882a593Smuzhiyun 	struct drm_device *dev = adev_to_drm(adev);
174*4882a593Smuzhiyun 	struct drm_crtc *crtc;
175*4882a593Smuzhiyun 	struct amdgpu_crtc *amdgpu_crtc;
176*4882a593Smuzhiyun 	u32 vrefresh = 0;
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun 	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
179*4882a593Smuzhiyun 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
180*4882a593Smuzhiyun 			amdgpu_crtc = to_amdgpu_crtc(crtc);
181*4882a593Smuzhiyun 			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
182*4882a593Smuzhiyun 				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
183*4882a593Smuzhiyun 				break;
184*4882a593Smuzhiyun 			}
185*4882a593Smuzhiyun 		}
186*4882a593Smuzhiyun 	}
187*4882a593Smuzhiyun 
188*4882a593Smuzhiyun 	return vrefresh;
189*4882a593Smuzhiyun }
190*4882a593Smuzhiyun 
amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)191*4882a593Smuzhiyun bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
192*4882a593Smuzhiyun {
193*4882a593Smuzhiyun 	switch (sensor) {
194*4882a593Smuzhiyun 	case THERMAL_TYPE_RV6XX:
195*4882a593Smuzhiyun 	case THERMAL_TYPE_RV770:
196*4882a593Smuzhiyun 	case THERMAL_TYPE_EVERGREEN:
197*4882a593Smuzhiyun 	case THERMAL_TYPE_SUMO:
198*4882a593Smuzhiyun 	case THERMAL_TYPE_NI:
199*4882a593Smuzhiyun 	case THERMAL_TYPE_SI:
200*4882a593Smuzhiyun 	case THERMAL_TYPE_CI:
201*4882a593Smuzhiyun 	case THERMAL_TYPE_KV:
202*4882a593Smuzhiyun 		return true;
203*4882a593Smuzhiyun 	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
204*4882a593Smuzhiyun 	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
205*4882a593Smuzhiyun 		return false; /* need special handling */
206*4882a593Smuzhiyun 	case THERMAL_TYPE_NONE:
207*4882a593Smuzhiyun 	case THERMAL_TYPE_EXTERNAL:
208*4882a593Smuzhiyun 	case THERMAL_TYPE_EXTERNAL_GPIO:
209*4882a593Smuzhiyun 	default:
210*4882a593Smuzhiyun 		return false;
211*4882a593Smuzhiyun 	}
212*4882a593Smuzhiyun }
213*4882a593Smuzhiyun 
/*
 * Overlay of every PowerPlayInfo table revision found in the vbios;
 * which member is valid depends on the table's frev/crev and
 * usTableSize, so callers check sizes before touching newer members.
 */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};
224*4882a593Smuzhiyun 
/*
 * Overlay of the vbios fan table revisions; ucFanTableFormat selects
 * which member is actually valid.
 */
union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};
230*4882a593Smuzhiyun 
/**
 * amdgpu_parse_clk_voltage_dep_table - copy a vbios clock/voltage table
 * @amdgpu_table: driver-side table to fill; entries are allocated here
 *                and owned by the caller (freed via
 *                amdgpu_free_extended_power_table())
 * @atom_table:   packed vbios table to convert
 *
 * Converts the little-endian, packed ATOM records into the driver's
 * native-endian representation.
 *
 * Returns 0 on success, -ENOMEM if the entry array cannot be allocated.
 */
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
	int i;

	/* kcalloc for overflow-checked array allocation, matching the
	 * other table parsers in this file. */
	amdgpu_table->entries = kcalloc(atom_table->ucNumEntries,
					sizeof(struct amdgpu_clock_voltage_dependency_entry),
					GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		/* clock is split into a 16-bit low word and an 8-bit high byte */
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		/* step by the packed record size, not the aligned struct size */
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}
255*4882a593Smuzhiyun 
amdgpu_get_platform_caps(struct amdgpu_device * adev)256*4882a593Smuzhiyun int amdgpu_get_platform_caps(struct amdgpu_device *adev)
257*4882a593Smuzhiyun {
258*4882a593Smuzhiyun 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
259*4882a593Smuzhiyun 	union power_info *power_info;
260*4882a593Smuzhiyun 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
261*4882a593Smuzhiyun 	u16 data_offset;
262*4882a593Smuzhiyun 	u8 frev, crev;
263*4882a593Smuzhiyun 
264*4882a593Smuzhiyun 	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
265*4882a593Smuzhiyun 				   &frev, &crev, &data_offset))
266*4882a593Smuzhiyun 		return -EINVAL;
267*4882a593Smuzhiyun 	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun 	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
270*4882a593Smuzhiyun 	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
271*4882a593Smuzhiyun 	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
272*4882a593Smuzhiyun 
273*4882a593Smuzhiyun 	return 0;
274*4882a593Smuzhiyun }
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun /* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
277*4882a593Smuzhiyun #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
278*4882a593Smuzhiyun #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
279*4882a593Smuzhiyun #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
280*4882a593Smuzhiyun #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
281*4882a593Smuzhiyun #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
282*4882a593Smuzhiyun #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
283*4882a593Smuzhiyun #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
284*4882a593Smuzhiyun #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
285*4882a593Smuzhiyun 
amdgpu_parse_extended_power_table(struct amdgpu_device * adev)286*4882a593Smuzhiyun int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
287*4882a593Smuzhiyun {
288*4882a593Smuzhiyun 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
289*4882a593Smuzhiyun 	union power_info *power_info;
290*4882a593Smuzhiyun 	union fan_info *fan_info;
291*4882a593Smuzhiyun 	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
292*4882a593Smuzhiyun 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
293*4882a593Smuzhiyun 	u16 data_offset;
294*4882a593Smuzhiyun 	u8 frev, crev;
295*4882a593Smuzhiyun 	int ret, i;
296*4882a593Smuzhiyun 
297*4882a593Smuzhiyun 	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
298*4882a593Smuzhiyun 				   &frev, &crev, &data_offset))
299*4882a593Smuzhiyun 		return -EINVAL;
300*4882a593Smuzhiyun 	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
301*4882a593Smuzhiyun 
302*4882a593Smuzhiyun 	/* fan table */
303*4882a593Smuzhiyun 	if (le16_to_cpu(power_info->pplib.usTableSize) >=
304*4882a593Smuzhiyun 	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
305*4882a593Smuzhiyun 		if (power_info->pplib3.usFanTableOffset) {
306*4882a593Smuzhiyun 			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
307*4882a593Smuzhiyun 						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
308*4882a593Smuzhiyun 			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
309*4882a593Smuzhiyun 			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
310*4882a593Smuzhiyun 			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
311*4882a593Smuzhiyun 			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
312*4882a593Smuzhiyun 			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
313*4882a593Smuzhiyun 			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
314*4882a593Smuzhiyun 			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
315*4882a593Smuzhiyun 			if (fan_info->fan.ucFanTableFormat >= 2)
316*4882a593Smuzhiyun 				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
317*4882a593Smuzhiyun 			else
318*4882a593Smuzhiyun 				adev->pm.dpm.fan.t_max = 10900;
319*4882a593Smuzhiyun 			adev->pm.dpm.fan.cycle_delay = 100000;
320*4882a593Smuzhiyun 			if (fan_info->fan.ucFanTableFormat >= 3) {
321*4882a593Smuzhiyun 				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
322*4882a593Smuzhiyun 				adev->pm.dpm.fan.default_max_fan_pwm =
323*4882a593Smuzhiyun 					le16_to_cpu(fan_info->fan3.usFanPWMMax);
324*4882a593Smuzhiyun 				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
325*4882a593Smuzhiyun 				adev->pm.dpm.fan.fan_output_sensitivity =
326*4882a593Smuzhiyun 					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
327*4882a593Smuzhiyun 			}
328*4882a593Smuzhiyun 			adev->pm.dpm.fan.ucode_fan_control = true;
329*4882a593Smuzhiyun 		}
330*4882a593Smuzhiyun 	}
331*4882a593Smuzhiyun 
332*4882a593Smuzhiyun 	/* clock dependancy tables, shedding tables */
333*4882a593Smuzhiyun 	if (le16_to_cpu(power_info->pplib.usTableSize) >=
334*4882a593Smuzhiyun 	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
335*4882a593Smuzhiyun 		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
336*4882a593Smuzhiyun 			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
337*4882a593Smuzhiyun 				(mode_info->atom_context->bios + data_offset +
338*4882a593Smuzhiyun 				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
339*4882a593Smuzhiyun 			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
340*4882a593Smuzhiyun 								 dep_table);
341*4882a593Smuzhiyun 			if (ret) {
342*4882a593Smuzhiyun 				amdgpu_free_extended_power_table(adev);
343*4882a593Smuzhiyun 				return ret;
344*4882a593Smuzhiyun 			}
345*4882a593Smuzhiyun 		}
346*4882a593Smuzhiyun 		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
347*4882a593Smuzhiyun 			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
348*4882a593Smuzhiyun 				(mode_info->atom_context->bios + data_offset +
349*4882a593Smuzhiyun 				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
350*4882a593Smuzhiyun 			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
351*4882a593Smuzhiyun 								 dep_table);
352*4882a593Smuzhiyun 			if (ret) {
353*4882a593Smuzhiyun 				amdgpu_free_extended_power_table(adev);
354*4882a593Smuzhiyun 				return ret;
355*4882a593Smuzhiyun 			}
356*4882a593Smuzhiyun 		}
357*4882a593Smuzhiyun 		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
358*4882a593Smuzhiyun 			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
359*4882a593Smuzhiyun 				(mode_info->atom_context->bios + data_offset +
360*4882a593Smuzhiyun 				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
361*4882a593Smuzhiyun 			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
362*4882a593Smuzhiyun 								 dep_table);
363*4882a593Smuzhiyun 			if (ret) {
364*4882a593Smuzhiyun 				amdgpu_free_extended_power_table(adev);
365*4882a593Smuzhiyun 				return ret;
366*4882a593Smuzhiyun 			}
367*4882a593Smuzhiyun 		}
368*4882a593Smuzhiyun 		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
369*4882a593Smuzhiyun 			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
370*4882a593Smuzhiyun 				(mode_info->atom_context->bios + data_offset +
371*4882a593Smuzhiyun 				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
372*4882a593Smuzhiyun 			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
373*4882a593Smuzhiyun 								 dep_table);
374*4882a593Smuzhiyun 			if (ret) {
375*4882a593Smuzhiyun 				amdgpu_free_extended_power_table(adev);
376*4882a593Smuzhiyun 				return ret;
377*4882a593Smuzhiyun 			}
378*4882a593Smuzhiyun 		}
379*4882a593Smuzhiyun 		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
380*4882a593Smuzhiyun 			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
381*4882a593Smuzhiyun 				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
382*4882a593Smuzhiyun 				(mode_info->atom_context->bios + data_offset +
383*4882a593Smuzhiyun 				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
384*4882a593Smuzhiyun 			if (clk_v->ucNumEntries) {
385*4882a593Smuzhiyun 				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
386*4882a593Smuzhiyun 					le16_to_cpu(clk_v->entries[0].usSclkLow) |
387*4882a593Smuzhiyun 					(clk_v->entries[0].ucSclkHigh << 16);
388*4882a593Smuzhiyun 				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
389*4882a593Smuzhiyun 					le16_to_cpu(clk_v->entries[0].usMclkLow) |
390*4882a593Smuzhiyun 					(clk_v->entries[0].ucMclkHigh << 16);
391*4882a593Smuzhiyun 				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
392*4882a593Smuzhiyun 					le16_to_cpu(clk_v->entries[0].usVddc);
393*4882a593Smuzhiyun 				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
394*4882a593Smuzhiyun 					le16_to_cpu(clk_v->entries[0].usVddci);
395*4882a593Smuzhiyun 			}
396*4882a593Smuzhiyun 		}
397*4882a593Smuzhiyun 		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
398*4882a593Smuzhiyun 			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
399*4882a593Smuzhiyun 				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
400*4882a593Smuzhiyun 				(mode_info->atom_context->bios + data_offset +
401*4882a593Smuzhiyun 				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
402*4882a593Smuzhiyun 			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
403*4882a593Smuzhiyun 
404*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
405*4882a593Smuzhiyun 				kcalloc(psl->ucNumEntries,
406*4882a593Smuzhiyun 					sizeof(struct amdgpu_phase_shedding_limits_entry),
407*4882a593Smuzhiyun 					GFP_KERNEL);
408*4882a593Smuzhiyun 			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
409*4882a593Smuzhiyun 				amdgpu_free_extended_power_table(adev);
410*4882a593Smuzhiyun 				return -ENOMEM;
411*4882a593Smuzhiyun 			}
412*4882a593Smuzhiyun 
413*4882a593Smuzhiyun 			entry = &psl->entries[0];
414*4882a593Smuzhiyun 			for (i = 0; i < psl->ucNumEntries; i++) {
415*4882a593Smuzhiyun 				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
416*4882a593Smuzhiyun 					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
417*4882a593Smuzhiyun 				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
418*4882a593Smuzhiyun 					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
419*4882a593Smuzhiyun 				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
420*4882a593Smuzhiyun 					le16_to_cpu(entry->usVoltage);
421*4882a593Smuzhiyun 				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
422*4882a593Smuzhiyun 					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
423*4882a593Smuzhiyun 			}
424*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
425*4882a593Smuzhiyun 				psl->ucNumEntries;
426*4882a593Smuzhiyun 		}
427*4882a593Smuzhiyun 	}
428*4882a593Smuzhiyun 
429*4882a593Smuzhiyun 	/* cac data */
430*4882a593Smuzhiyun 	if (le16_to_cpu(power_info->pplib.usTableSize) >=
431*4882a593Smuzhiyun 	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
432*4882a593Smuzhiyun 		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
433*4882a593Smuzhiyun 		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
434*4882a593Smuzhiyun 		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
435*4882a593Smuzhiyun 		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
436*4882a593Smuzhiyun 		if (adev->pm.dpm.tdp_od_limit)
437*4882a593Smuzhiyun 			adev->pm.dpm.power_control = true;
438*4882a593Smuzhiyun 		else
439*4882a593Smuzhiyun 			adev->pm.dpm.power_control = false;
440*4882a593Smuzhiyun 		adev->pm.dpm.tdp_adjustment = 0;
441*4882a593Smuzhiyun 		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
442*4882a593Smuzhiyun 		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
443*4882a593Smuzhiyun 		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
444*4882a593Smuzhiyun 		if (power_info->pplib5.usCACLeakageTableOffset) {
445*4882a593Smuzhiyun 			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
446*4882a593Smuzhiyun 				(ATOM_PPLIB_CAC_Leakage_Table *)
447*4882a593Smuzhiyun 				(mode_info->atom_context->bios + data_offset +
448*4882a593Smuzhiyun 				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
449*4882a593Smuzhiyun 			ATOM_PPLIB_CAC_Leakage_Record *entry;
450*4882a593Smuzhiyun 			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
451*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
452*4882a593Smuzhiyun 			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
453*4882a593Smuzhiyun 				amdgpu_free_extended_power_table(adev);
454*4882a593Smuzhiyun 				return -ENOMEM;
455*4882a593Smuzhiyun 			}
456*4882a593Smuzhiyun 			entry = &cac_table->entries[0];
457*4882a593Smuzhiyun 			for (i = 0; i < cac_table->ucNumEntries; i++) {
458*4882a593Smuzhiyun 				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
459*4882a593Smuzhiyun 					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
460*4882a593Smuzhiyun 						le16_to_cpu(entry->usVddc1);
461*4882a593Smuzhiyun 					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
462*4882a593Smuzhiyun 						le16_to_cpu(entry->usVddc2);
463*4882a593Smuzhiyun 					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
464*4882a593Smuzhiyun 						le16_to_cpu(entry->usVddc3);
465*4882a593Smuzhiyun 				} else {
466*4882a593Smuzhiyun 					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
467*4882a593Smuzhiyun 						le16_to_cpu(entry->usVddc);
468*4882a593Smuzhiyun 					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
469*4882a593Smuzhiyun 						le32_to_cpu(entry->ulLeakageValue);
470*4882a593Smuzhiyun 				}
471*4882a593Smuzhiyun 				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
472*4882a593Smuzhiyun 					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
473*4882a593Smuzhiyun 			}
474*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
475*4882a593Smuzhiyun 		}
476*4882a593Smuzhiyun 	}
477*4882a593Smuzhiyun 
478*4882a593Smuzhiyun 	/* ext tables */
479*4882a593Smuzhiyun 	if (le16_to_cpu(power_info->pplib.usTableSize) >=
480*4882a593Smuzhiyun 	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
481*4882a593Smuzhiyun 		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
482*4882a593Smuzhiyun 			(mode_info->atom_context->bios + data_offset +
483*4882a593Smuzhiyun 			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
484*4882a593Smuzhiyun 		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
485*4882a593Smuzhiyun 			ext_hdr->usVCETableOffset) {
486*4882a593Smuzhiyun 			VCEClockInfoArray *array = (VCEClockInfoArray *)
487*4882a593Smuzhiyun 				(mode_info->atom_context->bios + data_offset +
488*4882a593Smuzhiyun 				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
489*4882a593Smuzhiyun 			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
490*4882a593Smuzhiyun 				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
491*4882a593Smuzhiyun 				(mode_info->atom_context->bios + data_offset +
492*4882a593Smuzhiyun 				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
493*4882a593Smuzhiyun 				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
494*4882a593Smuzhiyun 			ATOM_PPLIB_VCE_State_Table *states =
495*4882a593Smuzhiyun 				(ATOM_PPLIB_VCE_State_Table *)
496*4882a593Smuzhiyun 				(mode_info->atom_context->bios + data_offset +
497*4882a593Smuzhiyun 				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
498*4882a593Smuzhiyun 				 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
499*4882a593Smuzhiyun 				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
500*4882a593Smuzhiyun 			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
501*4882a593Smuzhiyun 			ATOM_PPLIB_VCE_State_Record *state_entry;
502*4882a593Smuzhiyun 			VCEClockInfo *vce_clk;
503*4882a593Smuzhiyun 			u32 size = limits->numEntries *
504*4882a593Smuzhiyun 				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
505*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
506*4882a593Smuzhiyun 				kzalloc(size, GFP_KERNEL);
507*4882a593Smuzhiyun 			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
508*4882a593Smuzhiyun 				amdgpu_free_extended_power_table(adev);
509*4882a593Smuzhiyun 				return -ENOMEM;
510*4882a593Smuzhiyun 			}
511*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
512*4882a593Smuzhiyun 				limits->numEntries;
513*4882a593Smuzhiyun 			entry = &limits->entries[0];
514*4882a593Smuzhiyun 			state_entry = &states->entries[0];
515*4882a593Smuzhiyun 			for (i = 0; i < limits->numEntries; i++) {
516*4882a593Smuzhiyun 				vce_clk = (VCEClockInfo *)
517*4882a593Smuzhiyun 					((u8 *)&array->entries[0] +
518*4882a593Smuzhiyun 					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
519*4882a593Smuzhiyun 				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
520*4882a593Smuzhiyun 					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
521*4882a593Smuzhiyun 				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
522*4882a593Smuzhiyun 					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
523*4882a593Smuzhiyun 				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
524*4882a593Smuzhiyun 					le16_to_cpu(entry->usVoltage);
525*4882a593Smuzhiyun 				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
526*4882a593Smuzhiyun 					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
527*4882a593Smuzhiyun 			}
528*4882a593Smuzhiyun 			adev->pm.dpm.num_of_vce_states =
529*4882a593Smuzhiyun 					states->numEntries > AMD_MAX_VCE_LEVELS ?
530*4882a593Smuzhiyun 					AMD_MAX_VCE_LEVELS : states->numEntries;
531*4882a593Smuzhiyun 			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
532*4882a593Smuzhiyun 				vce_clk = (VCEClockInfo *)
533*4882a593Smuzhiyun 					((u8 *)&array->entries[0] +
534*4882a593Smuzhiyun 					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
535*4882a593Smuzhiyun 				adev->pm.dpm.vce_states[i].evclk =
536*4882a593Smuzhiyun 					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
537*4882a593Smuzhiyun 				adev->pm.dpm.vce_states[i].ecclk =
538*4882a593Smuzhiyun 					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
539*4882a593Smuzhiyun 				adev->pm.dpm.vce_states[i].clk_idx =
540*4882a593Smuzhiyun 					state_entry->ucClockInfoIndex & 0x3f;
541*4882a593Smuzhiyun 				adev->pm.dpm.vce_states[i].pstate =
542*4882a593Smuzhiyun 					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
543*4882a593Smuzhiyun 				state_entry = (ATOM_PPLIB_VCE_State_Record *)
544*4882a593Smuzhiyun 					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
545*4882a593Smuzhiyun 			}
546*4882a593Smuzhiyun 		}
547*4882a593Smuzhiyun 		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
548*4882a593Smuzhiyun 			ext_hdr->usUVDTableOffset) {
549*4882a593Smuzhiyun 			UVDClockInfoArray *array = (UVDClockInfoArray *)
550*4882a593Smuzhiyun 				(mode_info->atom_context->bios + data_offset +
551*4882a593Smuzhiyun 				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
552*4882a593Smuzhiyun 			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
553*4882a593Smuzhiyun 				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
554*4882a593Smuzhiyun 				(mode_info->atom_context->bios + data_offset +
555*4882a593Smuzhiyun 				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
556*4882a593Smuzhiyun 				 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
557*4882a593Smuzhiyun 			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
558*4882a593Smuzhiyun 			u32 size = limits->numEntries *
559*4882a593Smuzhiyun 				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
560*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
561*4882a593Smuzhiyun 				kzalloc(size, GFP_KERNEL);
562*4882a593Smuzhiyun 			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
563*4882a593Smuzhiyun 				amdgpu_free_extended_power_table(adev);
564*4882a593Smuzhiyun 				return -ENOMEM;
565*4882a593Smuzhiyun 			}
566*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
567*4882a593Smuzhiyun 				limits->numEntries;
568*4882a593Smuzhiyun 			entry = &limits->entries[0];
569*4882a593Smuzhiyun 			for (i = 0; i < limits->numEntries; i++) {
570*4882a593Smuzhiyun 				UVDClockInfo *uvd_clk = (UVDClockInfo *)
571*4882a593Smuzhiyun 					((u8 *)&array->entries[0] +
572*4882a593Smuzhiyun 					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
573*4882a593Smuzhiyun 				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
574*4882a593Smuzhiyun 					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
575*4882a593Smuzhiyun 				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
576*4882a593Smuzhiyun 					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
577*4882a593Smuzhiyun 				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
578*4882a593Smuzhiyun 					le16_to_cpu(entry->usVoltage);
579*4882a593Smuzhiyun 				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
580*4882a593Smuzhiyun 					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
581*4882a593Smuzhiyun 			}
582*4882a593Smuzhiyun 		}
583*4882a593Smuzhiyun 		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
584*4882a593Smuzhiyun 			ext_hdr->usSAMUTableOffset) {
585*4882a593Smuzhiyun 			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
586*4882a593Smuzhiyun 				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
587*4882a593Smuzhiyun 				(mode_info->atom_context->bios + data_offset +
588*4882a593Smuzhiyun 				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
589*4882a593Smuzhiyun 			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
590*4882a593Smuzhiyun 			u32 size = limits->numEntries *
591*4882a593Smuzhiyun 				sizeof(struct amdgpu_clock_voltage_dependency_entry);
592*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
593*4882a593Smuzhiyun 				kzalloc(size, GFP_KERNEL);
594*4882a593Smuzhiyun 			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
595*4882a593Smuzhiyun 				amdgpu_free_extended_power_table(adev);
596*4882a593Smuzhiyun 				return -ENOMEM;
597*4882a593Smuzhiyun 			}
598*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
599*4882a593Smuzhiyun 				limits->numEntries;
600*4882a593Smuzhiyun 			entry = &limits->entries[0];
601*4882a593Smuzhiyun 			for (i = 0; i < limits->numEntries; i++) {
602*4882a593Smuzhiyun 				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
603*4882a593Smuzhiyun 					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
604*4882a593Smuzhiyun 				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
605*4882a593Smuzhiyun 					le16_to_cpu(entry->usVoltage);
606*4882a593Smuzhiyun 				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
607*4882a593Smuzhiyun 					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
608*4882a593Smuzhiyun 			}
609*4882a593Smuzhiyun 		}
610*4882a593Smuzhiyun 		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
611*4882a593Smuzhiyun 		    ext_hdr->usPPMTableOffset) {
612*4882a593Smuzhiyun 			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
613*4882a593Smuzhiyun 				(mode_info->atom_context->bios + data_offset +
614*4882a593Smuzhiyun 				 le16_to_cpu(ext_hdr->usPPMTableOffset));
615*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.ppm_table =
616*4882a593Smuzhiyun 				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
617*4882a593Smuzhiyun 			if (!adev->pm.dpm.dyn_state.ppm_table) {
618*4882a593Smuzhiyun 				amdgpu_free_extended_power_table(adev);
619*4882a593Smuzhiyun 				return -ENOMEM;
620*4882a593Smuzhiyun 			}
621*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
622*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
623*4882a593Smuzhiyun 				le16_to_cpu(ppm->usCpuCoreNumber);
624*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
625*4882a593Smuzhiyun 				le32_to_cpu(ppm->ulPlatformTDP);
626*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
627*4882a593Smuzhiyun 				le32_to_cpu(ppm->ulSmallACPlatformTDP);
628*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
629*4882a593Smuzhiyun 				le32_to_cpu(ppm->ulPlatformTDC);
630*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
631*4882a593Smuzhiyun 				le32_to_cpu(ppm->ulSmallACPlatformTDC);
632*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
633*4882a593Smuzhiyun 				le32_to_cpu(ppm->ulApuTDP);
634*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
635*4882a593Smuzhiyun 				le32_to_cpu(ppm->ulDGpuTDP);
636*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
637*4882a593Smuzhiyun 				le32_to_cpu(ppm->ulDGpuUlvPower);
638*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.ppm_table->tj_max =
639*4882a593Smuzhiyun 				le32_to_cpu(ppm->ulTjmax);
640*4882a593Smuzhiyun 		}
641*4882a593Smuzhiyun 		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
642*4882a593Smuzhiyun 			ext_hdr->usACPTableOffset) {
643*4882a593Smuzhiyun 			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
644*4882a593Smuzhiyun 				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
645*4882a593Smuzhiyun 				(mode_info->atom_context->bios + data_offset +
646*4882a593Smuzhiyun 				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
647*4882a593Smuzhiyun 			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
648*4882a593Smuzhiyun 			u32 size = limits->numEntries *
649*4882a593Smuzhiyun 				sizeof(struct amdgpu_clock_voltage_dependency_entry);
650*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
651*4882a593Smuzhiyun 				kzalloc(size, GFP_KERNEL);
652*4882a593Smuzhiyun 			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
653*4882a593Smuzhiyun 				amdgpu_free_extended_power_table(adev);
654*4882a593Smuzhiyun 				return -ENOMEM;
655*4882a593Smuzhiyun 			}
656*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
657*4882a593Smuzhiyun 				limits->numEntries;
658*4882a593Smuzhiyun 			entry = &limits->entries[0];
659*4882a593Smuzhiyun 			for (i = 0; i < limits->numEntries; i++) {
660*4882a593Smuzhiyun 				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
661*4882a593Smuzhiyun 					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
662*4882a593Smuzhiyun 				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
663*4882a593Smuzhiyun 					le16_to_cpu(entry->usVoltage);
664*4882a593Smuzhiyun 				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
665*4882a593Smuzhiyun 					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
666*4882a593Smuzhiyun 			}
667*4882a593Smuzhiyun 		}
668*4882a593Smuzhiyun 		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
669*4882a593Smuzhiyun 			ext_hdr->usPowerTuneTableOffset) {
670*4882a593Smuzhiyun 			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
671*4882a593Smuzhiyun 					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
672*4882a593Smuzhiyun 			ATOM_PowerTune_Table *pt;
673*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.cac_tdp_table =
674*4882a593Smuzhiyun 				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
675*4882a593Smuzhiyun 			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
676*4882a593Smuzhiyun 				amdgpu_free_extended_power_table(adev);
677*4882a593Smuzhiyun 				return -ENOMEM;
678*4882a593Smuzhiyun 			}
679*4882a593Smuzhiyun 			if (rev > 0) {
680*4882a593Smuzhiyun 				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
681*4882a593Smuzhiyun 					(mode_info->atom_context->bios + data_offset +
682*4882a593Smuzhiyun 					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
683*4882a593Smuzhiyun 				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
684*4882a593Smuzhiyun 					ppt->usMaximumPowerDeliveryLimit;
685*4882a593Smuzhiyun 				pt = &ppt->power_tune_table;
686*4882a593Smuzhiyun 			} else {
687*4882a593Smuzhiyun 				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
688*4882a593Smuzhiyun 					(mode_info->atom_context->bios + data_offset +
689*4882a593Smuzhiyun 					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
690*4882a593Smuzhiyun 				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
691*4882a593Smuzhiyun 				pt = &ppt->power_tune_table;
692*4882a593Smuzhiyun 			}
693*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
694*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
695*4882a593Smuzhiyun 				le16_to_cpu(pt->usConfigurableTDP);
696*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
697*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
698*4882a593Smuzhiyun 				le16_to_cpu(pt->usBatteryPowerLimit);
699*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
700*4882a593Smuzhiyun 				le16_to_cpu(pt->usSmallPowerLimit);
701*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
702*4882a593Smuzhiyun 				le16_to_cpu(pt->usLowCACLeakage);
703*4882a593Smuzhiyun 			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
704*4882a593Smuzhiyun 				le16_to_cpu(pt->usHighCACLeakage);
705*4882a593Smuzhiyun 		}
706*4882a593Smuzhiyun 		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
707*4882a593Smuzhiyun 				ext_hdr->usSclkVddgfxTableOffset) {
708*4882a593Smuzhiyun 			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
709*4882a593Smuzhiyun 				(mode_info->atom_context->bios + data_offset +
710*4882a593Smuzhiyun 				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
711*4882a593Smuzhiyun 			ret = amdgpu_parse_clk_voltage_dep_table(
712*4882a593Smuzhiyun 					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
713*4882a593Smuzhiyun 					dep_table);
714*4882a593Smuzhiyun 			if (ret) {
715*4882a593Smuzhiyun 				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
716*4882a593Smuzhiyun 				return ret;
717*4882a593Smuzhiyun 			}
718*4882a593Smuzhiyun 		}
719*4882a593Smuzhiyun 	}
720*4882a593Smuzhiyun 
721*4882a593Smuzhiyun 	return 0;
722*4882a593Smuzhiyun }
723*4882a593Smuzhiyun 
amdgpu_free_extended_power_table(struct amdgpu_device * adev)724*4882a593Smuzhiyun void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
725*4882a593Smuzhiyun {
726*4882a593Smuzhiyun 	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;
727*4882a593Smuzhiyun 
728*4882a593Smuzhiyun 	kfree(dyn_state->vddc_dependency_on_sclk.entries);
729*4882a593Smuzhiyun 	kfree(dyn_state->vddci_dependency_on_mclk.entries);
730*4882a593Smuzhiyun 	kfree(dyn_state->vddc_dependency_on_mclk.entries);
731*4882a593Smuzhiyun 	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
732*4882a593Smuzhiyun 	kfree(dyn_state->cac_leakage_table.entries);
733*4882a593Smuzhiyun 	kfree(dyn_state->phase_shedding_limits_table.entries);
734*4882a593Smuzhiyun 	kfree(dyn_state->ppm_table);
735*4882a593Smuzhiyun 	kfree(dyn_state->cac_tdp_table);
736*4882a593Smuzhiyun 	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
737*4882a593Smuzhiyun 	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
738*4882a593Smuzhiyun 	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
739*4882a593Smuzhiyun 	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
740*4882a593Smuzhiyun 	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
741*4882a593Smuzhiyun }
742*4882a593Smuzhiyun 
/*
 * Human-readable names for the thermal controller types reported in the
 * vbios powerplay table (ATOM_PPLIB_THERMALCONTROLLER.ucType); the array is
 * indexed directly by that ucType value. Used below for log messages and,
 * for external i2c controllers, as the i2c board-info device name.
 */
static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};
765*4882a593Smuzhiyun 
amdgpu_add_thermal_controller(struct amdgpu_device * adev)766*4882a593Smuzhiyun void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
767*4882a593Smuzhiyun {
768*4882a593Smuzhiyun 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
769*4882a593Smuzhiyun 	ATOM_PPLIB_POWERPLAYTABLE *power_table;
770*4882a593Smuzhiyun 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
771*4882a593Smuzhiyun 	ATOM_PPLIB_THERMALCONTROLLER *controller;
772*4882a593Smuzhiyun 	struct amdgpu_i2c_bus_rec i2c_bus;
773*4882a593Smuzhiyun 	u16 data_offset;
774*4882a593Smuzhiyun 	u8 frev, crev;
775*4882a593Smuzhiyun 
776*4882a593Smuzhiyun 	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
777*4882a593Smuzhiyun 				   &frev, &crev, &data_offset))
778*4882a593Smuzhiyun 		return;
779*4882a593Smuzhiyun 	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
780*4882a593Smuzhiyun 		(mode_info->atom_context->bios + data_offset);
781*4882a593Smuzhiyun 	controller = &power_table->sThermalController;
782*4882a593Smuzhiyun 
783*4882a593Smuzhiyun 	/* add the i2c bus for thermal/fan chip */
784*4882a593Smuzhiyun 	if (controller->ucType > 0) {
785*4882a593Smuzhiyun 		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
786*4882a593Smuzhiyun 			adev->pm.no_fan = true;
787*4882a593Smuzhiyun 		adev->pm.fan_pulses_per_revolution =
788*4882a593Smuzhiyun 			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
789*4882a593Smuzhiyun 		if (adev->pm.fan_pulses_per_revolution) {
790*4882a593Smuzhiyun 			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
791*4882a593Smuzhiyun 			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
792*4882a593Smuzhiyun 		}
793*4882a593Smuzhiyun 		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
794*4882a593Smuzhiyun 			DRM_INFO("Internal thermal controller %s fan control\n",
795*4882a593Smuzhiyun 				 (controller->ucFanParameters &
796*4882a593Smuzhiyun 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
797*4882a593Smuzhiyun 			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
798*4882a593Smuzhiyun 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
799*4882a593Smuzhiyun 			DRM_INFO("Internal thermal controller %s fan control\n",
800*4882a593Smuzhiyun 				 (controller->ucFanParameters &
801*4882a593Smuzhiyun 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
802*4882a593Smuzhiyun 			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
803*4882a593Smuzhiyun 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
804*4882a593Smuzhiyun 			DRM_INFO("Internal thermal controller %s fan control\n",
805*4882a593Smuzhiyun 				 (controller->ucFanParameters &
806*4882a593Smuzhiyun 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
807*4882a593Smuzhiyun 			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
808*4882a593Smuzhiyun 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
809*4882a593Smuzhiyun 			DRM_INFO("Internal thermal controller %s fan control\n",
810*4882a593Smuzhiyun 				 (controller->ucFanParameters &
811*4882a593Smuzhiyun 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
812*4882a593Smuzhiyun 			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
813*4882a593Smuzhiyun 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
814*4882a593Smuzhiyun 			DRM_INFO("Internal thermal controller %s fan control\n",
815*4882a593Smuzhiyun 				 (controller->ucFanParameters &
816*4882a593Smuzhiyun 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
817*4882a593Smuzhiyun 			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
818*4882a593Smuzhiyun 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
819*4882a593Smuzhiyun 			DRM_INFO("Internal thermal controller %s fan control\n",
820*4882a593Smuzhiyun 				 (controller->ucFanParameters &
821*4882a593Smuzhiyun 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
822*4882a593Smuzhiyun 			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
823*4882a593Smuzhiyun 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
824*4882a593Smuzhiyun 			DRM_INFO("Internal thermal controller %s fan control\n",
825*4882a593Smuzhiyun 				 (controller->ucFanParameters &
826*4882a593Smuzhiyun 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
827*4882a593Smuzhiyun 			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
828*4882a593Smuzhiyun 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
829*4882a593Smuzhiyun 			DRM_INFO("Internal thermal controller %s fan control\n",
830*4882a593Smuzhiyun 				 (controller->ucFanParameters &
831*4882a593Smuzhiyun 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
832*4882a593Smuzhiyun 			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
833*4882a593Smuzhiyun 		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
834*4882a593Smuzhiyun 			DRM_INFO("External GPIO thermal controller %s fan control\n",
835*4882a593Smuzhiyun 				 (controller->ucFanParameters &
836*4882a593Smuzhiyun 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
837*4882a593Smuzhiyun 			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
838*4882a593Smuzhiyun 		} else if (controller->ucType ==
839*4882a593Smuzhiyun 			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
840*4882a593Smuzhiyun 			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
841*4882a593Smuzhiyun 				 (controller->ucFanParameters &
842*4882a593Smuzhiyun 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
843*4882a593Smuzhiyun 			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
844*4882a593Smuzhiyun 		} else if (controller->ucType ==
845*4882a593Smuzhiyun 			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
846*4882a593Smuzhiyun 			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
847*4882a593Smuzhiyun 				 (controller->ucFanParameters &
848*4882a593Smuzhiyun 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
849*4882a593Smuzhiyun 			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
850*4882a593Smuzhiyun 		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
851*4882a593Smuzhiyun 			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
852*4882a593Smuzhiyun 				 pp_lib_thermal_controller_names[controller->ucType],
853*4882a593Smuzhiyun 				 controller->ucI2cAddress >> 1,
854*4882a593Smuzhiyun 				 (controller->ucFanParameters &
855*4882a593Smuzhiyun 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
856*4882a593Smuzhiyun 			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
857*4882a593Smuzhiyun 			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
858*4882a593Smuzhiyun 			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
859*4882a593Smuzhiyun 			if (adev->pm.i2c_bus) {
860*4882a593Smuzhiyun 				struct i2c_board_info info = { };
861*4882a593Smuzhiyun 				const char *name = pp_lib_thermal_controller_names[controller->ucType];
862*4882a593Smuzhiyun 				info.addr = controller->ucI2cAddress >> 1;
863*4882a593Smuzhiyun 				strlcpy(info.type, name, sizeof(info.type));
864*4882a593Smuzhiyun 				i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
865*4882a593Smuzhiyun 			}
866*4882a593Smuzhiyun 		} else {
867*4882a593Smuzhiyun 			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
868*4882a593Smuzhiyun 				 controller->ucType,
869*4882a593Smuzhiyun 				 controller->ucI2cAddress >> 1,
870*4882a593Smuzhiyun 				 (controller->ucFanParameters &
871*4882a593Smuzhiyun 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
872*4882a593Smuzhiyun 		}
873*4882a593Smuzhiyun 	}
874*4882a593Smuzhiyun }
875*4882a593Smuzhiyun 
/*
 * Pick the PCIe gen to use: an explicit asic_gen wins; otherwise honour
 * default_gen capped by what the system mask reports as supported,
 * falling back to gen1.
 */
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	/* a concrete asic request is taken as-is */
	if (asic_gen == AMDGPU_PCIE_GEN1 ||
	    asic_gen == AMDGPU_PCIE_GEN2 ||
	    asic_gen == AMDGPU_PCIE_GEN3)
		return asic_gen;

	/* otherwise use the default, limited by system support */
	if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
	    (default_gen == AMDGPU_PCIE_GEN3))
		return AMDGPU_PCIE_GEN3;
	if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
	    (default_gen == AMDGPU_PCIE_GEN2))
		return AMDGPU_PCIE_GEN2;

	return AMDGPU_PCIE_GEN1;
}
900*4882a593Smuzhiyun 
901*4882a593Smuzhiyun struct amd_vce_state*
amdgpu_get_vce_clock_state(void * handle,u32 idx)902*4882a593Smuzhiyun amdgpu_get_vce_clock_state(void *handle, u32 idx)
903*4882a593Smuzhiyun {
904*4882a593Smuzhiyun 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
905*4882a593Smuzhiyun 
906*4882a593Smuzhiyun 	if (idx < adev->pm.dpm.num_of_vce_states)
907*4882a593Smuzhiyun 		return &adev->pm.dpm.vce_states[idx];
908*4882a593Smuzhiyun 
909*4882a593Smuzhiyun 	return NULL;
910*4882a593Smuzhiyun }
911*4882a593Smuzhiyun 
/*
 * Get the current gfx clock in 10 kHz units.
 *
 * @adev: amdgpu device
 * @low:  true to return the minimum of the dpm range, false for the maximum
 *
 * Returns the clock in 10 kHz, or 0 on failure. On the swsmu path the SMU
 * reports the range in MHz, hence the * 100 conversion.
 */
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	uint32_t clk_freq;
	int ret;

	if (is_support_sw_smu(adev)) {
		ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
					     low ? &clk_freq : NULL,
					     !low ? &clk_freq : NULL);
		if (ret)
			return 0;
		return clk_freq * 100;
	}

	/* guard against a missing powerplay callback instead of oopsing */
	if (!adev->powerplay.pp_funcs ||
	    !adev->powerplay.pp_funcs->get_sclk)
		return 0;

	return adev->powerplay.pp_funcs->get_sclk(adev->powerplay.pp_handle, low);
}
928*4882a593Smuzhiyun 
/*
 * Get the current memory clock in 10 kHz units.
 *
 * @adev: amdgpu device
 * @low:  true to return the minimum of the dpm range, false for the maximum
 *
 * Returns the clock in 10 kHz, or 0 on failure. On the swsmu path the SMU
 * reports the range in MHz, hence the * 100 conversion.
 */
int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	uint32_t clk_freq;
	int ret;

	if (is_support_sw_smu(adev)) {
		ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
					     low ? &clk_freq : NULL,
					     !low ? &clk_freq : NULL);
		if (ret)
			return 0;
		return clk_freq * 100;
	}

	/* guard against a missing powerplay callback instead of oopsing */
	if (!adev->powerplay.pp_funcs ||
	    !adev->powerplay.pp_funcs->get_mclk)
		return 0;

	return adev->powerplay.pp_funcs->get_mclk(adev->powerplay.pp_handle, low);
}
945*4882a593Smuzhiyun 
/*
 * Gate or ungate power for an IP block via the SMU.
 *
 * @adev:       amdgpu device
 * @block_type: AMD_IP_BLOCK_TYPE_* to act on
 * @gate:       true to power gate, false to ungate
 *
 * Returns 0 on success or a negative error code from the backend.
 */
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	bool swsmu = is_support_sw_smu(adev);
	int ret = 0;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
		if (swsmu) {
			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
			break;
		}
		if (pp_funcs && pp_funcs->set_powergating_by_smu) {
			/*
			 * TODO: need a better lock mechanism
			 *
			 * adev->pm.mutex is taken here for the UVD/VCE cases
			 * only; other callers may already hold locks taken in
			 * amdgpu_pm.c, and locking unconditionally produced a
			 * reported deadlock (hung ocltst task blocked >120s in
			 * mutex_lock via pp_dpm_force_performance_level ->
			 * gfx_v8_0_set_powergating_state ->
			 * amdgpu_dpm_set_powergating_by_smu). This is the
			 * quick fix for that report.
			 */
			mutex_lock(&adev->pm.mutex);
			ret = pp_funcs->set_powergating_by_smu(pp_handle,
							       block_type, gate);
			mutex_unlock(&adev->pm.mutex);
		}
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
		if (swsmu)
			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
		else if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = pp_funcs->set_powergating_by_smu(pp_handle,
							       block_type, gate);
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		/* JPEG is handled by the swsmu path only */
		if (swsmu)
			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
		break;
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = pp_funcs->set_powergating_by_smu(pp_handle,
							       block_type, gate);
		break;
	default:
		break;
	}

	return ret;
}
1017*4882a593Smuzhiyun 
amdgpu_dpm_baco_enter(struct amdgpu_device * adev)1018*4882a593Smuzhiyun int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
1019*4882a593Smuzhiyun {
1020*4882a593Smuzhiyun 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1021*4882a593Smuzhiyun 	void *pp_handle = adev->powerplay.pp_handle;
1022*4882a593Smuzhiyun 	struct smu_context *smu = &adev->smu;
1023*4882a593Smuzhiyun 	int ret = 0;
1024*4882a593Smuzhiyun 
1025*4882a593Smuzhiyun 	if (is_support_sw_smu(adev)) {
1026*4882a593Smuzhiyun 		ret = smu_baco_enter(smu);
1027*4882a593Smuzhiyun 	} else {
1028*4882a593Smuzhiyun 		if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1029*4882a593Smuzhiyun 			return -ENOENT;
1030*4882a593Smuzhiyun 
1031*4882a593Smuzhiyun 		/* enter BACO state */
1032*4882a593Smuzhiyun 		ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
1033*4882a593Smuzhiyun 	}
1034*4882a593Smuzhiyun 
1035*4882a593Smuzhiyun 	return ret;
1036*4882a593Smuzhiyun }
1037*4882a593Smuzhiyun 
amdgpu_dpm_baco_exit(struct amdgpu_device * adev)1038*4882a593Smuzhiyun int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
1039*4882a593Smuzhiyun {
1040*4882a593Smuzhiyun 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1041*4882a593Smuzhiyun 	void *pp_handle = adev->powerplay.pp_handle;
1042*4882a593Smuzhiyun 	struct smu_context *smu = &adev->smu;
1043*4882a593Smuzhiyun 	int ret = 0;
1044*4882a593Smuzhiyun 
1045*4882a593Smuzhiyun 	if (is_support_sw_smu(adev)) {
1046*4882a593Smuzhiyun 		ret = smu_baco_exit(smu);
1047*4882a593Smuzhiyun 	} else {
1048*4882a593Smuzhiyun 		if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1049*4882a593Smuzhiyun 			return -ENOENT;
1050*4882a593Smuzhiyun 
1051*4882a593Smuzhiyun 		/* exit BACO state */
1052*4882a593Smuzhiyun 		ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1053*4882a593Smuzhiyun 	}
1054*4882a593Smuzhiyun 
1055*4882a593Smuzhiyun 	return ret;
1056*4882a593Smuzhiyun }
1057*4882a593Smuzhiyun 
amdgpu_dpm_set_mp1_state(struct amdgpu_device * adev,enum pp_mp1_state mp1_state)1058*4882a593Smuzhiyun int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
1059*4882a593Smuzhiyun 			     enum pp_mp1_state mp1_state)
1060*4882a593Smuzhiyun {
1061*4882a593Smuzhiyun 	int ret = 0;
1062*4882a593Smuzhiyun 
1063*4882a593Smuzhiyun 	if (is_support_sw_smu(adev)) {
1064*4882a593Smuzhiyun 		ret = smu_set_mp1_state(&adev->smu, mp1_state);
1065*4882a593Smuzhiyun 	} else if (adev->powerplay.pp_funcs &&
1066*4882a593Smuzhiyun 		   adev->powerplay.pp_funcs->set_mp1_state) {
1067*4882a593Smuzhiyun 		ret = adev->powerplay.pp_funcs->set_mp1_state(
1068*4882a593Smuzhiyun 				adev->powerplay.pp_handle,
1069*4882a593Smuzhiyun 				mp1_state);
1070*4882a593Smuzhiyun 	}
1071*4882a593Smuzhiyun 
1072*4882a593Smuzhiyun 	return ret;
1073*4882a593Smuzhiyun }
1074*4882a593Smuzhiyun 
amdgpu_dpm_is_baco_supported(struct amdgpu_device * adev)1075*4882a593Smuzhiyun bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
1076*4882a593Smuzhiyun {
1077*4882a593Smuzhiyun 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1078*4882a593Smuzhiyun 	void *pp_handle = adev->powerplay.pp_handle;
1079*4882a593Smuzhiyun 	struct smu_context *smu = &adev->smu;
1080*4882a593Smuzhiyun 	bool baco_cap;
1081*4882a593Smuzhiyun 
1082*4882a593Smuzhiyun 	if (is_support_sw_smu(adev)) {
1083*4882a593Smuzhiyun 		return smu_baco_is_support(smu);
1084*4882a593Smuzhiyun 	} else {
1085*4882a593Smuzhiyun 		if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
1086*4882a593Smuzhiyun 			return false;
1087*4882a593Smuzhiyun 
1088*4882a593Smuzhiyun 		if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
1089*4882a593Smuzhiyun 			return false;
1090*4882a593Smuzhiyun 
1091*4882a593Smuzhiyun 		return baco_cap ? true : false;
1092*4882a593Smuzhiyun 	}
1093*4882a593Smuzhiyun }
1094*4882a593Smuzhiyun 
amdgpu_dpm_mode2_reset(struct amdgpu_device * adev)1095*4882a593Smuzhiyun int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
1096*4882a593Smuzhiyun {
1097*4882a593Smuzhiyun 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1098*4882a593Smuzhiyun 	void *pp_handle = adev->powerplay.pp_handle;
1099*4882a593Smuzhiyun 	struct smu_context *smu = &adev->smu;
1100*4882a593Smuzhiyun 
1101*4882a593Smuzhiyun 	if (is_support_sw_smu(adev)) {
1102*4882a593Smuzhiyun 		return smu_mode2_reset(smu);
1103*4882a593Smuzhiyun 	} else {
1104*4882a593Smuzhiyun 		if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
1105*4882a593Smuzhiyun 			return -ENOENT;
1106*4882a593Smuzhiyun 
1107*4882a593Smuzhiyun 		return pp_funcs->asic_reset_mode_2(pp_handle);
1108*4882a593Smuzhiyun 	}
1109*4882a593Smuzhiyun }
1110*4882a593Smuzhiyun 
amdgpu_dpm_baco_reset(struct amdgpu_device * adev)1111*4882a593Smuzhiyun int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
1112*4882a593Smuzhiyun {
1113*4882a593Smuzhiyun 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1114*4882a593Smuzhiyun 	void *pp_handle = adev->powerplay.pp_handle;
1115*4882a593Smuzhiyun 	struct smu_context *smu = &adev->smu;
1116*4882a593Smuzhiyun 	int ret = 0;
1117*4882a593Smuzhiyun 
1118*4882a593Smuzhiyun 	if (is_support_sw_smu(adev)) {
1119*4882a593Smuzhiyun 		ret = smu_baco_enter(smu);
1120*4882a593Smuzhiyun 		if (ret)
1121*4882a593Smuzhiyun 			return ret;
1122*4882a593Smuzhiyun 
1123*4882a593Smuzhiyun 		ret = smu_baco_exit(smu);
1124*4882a593Smuzhiyun 		if (ret)
1125*4882a593Smuzhiyun 			return ret;
1126*4882a593Smuzhiyun 	} else {
1127*4882a593Smuzhiyun 		if (!pp_funcs
1128*4882a593Smuzhiyun 		    || !pp_funcs->set_asic_baco_state)
1129*4882a593Smuzhiyun 			return -ENOENT;
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun 		/* enter BACO state */
1132*4882a593Smuzhiyun 		ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
1133*4882a593Smuzhiyun 		if (ret)
1134*4882a593Smuzhiyun 			return ret;
1135*4882a593Smuzhiyun 
1136*4882a593Smuzhiyun 		/* exit BACO state */
1137*4882a593Smuzhiyun 		ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1138*4882a593Smuzhiyun 		if (ret)
1139*4882a593Smuzhiyun 			return ret;
1140*4882a593Smuzhiyun 	}
1141*4882a593Smuzhiyun 
1142*4882a593Smuzhiyun 	return 0;
1143*4882a593Smuzhiyun }
1144*4882a593Smuzhiyun 
amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device * adev)1145*4882a593Smuzhiyun bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
1146*4882a593Smuzhiyun {
1147*4882a593Smuzhiyun 	struct smu_context *smu = &adev->smu;
1148*4882a593Smuzhiyun 
1149*4882a593Smuzhiyun 	if (is_support_sw_smu(adev))
1150*4882a593Smuzhiyun 		return smu_mode1_reset_is_support(smu);
1151*4882a593Smuzhiyun 
1152*4882a593Smuzhiyun 	return false;
1153*4882a593Smuzhiyun }
1154*4882a593Smuzhiyun 
amdgpu_dpm_mode1_reset(struct amdgpu_device * adev)1155*4882a593Smuzhiyun int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
1156*4882a593Smuzhiyun {
1157*4882a593Smuzhiyun 	struct smu_context *smu = &adev->smu;
1158*4882a593Smuzhiyun 
1159*4882a593Smuzhiyun 	if (is_support_sw_smu(adev))
1160*4882a593Smuzhiyun 		return smu_mode1_reset(smu);
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun 	return -EOPNOTSUPP;
1163*4882a593Smuzhiyun }
1164*4882a593Smuzhiyun 
/**
 * amdgpu_dpm_switch_power_profile - enable or disable a power profile
 * @adev: amdgpu device pointer
 * @type: power profile (e.g. compute, VR, video)
 * @en: true to enable the profile, false to disable it
 *
 * Returns 0 on success or if no backend implements the hook, otherwise a
 * negative error code from the backend.
 */
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (is_support_sw_smu(adev))
		ret = smu_switch_power_profile(&adev->smu, type, en);
	else if (funcs && funcs->switch_power_profile)
		ret = funcs->switch_power_profile(adev->powerplay.pp_handle,
						  type, en);

	return ret;
}
1180*4882a593Smuzhiyun 
/**
 * amdgpu_dpm_set_xgmi_pstate - set the XGMI link pstate
 * @adev: amdgpu device pointer
 * @pstate: requested XGMI pstate
 *
 * Returns 0 on success or if no backend implements the hook, otherwise a
 * negative error code from the backend.
 */
int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (is_support_sw_smu(adev))
		ret = smu_set_xgmi_pstate(&adev->smu, pstate);
	else if (funcs && funcs->set_xgmi_pstate)
		ret = funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
					     pstate);

	return ret;
}
1195*4882a593Smuzhiyun 
/**
 * amdgpu_dpm_set_df_cstate - allow or disallow data-fabric C-states
 * @adev: amdgpu device pointer
 * @cstate: requested DF cstate setting
 *
 * Returns 0 on success or if no backend implements the hook, otherwise a
 * negative error code from the backend.
 */
int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (is_support_sw_smu(adev))
		ret = smu_set_df_cstate(&adev->smu, cstate);
	else if (funcs && funcs->set_df_cstate)
		ret = funcs->set_df_cstate(adev->powerplay.pp_handle, cstate);

	return ret;
}
1212*4882a593Smuzhiyun 
/**
 * amdgpu_dpm_allow_xgmi_power_down - allow or block XGMI power down
 * @adev: amdgpu device pointer
 * @en: true to allow XGMI power down, false to block it
 *
 * Only implemented on the SW SMU path; returns 0 (no-op) otherwise.
 */
int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
{
	if (!is_support_sw_smu(adev))
		return 0;

	return smu_allow_xgmi_power_down(&adev->smu, en);
}
1222*4882a593Smuzhiyun 
amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device * adev)1223*4882a593Smuzhiyun int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
1224*4882a593Smuzhiyun {
1225*4882a593Smuzhiyun 	void *pp_handle = adev->powerplay.pp_handle;
1226*4882a593Smuzhiyun 	const struct amd_pm_funcs *pp_funcs =
1227*4882a593Smuzhiyun 			adev->powerplay.pp_funcs;
1228*4882a593Smuzhiyun 	struct smu_context *smu = &adev->smu;
1229*4882a593Smuzhiyun 	int ret = 0;
1230*4882a593Smuzhiyun 
1231*4882a593Smuzhiyun 	if (is_support_sw_smu(adev))
1232*4882a593Smuzhiyun 		ret = smu_enable_mgpu_fan_boost(smu);
1233*4882a593Smuzhiyun 	else if (pp_funcs && pp_funcs->enable_mgpu_fan_boost)
1234*4882a593Smuzhiyun 		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
1235*4882a593Smuzhiyun 
1236*4882a593Smuzhiyun 	return ret;
1237*4882a593Smuzhiyun }
1238*4882a593Smuzhiyun 
/**
 * amdgpu_dpm_set_clockgating_by_smu - request SMU-driven clock gating
 * @adev: amdgpu device pointer
 * @msg_id: encoded block/gating request understood by the SMU
 *
 * Legacy powerplay only; returns 0 if the hook is absent, otherwise the
 * backend's result.
 */
int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (funcs && funcs->set_clockgating_by_smu)
		ret = funcs->set_clockgating_by_smu(adev->powerplay.pp_handle,
						    msg_id);

	return ret;
}
1253*4882a593Smuzhiyun 
/**
 * amdgpu_dpm_smu_i2c_bus_access - acquire or release the SMU-owned i2c bus
 * @adev: amdgpu device pointer
 * @acquire: true to acquire the bus, false to release it
 *
 * Returns -EOPNOTSUPP when no backend implements the hook, otherwise the
 * backend's result.
 */
int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;

	if (!funcs || !funcs->smu_i2c_bus_access)
		return -EOPNOTSUPP;

	return funcs->smu_i2c_bus_access(adev->powerplay.pp_handle, acquire);
}
1268*4882a593Smuzhiyun 
amdgpu_pm_acpi_event_handler(struct amdgpu_device * adev)1269*4882a593Smuzhiyun void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
1270*4882a593Smuzhiyun {
1271*4882a593Smuzhiyun 	if (adev->pm.dpm_enabled) {
1272*4882a593Smuzhiyun 		mutex_lock(&adev->pm.mutex);
1273*4882a593Smuzhiyun 		if (power_supply_is_system_supplied() > 0)
1274*4882a593Smuzhiyun 			adev->pm.ac_power = true;
1275*4882a593Smuzhiyun 		else
1276*4882a593Smuzhiyun 			adev->pm.ac_power = false;
1277*4882a593Smuzhiyun 		if (adev->powerplay.pp_funcs &&
1278*4882a593Smuzhiyun 		    adev->powerplay.pp_funcs->enable_bapm)
1279*4882a593Smuzhiyun 			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
1280*4882a593Smuzhiyun 		mutex_unlock(&adev->pm.mutex);
1281*4882a593Smuzhiyun 
1282*4882a593Smuzhiyun 		if (is_support_sw_smu(adev))
1283*4882a593Smuzhiyun 			smu_set_ac_dc(&adev->smu);
1284*4882a593Smuzhiyun 	}
1285*4882a593Smuzhiyun }
1286*4882a593Smuzhiyun 
/**
 * amdgpu_dpm_read_sensor - read a power-management sensor value
 * @adev: amdgpu device pointer
 * @sensor: which sensor to read
 * @data: output buffer for the sensor value
 * @size: in: capacity of @data; out: bytes written by the backend
 *
 * Returns 0 on success, -EINVAL for NULL arguments or when no backend
 * implements the hook, otherwise the backend's error code.
 */
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;

	if (!data || !size)
		return -EINVAL;

	if (is_support_sw_smu(adev))
		return smu_read_sensor(&adev->smu, sensor, data, size);

	if (funcs && funcs->read_sensor)
		return funcs->read_sensor(adev->powerplay.pp_handle,
					  sensor, data, size);

	return -EINVAL;
}
1307*4882a593Smuzhiyun 
amdgpu_dpm_thermal_work_handler(struct work_struct * work)1308*4882a593Smuzhiyun void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
1309*4882a593Smuzhiyun {
1310*4882a593Smuzhiyun 	struct amdgpu_device *adev =
1311*4882a593Smuzhiyun 		container_of(work, struct amdgpu_device,
1312*4882a593Smuzhiyun 			     pm.dpm.thermal.work);
1313*4882a593Smuzhiyun 	/* switch to the thermal state */
1314*4882a593Smuzhiyun 	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
1315*4882a593Smuzhiyun 	int temp, size = sizeof(temp);
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun 	if (!adev->pm.dpm_enabled)
1318*4882a593Smuzhiyun 		return;
1319*4882a593Smuzhiyun 
1320*4882a593Smuzhiyun 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
1321*4882a593Smuzhiyun 				    (void *)&temp, &size)) {
1322*4882a593Smuzhiyun 		if (temp < adev->pm.dpm.thermal.min_temp)
1323*4882a593Smuzhiyun 			/* switch back the user state */
1324*4882a593Smuzhiyun 			dpm_state = adev->pm.dpm.user_state;
1325*4882a593Smuzhiyun 	} else {
1326*4882a593Smuzhiyun 		if (adev->pm.dpm.thermal.high_to_low)
1327*4882a593Smuzhiyun 			/* switch back the user state */
1328*4882a593Smuzhiyun 			dpm_state = adev->pm.dpm.user_state;
1329*4882a593Smuzhiyun 	}
1330*4882a593Smuzhiyun 	mutex_lock(&adev->pm.mutex);
1331*4882a593Smuzhiyun 	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
1332*4882a593Smuzhiyun 		adev->pm.dpm.thermal_active = true;
1333*4882a593Smuzhiyun 	else
1334*4882a593Smuzhiyun 		adev->pm.dpm.thermal_active = false;
1335*4882a593Smuzhiyun 	adev->pm.dpm.state = dpm_state;
1336*4882a593Smuzhiyun 	mutex_unlock(&adev->pm.mutex);
1337*4882a593Smuzhiyun 
1338*4882a593Smuzhiyun 	amdgpu_pm_compute_clocks(adev);
1339*4882a593Smuzhiyun }
1340*4882a593Smuzhiyun 
/**
 * amdgpu_dpm_pick_power_state - select the best matching legacy DPM state
 * @adev: amdgpu device pointer
 * @dpm_state: requested power state type (user-facing or internal)
 *
 * Scans the power-state table for the first state whose classification
 * matches @dpm_state, honoring the single-display-only restriction.  If
 * nothing matches, the request is downgraded through the fallback switch
 * at the bottom and the search restarts.  Returns NULL only when even the
 * fallbacks yield no candidate.
 */
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					/* state is only valid with one display */
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			/* prefer the dedicated UVD state cached at init, if any */
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}
1474*4882a593Smuzhiyun 
/**
 * amdgpu_dpm_change_power_state_locked - transition to the requested DPM
 * state (legacy path)
 *
 * Picks the best power state for the current conditions, then runs the
 * pre-set / set / post-set sequence on the backend, skipping the switch
 * when the backend reports the new state equals the current one.
 * Afterwards re-applies the forced performance level (forcing LOW while
 * thermally throttled).  Caller must hold adev->pm.mutex.
 */
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;
	bool equal = false;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	/* with amdgpu_dpm == 1, log the old and new states for debugging */
	if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;
	if (adev->powerplay.pp_funcs->display_configuration_changed)
		amdgpu_dpm_display_configuration_changed(adev);

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return;

	/* skip the switch entirely if the backend says old == new */
	if (adev->powerplay.pp_funcs->check_state_equal) {
		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
			equal = false;
	}

	if (equal)
		return;

	amdgpu_dpm_set_power_state(adev);
	amdgpu_dpm_post_set_power_state(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->powerplay.pp_funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}
}
1543*4882a593Smuzhiyun 
/**
 * amdgpu_pm_compute_clocks - recompute clocks for the current display/load
 * configuration
 * @adev: amdgpu device pointer
 *
 * Updates display bandwidth, drains the fences on all ready rings so the
 * GPU is idle before reclocking, then dispatches a display-config-change
 * task to the SW SMU, the powerplay task scheduler, or the legacy DPM
 * state machine, depending on which backend is active.
 */
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	int i = 0;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	/* wait for all rings to drain before changing clocks */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	if (is_support_sw_smu(adev)) {
		struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
		smu_handle_task(&adev->smu,
				smu_dpm->dpm_level,
				AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
				true);
	} else {
		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			if (!amdgpu_device_has_dc_support(adev)) {
				mutex_lock(&adev->pm.mutex);
				amdgpu_dpm_get_active_displays(adev);
				adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
				adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
				adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
				/* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
				if (adev->pm.pm_display_cfg.vrefresh > 120)
					adev->pm.pm_display_cfg.min_vblank_time = 0;
				if (adev->powerplay.pp_funcs->display_configuration_change)
					adev->powerplay.pp_funcs->display_configuration_change(
									adev->powerplay.pp_handle,
									&adev->pm.pm_display_cfg);
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
		} else {
			/* legacy DPM: switch states under the pm mutex */
			mutex_lock(&adev->pm.mutex);
			amdgpu_dpm_get_active_displays(adev);
			amdgpu_dpm_change_power_state_locked(adev);
			mutex_unlock(&adev->pm.mutex);
		}
	}
}
1592*4882a593Smuzhiyun 
/**
 * amdgpu_dpm_enable_uvd - power UVD up or down
 * @adev: amdgpu device pointer
 * @enable: true to power the block up, false to power it down
 *
 * SI parts use the legacy DPM state machine; everything else goes through
 * the SMU powergating interface.  On Stoney with 4K decode, the NB low
 * memory pstate is additionally toggled to avoid bandwidth starvation.
 */
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.uvd_active = enable;
		if (enable)
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		mutex_unlock(&adev->pm.mutex);

		amdgpu_pm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
	if (ret)
		DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
			  enable ? "enable" : "disable", ret);

	/* enable/disable Low Memory PState for UVD (4k videos) */
	if (adev->asic_type == CHIP_STONEY &&
	    adev->uvd.decode_image_width >= WIDTH_4K) {
		struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

		if (hwmgr && hwmgr->hwmgr_func &&
		    hwmgr->hwmgr_func->update_nbdpm_pstate)
			hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
							       !enable,
							       true);
	}
}
1627*4882a593Smuzhiyun 
/**
 * amdgpu_dpm_enable_vce - power VCE up or down
 * @adev: amdgpu device pointer
 * @enable: true to power the block up, false to power it down
 *
 * SI parts use the legacy DPM state machine; everything else goes through
 * the SMU powergating interface.
 */
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.vce_active = enable;
		if (enable) {
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_pm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
	if (ret)
		DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
			  enable ? "enable" : "disable", ret);
}
1651*4882a593Smuzhiyun 
amdgpu_pm_print_power_states(struct amdgpu_device * adev)1652*4882a593Smuzhiyun void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
1653*4882a593Smuzhiyun {
1654*4882a593Smuzhiyun 	int i;
1655*4882a593Smuzhiyun 
1656*4882a593Smuzhiyun 	if (adev->powerplay.pp_funcs->print_power_state == NULL)
1657*4882a593Smuzhiyun 		return;
1658*4882a593Smuzhiyun 
1659*4882a593Smuzhiyun 	for (i = 0; i < adev->pm.dpm.num_ps; i++)
1660*4882a593Smuzhiyun 		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
1661*4882a593Smuzhiyun 
1662*4882a593Smuzhiyun }
1663*4882a593Smuzhiyun 
/**
 * amdgpu_dpm_enable_jpeg - power the JPEG block up or down
 * @adev: amdgpu device pointer
 * @enable: true to power the block up, false to power it down
 *
 * Thin wrapper over the SMU powergating interface; failures are only
 * logged.
 */
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int r = amdgpu_dpm_set_powergating_by_smu(adev,
						  AMD_IP_BLOCK_TYPE_JPEG,
						  !enable);

	if (r)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
			  enable ? "enable" : "disable", r);
}
1673*4882a593Smuzhiyun 
/**
 * amdgpu_pm_load_smu_firmware - load the SMU firmware via powerplay
 * @adev: amdgpu device pointer
 * @smu_version: out: loaded firmware version (written only on success)
 *
 * Returns 0 on success or when no load_firmware hook exists, otherwise
 * the backend's error code.
 */
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;
	int r;

	if (!funcs || !funcs->load_firmware)
		return 0;

	r = funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		return r;
	}

	*smu_version = adev->pm.fw_version;
	return 0;
}
1688